// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/bitops.h>
#include <asm/cmpxchg.h>
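
/*
 * __xchg_small() - exchange a sub-32-bit value atomically.
 * @ptr: pointer to the naturally aligned value to exchange
 * @val: new value to store (only the low size * BITS_PER_BYTE bits are used)
 * @size: size of the value in bytes
 *
 * Emulates a narrow xchg() with a 4 byte arch_cmpxchg() on the naturally
 * aligned word containing the value, leaving the neighbouring bytes
 * unchanged. Returns the previous value.
 */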
unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
{
	u32 old32, new32, load32, mask;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask value to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	val &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * exchange within the naturally aligned 4 byte integer that includes
	 * it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;
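
	/*
	 * Worked example with illustrative values: a 1 byte exchange at an
	 * address ending in 0x2 on a little-endian CPU gives
	 * shift = 2 * BITS_PER_BYTE = 16 and mask = 0xff << 16 = 0x00ff0000,
	 * i.e. bits 16-23 of the aligned 32 bit word hold our byte.
	 */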

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;
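
	/*
	 * arch_cmpxchg() operates on the whole aligned word, so a concurrent
	 * write to any byte of that word (not only the one being exchanged)
	 * makes it return a value other than old32; the loop then restarts
	 * from the freshly returned value.
	 */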

	do {
		old32 = load32;
		new32 = (load32 & ~mask) | (val << shift);
		load32 = arch_cmpxchg(ptr32, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> shift;
}
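
/*
 * Usage sketch (hypothetical caller): on MIPS the generic xchg() wrapper in
 * asm/cmpxchg.h dispatches 1 and 2 byte operands to this helper, e.g.
 *
 *	u8 flag = 0;
 *	u8 prev = xchg(&flag, 1);	=> __xchg_small(&flag, 1, 1)
 */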
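
/*
 * __cmpxchg_small() - compare & exchange a sub-32-bit value atomically.
 * @ptr: pointer to the naturally aligned value
 * @old: expected current value (only the low size * BITS_PER_BYTE bits count)
 * @new: value to store if *ptr matches @old
 * @size: size of the value in bytes
 *
 * Emulates a narrow cmpxchg() with a 4 byte arch_cmpxchg() on the naturally
 * aligned word containing the value. Returns the value found at @ptr, which
 * equals @old on success.
 */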
unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
			      unsigned long new, unsigned int size)
{
	u32 mask, old32, new32, load32, load;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;
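
	/*
	 * Worked example with illustrative values: a 2 byte compare &
	 * exchange at a word-aligned address on a big-endian CPU gives
	 * ptr & 0x3 = 0, then shift ^= sizeof(u32) - size yields shift = 2,
	 * so shift = 16 bits and mask = 0xffff << 16: the halfword at
	 * offset 0 occupies the high bits of the aligned word.
	 */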

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

	while (true) {
		/*
		 * Ensure the byte we want to exchange matches the expected
		 * old value, and if not then bail.
		 */
		load = (load32 & mask) >> shift;
		if (load != old)
			return load;

		/*
		 * Calculate the old & new values of the naturally aligned
		 * 4 byte integer that include the byte we want to exchange.
		 * Attempt to exchange the old value for the new value, and
		 * return if we succeed.
		 */
		old32 = (load32 & ~mask) | (old << shift);
		new32 = (load32 & ~mask) | (new << shift);
		load32 = arch_cmpxchg(ptr32, old32, new32);
		if (load32 == old32)
			return old;
	}
}
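
/*
 * Usage sketch (hypothetical caller): a cmpxchg() on a 2 byte quantity is
 * routed here by the asm/cmpxchg.h wrappers, e.g.
 *
 *	u16 seq = 42;
 *	u16 seen = cmpxchg(&seq, 42, 43);	=> __cmpxchg_small(&seq, 42, 43, 2)
 *	seen == 42 indicates success
 *
 * Note that an arch_cmpxchg() failure caused only by a concurrent change to
 * a neighbouring byte of the same word does not fail the caller's compare:
 * the loop reloads the word and retries while the target bytes still match
 * @old.
 */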