/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_WORD_AT_A_TIME_H
#define _ASM_WORD_AT_A_TIME_H

#include <linux/bitops.h>
#include <linux/wordpart.h>
#include <asm/asm-extable.h>
#include <asm/bitsperlong.h>

struct word_at_a_time {
	const unsigned long bits;
};
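
/*
 * REPEAT_BYTE(0x7f), from <linux/wordpart.h>, fills every byte of a
 * word with 0x7f (0x7f7f7f7f7f7f7f7f on 64-bit s390); has_zero()
 * below uses it as the per-byte carry mask.
 */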
#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x7f) }

/*
 * has_zero() below already computes the final per-byte zero mask on
 * s390, so there is nothing left to prepare here.
 */
static inline unsigned long prep_zero_mask(unsigned long val, unsigned long data, const struct word_at_a_time *c)
{
	return data;
}

/*
 * Return the bit number of the most significant marker bit, which
 * belongs to the first (most significant, since s390 is big-endian)
 * zero byte.
 */
static inline unsigned long create_zero_mask(unsigned long data)
{
	return __fls(data);
}

/*
 * Convert the bit number from create_zero_mask() into a byte offset:
 * the marker for byte N sits at bit 63 - 8 * N, so XOR with 63
 * (BITS_PER_LONG - 1) and divide by eight.
 */
static inline unsigned long find_zero(unsigned long data)
{
	return (data ^ (BITS_PER_LONG - 1)) >> 3;
}

/*
 * Return a word with 0x80 set in every byte of @val that is zero:
 * adding 0x7f to the low seven bits of a byte carries into bit 7
 * iff any of them is set; OR-ing in @val covers bit 7 itself, and
 * the final negation leaves markers only in all-zero bytes.
 */
static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
{
	unsigned long mask = (val & c->bits) + c->bits;

	*data = ~(mask | val | c->bits);
	return *data;
}

/*
 * Given the bit number returned by create_zero_mask(), build a mask
 * covering all bytes that precede the first zero byte.
 */
static inline unsigned long zero_bytemask(unsigned long data)
{
	return ~1UL << data;
}
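
/*
 * Worked example (64-bit, big-endian): for val == 0x4142004300000000,
 * i.e. the bytes 'A' 'B' '\0' 'C' '\0' '\0' '\0' '\0':
 *
 *   has_zero()         -> data == 0x0000800080808080
 *   create_zero_mask() -> __fls(data) == 47, the topmost marker bit
 *   find_zero()        -> (47 ^ 63) >> 3 == 2, the offset of the
 *                         first zero byte
 *   zero_bytemask(47)  -> 0xffff000000000000, covering the two bytes
 *                         before the terminator
 */
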
/*
* Load an unaligned word from kernel space.
*
* In the (very unlikely) case of the word being a page-crosser
* and the next page not being mapped, take the exception and
* return zeroes in the non-existing part.
*/
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
	unsigned long data;

	asm volatile(
		"0:	lg	%[data],0(%[addr])\n"
		"1:	nopr	%%r7\n"
		/*
		 * Depending on the exception type the old PSW may point
		 * to the lg instruction itself or past it; both extable
		 * entries resume at label 1 with the inaccessible part
		 * of the loaded word replaced by zeroes.
		 */
		EX_TABLE_ZEROPAD(0b, 1b, %[data], %[addr])
		EX_TABLE_ZEROPAD(1b, 1b, %[data], %[addr])
		: [data] "=d" (data)
		: [addr] "a" (addr), "m" (*(unsigned long *)addr));
	return data;
}
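
/*
 * Minimal usage sketch, not part of this header: how a caller can
 * combine the helpers above to find the length of a NUL-terminated
 * string one word at a time. The name example_strlen is hypothetical;
 * in-tree users of this interface include strscpy() in lib/string.c
 * and hash_name() in fs/namei.c. The sketch assumes @s points to
 * kernel memory where reading up to a word beyond the terminator is
 * acceptable.
 */
static inline unsigned long example_strlen(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	unsigned long len = 0;

	for (;;) {
		unsigned long val = load_unaligned_zeropad(s + len);
		unsigned long data;

		/* Stop at the first word that contains a zero byte. */
		if (has_zero(val, &data, &constants)) {
			data = prep_zero_mask(val, data, &constants);
			data = create_zero_mask(data);
			return len + find_zero(data);
		}
		len += sizeof(unsigned long);
	}
}
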
#endif /* _ASM_WORD_AT_A_TIME_H */