/* arch/sh/atomic.h */
#ifndef _INTERNAL_ATOMIC_H
#define _INTERNAL_ATOMIC_H

#include <stdint.h>

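/* Count trailing zeros of a nonzero 32-bit word: x&-x isolates the
 * lowest set bit, and multiplying by a De Bruijn constant makes the
 * top five bits of the product a unique index into the table. */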
static inline int a_ctz_l(unsigned long x)
{
	static const char debruijn32[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	};
	return debruijn32[(x&-x)*0x076be629 >> 27];
}

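/* Count trailing zeros of a nonzero 64-bit value, one 32-bit half at
 * a time. */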
static inline int a_ctz_64(uint64_t x)
{
	uint32_t y = x;
	if (!y) {
		y = x>>32;
		return 32 + a_ctz_l(y);
	}
	return a_ctz_l(y);
}

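/* LL/SC primitives for SH4A: movli.l loads *mem into r0 and opens a
 * locked sequence; movco.l stores r0 back only if the sequence was not
 * interrupted, setting the T flag on success, and bf 0b retries on
 * failure. synco is a full memory barrier on either side. Every
 * sequence clobbers r0 and the T flag. */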
#define LLSC_CLOBBERS "r0", "t", "memory"
#define LLSC_START(mem) "synco\n"  \
	"0:	movli.l @" mem ", r0\n"
#define LLSC_END(mem)              \
	"1:	movco.l r0, @" mem "\n"    \
	"	bf 0b\n"                   \
	"	synco\n"

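/* Compare-and-swap: return the old value of *p, storing s in its place
 * only if the old value equals t. On mismatch, the old value is written
 * back unchanged so the movco.l still closes the LL/SC sequence. */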
static inline int __sh_cas_llsc(volatile int *p, int t, int s)
{
	int old;
	__asm__ __volatile__(
		LLSC_START("%1")
		"	mov r0, %0\n"
		"	cmp/eq %0, %2\n"
		"	bf 1f\n"
		"	mov %3, r0\n"
		LLSC_END("%1")
		: "=&r"(old) : "r"(p), "r"(t), "r"(s) : LLSC_CLOBBERS);
	return old;
}

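/* Atomic exchange: store v into *x, returning the previous value. */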
static inline int __sh_swap_llsc(volatile int *x, int v)
{
	int old;
	__asm__ __volatile__(
		LLSC_START("%1")
		"	mov r0, %0\n"
		"	mov %2, r0\n"
		LLSC_END("%1")
		: "=&r"(old) : "r"(x), "r"(v) : LLSC_CLOBBERS);
	return old;
}

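/* Atomic fetch-and-add: add v to *x, returning the previous value. */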
static inline int __sh_fetch_add_llsc(volatile int *x, int v)
{
	int old;
	__asm__ __volatile__(
		LLSC_START("%1")
		"	mov r0, %0\n"
		"	add %2, r0\n"
		LLSC_END("%1")
		: "=&r"(old) : "r"(x), "r"(v) : LLSC_CLOBBERS);
	return old;
}

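/* Atomic store: a plain store is sufficient since there is no
 * read-modify-write; synco on both sides makes it a full barrier. */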
static inline void __sh_store_llsc(volatile int *p, int x)
{
	__asm__ __volatile__(
		"	synco\n"
		"	mov.l %1, @%0\n"
		"	synco\n"
		: : "r"(p), "r"(x) : "memory");
}

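/* Atomic bitwise and, discarding the old value. */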
static inline void __sh_and_llsc(volatile int *x, int v)
{
	__asm__ __volatile__(
		LLSC_START("%0")
		"	and %1, r0\n"
		LLSC_END("%0")
		: : "r"(x), "r"(v) : LLSC_CLOBBERS);
}

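/* Atomic bitwise or, discarding the old value. */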
static inline void __sh_or_llsc(volatile int *x, int v)
{
	__asm__ __volatile__(
		LLSC_START("%0")
		"	or %1, r0\n"
		LLSC_END("%0")
		: : "r"(x), "r"(v) : LLSC_CLOBBERS);
}

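/* When the compiler targets SH4A unconditionally, the inline LL/SC
 * versions above can be used directly. Otherwise, call out-of-line
 * versions defined elsewhere in the library, which can pick an
 * implementation to match the CPU actually running the binary. */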
#ifdef __SH4A__
#define a_cas(p,t,s)     __sh_cas_llsc(p,t,s)
#define a_swap(x,v)      __sh_swap_llsc(x,v)
#define a_fetch_add(x,v) __sh_fetch_add_llsc(x, v)
#define a_store(x,v)     __sh_store_llsc(x, v)
#define a_and(x,v)       __sh_and_llsc(x, v)
#define a_or(x,v)        __sh_or_llsc(x, v)
#else

int  __sh_cas(volatile int *, int, int);
int  __sh_swap(volatile int *, int);
int  __sh_fetch_add(volatile int *, int);
void __sh_store(volatile int *, int);
void __sh_and(volatile int *, int);
void __sh_or(volatile int *, int);

#define a_cas(p,t,s)     __sh_cas(p,t,s)
#define a_swap(x,v)      __sh_swap(x,v)
#define a_fetch_add(x,v) __sh_fetch_add(x, v)
#define a_store(x,v)     __sh_store(x, v)
#define a_and(x,v)       __sh_and(x, v)
#define a_or(x,v)        __sh_or(x, v)
#endif

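/* CAS on a pointer, in terms of the int CAS; this relies on pointers
 * being 32-bit, which holds on SH. */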
static inline void *a_cas_p(volatile void *p, void *t, void *s)
{
	return (void *)a_cas(p, (int)t, (int)s);
}

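/* Atomic increment/decrement, discarding the old value. */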
static inline void a_inc(volatile int *x)
{
	a_fetch_add(x, 1);
}

static inline void a_dec(volatile int *x)
{
	a_fetch_add(x, -1);
}

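/* A dummy CAS on a stack temporary serves as a full memory barrier,
 * and doubles as the spin-loop pause primitive. */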
#define a_spin a_barrier

static inline void a_barrier(void)
{
	a_cas(&(int){0}, 0, 0);
}

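/* Force a fatal trap via a write through a null pointer. */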
static inline void a_crash(void)
{
	*(volatile char *)0 = 0;
}

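/* Atomic or on a long, which is 32-bit on SH. */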
static inline void a_or_l(volatile void *p, long v)
{
	a_or(p, v);
}

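/* 64-bit and/or as two independent 32-bit atomic operations: each half
 * is updated atomically, but the 64-bit value as a whole is not. */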
static inline void a_and_64(volatile uint64_t *p, uint64_t v)
{
	union { uint64_t v; uint32_t r[2]; } u = { v };
	a_and((int *)p,   u.r[0]);
	a_and((int *)p+1, u.r[1]);
}

static inline void a_or_64(volatile uint64_t *p, uint64_t v)
{
	union { uint64_t v; uint32_t r[2]; } u = { v };
	a_or((int *)p,   u.r[0]);
	a_or((int *)p+1, u.r[1]);
}

#endif