/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable. Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};
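
/*
 * Illustrative note (not part of the original file): on configurations
 * where raw_spinlock_t fits within one cache line, the union above pads
 * each element to exactly L1_CACHE_BYTES, which could be checked with
 * something like
 *
 *	BUILD_BUG_ON(sizeof(atomic64_lock[0]) != L1_CACHE_BYTES);
 *
 * keeping any two locks from sharing a line and ping-ponging between
 * CPUs' caches.
 */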

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
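
/*
 * A minimal user-space sketch of the hashing scheme above (hypothetical
 * names, assuming a 32-byte cache line, i.e. L1_CACHE_SHIFT == 5):
 *
 *	static unsigned int bucket(const void *p)
 *	{
 *		unsigned long addr = (unsigned long)p >> 5;
 *
 *		addr ^= (addr >> 8) ^ (addr >> 16);
 *		return addr & (NR_LOCKS - 1);
 *	}
 *
 * Discarding the low bits first means the atomic64_t's position within
 * its cache line does not affect the bucket; folding in higher address
 * bits spreads unrelated variables across the 16 locks.
 */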

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);

long long atomic64_add_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_add_return);

void atomic64_sub(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);

long long atomic64_sub_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_sub_return);

long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
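
/*
 * Usage sketch (illustrative, hypothetical caller code): the decremented
 * value is returned even when it is not written back, so a negative
 * return tells the caller the counter was already zero:
 *
 *	if (atomic64_dec_if_positive(&free_slots) < 0)
 *		return -EBUSY;
 */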

long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
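
/*
 * Usage sketch (illustrative): since atomic64_cmpxchg() returns the
 * value it observed, the usual compare-and-swap retry loop applies,
 * e.g. to atomically double a counter:
 *
 *	long long old, new;
 *
 *	do {
 *		old = atomic64_read(&v);
 *		new = old * 2;
 *	} while (atomic64_cmpxchg(&v, old, new) != old);
 */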

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
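
/*
 * Usage sketch: atomic64_inc_not_zero() is conventionally built on this
 * helper, incrementing only while the counter is non-zero (the classic
 * "take a reference only if the object is still live" pattern):
 *
 *	#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 */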