@@ -15,6 +15,11 @@ typedef struct {
15
15
#endif
16
16
} local_lock_t ;
17
17
18
+ typedef struct {
19
+ local_lock_t llock ;
20
+ unsigned int acquired ;
21
+ } localtry_lock_t ;
22
+
18
23
#ifdef CONFIG_DEBUG_LOCK_ALLOC
19
24
# define LOCAL_LOCK_DEBUG_INIT (lockname ) \
20
25
.dep_map = { \
@@ -31,6 +36,13 @@ static inline void local_lock_acquire(local_lock_t *l)
31
36
l -> owner = current ;
32
37
}
33
38
39
+ static inline void local_trylock_acquire (local_lock_t * l )
40
+ {
41
+ lock_map_acquire_try (& l -> dep_map );
42
+ DEBUG_LOCKS_WARN_ON (l -> owner );
43
+ l -> owner = current ;
44
+ }
45
+
34
46
static inline void local_lock_release (local_lock_t * l )
35
47
{
36
48
DEBUG_LOCKS_WARN_ON (l -> owner != current );
@@ -45,11 +57,13 @@ static inline void local_lock_debug_init(local_lock_t *l)
45
57
#else /* CONFIG_DEBUG_LOCK_ALLOC */
46
58
# define LOCAL_LOCK_DEBUG_INIT (lockname )
47
59
static inline void local_lock_acquire (local_lock_t * l ) { }
60
+ static inline void local_trylock_acquire (local_lock_t * l ) { }
48
61
static inline void local_lock_release (local_lock_t * l ) { }
49
62
static inline void local_lock_debug_init (local_lock_t * l ) { }
50
63
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
51
64
52
65
#define INIT_LOCAL_LOCK(lockname)	{ LOCAL_LOCK_DEBUG_INIT(lockname) }
/*
 * Static initializer for localtry_lock_t: only the embedded local_lock_t
 * needs debug initialization; 'acquired' starts at 0 by zero-initialization
 * of the unnamed remainder.
 */
#define INIT_LOCALTRY_LOCK(lockname)	{ .llock = { LOCAL_LOCK_DEBUG_INIT(lockname.llock) }}
53
67
54
68
#define __local_lock_init (lock ) \
55
69
do { \
@@ -118,15 +132,115 @@ do { \
118
132
#define __local_unlock_nested_bh (lock ) \
119
133
local_lock_release(this_cpu_ptr(lock))
120
134
135
/* localtry_lock_t variants */

/* Initialize the embedded local_lock and clear the 'acquired' flag. */
#define __localtry_lock_init(lock)				\
	do {							\
		__local_lock_init(&(lock)->llock);		\
		WRITE_ONCE((lock)->acquired, 0);		\
	} while (0)
142
+
143
/*
 * Acquire with preemption disabled.  'acquired' is set non-zero AFTER the
 * lockdep acquire so an interrupting trylock on this CPU sees a consistent
 * held state.  (The garbled "<->llock" in the scraped diff is "&lt->llock".)
 */
#define __localtry_lock(lock)					\
	do {							\
		localtry_lock_t *lt;				\
		preempt_disable();				\
		lt = this_cpu_ptr(lock);			\
		local_lock_acquire(&lt->llock);			\
		WRITE_ONCE(lt->acquired, 1);			\
	} while (0)
151
+
152
/* As __localtry_lock(), but with hard interrupts disabled instead of
 * (just) preemption.  Pairs with __localtry_unlock_irq(). */
#define __localtry_lock_irq(lock)				\
	do {							\
		localtry_lock_t *lt;				\
		local_irq_disable();				\
		lt = this_cpu_ptr(lock);			\
		local_lock_acquire(&lt->llock);			\
		WRITE_ONCE(lt->acquired, 1);			\
	} while (0)
160
+
161
/* As __localtry_lock_irq(), but saving the previous interrupt state in
 * 'flags'.  Pairs with __localtry_unlock_irqrestore(). */
#define __localtry_lock_irqsave(lock, flags)			\
	do {							\
		localtry_lock_t *lt;				\
		local_irq_save(flags);				\
		lt = this_cpu_ptr(lock);			\
		local_lock_acquire(&lt->llock);			\
		WRITE_ONCE(lt->acquired, 1);			\
	} while (0)
169
+
170
/*
 * Non-waiting acquire attempt.  Succeeds only when the lock is not already
 * held on this CPU ('acquired' == 0).  On success, preemption remains
 * disabled until __localtry_unlock(); on failure, preemption is re-enabled
 * and the expression evaluates to false.
 */
#define __localtry_trylock(lock)				\
	({							\
		localtry_lock_t *lt;				\
		bool _ret;					\
								\
		preempt_disable();				\
		lt = this_cpu_ptr(lock);			\
		if (!READ_ONCE(lt->acquired)) {			\
			WRITE_ONCE(lt->acquired, 1);		\
			local_trylock_acquire(&lt->llock);	\
			_ret = true;				\
		} else {					\
			_ret = false;				\
			preempt_enable();			\
		}						\
		_ret;						\
	})
187
+
188
/*
 * As __localtry_trylock(), but disabling hard interrupts (state saved in
 * 'flags') instead of preemption.  On failure the interrupt state is
 * restored before returning false.
 */
#define __localtry_trylock_irqsave(lock, flags)			\
	({							\
		localtry_lock_t *lt;				\
		bool _ret;					\
								\
		local_irq_save(flags);				\
		lt = this_cpu_ptr(lock);			\
		if (!READ_ONCE(lt->acquired)) {			\
			WRITE_ONCE(lt->acquired, 1);		\
			local_trylock_acquire(&lt->llock);	\
			_ret = true;				\
		} else {					\
			_ret = false;				\
			local_irq_restore(flags);		\
		}						\
		_ret;						\
	})
205
+
206
/* Release and re-enable preemption.  Pairs with __localtry_lock() or a
 * successful __localtry_trylock().  'acquired' is cleared BEFORE the
 * lockdep release, mirroring the acquire-side ordering. */
#define __localtry_unlock(lock)					\
	do {							\
		localtry_lock_t *lt;				\
		lt = this_cpu_ptr(lock);			\
		WRITE_ONCE(lt->acquired, 0);			\
		local_lock_release(&lt->llock);			\
		preempt_enable();				\
	} while (0)
214
+
215
/* Release and unconditionally re-enable hard interrupts.  Pairs with
 * __localtry_lock_irq(). */
#define __localtry_unlock_irq(lock)				\
	do {							\
		localtry_lock_t *lt;				\
		lt = this_cpu_ptr(lock);			\
		WRITE_ONCE(lt->acquired, 0);			\
		local_lock_release(&lt->llock);			\
		local_irq_enable();				\
	} while (0)
223
+
224
/* Release and restore the interrupt state saved by
 * __localtry_lock_irqsave() / a successful __localtry_trylock_irqsave(). */
#define __localtry_unlock_irqrestore(lock, flags)		\
	do {							\
		localtry_lock_t *lt;				\
		lt = this_cpu_ptr(lock);			\
		WRITE_ONCE(lt->acquired, 0);			\
		local_lock_release(&lt->llock);			\
		local_irq_restore(flags);			\
	} while (0)
232
+
121
233
#else /* !CONFIG_PREEMPT_RT */
122
234
123
235
/*
124
236
* On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
125
237
* critical section while staying preemptible.
126
238
*/
127
239
/* On PREEMPT_RT both lock types map to a per-CPU spinlock; no separate
 * 'acquired' flag is needed because spin_trylock() itself can fail. */
typedef spinlock_t local_lock_t;
typedef spinlock_t localtry_lock_t;
128
241
129
242
#define INIT_LOCAL_LOCK(lockname)	__LOCAL_SPIN_LOCK_UNLOCKED((lockname))
/* RT: identical underlying type, so identical initializer. */
#define INIT_LOCALTRY_LOCK(lockname)	INIT_LOCAL_LOCK(lockname)
130
244
131
245
#define __local_lock_init (l ) \
132
246
do { \
@@ -169,4 +283,36 @@ do { \
169
283
spin_unlock(this_cpu_ptr((lock))); \
170
284
} while (0)
171
285
286
/* localtry_lock_t variants */

/*
 * On PREEMPT_RT the non-try operations map 1:1 onto the local_lock
 * (per-CPU spinlock) primitives.  Note the _irq/_irqsave lock flavours
 * deliberately do NOT disable interrupts here: on RT the underlying lock
 * is a sleeping lock, so the plain lock/unlock forms are used instead.
 */
#define __localtry_lock_init(lock)			__local_lock_init(lock)
#define __localtry_lock(lock)				__local_lock(lock)
#define __localtry_lock_irq(lock)			__local_lock(lock)
#define __localtry_lock_irqsave(lock, flags)		__local_lock_irqsave(lock, flags)
#define __localtry_unlock(lock)				__local_unlock(lock)
#define __localtry_unlock_irq(lock)			__local_unlock(lock)
#define __localtry_unlock_irqrestore(lock, flags)	__local_unlock_irqrestore(lock, flags)
295
+
296
/*
 * RT trylock: refuse outright in NMI/hardirq context, where taking an
 * RT (sleeping) spinlock is not possible; otherwise pin the task to this
 * CPU and attempt the per-CPU spinlock.  migrate_disable() remains in
 * effect only while the lock is held.
 *
 * Fixed: the original used bitwise '|' between the two predicates; use
 * the short-circuiting logical '||', which matches the boolean intent
 * (behaviourally equivalent here since both operands are zero/non-zero).
 */
#define __localtry_trylock(lock)				\
	({							\
		int __locked;					\
								\
		if (in_nmi() || in_hardirq()) {			\
			__locked = 0;				\
		} else {					\
			migrate_disable();			\
			__locked = spin_trylock(this_cpu_ptr((lock)));	\
			if (!__locked)				\
				migrate_enable();		\
		}						\
		__locked;					\
	})
310
+
311
/*
 * RT trylock with an "irqsave" interface for API compatibility:
 * interrupts are not actually disabled on RT, so 'flags' is simply
 * zeroed to keep callers' __localtry_unlock_irqrestore() balanced.
 */
#define __localtry_trylock_irqsave(lock, flags)			\
	({							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__localtry_trylock(lock);			\
	})
317
+
172
318
#endif /* CONFIG_PREEMPT_RT */
0 commit comments