@@ -18,16 +18,7 @@
 
 #define SPINLOCK_IS_RECURSIVE (-1)
 #define SPINLOCK_IS_NOT_RECURSIVE (-2)
-
-typedef struct {
-    int64_t s;
-    int64_t count;
-} qthread_spinlock_state_t;
 
-typedef struct {
-    qt_spin_trylock_t lock;
-    qthread_spinlock_state_t state;
-} qthread_spinlock_t;
 
 #define QTHREAD_CHOOSE_STRIPE2(addr) (qt_hash64((uint64_t)(uintptr_t)addr) & (QTHREAD_LOCKING_STRIPES - 1))
 #define LOCKBIN(key) QTHREAD_CHOOSE_STRIPE2(key)
@@ -66,16 +57,31 @@ INTERNAL int lock_hashmap_remove(const aligned_t * key) {
     return QTHREAD_OPFAIL;
 }
 
-INTERNAL bool qthread_is_spin_lock(const aligned_t * a) {
+INTERNAL bool is_spin_lock_hashed(const aligned_t * a) {
     return (NULL != lock_hashmap_get(a));
 }
 
-INTERNAL int qthread_spinlock_initialize() {
+INTERNAL int spinlocks_initialize() {
     qthread_spinlock_buckets = NULL;
     return QTHREAD_SUCCESS;
 }
 
-INTERNAL int qthread_spinlock_init(const aligned_t * a, const bool is_recursive) {
+INTERNAL int spinlocks_finalize() {
+    if (qthread_spinlock_buckets) {
+        for (unsigned i = 0; i < QTHREAD_LOCKING_STRIPES; i++) {
+            assert(qthread_spinlock_buckets[i]);
+            qt_hash_destroy_deallocate(qthread_spinlock_buckets[i],
+                                       (qt_hash_deallocator_fn)
+                                       qthread_spinlock_destroy_fn);
+        }
+        qt_free(qthread_spinlock_buckets);
+    }
+    return QTHREAD_SUCCESS;
+}
+
+/* locks over addresses using internal hashmaps */
+
+INTERNAL int spinlock_init_hashed(const aligned_t * a, const bool is_recursive) {
     uint_fast8_t need_sync = 1;
 
     if (!qthread_spinlock_buckets) {
@@ -87,7 +93,7 @@ INTERNAL int qthread_spinlock_init(const aligned_t * a, const bool is_recursive)
         }
     }
 
-    if (!qthread_is_spin_lock(a)) {
+    if (!is_spin_lock_hashed(a)) {
         qthread_spinlock_t * l = qt_malloc(sizeof(qthread_spinlock_t));
         assert(l);
         l->state.s = is_recursive ? SPINLOCK_IS_RECURSIVE : SPINLOCK_IS_NOT_RECURSIVE;
@@ -99,25 +105,98 @@ INTERNAL int qthread_spinlock_init(const aligned_t * a, const bool is_recursive)
     return QTHREAD_OPFAIL;
 }
 
-INTERNAL int qthread_spinlock_destroy(const aligned_t * a) {
+INTERNAL int spinlock_destroy_hashed(const aligned_t * a) {
     return lock_hashmap_remove(a);
 }
 
-INTERNAL int qthread_spinlock_finalize() {
-    if (qthread_spinlock_buckets) {
-        for (unsigned i = 0; i < QTHREAD_LOCKING_STRIPES; i++) {
-            assert(qthread_spinlock_buckets[i]);
-            qt_hash_destroy_deallocate(qthread_spinlock_buckets[i],
-                                       (qt_hash_deallocator_fn)
-                                       qthread_spinlock_destroy_fn);
+INTERNAL int spinlock_lock_hashed(const aligned_t * a) {
+    qthread_spinlock_t * l = lock_hashmap_get(a);
+    if (l != NULL) {
+        if (l->state.s >= SPINLOCK_IS_RECURSIVE) {
+            if (l->state.s == qthread_readstate(CURRENT_UNIQUE_WORKER)) { // Reentrant
+                ++l->state.count;
+                MACHINE_FENCE;
+            } else {
+                QTHREAD_TRYLOCK_LOCK(&l->lock);
+                l->state.s = qthread_readstate(CURRENT_UNIQUE_WORKER);
+                ++l->state.count;
+                MACHINE_FENCE;
+            }
+        } else {
+            QTHREAD_TRYLOCK_LOCK(&l->lock);
         }
-        qt_free(qthread_spinlock_buckets);
+        return QTHREAD_SUCCESS;
     }
-    return QTHREAD_SUCCESS;
+    return QTHREAD_OPFAIL;
 }
 
-INTERNAL int qthread_spinlock_lock(const aligned_t * a) {
+INTERNAL int spinlock_trylock_hashed(const aligned_t * a) {
     qthread_spinlock_t * l = lock_hashmap_get(a);
+    if (l != NULL) {
+        if (l->state.s >= SPINLOCK_IS_RECURSIVE) {
+            if (l->state.s == qthread_readstate(CURRENT_UNIQUE_WORKER)) { // Reentrant
+                ++l->state.count;
+                MACHINE_FENCE;
+            } else {
+                if (QTHREAD_TRYLOCK_TRY(&l->lock)) {
+                    l->state.s = qthread_readstate(CURRENT_UNIQUE_WORKER);
+                    ++l->state.count;
+                    MACHINE_FENCE;
+                } else {
+                    return QTHREAD_OPFAIL;
+                }
+            }
+        } else {
+            return QTHREAD_TRYLOCK_TRY(&l->lock);
+
+        }
+        return QTHREAD_SUCCESS;
+    }
+    return QTHREAD_OPFAIL;
+}
+
+INTERNAL int spinlock_unlock_hashed(const aligned_t * a) {
+    qthread_spinlock_t * l = lock_hashmap_get(a);
+    if (l != NULL) {
+        if (l->state.s >= SPINLOCK_IS_RECURSIVE) {
+            if (l->state.s == qthread_readstate(CURRENT_UNIQUE_WORKER)) {
+                --l->state.count;
+                if (!l->state.count) {
+                    l->state.s = SPINLOCK_IS_RECURSIVE; // Reset
+                    MACHINE_FENCE;
+                    QTHREAD_TRYLOCK_UNLOCK(&l->lock);
+                }
+            } else {
+                if (l->state.count)
+                    return QTHREAD_OPFAIL;
+            }
+        } else {
+            QTHREAD_TRYLOCK_UNLOCK(&l->lock);
+        }
+        return QTHREAD_SUCCESS;
+    }
+    return QTHREAD_OPFAIL;
+}
+
+/* locks over lock types externally allocated */
+
+INTERNAL int spinlock_init(qthread_spinlock_t * a, const bool is_recursive) {
+    if (is_recursive) {
+        const qthread_spinlock_t init_mutex = QTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+        memcpy(a, &init_mutex, sizeof(qthread_spinlock_t));
+    } else {
+        const qthread_spinlock_t init_mutex = QTHREAD_MUTEX_INITIALIZER;
+        memcpy(a, &init_mutex, sizeof(qthread_spinlock_t));
+    }
+    return QTHREAD_SUCCESS;
+}
+
+INTERNAL int spinlock_destroy(qthread_spinlock_t * a) {
+    return QTHREAD_SUCCESS;
+}
+
+INTERNAL int spinlock_lock(qthread_spinlock_t * a) {
+    qthread_spinlock_t * l = a;
     if (l != NULL) {
         if (l->state.s >= SPINLOCK_IS_RECURSIVE) {
             if (l->state.s == qthread_readstate(CURRENT_UNIQUE_WORKER)) { // Reentrant
@@ -137,8 +216,8 @@ INTERNAL int qthread_spinlock_lock(const aligned_t * a) {
     return QTHREAD_OPFAIL;
 }
 
-INTERNAL int qthread_spinlock_trylock(const aligned_t * a) {
-    qthread_spinlock_t * l = lock_hashmap_get(a);
+INTERNAL int spinlock_trylock(qthread_spinlock_t * a) {
+    qthread_spinlock_t * l = a;
     if (l != NULL) {
         if (l->state.s >= SPINLOCK_IS_RECURSIVE) {
             if (l->state.s == qthread_readstate(CURRENT_UNIQUE_WORKER)) { // Reentrant
@@ -162,8 +241,8 @@ INTERNAL int qthread_spinlock_trylock(const aligned_t * a) {
     return QTHREAD_OPFAIL;
 }
 
-INTERNAL int qthread_spinlock_unlock(const aligned_t * a) {
-    qthread_spinlock_t * l = lock_hashmap_get(a);
+INTERNAL int spinlock_unlock(qthread_spinlock_t * a) {
+    qthread_spinlock_t * l = a;
     if (l != NULL) {
         if (l->state.s >= SPINLOCK_IS_RECURSIVE) {
             if (l->state.s == qthread_readstate(CURRENT_UNIQUE_WORKER)) {
@@ -189,39 +268,64 @@ INTERNAL int qthread_spinlock_unlock(const aligned_t * a) {
 
 int API_FUNC qthread_lock_init(const aligned_t * a, const bool is_recursive)
 { /*{{{ */
-    return qthread_spinlock_init(a, is_recursive);
+    return spinlock_init_hashed(a, is_recursive);
 } /*}}} */
 
 int API_FUNC qthread_lock_destroy(aligned_t * a)
 { /*{{{ */
-    if (!qthread_is_spin_lock(a)) {
+    if (!is_spin_lock_hashed(a)) {
         return QTHREAD_SUCCESS;
     }
-    return qthread_spinlock_destroy(a);
+    return spinlock_destroy_hashed(a);
 } /*}}} */
 
 int API_FUNC qthread_lock(const aligned_t * a)
 { /*{{{ */
-    if (!qthread_is_spin_lock(a)) {
+    if (!is_spin_lock_hashed(a)) {
         return qthread_readFE(NULL, a);
     }
-    return qthread_spinlock_lock(a);
+    return spinlock_lock_hashed(a);
 } /*}}} */
 
 int API_FUNC qthread_trylock(const aligned_t * a)
 { /*{{{ */
-    if (!qthread_is_spin_lock(a)) {
+    if (!is_spin_lock_hashed(a)) {
        return qthread_readFE_nb(NULL, a);
    }
-    return qthread_spinlock_trylock(a);
+    return spinlock_trylock_hashed(a);
 } /*}}} */
 
 int API_FUNC qthread_unlock(const aligned_t * a)
 { /*{{{ */
-    if (!qthread_is_spin_lock(a)) {
+    if (!is_spin_lock_hashed(a)) {
        return qthread_fill(a);
    }
-    return qthread_spinlock_unlock(a);
+    return spinlock_unlock_hashed(a);
+} /*}}} */
+
+int API_FUNC qthread_spinlock_init(qthread_spinlock_t * a, const bool is_recursive)
+{ /*{{{ */
+    return spinlock_init(a, is_recursive);
+} /*}}} */
+
+int API_FUNC qthread_spinlock_destroy(qthread_spinlock_t * a)
+{ /*{{{ */
+    return spinlock_destroy(a);
+} /*}}} */
+
+int API_FUNC qthread_spinlock_lock(qthread_spinlock_t * a)
+{ /*{{{ */
+    return spinlock_lock(a);
+} /*}}} */
+
+int API_FUNC qthread_spinlock_trylock(qthread_spinlock_t * a)
+{ /*{{{ */
+    return spinlock_trylock(a);
+} /*}}} */
+
+int API_FUNC qthread_spinlock_unlock(qthread_spinlock_t * a)
+{ /*{{{ */
+    return spinlock_unlock(a);
 } /*}}} */
 
 #undef qt_hash_t
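
To make the two new lock families concrete, here is a minimal usage sketch. It is not part of the commit: it assumes the declarations above are exported through qthread/qthread.h, and it avoids checking the trylock return value because the non-recursive paths pass QTHREAD_TRYLOCK_TRY's result through directly rather than mapping it to QTHREAD_SUCCESS/QTHREAD_OPFAIL.

#include <stdbool.h>
#include <qthread/qthread.h>

static aligned_t counter = 0;

int main(void) {
    if (qthread_initialize() != QTHREAD_SUCCESS) return 1;

    /* Address-hashed family: registering &counter as a recursive lock
     * reroutes qthread_lock()/qthread_unlock() from the default
     * full/empty-bit path (qthread_readFE/qthread_fill) to the
     * hashed-spinlock path. */
    qthread_lock_init(&counter, true);
    qthread_lock(&counter);
    qthread_lock(&counter);   /* re-entry by the same worker: count is bumped */
    qthread_unlock(&counter);
    qthread_unlock(&counter); /* count reaches 0: the lock is released */
    qthread_lock_destroy(&counter);

    /* Externally allocated family: the caller owns the qthread_spinlock_t
     * and initializes it via the QTHREAD_MUTEX_INITIALIZER templates. */
    qthread_spinlock_t sl;
    qthread_spinlock_init(&sl, false); /* non-recursive */
    qthread_spinlock_lock(&sl);
    qthread_spinlock_unlock(&sl);
    qthread_spinlock_destroy(&sl);

    qthread_finalize();
    return 0;
}

Note the design of the state word: state.s doubles as a tag, since values >= SPINLOCK_IS_RECURSIVE (-1) mark a recursive lock (non-recursive locks hold -2), and while a recursive lock is held state.s stores the owning worker id from qthread_readstate(CURRENT_UNIQUE_WORKER). Re-entry detection is therefore a single comparison, and unlock resets state.s to SPINLOCK_IS_RECURSIVE once the nesting count returns to zero.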