/*
   Unix SMB/CIFS implementation.
   Samba database functions
   Copyright (C) Anton Blanchard                   2001

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#if HAVE_CONFIG_H
#include <config.h>
#endif

#if STANDALONE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include <time.h>
#include "tdb.h"
#include "spinlock.h"

#define DEBUG
#else
#include "includes.h"
#endif

#ifdef USE_SPINLOCKS

/*
 * ARCH SPECIFIC
 */

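/*
 * Each architecture block below provides the same four primitives:
 *   __spin_trylock()   - atomically try to take the lock; returns 0 on
 *                        success, EBUSY if the lock is already held
 *   __spin_unlock()    - release the lock, with whatever memory barrier
 *                        the architecture needs so prior stores are seen
 *   __spin_lock_init() - put the lock into its unlocked state
 *   __spin_is_locked() - cheap non-atomic check, used while spinning
 */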
#if defined(SPARC_SPINLOCKS)

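/*
 * ldstub atomically loads the lock byte and stores 0xff into it.
 * If the old value was 0 the lock was free and is now ours.
 */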
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        asm volatile("ldstub    [%1], %0"
                : "=r" (result)
                : "r" (lock)
                : "memory");

        return (result == 0) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#elif defined(POWERPC_SPINLOCKS)

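/*
 * lwarx/stwcx. form a load-reserve/store-conditional pair: the store
 * only succeeds if nothing else touched the word since the load.  The
 * isync keeps loads in the critical section from executing before the
 * lock is taken; the eieio in __spin_unlock orders stores made under
 * the lock before the store that releases it.
 */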
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%1\n\
        cmpwi           0,%0,0\n\
        li              %0,0\n\
        bne-            2f\n\
        li              %0,1\n\
        stwcx.          %0,0,%1\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r"(result)
        : "r"(lock)
        : "cr0", "memory");

        return (result == 1) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("eieio":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#elif defined(INTEL_SPINLOCKS)

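/*
 * Note the inverted sense on x86: 1 means unlocked and 0 means locked,
 * which is why __spin_lock_init() stores 1 and __spin_is_locked()
 * tests against 1.  xchgl with a memory operand is implicitly locked,
 * so no lock prefix is needed; swapping in 0 returns the old value,
 * and we acquired the lock only if that old value was the unlocked 1.
 */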
static inline int __spin_trylock(spinlock_t *lock)
{
        int oldval;

        asm volatile("xchgl %0,%1"
                : "=r" (oldval), "=m" (*lock)
                : "0" (0)
                : "memory");

        return oldval > 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 1;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 1;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 1);
}

#elif defined(MIPS_SPINLOCKS)

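/*
 * ll/sc are the MIPS load-linked/store-conditional pair: sc writes its
 * register back as 1 if the store succeeded and 0 if the reservation
 * was lost, so __spin_trylock() retries until the store sticks.
 */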
static inline unsigned int load_linked(unsigned long addr)
{
        unsigned int res;

        __asm__ __volatile__("ll\t%0,(%1)"
                : "=r" (res)
                : "r" (addr));

        return res;
}

static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
        unsigned int res;

        __asm__ __volatile__("sc\t%0,(%2)"
                : "=r" (res)
                : "0" (value), "r" (addr));
        return res;
}

static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int mw;

        do {
                mw = load_linked((unsigned long)lock);
                if (mw)
                        return EBUSY;
        } while (!store_conditional((unsigned long)lock, 1));

        asm volatile("":::"memory");

        return 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#else
#error Need to implement spinlock code in spinlock.c
#endif

/*
 * OS SPECIFIC
 */

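/*
 * Give the CPU away while waiting for a lock to change hands:
 * sched_yield() where available, otherwise sleep for just over 2ms,
 * since shorter delays degrade to busy loops on Linux.
 */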
static void yield_cpu(void)
{
#ifdef USE_SCHED_YIELD
        sched_yield();
#else
        /* Linux will busy loop for delays < 2ms on real time tasks */
        struct timespec tm;

        tm.tv_sec = 0;
        tm.tv_nsec = 2000000L + 1;
        nanosleep(&tm, NULL);
#endif
}

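/*
 * SMP detection is stubbed out to "uniprocessor", so the busy-loop
 * branches below (guarded by smp_machine) are currently dead code and
 * every waiter falls through to yield_cpu().
 */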
static int this_is_smp(void)
{
        return 0;
}

/*
 * GENERIC
 */

static int smp_machine = 0;

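/*
 * Acquire strategy: one atomic trylock attempt, then spin on the cheap
 * non-atomic read until the lock looks free before trying the atomic
 * operation again.  On a uniprocessor, yield instead of spinning.
 */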
static inline void __spin_lock(spinlock_t *lock)
{
        int ntries = 0;

        while (__spin_trylock(lock)) {
                while (__spin_is_locked(lock)) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

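/*
 * Reader/writer locks are built from a spinlock plus a count: readers
 * increment the count, and a writer sets the RWLOCK_BIAS bit, getting
 * in only when the count is 0.  Waiters poll the count outside the
 * spinlock so the lock word isn't hammered while they wait.
 */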
static void __read_lock(tdb_rwlock_t *rwlock)
{
        int ntries = 0;

        while (1) {
                __spin_lock(&rwlock->lock);

                if (!(rwlock->count & RWLOCK_BIAS)) {
                        rwlock->count++;
                        __spin_unlock(&rwlock->lock);
                        return;
                }

                __spin_unlock(&rwlock->lock);

                while (rwlock->count & RWLOCK_BIAS) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

static void __write_lock(tdb_rwlock_t *rwlock)
{
        int ntries = 0;

        while (1) {
                __spin_lock(&rwlock->lock);

                if (rwlock->count == 0) {
                        rwlock->count |= RWLOCK_BIAS;
                        __spin_unlock(&rwlock->lock);
                        return;
                }

                __spin_unlock(&rwlock->lock);

                while (rwlock->count != 0) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

static void __write_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

#ifdef DEBUG
        if (!(rwlock->count & RWLOCK_BIAS))
                fprintf(stderr, "bug: write_unlock\n");
#endif

        rwlock->count &= ~RWLOCK_BIAS;
        __spin_unlock(&rwlock->lock);
}

static void __read_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

#ifdef DEBUG
        if (!rwlock->count)
                fprintf(stderr, "bug: read_unlock\n");

        if (rwlock->count & RWLOCK_BIAS)
                fprintf(stderr, "bug: read_unlock (write locked)\n");
#endif

        rwlock->count--;
        __spin_unlock(&rwlock->lock);
}

/* TDB SPECIFIC */

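/*
 * The rwlock array lives inside the mmapped database file at offset
 * tdb->header.rwlocks; slot 0 is the allocation list (list -1), hence
 * the list+1 indexing below.  A hypothetical caller pairs the calls:
 *
 *      if (tdb_spinlock(tdb, hash, F_WRLCK) == 0) {
 *              ... modify the chain ...
 *              tdb_spinunlock(tdb, hash, F_WRLCK);
 *      }
 */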
/* lock a list in the database. list -1 is the alloc list */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
        tdb_rwlock_t *rwlocks;

        if (!tdb->map_ptr) return -1;
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

        switch (rw_type) {
        case F_RDLCK:
                __read_lock(&rwlocks[list+1]);
                break;

        case F_WRLCK:
                __write_lock(&rwlocks[list+1]);
                break;

        default:
                return TDB_ERRCODE(TDB_ERR_LOCK, -1);
        }
        return 0;
}

/* unlock the database. */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
        tdb_rwlock_t *rwlocks;

        if (!tdb->map_ptr) return -1;
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

        switch (rw_type) {
        case F_RDLCK:
                __read_unlock(&rwlocks[list+1]);
                break;

        case F_WRLCK:
                __write_unlock(&rwlocks[list+1]);
                break;

        default:
                return TDB_ERRCODE(TDB_ERR_LOCK, -1);
        }

        return 0;
}

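/*
 * Build and append a zeroed lock table to the database file.  This
 * assumes the caller has left fd positioned at the end of the newly
 * written header and hash table, per the comment at the write() call.
 */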
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
        unsigned size, i;
        tdb_rwlock_t *rwlocks;

        size = (hash_size + 1) * sizeof(tdb_rwlock_t);
        rwlocks = malloc(size);
        if (!rwlocks)
                return -1;

        for (i = 0; i < hash_size+1; i++) {
                __spin_lock_init(&rwlocks[i].lock);
                rwlocks[i].count = 0;
        }

        /* Write it out (appending to end) */
        if (write(fd, rwlocks, size) != (ssize_t)size) {
                free(rwlocks);
                return -1;
        }
        smp_machine = this_is_smp();
        free(rwlocks);
        return 0;
}

int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        tdb_rwlock_t *rwlocks;
        unsigned i;

        if (tdb->header.rwlocks == 0) return 0;
        if (!tdb->map_ptr) return -1;

        /* We're mmapped here */
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
        for (i = 0; i < tdb->header.hash_size+1; i++) {
                __spin_lock_init(&rwlocks[i].lock);
                rwlocks[i].count = 0;
        }
        return 0;
}
#else
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }

/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
                                - (char *)&tdb->header);

        tdb->header.rwlocks = 0;
        if (lseek(tdb->fd, off, SEEK_SET) != off
            || write(tdb->fd, (void *)&tdb->header.rwlocks,
                     sizeof(tdb->header.rwlocks))
            != sizeof(tdb->header.rwlocks))
                return -1;
        return 0;
}
#endif