runtime: If no sem_timedwait, use pthread_cond_timedwait.
[pf3gnuchains/gcc-fork.git] libgo/runtime/lock_sema.c
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "runtime.h"

// This implementation depends on OS-specific implementations of
//
//	uintptr runtime.semacreate(void)
//		Create a semaphore, which will be assigned to m->waitsema.
//		The zero value is treated as absence of any semaphore,
//		so be sure to return a non-zero value.
//
//	int32 runtime.semasleep(int64 ns)
//		If ns < 0, acquire m->waitsema and return 0.
//		If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	int32 runtime.semawakeup(M *mp)
//		Wake up mp, which is or will soon be sleeping on mp->waitsema.
//
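/*
 * Illustrative sketch only, not part of this file: one plausible shape
 * for the OS-specific layer on a system without sem_timedwait, built on
 * a pthread mutex/condition pair per M, along the lines of the
 * pthread_cond_timedwait fallback named in the commit message.  The
 * PthreadSema type and every detail below are hypothetical, not the
 * actual gccgo port; runtime.h is assumed to supply uintptr, int32,
 * int64, M, and runtime_m().
 */

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <time.h>

typedef struct PthreadSema
{
	pthread_mutex_t mu;
	pthread_cond_t cond;
	int count;
} PthreadSema;

uintptr
runtime_semacreate(void)
{
	PthreadSema *s;

	s = malloc(sizeof *s);
	pthread_mutex_init(&s->mu, NULL);
	pthread_cond_init(&s->cond, NULL);
	s->count = 0;
	return (uintptr)s;	// non-zero, as the contract above requires
}

int32
runtime_semasleep(int64 ns)
{
	PthreadSema *s;
	struct timespec ts;

	s = (PthreadSema*)runtime_m()->waitsema;
	pthread_mutex_lock(&s->mu);
	if(ns < 0) {
		// Block until semawakeup posts the semaphore.
		while(s->count == 0)
			pthread_cond_wait(&s->cond, &s->mu);
	} else {
		// Convert the relative timeout to an absolute deadline.
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += ns / 1000000000LL;
		ts.tv_nsec += ns % 1000000000LL;
		if(ts.tv_nsec >= 1000000000L) {
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000L;
		}
		while(s->count == 0) {
			if(pthread_cond_timedwait(&s->cond, &s->mu, &ts) == ETIMEDOUT)
				break;
		}
		if(s->count == 0) {
			// Timed out with nothing posted.
			pthread_mutex_unlock(&s->mu);
			return -1;	// caller recomputes ns and retries
		}
	}
	s->count--;
	pthread_mutex_unlock(&s->mu);
	return 0;
}

int32
runtime_semawakeup(M *mp)
{
	PthreadSema *s;

	s = (PthreadSema*)mp->waitsema;
	pthread_mutex_lock(&s->mu);
	s->count++;
	pthread_cond_signal(&s->cond);
	pthread_mutex_unlock(&s->mu);
	return 0;
}
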
enum
{
	LOCKED = 1,

	ACTIVE_SPIN = 4,	// rounds of active spinning (runtime_procyield)
	ACTIVE_SPIN_CNT = 30,	// iterations per active-spin round
	PASSIVE_SPIN = 1,	// rounds of passive spinning (runtime_osyield)
};
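
// A Lock is a single pointer-sized word, l->waitm: the low bit is
// LOCKED, and the remaining bits, when non-zero, point at the head of
// the list of M's waiting for the lock (chained through m->nextwaitm).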

void
runtime_lock(Lock *l)
{
	M *m;
	uintptr v;
	uint32 i, spin;

	m = runtime_m();
	if(m->locks++ < 0)
		runtime_throw("runtime_lock: lock count");

	// Speculative grab for lock.
	if(runtime_casp(&l->waitm, nil, (void*)LOCKED))
		return;

	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	// On uniprocessors, there is no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;

	for(i=0;; i++) {
		v = (uintptr)runtime_atomicloadp(&l->waitm);
		if((v&LOCKED) == 0) {
unlocked:
			if(runtime_casp(&l->waitm, (void*)v, (void*)(v|LOCKED)))
				return;
			i = 0;
		}
		if(i<spin)
			runtime_procyield(ACTIVE_SPIN_CNT);
		else if(i<spin+PASSIVE_SPIN)
			runtime_osyield();
		else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for(;;) {
				m->nextwaitm = (void*)(v&~LOCKED);
				if(runtime_casp(&l->waitm, (void*)v, (void*)((uintptr)m|LOCKED)))
					break;
				v = (uintptr)runtime_atomicloadp(&l->waitm);
				if((v&LOCKED) == 0)
					goto unlocked;
			}
			if(v&LOCKED) {
				// Queued.  Wait.
				runtime_semasleep(-1);
				i = 0;
			}
		}
	}
}

void
runtime_unlock(Lock *l)
{
	uintptr v;
	M *mp;

	if(--runtime_m()->locks < 0)
		runtime_throw("runtime_unlock: lock count");

	for(;;) {
		v = (uintptr)runtime_atomicloadp(&l->waitm);
		if(v == LOCKED) {
			if(runtime_casp(&l->waitm, (void*)LOCKED, nil))
				break;
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (void*)(v&~LOCKED);
			if(runtime_casp(&l->waitm, (void*)v, mp->nextwaitm)) {
				// Dequeued an M.  Wake it.
				runtime_semawakeup(mp);
				break;
			}
		}
	}
}
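
/*
 * Usage sketch (hypothetical caller, not part of this file): a
 * zero-initialized Lock is unlocked, so a critical section is simply
 * bracketed by runtime_lock/runtime_unlock.
 */
static Lock counterlock;
static int64 counter;

static void
inccounter(void)
{
	runtime_lock(&counterlock);
	counter++;		// protected by counterlock
	runtime_unlock(&counterlock);
}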

// One-time notifications.
void
runtime_noteclear(Note *n)
{
	n->waitm = nil;
}
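
// A Note's waitm field moves through three states:
//	nil	- cleared; no waiter registered, no wakeup delivered
//	mp	- an M has registered and is sleeping (or about to)
//	LOCKED	- notewakeup has fired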

void
runtime_notewakeup(Note *n)
{
	M *mp;

	do
		mp = runtime_atomicloadp(&n->waitm);
	while(!runtime_casp(&n->waitm, mp, (void*)LOCKED));

	// Successfully set waitm to LOCKED.
	// What was it before?
	if(mp == nil) {
		// Nothing was waiting.  Done.
	} else if(mp == (M*)LOCKED) {
		// Two notewakeups!  Not allowed.
		runtime_throw("notewakeup - double wakeup");
	} else {
		// Must be the waiting m.  Wake it up.
		runtime_semawakeup(mp);
	}
}

void
runtime_notesleep(Note *n)
{
	M *m;

	m = runtime_m();
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();
	if(!runtime_casp(&n->waitm, nil, m)) {	// must be LOCKED (got wakeup)
		if(n->waitm != (void*)LOCKED)
			runtime_throw("notesleep - waitm out of sync");
		return;
	}
	// Queued.  Sleep.
	runtime_semasleep(-1);
}
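
/*
 * Usage sketch (hypothetical caller, not part of this file): a Note is
 * a one-shot event.  The waiter clears it and sleeps; exactly one
 * other thread wakes it.  Timeouts are in nanoseconds, so a bounded
 * wait of 50ms would be runtime_notetsleep(&done, 50*1000*1000LL).
 */
static Note done;

static void
waiter(void)
{
	runtime_noteclear(&done);
	// ... hand work to another M, which will call finished() ...
	runtime_notesleep(&done);	// blocks until the wakeup arrives
}

static void
finished(void)
{
	runtime_notewakeup(&done);	// at most one wakeup per noteclear
}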

void
runtime_notetsleep(Note *n, int64 ns)
{
	M *m;
	M *mp;
	int64 deadline, now;

	if(ns < 0) {
		runtime_notesleep(n);
		return;
	}

	m = runtime_m();
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	// Register for wakeup on n->waitm.
	if(!runtime_casp(&n->waitm, nil, m)) {	// must be LOCKED (got wakeup already)
		if(n->waitm != (void*)LOCKED)
			runtime_throw("notetsleep - waitm out of sync");
		return;
	}

	deadline = runtime_nanotime() + ns;
	for(;;) {
		// Registered.  Sleep.
		if(runtime_semasleep(ns) >= 0) {
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return;
		}

		// Interrupted or timed out.  Still registered.  Semaphore not acquired.
		now = runtime_nanotime();
		if(now >= deadline)
			break;

		// Deadline hasn't arrived.  Keep sleeping.
		ns = deadline - now;
	}

	// Deadline arrived.  Still registered.  Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for(;;) {
		mp = runtime_atomicloadp(&n->waitm);
		if(mp == m) {
			// No wakeup yet; unregister if possible.
			if(runtime_casp(&n->waitm, mp, nil))
				return;
		} else if(mp == (M*)LOCKED) {
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			if(runtime_semasleep(-1) < 0)
				runtime_throw("runtime: unable to acquire - semaphore out of sync");
			return;
		} else {
			runtime_throw("runtime: unexpected waitm - semaphore out of sync");
		}
	}
}