[pf3gnuchains/gcc-fork.git] / libgo/runtime/lock_sema.c
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "runtime.h"

// This implementation depends on OS-specific implementations of
//
//	uintptr runtime.semacreate(void)
//		Create a semaphore, which will be assigned to m->waitsema.
//		The zero value is treated as absence of any semaphore,
//		so be sure to return a non-zero value.
//
//	int32 runtime.semasleep(int64 ns)
//		If ns < 0, acquire m->waitsema and return 0.
//		If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	int32 runtime.semawakeup(M *mp)
//		Wake up mp, which is or will soon be sleeping on mp->waitsema.
//
enum
{
	LOCKED = 1,

	ACTIVE_SPIN = 4,
	ACTIVE_SPIN_CNT = 30,
	PASSIVE_SPIN = 1,
};

void
runtime_lock(Lock *l)
{
	uintptr v;
	uint32 i, spin;

	if(m->locks++ < 0)
		runtime_throw("runtime_lock: lock count");

	// Speculative grab for lock.
	if(runtime_casp(&l->waitm, nil, (void*)LOCKED))
		return;

	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();
	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime_ncpu > 1)
		spin = ACTIVE_SPIN;

	for(i=0;; i++) {
		v = (uintptr)runtime_atomicloadp(&l->waitm);
		if((v&LOCKED) == 0) {
unlocked:
			if(runtime_casp(&l->waitm, (void*)v, (void*)(v|LOCKED)))
				return;
			i = 0;
		}
		if(i<spin)
			runtime_procyield(ACTIVE_SPIN_CNT);
		else if(i<spin+PASSIVE_SPIN)
			runtime_osyield();
		else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for(;;) {
				m->nextwaitm = (void*)(v&~LOCKED);
				if(runtime_casp(&l->waitm, (void*)v, (void*)((uintptr)m|LOCKED)))
					break;
				v = (uintptr)runtime_atomicloadp(&l->waitm);
				if((v&LOCKED) == 0)
					goto unlocked;
			}
			if(v&LOCKED) {
				// Queued.  Wait.
				runtime_semasleep(-1);
				i = 0;
			}
		}
	}
}
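
// Illustrative sketch (not part of the original file): the waiter list above
// works because an M is word-aligned, so the low bit of an M pointer is
// always zero and is free to carry the LOCKED flag.  These hypothetical
// helpers spell out the encoding that runtime_lock builds inline.
#ifdef LOCK_SEMA_EXAMPLE
static uintptr
example_pack_waiter(M *mp)
{
	// Tag the new list head with the lock bit; mp->nextwaitm already
	// links the rest of the list, exactly as in runtime_lock above.
	return (uintptr)mp | LOCKED;
}

static M*
example_unpack_waiter(uintptr v)
{
	// Mask off the lock bit to recover the M at the head of the list.
	return (M*)(v & ~(uintptr)LOCKED);
}
#endif	// LOCK_SEMA_EXAMPLE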

void
runtime_unlock(Lock *l)
{
	uintptr v;
	M *mp;

	if(--m->locks < 0)
		runtime_throw("runtime_unlock: lock count");

	for(;;) {
		v = (uintptr)runtime_atomicloadp(&l->waitm);
		if(v == LOCKED) {
			if(runtime_casp(&l->waitm, (void*)LOCKED, nil))
				break;
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (void*)(v&~LOCKED);
			if(runtime_casp(&l->waitm, (void*)v, mp->nextwaitm)) {
				// Dequeued an M.  Wake it.
				runtime_semawakeup(mp);
				break;
			}
		}
	}
}
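
// Illustrative sketch (not part of the original file): typical runtime-internal
// usage.  A Lock guards shared state; lock and unlock must be paired on the
// same M, which is what the m->locks bookkeeping above checks.  The names
// example_lock, example_count and example_increment are hypothetical.
#ifdef LOCK_SEMA_EXAMPLE
static Lock example_lock;	// zero value = unlocked, no waiters
static int32 example_count;

static void
example_increment(void)
{
	runtime_lock(&example_lock);
	example_count++;	// critical section
	runtime_unlock(&example_lock);
}
#endif	// LOCK_SEMA_EXAMPLE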

// One-time notifications.
void
runtime_noteclear(Note *n)
{
	n->waitm = nil;
}

void
runtime_notewakeup(Note *n)
{
	M *mp;

	do
		mp = runtime_atomicloadp(&n->waitm);
	while(!runtime_casp(&n->waitm, mp, (void*)LOCKED));

	// Successfully set waitm to LOCKED.
	// What was it before?
	if(mp == nil) {
		// Nothing was waiting.  Done.
	} else if(mp == (M*)LOCKED) {
		// Two notewakeups!  Not allowed.
		runtime_throw("notewakeup - double wakeup");
	} else {
		// Must be the waiting m.  Wake it up.
		runtime_semawakeup(mp);
	}
}

void
runtime_notesleep(Note *n)
{
	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();
	if(!runtime_casp(&n->waitm, nil, m)) {	// must be LOCKED (got wakeup)
		if(n->waitm != (void*)LOCKED)
			runtime_throw("notesleep - waitm out of sync");
		return;
	}
	// Queued.  Sleep.
	runtime_semasleep(-1);
}
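
// Illustrative sketch (not part of the original file): the one-shot Note
// protocol.  One thread clears the note and sleeps; another thread wakes it
// exactly once (a second notewakeup would throw, as above).  The names
// example_done, example_worker and example_wait are hypothetical.
#ifdef LOCK_SEMA_EXAMPLE
static Note example_done;

static void
example_worker(void)
{
	// ... do the work the waiter is blocked on ...
	runtime_notewakeup(&example_done);	// exactly one wakeup per noteclear
}

static void
example_wait(void)
{
	runtime_noteclear(&example_done);	// must happen before either side uses it
	// start example_worker on another thread here
	runtime_notesleep(&example_done);	// returns once the wakeup has arrived
}
#endif	// LOCK_SEMA_EXAMPLE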

void
runtime_notetsleep(Note *n, int64 ns)
{
	M *mp;
	int64 deadline, now;

	if(ns < 0) {
		runtime_notesleep(n);
		return;
	}

	if(m->waitsema == 0)
		m->waitsema = runtime_semacreate();

	// Register for wakeup on n->waitm.
	if(!runtime_casp(&n->waitm, nil, m)) {	// must be LOCKED (got wakeup already)
		if(n->waitm != (void*)LOCKED)
			runtime_throw("notetsleep - waitm out of sync");
		return;
	}

	deadline = runtime_nanotime() + ns;
	for(;;) {
		// Registered.  Sleep.
		if(runtime_semasleep(ns) >= 0) {
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return;
		}

		// Interrupted or timed out.  Still registered.  Semaphore not acquired.
		now = runtime_nanotime();
		if(now >= deadline)
			break;

		// Deadline hasn't arrived.  Keep sleeping.
		ns = deadline - now;
	}

	// Deadline arrived.  Still registered.  Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for(;;) {
		mp = runtime_atomicloadp(&n->waitm);
		if(mp == m) {
			// No wakeup yet; unregister if possible.
			if(runtime_casp(&n->waitm, mp, nil))
				return;
		} else if(mp == (M*)LOCKED) {
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			if(runtime_semasleep(-1) < 0)
				runtime_throw("runtime: unable to acquire - semaphore out of sync");
			return;
		} else {
			runtime_throw("runtime: unexpected waitm - semaphore out of sync");
		}
	}
}
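
// Illustrative sketch (not part of the original file): bounded waiting with
// runtime_notetsleep.  Unlike runtime_notesleep, it returns after roughly ns
// nanoseconds even if no wakeup arrives; the unregister loop above is what
// makes that safe against a racing notewakeup.  In this version the function
// returns void, so the caller must track separately (e.g. with its own flag)
// whether the event happened or the wait timed out.  The 100 ms value and
// the function name are hypothetical.
#ifdef LOCK_SEMA_EXAMPLE
static void
example_wait_with_timeout(Note *n)
{
	runtime_noteclear(n);
	// kick off whatever will eventually call runtime_notewakeup(n)
	runtime_notetsleep(n, 100*1000*1000);	// wait at most 100 ms
}
#endif	// LOCK_SEMA_EXAMPLE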