varnish-cache/bin/varnishd/cache/cache_wrk.c

/*-
 * Copyright (c) 2006 Verdens Gang AS
 * Copyright (c) 2006-2011 Varnish Software AS
 * All rights reserved.
 *
 * Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Worker thread stuff unrelated to the worker thread pools.
 *
 * --
 * signaling_note:
 *
 * Note on worker wakeup signaling through the wrk condition variable (cv).
 *
 * In the general case, a cv needs to be signaled while holding the
 * corresponding mutex; otherwise the signal may be posted before the waiting
 * thread has registered itself on the cv and, consequently, the signal may
 * be missed.
 *
 * In our case, any worker thread which we wake up comes from the idle queue,
 * where it put itself under the mutex, releasing that mutex implicitly via
 * Lck_CondWait() (which calls some variant of pthread_cond_wait). So we
 * avoid additional mutex contention, knowing that any worker thread on the
 * idle queue is blocking on the cv.
 *
 * Except -- when it isn't, because it woke up to release its VCL reference.
 * To account for this case, we check whether the task function has been set
 * in the meantime, which in turn requires all of the task preparation to be
 * done while holding the pool mutex. (see also #2719)
 */
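
/*
 * A minimal sketch of the lost-wakeup race described in signaling_note
 * above, reduced to bare pthreads; all names here are hypothetical and the
 * code is illustrative only, not varnishd code. If the signaler does not
 * set the predicate under the mutex, the signal can fire between the
 * waiter's predicate check and its call to pthread_cond_wait() and is then
 * lost. The idle-queue protocol sidesteps this because the waiter is known
 * to be inside the wait; re-checking the predicate (here ex_task, in
 * Pool_Work_Thread() wrk->task.func) covers the one exception.
 *
 *	#include <pthread.h>
 *
 *	static pthread_mutex_t ex_mtx = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t ex_cv = PTHREAD_COND_INITIALIZER;
 *	static void *ex_task = NULL;
 *
 *	static void
 *	ex_wait(void)
 *	{
 *		pthread_mutex_lock(&ex_mtx);
 *		// Re-check the predicate after every wakeup: the wait can
 *		// return spuriously or for unrelated reasons, exactly like
 *		// the VCL-release wakeup handled in Pool_Work_Thread().
 *		while (ex_task == NULL)
 *			pthread_cond_wait(&ex_cv, &ex_mtx);
 *		pthread_mutex_unlock(&ex_mtx);
 *	}
 *
 *	static void
 *	ex_post(void *task)
 *	{
 *		// Set the predicate under the mutex; the signal itself can
 *		// then safely be posted after unlocking, as Pool_Task() does.
 *		pthread_mutex_lock(&ex_mtx);
 *		ex_task = task;
 *		pthread_mutex_unlock(&ex_mtx);
 *		pthread_cond_signal(&ex_cv);
 *	}
 */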

#include "config.h"

#include <errno.h>
#include <stdlib.h>

#include "cache_varnishd.h"
#include "cache_pool.h"

#include "vtim.h"

#include "hash/hash_slinger.h"

static void Pool_Work_Thread(struct pool *pp, struct worker *wrk);

/*--------------------------------------------------------------------
 * Create and start a background thread which has its own worker and
 * session data structures.
 */

struct bgthread {
	unsigned	magic;
#define BGTHREAD_MAGIC	0x23b5152b
	const char	*name;
	bgthread_t	*func;
	void		*priv;
};

static void *
wrk_bgthread(void *arg)
{
	struct bgthread *bt;
	struct worker wrk;
	struct VSC_main_wrk ds;

	CAST_OBJ_NOTNULL(bt, arg, BGTHREAD_MAGIC);
	THR_SetName(bt->name);
	THR_Init();
	INIT_OBJ(&wrk, WORKER_MAGIC);
	memset(&ds, 0, sizeof ds);
	wrk.stats = &ds;

	(void)bt->func(&wrk, bt->priv);

	WRONG("BgThread terminated");

	NEEDLESS(return NULL);
}

void
WRK_BgThread(pthread_t *thr, const char *name, bgthread_t *func, void *priv)
{
	struct bgthread *bt;

	ALLOC_OBJ(bt, BGTHREAD_MAGIC);
	AN(bt);

	bt->name = name;
	bt->func = func;
	bt->priv = priv;
	AZ(pthread_create(thr, NULL, wrk_bgthread, bt));
}
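
/*
 * Hypothetical usage sketch for WRK_BgThread() (illustrative only, not part
 * of this file): the supplied bgthread_t function gets a private worker with
 * its own stats structure and must never return -- wrk_bgthread() treats a
 * return as a fatal error via WRONG().
 *
 *	static void * v_matchproto_(bgthread_t)
 *	ex_ticker(struct worker *wrk, void *priv)
 *	{
 *		(void)priv;
 *		while (1) {
 *			// ... periodic housekeeping using wrk ...
 *			VTIM_sleep(1.);
 *		}
 *		NEEDLESS(return NULL);
 *	}
 *
 *	// e.g. during child startup:
 *	pthread_t thr;
 *	WRK_BgThread(&thr, "ex-ticker", ex_ticker, NULL);
 */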

/*--------------------------------------------------------------------*/

static void
WRK_Thread(struct pool *qp, size_t stacksize, unsigned thread_workspace)
{
	// child_signal_handler stack overflow check uses struct worker addr
	struct worker *w, ww;
	struct VSC_main_wrk ds;
	unsigned char ws[thread_workspace];

	AN(qp);
	AN(stacksize);
	AN(thread_workspace);

	THR_SetName("cache-worker");
	w = &ww;
	INIT_OBJ(w, WORKER_MAGIC);
	w->lastused = NAN;
	memset(&ds, 0, sizeof ds);
	w->stats = &ds;
	AZ(pthread_cond_init(&w->cond, NULL));

	WS_Init(w->aws, "wrk", ws, thread_workspace);

	VSL(SLT_WorkThread, 0, "%p start", w);

	Pool_Work_Thread(qp, w);
	AZ(w->pool);

	VSL(SLT_WorkThread, 0, "%p end", w);
	if (w->vcl != NULL)
		VCL_Rel(&w->vcl);
	AZ(pthread_cond_destroy(&w->cond));
	HSH_Cleanup(w);
	Pool_Sumstat(w);
}

/*--------------------------------------------------------------------
 * Summing of stats into pool counters
 */

static void
pool_addstat(struct VSC_main_wrk *dst, struct VSC_main_wrk *src)
{

	dst->summs++;
	VSC_main_Summ_wrk_wrk(dst, src);
	memset(src, 0, sizeof *src);
}

static inline int
pool_reserve(void)
{
	unsigned lim;

	if (cache_param->wthread_reserve == 0)
		return (cache_param->wthread_min / 20 + 1);
	lim = cache_param->wthread_min * 950 / 1000;
	if (cache_param->wthread_reserve > lim)
		return (lim);
	return (cache_param->wthread_reserve);
}
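
/*
 * Worked example for pool_reserve(), with hypothetical parameter values:
 * with wthread_reserve=0 (the default) and wthread_min=100, the reserve is
 * 100/20 + 1 = 6 threads; with wthread_reserve=200 and wthread_min=100, the
 * 95% cap applies (lim = 100 * 950 / 1000 = 95) and the reserve is 95.
 */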

/*--------------------------------------------------------------------*/

static struct worker *
pool_getidleworker(struct pool *pp, enum task_prio prio)
{
	struct pool_task *pt = NULL;
	struct worker *wrk;

	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
	Lck_AssertHeld(&pp->mtx);
	if (prio <= TASK_QUEUE_RESERVE || pp->nidle > pool_reserve()) {
		pt = VTAILQ_FIRST(&pp->idle_queue);
		if (pt == NULL)
			AZ(pp->nidle);
	}

	if (pt == NULL) {
		if (pp->nthr < cache_param->wthread_max) {
			pp->dry++;
			AZ(pthread_cond_signal(&pp->herder_cond));
		}
		return (NULL);
	}
	AZ(pt->func);
	CAST_OBJ_NOTNULL(wrk, pt->priv, WORKER_MAGIC);
	return (wrk);
}

/*--------------------------------------------------------------------
 * Special scheduling:  If no thread can be found, the current thread
 * will be prepared for rescheduling instead.
 * The selected thread's workspace is reserved and the argument put there.
 * Return one if another thread was scheduled, otherwise zero.
 */

int
Pool_Task_Arg(struct worker *wrk, enum task_prio prio, task_func_t *func,
    const void *arg, size_t arg_len)
{
	struct pool *pp;
	struct worker *wrk2;
	int retval;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	AN(arg);
	AN(arg_len);
	pp = wrk->pool;
	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);

	Lck_Lock(&pp->mtx);
	wrk2 = pool_getidleworker(pp, prio);
	if (wrk2 != NULL) {
		AN(pp->nidle);
		VTAILQ_REMOVE(&pp->idle_queue, &wrk2->task, list);
		pp->nidle--;
		retval = 1;
	} else {
		wrk2 = wrk;
		retval = 0;
	}
	AZ(wrk2->task.func);
	assert(arg_len <= WS_Reserve(wrk2->aws, arg_len));
	memcpy(wrk2->aws->f, arg, arg_len);
	wrk2->task.func = func;
	wrk2->task.priv = wrk2->aws->f;
	Lck_Unlock(&pp->mtx);
	// see signaling_note at the top for explanation
	if (retval)
		AZ(pthread_cond_signal(&wrk2->cond));
	return (retval);
}
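
/*
 * Hypothetical caller sketch for Pool_Task_Arg() (illustrative only, names
 * invented): on a zero return, no idle worker was available and the task was
 * parked on the *calling* worker instead, so the caller must unwind to its
 * work loop, where Pool_Work_Thread()'s inner loop picks up wrk->task.
 *
 *	if (!Pool_Task_Arg(wrk, TASK_QUEUE_REQ, ex_task_func,
 *	    &ex_arg, sizeof ex_arg)) {
 *		// This thread was rescheduled: return from the current task
 *		// function and let the work loop run ex_task_func.
 *		return;
 *	}
 *	// An idle worker was signaled and now owns the task.
 */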

/*--------------------------------------------------------------------
 * Enter a new task to be done
 */

int
Pool_Task(struct pool *pp, struct pool_task *task, enum task_prio prio)
{
	struct worker *wrk;
	int retval = 0;
	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
	AN(task);
	AN(task->func);
	assert(prio < TASK_QUEUE_END);

	Lck_Lock(&pp->mtx);

	/* The common case first:  Take an idle thread, do it. */

	wrk = pool_getidleworker(pp, prio);
	if (wrk != NULL) {
		AN(pp->nidle);
		VTAILQ_REMOVE(&pp->idle_queue, &wrk->task, list);
		pp->nidle--;
		AZ(wrk->task.func);
		wrk->task.func = task->func;
		wrk->task.priv = task->priv;
		Lck_Unlock(&pp->mtx);
		// see signaling_note at the top for explanation
		AZ(pthread_cond_signal(&wrk->cond));
		return (0);
	}

	/*
	 * Queue limits only apply to client threads - all other
	 * work is vital and needs to be done at the earliest opportunity.
	 */
	if (!TASK_QUEUE_CLIENT(prio) ||
	    pp->lqueue + pp->nthr < cache_param->wthread_max +
	    cache_param->wthread_queue_limit) {
		pp->nqueued++;
		pp->lqueue++;
		VTAILQ_INSERT_TAIL(&pp->queues[prio], task, list);
	} else {
		if (prio == TASK_QUEUE_REQ)
			pp->sdropped++;
		else
			pp->rdropped++;
		retval = -1;
	}
	Lck_Unlock(&pp->mtx);
	return (retval);
}
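
/*
 * Worked example for the admission check above (hypothetical parameter
 * values): with wthread_max=500 and wthread_queue_limit=20, a client task is
 * queued as long as lqueue + nthr < 520. Beyond that, a rejected
 * TASK_QUEUE_REQ task is counted in sdropped (feeding sess_dropped), any
 * other rejected client task in rdropped (feeding req_dropped), and the
 * caller sees -1.
 */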

/*--------------------------------------------------------------------
 * Empty function used as a pointer value for the thread exit condition.
 */

static void v_matchproto_(task_func_t)
pool_kiss_of_death(struct worker *wrk, void *priv)
{
	(void)wrk;
	(void)priv;
}


/*--------------------------------------------------------------------
 * This is the work function for worker threads in the pool.
 */

static void
Pool_Work_Thread(struct pool *pp, struct worker *wrk)
{
	struct pool_task *tp = NULL;
	struct pool_task tpx, tps;
	vtim_real tmo;
	int i, prio_lim;

	CHECK_OBJ_NOTNULL(pp, POOL_MAGIC);
	wrk->pool = pp;
	while (1) {
		CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);

		WS_Reset(wrk->aws, 0);
		AZ(wrk->vsl);

		Lck_Lock(&pp->mtx);
		if (pp->nidle < pool_reserve())
			prio_lim = TASK_QUEUE_RESERVE + 1;
		else
			prio_lim = TASK_QUEUE_END;

		for (i = 0; i < prio_lim; i++) {
			tp = VTAILQ_FIRST(&pp->queues[i]);
			if (tp != NULL) {
				pp->lqueue--;
				pp->ndequeued--;
				VTAILQ_REMOVE(&pp->queues[i], tp, list);
				break;
			}
		}

		if ((tp == NULL && wrk->stats->summs > 0) ||
		    (wrk->stats->summs >= cache_param->wthread_stats_rate))
			pool_addstat(pp->a_stat, wrk->stats);

		if (tp != NULL) {
			wrk->stats->summs++;
		} else if (pp->b_stat != NULL && pp->a_stat->summs) {
			/* Nothing to do, push pool stats into global pool */
			tps.func = pool_stat_summ;
			tps.priv = pp->a_stat;
			pp->a_stat = pp->b_stat;
			pp->b_stat = NULL;
			tp = &tps;
		} else {
			/* Nothing to do: To sleep, perchance to dream ... */
			if (isnan(wrk->lastused))
				wrk->lastused = VTIM_real();
			wrk->task.func = NULL;
			wrk->task.priv = wrk;
			VTAILQ_INSERT_HEAD(&pp->idle_queue, &wrk->task, list);
			pp->nidle++;
			do {
				// see signaling_note at the top for explanation
				if (wrk->vcl == NULL)
					tmo = 0;
				else if (DO_DEBUG(DBG_VTC_MODE))
					tmo = wrk->lastused + 1.;
				else
					tmo = wrk->lastused + 60.;
				i = Lck_CondWait(&wrk->cond, &pp->mtx, tmo);
				if (i == ETIMEDOUT)
					VCL_Rel(&wrk->vcl);
			} while (wrk->task.func == NULL);
			tpx = wrk->task;
			tp = &tpx;
			wrk->stats->summs++;
		}
		Lck_Unlock(&pp->mtx);

		if (tp->func == pool_kiss_of_death)
			break;

		do {
			memset(&wrk->task, 0, sizeof wrk->task);
			assert(wrk->pool == pp);
			tp->func(wrk, tp->priv);
			if (DO_DEBUG(DBG_VCLREL) && wrk->vcl != NULL)
				VCL_Rel(&wrk->vcl);
			tpx = wrk->task;
			tp = &tpx;
		} while (tp->func != NULL);

		if (WS_Overflowed(wrk->aws))
			wrk->stats->ws_thread_overflow++;
		/* cleanup for next task */
		wrk->seen_methods = 0;
	}
	wrk->pool = NULL;
}
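
/*
 * Sketch of the task chaining the inner loop above enables (hypothetical
 * task function, illustrative only): a task function may schedule a
 * follow-up on the same worker by filling in wrk->task before returning;
 * the loop then runs it directly, without a trip through the idle queue.
 *
 *	static void v_matchproto_(task_func_t)
 *	ex_first_stage(struct worker *wrk, void *priv)
 *	{
 *		// ... do the first part of the work ...
 *		wrk->task.func = ex_second_stage;	// runs next, same thread
 *		wrk->task.priv = priv;
 *	}
 */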

/*--------------------------------------------------------------------
 * Create another worker thread.
 */

struct pool_info {
	unsigned		magic;
#define POOL_INFO_MAGIC		0x4e4442d3
	size_t			stacksize;
	struct pool		*qp;
};

static void *
pool_thread(void *priv)
{
	struct pool_info *pi;

	CAST_OBJ_NOTNULL(pi, priv, POOL_INFO_MAGIC);
	THR_Init();
	WRK_Thread(pi->qp, pi->stacksize, cache_param->workspace_thread);
	FREE_OBJ(pi);
	return (NULL);
}

static void
pool_breed(struct pool *qp)
{
	pthread_t tp;
	pthread_attr_t tp_attr;
	struct pool_info *pi;

	AZ(pthread_attr_init(&tp_attr));
	AZ(pthread_attr_setdetachstate(&tp_attr, PTHREAD_CREATE_DETACHED));

	/* Set the stacksize for worker threads we create */
	if (cache_param->wthread_stacksize != UINT_MAX)
		AZ(pthread_attr_setstacksize(&tp_attr,
		    cache_param->wthread_stacksize));

	ALLOC_OBJ(pi, POOL_INFO_MAGIC);
	AN(pi);
	AZ(pthread_attr_getstacksize(&tp_attr, &pi->stacksize));
	pi->qp = qp;

	if (pthread_create(&tp, &tp_attr, pool_thread, pi)) {
		VSL(SLT_Debug, 0, "Create worker thread failed %d %s",
		    errno, strerror(errno));
		Lck_Lock(&pool_mtx);
		VSC_C_main->threads_failed++;
		Lck_Unlock(&pool_mtx);
		VTIM_sleep(cache_param->wthread_fail_delay);
	} else {
		qp->dry = 0;
		qp->nthr++;
		Lck_Lock(&pool_mtx);
		VSC_C_main->threads++;
		VSC_C_main->threads_created++;
		Lck_Unlock(&pool_mtx);
		VTIM_sleep(cache_param->wthread_add_delay);
	}

	AZ(pthread_attr_destroy(&tp_attr));
}

/*--------------------------------------------------------------------
 * Herd a single pool
 *
 * This thread wakes up every thread_pool_timeout seconds, whenever a pool
 * queues work, and whenever threads need to be destroyed.
 *
 * The trick here is to not be too aggressive about creating threads.  In
 * pool_breed(), we sleep whenever we create a thread and a little while
 * longer whenever we fail to, hopefully missing a lot of cond_signals in
 * the meantime.
 *
 * Idle threads are destroyed at a rate determined by wthread_destroy_delay.
 *
 * XXX: probably needs a lot more work.
 *
 */

void*
pool_herder(void *priv)
{
	struct pool *pp;
	struct pool_task *pt;
	double t_idle;
	struct worker *wrk;
	double delay;
	int wthread_min;
	uintmax_t dq = (1ULL << 31);
	vtim_mono dqt = 0;

	CAST_OBJ_NOTNULL(pp, priv, POOL_MAGIC);

	THR_SetName("pool_herder");
	THR_Init();

	while (!pp->die || pp->nthr > 0) {
		/*
		 * If the worker pool is configured too small, we can
		 * end up deadlocking it (see #2418 for details).
		 *
		 * Recovering from this would require a lot of complicated
		 * code, and fundamentally, either people configured their
		 * pools wrong, in which case we want them to notice, or
		 * they are under DoS, in which case recovering gracefully
		 * is unlikely to be a major improvement.
		 *
		 * Instead we implement a watchdog and kill the worker
		 * process if nothing has been dequeued for that long.
		 */
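		/*
		 * Timing sketch for the watchdog below (hypothetical values):
		 * while the queue is empty, dq is kept out of sync with
		 * pp->ndequeued, so the clock never starts. Once lqueue goes
		 * nonzero, any change of pp->ndequeued resets dqt; if, say,
		 * wthread_watchdog is 60s and ndequeued stays frozen with
		 * work queued for longer than that, WRONG() panics the child.
		 */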
		if (pp->lqueue == 0) {
			dq = pp->ndequeued + 1;
		} else if (dq != pp->ndequeued) {
			dq = pp->ndequeued;
			dqt = VTIM_mono();
		} else if (VTIM_mono() - dqt > cache_param->wthread_watchdog) {
			VSL(SLT_Error, 0,
			    "Pool Herder: Queue does not move ql=%u dt=%f",
			    pp->lqueue, VTIM_mono() - dqt);
			WRONG("Worker Pool Queue does not move");
		}
		wthread_min = cache_param->wthread_min;
		if (pp->die)
			wthread_min = 0;

		/* Make more threads if needed and allowed */
		if (pp->nthr < wthread_min ||
		    (pp->dry && pp->nthr < cache_param->wthread_max)) {
			pool_breed(pp);
			continue;
		}

		delay = cache_param->wthread_timeout;
		assert(pp->nthr >= wthread_min);

		if (pp->nthr > wthread_min) {

			t_idle = VTIM_real() - cache_param->wthread_timeout;

			Lck_Lock(&pp->mtx);
			/* XXX: unsafe counters */
			VSC_C_main->sess_queued += pp->nqueued;
			VSC_C_main->sess_dropped += pp->sdropped;
			VSC_C_main->req_dropped += pp->rdropped;
			pp->nqueued = pp->sdropped = pp->rdropped = 0;

			wrk = NULL;
			pt = VTAILQ_LAST(&pp->idle_queue, taskhead);
			if (pt != NULL) {
				AN(pp->nidle);
				AZ(pt->func);
				CAST_OBJ_NOTNULL(wrk, pt->priv, WORKER_MAGIC);

				if (pp->die || wrk->lastused < t_idle ||
				    pp->nthr > cache_param->wthread_max) {
					/* Give it a kiss on the cheek... */
					VTAILQ_REMOVE(&pp->idle_queue,
					    &wrk->task, list);
					pp->nidle--;
					wrk->task.func = pool_kiss_of_death;
					AZ(pthread_cond_signal(&wrk->cond));
				} else {
					delay = wrk->lastused - t_idle;
					wrk = NULL;
				}
			}
			Lck_Unlock(&pp->mtx);

			if (wrk != NULL) {
				pp->nthr--;
				Lck_Lock(&pool_mtx);
				VSC_C_main->threads--;
				VSC_C_main->threads_destroyed++;
				Lck_Unlock(&pool_mtx);
				delay = cache_param->wthread_destroy_delay;
			} else if (delay < cache_param->wthread_destroy_delay)
				delay = cache_param->wthread_destroy_delay;
		}

		if (pp->die) {
			if (delay < 2)
				delay = 10e-3;
			else
				delay = 1;
			VTIM_sleep(delay);
			continue;
		}
		Lck_Lock(&pp->mtx);
		if (!pp->dry) {
			if (DO_DEBUG(DBG_VTC_MODE))
				delay = 0.5;
			(void)Lck_CondWait(&pp->herder_cond, &pp->mtx,
				VTIM_real() + delay);
		} else {
			/* XXX: unsafe counters */
			VSC_C_main->threads_limited++;
			pp->dry = 0;
		}
		Lck_Unlock(&pp->mtx);
	}
	return (NULL);
}