r4914 - in trunk/varnish-cache: bin/varnishd lib/libvarnishapi
phk at varnish-cache.org
phk at varnish-cache.org
Sun Jun 6 15:14:02 CEST 2010
Author: phk
Date: 2010-06-06 15:14:02 +0200 (Sun, 06 Jun 2010)
New Revision: 4914
Modified:
trunk/varnish-cache/bin/varnishd/cache_shmlog.c
trunk/varnish-cache/lib/libvarnishapi/vsl_log.c
Log:
Clean up and unify shmlog writing in the worker process.
Always wrap the log on worker process startup.
Detect such wraps in libvarnishapi.
Modified: trunk/varnish-cache/bin/varnishd/cache_shmlog.c
===================================================================
--- trunk/varnish-cache/bin/varnishd/cache_shmlog.c 2010-06-06 12:39:15 UTC (rev 4913)
+++ trunk/varnish-cache/bin/varnishd/cache_shmlog.c 2010-06-06 13:14:02 UTC (rev 4914)
@@ -50,23 +50,25 @@
return (((type & 0xff) << 24) | length);
}
-#define LOCKSHM(foo) \
- do { \
- if (pthread_mutex_trylock(foo)) { \
- AZ(pthread_mutex_lock(foo)); \
- VSL_stats->shm_cont++; \
- } \
- } while (0);
+/*--------------------------------------------------------------------*/
-#define UNLOCKSHM(foo) AZ(pthread_mutex_unlock(foo))
+static inline void
+vsl_hdr(enum shmlogtag tag, uint32_t *p, unsigned len, unsigned id)
+{
+ assert(((uintptr_t)p & 0x3) == 0);
+
+ p[1] = id;
+ VMB();
+ p[0] = vsl_w0(tag, len);
+}
+
+/*--------------------------------------------------------------------*/
+
static void
vsl_wrap(void)
{
- assert(vsl_log_nxt < vsl_log_end);
- assert(((uintptr_t)vsl_log_nxt & 0x3) == 0);
-
vsl_log_start[1] = vsl_w0(SLT_ENDMARKER, 0);
do
vsl_log_start[0]++;
@@ -77,46 +79,39 @@
VSL_stats->shm_cycles++;
}
-/*--------------------------------------------------------------------*/
-
-static inline void
-vsl_hdr(enum shmlogtag tag, uint32_t *p, unsigned len, unsigned id)
-{
-
- assert(((uintptr_t)p & 0x3) == 0);
-
- p[1] = id;
- VMB();
- p[0] = vsl_w0(tag, len);
-}
-
/*--------------------------------------------------------------------
* Reserve bytes for a record, wrap if necessary
*/
static uint32_t *
-vsl_get(unsigned len)
+vsl_get(unsigned len, unsigned records, unsigned flushes)
{
uint32_t *p;
- uint32_t u;
+ if (pthread_mutex_trylock(&vsl_mtx)) {
+ AZ(pthread_mutex_lock(&vsl_mtx));
+ VSL_stats->shm_cont++;
+ }
assert(vsl_log_nxt < vsl_log_end);
assert(((uintptr_t)vsl_log_nxt & 0x3) == 0);
- u = VSL_WORDS(len);
+ VSL_stats->shm_writes++;
+ VSL_stats->shm_flushes += flushes;
+ VSL_stats->shm_records += records;
/* Wrap if necessary */
- if (VSL_NEXT(vsl_log_nxt, len) >= vsl_log_end)
+ if (VSL_NEXT(vsl_log_nxt, len) >= vsl_log_end)
vsl_wrap();
p = vsl_log_nxt;
vsl_log_nxt = VSL_NEXT(vsl_log_nxt, len);
+ *vsl_log_nxt = vsl_w0(SLT_ENDMARKER, 0);
+
assert(vsl_log_nxt < vsl_log_end);
assert(((uintptr_t)vsl_log_nxt & 0x3) == 0);
+ AZ(pthread_mutex_unlock(&vsl_mtx));
- *vsl_log_nxt = vsl_w0(SLT_ENDMARKER, 0);
- printf("GET %p -> %p\n", p, vsl_log_nxt);
return (p);
}
@@ -137,12 +132,7 @@
if (len > mlen)
len = mlen;
- /* Only hold the lock while we find our space */
- LOCKSHM(&vsl_mtx);
- VSL_stats->shm_writes++;
- VSL_stats->shm_records++;
- p = vsl_get(len);
- UNLOCKSHM(&vsl_mtx);
+ p = vsl_get(len, 1, 0);
memcpy(p + 2, b, len);
vsl_hdr(tag, p, len, id);
@@ -189,12 +179,7 @@
assert(l >= 8);
- LOCKSHM(&vsl_mtx);
- VSL_stats->shm_flushes += overflow;
- VSL_stats->shm_writes++;
- VSL_stats->shm_records += w->wlr;
- p = vsl_get(l - 8);
- UNLOCKSHM(&vsl_mtx);
+ p = vsl_get(l - 8, w->wlr, overflow);
memcpy(p + 1, w->wlb + 1, l - 4);
VWMB();
@@ -281,7 +266,8 @@
{
AZ(pthread_mutex_init(&vsl_mtx, NULL));
- loghead->starttime = TIM_real();
+ vsl_wrap();
+ loghead->starttime = (intmax_t)TIM_real();
loghead->panicstr[0] = '\0';
memset(VSL_stats, 0, sizeof *VSL_stats);
loghead->child_pid = getpid();
Modified: trunk/varnish-cache/lib/libvarnishapi/vsl_log.c
===================================================================
--- trunk/varnish-cache/lib/libvarnishapi/vsl_log.c 2010-06-06 12:39:15 UTC (rev 4913)
+++ trunk/varnish-cache/lib/libvarnishapi/vsl_log.c 2010-06-06 13:14:02 UTC (rev 4914)
@@ -90,6 +90,7 @@
unsigned w, l;
uint8_t t;
int i;
+ uint32_t seq;
CHECK_OBJ_NOTNULL(vd, VSL_MAGIC);
if (vd->r_fd != -1) {
@@ -110,23 +111,28 @@
*pp = vd->rbuf;
return (1);
}
+ seq = vd->log_start[0];
for (w = 0; w < TIMEOUT_USEC;) {
t = VSL_TAG(vd->log_ptr);
- if (t == SLT_WRAPMARKER) {
+
+ if (t != SLT_ENDMARKER) {
+ *pp = vd->log_ptr;
+ vd->log_ptr = VSL_NEXT(vd->log_ptr, VSL_LEN(vd->log_ptr));
+ return (1);
+ }
+
+ if (t == SLT_WRAPMARKER || vd->log_start[0] != seq) {
vd->log_ptr = vd->log_start + 1;
+ seq = vd->log_start[0];
continue;
}
- if (t == SLT_ENDMARKER) {
- /* XXX: check log_start[0] */
- if (vd->flags & F_NON_BLOCKING)
- return (-1);
- w += SLEEP_USEC;
- usleep(SLEEP_USEC);
- continue;
- }
- *pp = vd->log_ptr;
- vd->log_ptr = VSL_NEXT(vd->log_ptr, VSL_LEN(vd->log_ptr));
- return (1);
+
+
+ /* XXX: check log_start[0] */
+ if (vd->flags & F_NON_BLOCKING)
+ return (-1);
+ w += SLEEP_USEC;
+ usleep(SLEEP_USEC);
}
*pp = NULL;
return (0);
More information about the varnish-commit mailing list