[master] 5810817d3 Fix unnecessary log buffer flushing
Dridi Boukelmoune
dridi.boukelmoune at gmail.com
Mon Nov 29 14:48:07 UTC 2021
commit 5810817d37f134d23c0a1f56d9fac560d1a94c0b
Author: Martin Blix Grydeland <martin at varnish-software.com>
Date: Mon Oct 2 17:18:23 2017 +0200
Fix unnecessary log buffer flushing
When vsl_reclen (maximum length of a record) is close to vsl_buffer,
the vsl buffer is flushed to the main shared memory log for every
record. This is because we test if there is space for vsl_reclen, not
the actual size of the formatted log record (which likely is much
smaller).
Fix this by doing the formatting in up to two stages. If the first
stage shows that we overflowed, but would not have overflowed
vsl_reclen if there was more room in the buffer, then flush and redo
the formatting. This limits the extra work of formatting twice only to
the one record that happened to overflow the buffer.
diff --git a/bin/varnishd/cache/cache_shmlog.c b/bin/varnishd/cache/cache_shmlog.c
index b301c5a2f..0db4d5284 100644
--- a/bin/varnishd/cache/cache_shmlog.c
+++ b/bin/varnishd/cache/cache_shmlog.c
@@ -409,6 +409,8 @@ VSLbv(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, va_list ap)
const char *u, *f;
unsigned n, mlen;
txt t;
+ va_list ap2;
+ int first;
vsl_sanity(vsl);
AN(fmt);
@@ -437,14 +439,38 @@ VSLbv(struct vsl_log *vsl, enum VSL_tag_e tag, const char *fmt, va_list ap)
return;
}
- mlen = cache_param->vsl_reclen;
+ assert(vsl->wlp <= vsl->wle);
- /* Flush if we cannot fit a full size record */
- if (VSL_END(vsl->wlp, mlen + 1) >= vsl->wle)
+ /* Flush if we can't fit any bytes */
+ if (vsl->wle - vsl->wlp <= VSL_OVERHEAD)
VSL_Flush(vsl, 1);
- p = VSL_DATA(vsl->wlp);
- n = vsnprintf(p, mlen, fmt, ap);
+ /* Do the vsnprintf formatting in one or two stages. If the first
+ stage shows that we overflowed, and the available space to work
+ with was less than vsl_reclen, flush and do the formatting
+ again. */
+ first = 1;
+ while (1) {
+ assert(vsl->wle - vsl->wlp > VSL_OVERHEAD);
+ mlen = VSL_BYTES((vsl->wle - vsl->wlp) - VSL_OVERHEAD);
+ if (mlen > cache_param->vsl_reclen)
+ mlen = cache_param->vsl_reclen;
+ assert(mlen > 0);
+ assert(VSL_END(vsl->wlp, mlen) <= vsl->wle);
+ p = VSL_DATA(vsl->wlp);
+ va_copy(ap2, ap);
+ n = vsnprintf(p, mlen, fmt, ap2);
+ va_end(ap2);
+
+ if (first && n >= mlen && mlen < cache_param->vsl_reclen) {
+ first = 0;
+ VSL_Flush(vsl, 1);
+ continue;
+ }
+
+ break;
+ }
+
if (n > mlen - 1)
n = mlen - 1; /* we truncate long fields */
p[n++] = '\0'; /* NUL-terminated */
diff --git a/include/vapi/vsl_int.h b/include/vapi/vsl_int.h
index f4a0c86b8..dbd043908 100644
--- a/include/vapi/vsl_int.h
+++ b/include/vapi/vsl_int.h
@@ -67,19 +67,20 @@
#define VSL_IDENTMASK (~(3U<<30))
#define VSL_LENMASK 0xffff
+#define VSL_OVERHEAD 2
#define VSL_WORDS(len) (((len) + 3) / 4)
#define VSL_BYTES(words) ((words) * 4)
-#define VSL_END(ptr, len) ((ptr) + 2 + VSL_WORDS(len))
+#define VSL_END(ptr, len) ((ptr) + VSL_OVERHEAD + VSL_WORDS(len))
#define VSL_NEXT(ptr) VSL_END(ptr, VSL_LEN(ptr))
#define VSL_LEN(ptr) ((ptr)[0] & VSL_LENMASK)
#define VSL_TAG(ptr) ((enum VSL_tag_e)((ptr)[0] >> 24))
#define VSL_ID(ptr) (((ptr)[1]) & VSL_IDENTMASK)
#define VSL_CLIENT(ptr) (((ptr)[1]) & VSL_CLIENTMARKER)
#define VSL_BACKEND(ptr) (((ptr)[1]) & VSL_BACKENDMARKER)
-#define VSL_DATA(ptr) ((char*)((ptr)+2))
-#define VSL_CDATA(ptr) ((const char*)((ptr)+2))
+#define VSL_DATA(ptr) ((char*)((ptr)+VSL_OVERHEAD))
+#define VSL_CDATA(ptr) ((const char*)((ptr)+VSL_OVERHEAD))
#define VSL_BATCHLEN(ptr) ((ptr)[1])
-#define VSL_BATCHID(ptr) (VSL_ID((ptr) + 2))
+#define VSL_BATCHID(ptr) (VSL_ID((ptr) + VSL_OVERHEAD))
#define VSL_ENDMARKER (((uint32_t)SLT__Reserved << 24) | 0x454545) /* "EEE" */
#define VSL_WRAPMARKER (((uint32_t)SLT__Reserved << 24) | 0x575757) /* "WWW" */
More information about the varnish-commit
mailing list