From phk at FreeBSD.org Tue Jul 2 11:41:06 2024 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Tue, 2 Jul 2024 11:41:06 +0000 (UTC) Subject: [master] 2e91455d6 Sync libvgz with github/madler/zlib Message-ID: <20240702114106.8F84411914C@lists.varnish-cache.org> commit 2e91455d6907b80938262d17697e55c4827f79fb Author: Poul-Henning Kamp Date: Tue Jul 2 11:40:26 2024 +0000 Sync libvgz with github/madler/zlib diff --git a/lib/libvgz/deflate.c b/lib/libvgz/deflate.c index a2b341d98..05a2d4517 100644 --- a/lib/libvgz/deflate.c +++ b/lib/libvgz/deflate.c @@ -501,7 +501,7 @@ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method, */ s->pending_buf = (uchf *) ZALLOC(strm, s->lit_bufsize, LIT_BUFS); - s->pending_buf_size = (ulg)s->lit_bufsize * LIT_BUFS; + s->pending_buf_size = (ulg)s->lit_bufsize * 4; // Pretty sure this should be LIT_BUFS /phk if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || s->pending_buf == Z_NULL) { @@ -735,6 +735,14 @@ int ZEXPORT deflatePending(z_streamp strm, unsigned *pending, int *bits) { return Z_OK; } +/* ========================================================================= */ +int ZEXPORT deflateUsed(z_streamp strm, int *bits) { + if (deflateStateCheck(strm)) return Z_STREAM_ERROR; + if (bits != Z_NULL) + *bits = strm->state->bi_used; + return Z_OK; +} + /* ========================================================================= */ int ZEXPORT deflatePrime(z_streamp strm, int bits, int value) { deflate_state *s; @@ -1743,7 +1751,7 @@ local block_state deflate_stored(deflate_state *s, int flush) { s->strm->total_out += len; } } while (last == 0); - if (last) + if (last) s->strm->stop_bit = (s->strm->total_out + s->pending) * 8 + s->bi_valid; diff --git a/lib/libvgz/deflate.h b/lib/libvgz/deflate.h index 720dca7e4..4d93b2acf 100644 --- a/lib/libvgz/deflate.h +++ b/lib/libvgz/deflate.h @@ -271,6 +271,9 @@ typedef struct internal_state { /* Number of valid bits in bi_buf. All bits above the last valid bit * are always zero. */ + int bi_used; + /* Last number of used bits when going to a byte boundary. 
+ */ ulg high_water; /* High water mark offset in window for initialized bytes -- bytes above diff --git a/lib/libvgz/inffast.c b/lib/libvgz/inffast.c index 1f49bd5a5..f7b550b8b 100644 --- a/lib/libvgz/inffast.c +++ b/lib/libvgz/inffast.c @@ -155,7 +155,7 @@ void ZLIB_INTERNAL inflate_fast(z_streamp strm, unsigned start) { dist += (unsigned)hold & ((1U << op) - 1); #ifdef INFLATE_STRICT if (dist > dmax) { - strm->msg = "invalid distance too far back"; + strm->msg = (z_const char *)"invalid distance too far back"; state->mode = BAD; break; } @@ -169,7 +169,7 @@ void ZLIB_INTERNAL inflate_fast(z_streamp strm, unsigned start) { if (op > whave) { if (state->sane) { strm->msg = - "invalid distance too far back"; + (z_const char *)"invalid distance too far back"; state->mode = BAD; break; } @@ -265,7 +265,7 @@ void ZLIB_INTERNAL inflate_fast(z_streamp strm, unsigned start) { goto dodist; } else { - strm->msg = "invalid distance code"; + strm->msg = (z_const char *)"invalid distance code"; state->mode = BAD; break; } @@ -280,7 +280,7 @@ void ZLIB_INTERNAL inflate_fast(z_streamp strm, unsigned start) { break; } else { - strm->msg = "invalid literal/length code"; + strm->msg = (z_const char *)"invalid literal/length code"; state->mode = BAD; break; } diff --git a/lib/libvgz/inflate.c b/lib/libvgz/inflate.c index 07a5f36c9..5c7494f47 100644 --- a/lib/libvgz/inflate.c +++ b/lib/libvgz/inflate.c @@ -647,12 +647,12 @@ int ZEXPORT inflate(z_streamp strm, int flush) { if ( #endif ((BITS(8) << 8) + (hold >> 8)) % 31) { - strm->msg = "incorrect header check"; + strm->msg = (z_const char *)"incorrect header check"; state->mode = BAD; break; } if (BITS(4) != Z_DEFLATED) { - strm->msg = "unknown compression method"; + strm->msg = (z_const char *)"unknown compression method"; state->mode = BAD; break; } @@ -661,7 +661,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { if (state->wbits == 0) state->wbits = len; if (len > 15 || len > state->wbits) { - strm->msg = "invalid window size"; + strm->msg = (z_const char *)"invalid window size"; state->mode = BAD; break; } @@ -677,12 +677,12 @@ int ZEXPORT inflate(z_streamp strm, int flush) { NEEDBITS(16); state->flags = (int)(hold); if ((state->flags & 0xff) != Z_DEFLATED) { - strm->msg = "unknown compression method"; + strm->msg = (z_const char *)"unknown compression method"; state->mode = BAD; break; } if (state->flags & 0xe000) { - strm->msg = "unknown header flags set"; + strm->msg = (z_const char *)"unknown header flags set"; state->mode = BAD; break; } @@ -798,7 +798,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { if (state->flags & 0x0200) { NEEDBITS(16); if ((state->wrap & 4) && hold != (state->check & 0xffff)) { - strm->msg = "header crc mismatch"; + strm->msg = (z_const char *)"header crc mismatch"; state->mode = BAD; break; } @@ -865,7 +865,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { state->mode = TABLE; break; case 3: - strm->msg = "invalid block type"; + strm->msg = (z_const char *)"invalid block type"; state->mode = BAD; } DROPBITS(2); @@ -874,7 +874,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { BYTEBITS(); /* go to byte boundary */ NEEDBITS(32); if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { - strm->msg = "invalid stored block lengths"; + strm->msg = (z_const char *)"invalid stored block lengths"; state->mode = BAD; break; } @@ -915,7 +915,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { DROPBITS(4); #ifndef PKZIP_BUG_WORKAROUND if (state->nlen > 286 || state->ndist > 30) { - strm->msg = "too many length or distance 
symbols"; + strm->msg = (z_const char *)"too many length or distance symbols"; state->mode = BAD; break; } @@ -938,7 +938,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { ret = inflate_table(CODES, state->lens, 19, &(state->next), &(state->lenbits), state->work); if (ret) { - strm->msg = "invalid code lengths set"; + strm->msg = (z_const char *)"invalid code lengths set"; state->mode = BAD; break; } @@ -962,7 +962,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { NEEDBITS(here.bits + 2); DROPBITS(here.bits); if (state->have == 0) { - strm->msg = "invalid bit length repeat"; + strm->msg = (z_const char *)"invalid bit length repeat"; state->mode = BAD; break; } @@ -985,7 +985,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { DROPBITS(7); } if (state->have + copy > state->nlen + state->ndist) { - strm->msg = "invalid bit length repeat"; + strm->msg = (z_const char *)"invalid bit length repeat"; state->mode = BAD; break; } @@ -999,7 +999,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { /* check for end-of-block code (better have one) */ if (state->lens[256] == 0) { - strm->msg = "invalid code -- missing end-of-block"; + strm->msg = (z_const char *)"invalid code -- missing end-of-block"; state->mode = BAD; break; } @@ -1013,7 +1013,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), &(state->lenbits), state->work); if (ret) { - strm->msg = "invalid literal/lengths set"; + strm->msg = (z_const char *)"invalid literal/lengths set"; state->mode = BAD; break; } @@ -1022,7 +1022,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, &(state->next), &(state->distbits), state->work); if (ret) { - strm->msg = "invalid distances set"; + strm->msg = (z_const char *)"invalid distances set"; state->mode = BAD; break; } @@ -1076,7 +1076,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { break; } if (here.op & 64) { - strm->msg = "invalid literal/length code"; + strm->msg = (z_const char *)"invalid literal/length code"; state->mode = BAD; break; } @@ -1114,7 +1114,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { DROPBITS(here.bits); state->back += here.bits; if (here.op & 64) { - strm->msg = "invalid distance code"; + strm->msg = (z_const char *)"invalid distance code"; state->mode = BAD; break; } @@ -1131,7 +1131,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { } #ifdef INFLATE_STRICT if (state->offset > state->dmax) { - strm->msg = "invalid distance too far back"; + strm->msg = (z_const char *)"invalid distance too far back"; state->mode = BAD; break; } @@ -1146,7 +1146,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { copy = state->offset - copy; if (copy > state->whave) { if (state->sane) { - strm->msg = "invalid distance too far back"; + strm->msg = (z_const char *)"invalid distance too far back"; state->mode = BAD; break; } @@ -1205,7 +1205,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { state->flags ? 
hold : #endif ZSWAP32(hold)) != state->check) { - strm->msg = "incorrect data check"; + strm->msg = (z_const char *)"incorrect data check"; state->mode = BAD; break; } @@ -1219,7 +1219,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) { if (state->wrap && state->flags) { NEEDBITS(32); if ((state->wrap & 4) && hold != (state->total & 0xffffffff)) { - strm->msg = "incorrect length check"; + strm->msg = (z_const char *)"incorrect length check"; state->mode = BAD; break; } diff --git a/lib/libvgz/trees.c b/lib/libvgz/trees.c index bbaa24870..76e45b36d 100644 --- a/lib/libvgz/trees.c +++ b/lib/libvgz/trees.c @@ -184,6 +184,7 @@ local void bi_windup(deflate_state *s) { } else if (s->bi_valid > 0) { put_byte(s, (Byte)s->bi_buf); } + s->bi_used = ((s->bi_valid - 1) & 7) + 1; s->bi_buf = 0; s->bi_valid = 0; #ifdef ZLIB_DEBUG @@ -466,6 +467,7 @@ void ZLIB_INTERNAL _tr_init(deflate_state *s) { s->bi_buf = 0; s->bi_valid = 0; + s->bi_used = 0; #ifdef ZLIB_DEBUG s->compressed_len = 0L; s->bits_sent = 0L; diff --git a/lib/libvgz/vgz.h b/lib/libvgz/vgz.h index 97b140531..e339f0e10 100644 --- a/lib/libvgz/vgz.h +++ b/lib/libvgz/vgz.h @@ -602,11 +602,11 @@ ZEXTERN int ZEXPORT deflateInit2 (z_streamp strm, Z_RLE is almost as fast as Z_HUFFMAN_ONLY, but should give better compression for PNG image data than Huffman only. The degree of string matching from most to none is: Z_DEFAULT_STRATEGY, Z_FILTERED, Z_RLE, then - Z_HUFFMAN. The strategy parameter affects the compression ratio but never - the correctness of the compressed output, even if it is not set optimally - for the given data. Z_FIXED uses the default string matching, but prevents - the use of dynamic Huffman codes, allowing for a simpler decoder for special - applications. + Z_HUFFMAN_ONLY. The strategy parameter affects the compression ratio but + never the correctness of the compressed output, even if it is not set + optimally for the given data. Z_FIXED uses the default string matching, but + prevents the use of dynamic Huffman codes, allowing for a simpler decoder + for special applications. deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid @@ -796,6 +796,18 @@ ZEXTERN int ZEXPORT deflatePending (z_streamp strm, stream state was inconsistent. */ +ZEXTERN int ZEXPORT deflateUsed(z_streamp strm, + int *bits); +/* + deflateUsed() returns in *bits the most recent number of deflate bits used + in the last byte when flushing to a byte boundary. The result is in 1..8, or + 0 if there has not yet been a flush. This helps determine the location of + the last bit of a deflate stream. + + deflateUsed returns Z_OK if success, or Z_STREAM_ERROR if the source + stream state was inconsistent. 
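A minimal usage sketch for the deflateUsed() call documented above, assuming the ordinary zlib calling convention that libvgz keeps; buffer sizes, the input string and the header name are illustrative only:

#include <stdio.h>
#include <string.h>
#include "vgz.h"                      /* zlib.h when built against stock zlib */

int
main(void)
{
	z_stream strm;
	unsigned char in[] = "hello, hello, hello world";
	unsigned char out[256];
	int bits;

	memset(&strm, 0, sizeof strm);   /* zalloc/zfree/opaque = defaults */
	if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK)
		return (1);
	strm.next_in = in;
	strm.avail_in = sizeof in - 1;
	strm.next_out = out;
	strm.avail_out = sizeof out;
	if (deflate(&strm, Z_FINISH) != Z_STREAM_END)  /* ends on a byte boundary */
		return (1);
	/* 1..8 bits of the final output byte carry deflate data */
	if (deflateUsed(&strm, &bits) == Z_OK)
		printf("stream ends %d bit(s) into the last byte\n", bits);
	(void)deflateEnd(&strm);
	return (0);
}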
+ */ + ZEXTERN int ZEXPORT deflatePrime (z_streamp strm, int bits, int value); diff --git a/lib/libvgz/zconf.h b/lib/libvgz/zconf.h index ea3b6d685..4a52aed7c 100644 --- a/lib/libvgz/zconf.h +++ b/lib/libvgz/zconf.h @@ -59,6 +59,7 @@ # define deflateSetDictionary z_deflateSetDictionary # define deflateSetHeader z_deflateSetHeader # define deflateTune z_deflateTune +# define deflateUsed z_deflateUsed # define deflate_copyright z_deflate_copyright # define get_crc_table z_get_crc_table # ifndef Z_SOLO @@ -506,7 +507,7 @@ typedef uLong FAR uLongf; #endif #ifndef z_off_t -# define z_off_t long +# define z_off_t long long #endif #if !defined(_WIN32) && defined(Z_LARGE64) diff --git a/tools/cmp_libz.sh b/tools/cmp_libz.sh deleted file mode 100644 index b9c5c6b0d..000000000 --- a/tools/cmp_libz.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/sh - -# This script compares libvgz to zlib in FreeBSD source tree - -LZ=/usr/src/contrib/zlib - -if [ ! -d lib/libvgz ] ; then - echo "Run this from to of tree" 1>&2 - exit 2 -fi - -for i in lib/libvgz/*.[ch] -do - b=`basename $i` - if [ "$b" == "vgz.h" ] ; then - b="zlib.h" - fi - if [ -f ${LZ}/$b ] ; then - echo "==== $b" - sed ' - s/vgz.h/zlib.h/ - /strm->msg =/s/"/(char *)"/ - ' $i | - diff -u ${LZ}/$b - - else - echo "#### $b #### NOT FOUND ####" - fi -done diff --git a/tools/cmp_zlib.sh b/tools/cmp_zlib.sh new file mode 100644 index 000000000..a3acbd2b8 --- /dev/null +++ b/tools/cmp_zlib.sh @@ -0,0 +1,30 @@ +#!/bin/sh + +# Compare libvgz with github/madler/zlib + +LZ=/tmp/zlib + +if [ "${LZ}" = "/tmp/zlib" -a ! -d ${LZ} ] ; then + rm -rf ${LZ} + git clone https://github.com/madler/zlib ${LZ} +else + (cd ${LZ} && git pull) +fi + +for i in varnish-cache/lib/libvgz/*.[ch] +do + b=`basename $i` + if [ "$b" == "vgz.h" ] ; then + b="zlib.h" + fi + if [ -f ${LZ}/$b ] ; then + echo "#################################### $b" + sed ' + s/vgz.h/zlib.h/ + # /strm->msg =/s/"/(char *)"/ + ' $i | + diff -wu - ${LZ}/$b + else + echo "#### $b #### NOT FOUND ####" + fi +done From phk at FreeBSD.org Tue Jul 2 12:05:06 2024 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Tue, 2 Jul 2024 12:05:06 +0000 (UTC) Subject: [master] 36be307a1 fix path to be top of github repos Message-ID: <20240702120506.6D539119F08@lists.varnish-cache.org> commit 36be307a1f0efe5984fb1a01b7855e6a57c046b7 Author: Poul-Henning Kamp Date: Tue Jul 2 12:01:49 2024 +0000 fix path to be top of github repos diff --git a/tools/cmp_zlib.sh b/tools/cmp_zlib.sh index a3acbd2b8..038497dbc 100644 --- a/tools/cmp_zlib.sh +++ b/tools/cmp_zlib.sh @@ -11,7 +11,7 @@ else (cd ${LZ} && git pull) fi -for i in varnish-cache/lib/libvgz/*.[ch] +for i in lib/libvgz/*.[ch] do b=`basename $i` if [ "$b" == "vgz.h" ] ; then From dridi.boukelmoune at gmail.com Wed Jul 3 09:32:08 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 09:32:08 +0000 (UTC) Subject: [master] f72dcc886 Allow VEV_Stop() called from within a VEV callback Message-ID: <20240703093208.15D4F11C59D@lists.varnish-cache.org> commit f72dcc8860547a3fa48f43d6ea0a3d65c1e8a5b3 Author: Dag Haavi Finstad Date: Fri Jun 14 12:58:02 2024 +0200 Allow VEV_Stop() called from within a VEV callback The routine in VEV looping to report on all of the fd events gets confused if an event is removed while in another callback event. This will typically cause the mgt event loop to just spin, making the manager unresponsive. Original patch and commit message by Martin Blix Grydeland. 
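A sketch (outside the diff) of the situation this change makes legal: the callback of one fd event stops and frees a sibling event while both have pending revents in the same VEV_Once() pass. VEV_Stop(), VEV_Once() and the callback shape follow the diff below; the allocation/registration names (VEV_New(), VEV_Alloc(), VEV_Start(), fd_flags, VEV__RD) are assumptions to be checked against vev.h.

#include <stdlib.h>
#include <unistd.h>

#include "vdef.h"
#include "vas.h"
#include "vev.h"

static struct vev_root *evb;
static struct vev *sibling;

static int
cb_noop(const struct vev *e, int what)
{
	(void)e;
	(void)what;
	return (0);
}

static int
cb_stop_sibling(const struct vev *e, int what)
{
	(void)e;
	(void)what;
	if (sibling != NULL) {
		/* Removing an event whose revents are still pending in the
		 * same poll round used to confuse the reporting loop. */
		VEV_Stop(evb, sibling);
		free(sibling);
		sibling = NULL;
	}
	return (0);		/* keep this event registered */
}

int
main(void)
{
	int pa[2], pb[2];
	struct vev *e;

	AZ(pipe(pa));
	AZ(pipe(pb));
	evb = VEV_New();	/* assumed allocator names, see vev.h */
	AN(evb);

	e = VEV_Alloc();
	AN(e);
	e->fd = pa[0];
	e->fd_flags = VEV__RD;
	e->callback = cb_stop_sibling;
	AZ(VEV_Start(evb, e));

	sibling = VEV_Alloc();
	AN(sibling);
	sibling->fd = pb[0];
	sibling->fd_flags = VEV__RD;
	sibling->callback = cb_noop;
	AZ(VEV_Start(evb, sibling));

	/* Make both fds readable in the same poll round, then run one pass */
	(void)write(pa[1], "x", 1);
	(void)write(pb[1], "x", 1);
	(void)VEV_Once(evb);

	VEV_Destroy(&evb);
	return (0);
}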
diff --git a/lib/libvarnish/vev.c b/lib/libvarnish/vev.c index aacdbbb23..8ce5837fb 100644 --- a/lib/libvarnish/vev.c +++ b/lib/libvarnish/vev.c @@ -67,6 +67,7 @@ static int vev_nsig; struct vev_root { unsigned magic; #define VEV_BASE_MAGIC 0x477bcf3d + unsigned n_fd_events; struct pollfd *pfd; struct vev **pev; unsigned npfd; @@ -325,6 +326,12 @@ VEV_Stop(struct vev_root *evb, struct vev *e) assert(e->__binheap_idx == VBH_NOIDX); evb->lpfd--; + if (e->fd_events) { + assert(evb->n_fd_events > 0); + evb->n_fd_events--; + e->fd_events = 0; + } + if (e->sig > 0) { assert(e->sig < vev_nsig); es = &vev_sigs[e->sig]; @@ -409,6 +416,7 @@ VEV_Once(struct vev_root *evb) struct vev *e; int i, k, tmo, retval = 1; unsigned u; + int progress; CHECK_OBJ_NOTNULL(evb, VEV_BASE_MAGIC); assert(pthread_equal(evb->thread, pthread_self())); @@ -447,16 +455,18 @@ VEV_Once(struct vev_root *evb) return (vev_sched_timeout(evb, e, t)); } - k = 0; + AZ(evb->n_fd_events); for (u = 1; u < evb->lpfd; u++) { + AZ(evb->pev[u]->fd_events); evb->pev[u]->fd_events = evb->pfd[u].revents; if (evb->pev[u]->fd_events) - k++; + evb->n_fd_events++; } - assert(k == i); + assert(evb->n_fd_events == i); DBG(evb, "EVENTS %d\n", i); - while (i > 0) { + while (evb->n_fd_events > 0) { + progress = 0; for (u = VBH_NOIDX + 1; u < evb->lpfd; u++) { e = evb->pev[u]; if (e->fd_events == 0) @@ -465,7 +475,9 @@ VEV_Once(struct vev_root *evb) e, u, e->fd, e->fd_events, i); k = e->callback(e, e->fd_events); e->fd_events = 0; - i--; + assert(evb->n_fd_events > 0); + evb->n_fd_events--; + progress++; if (k) { VEV_Stop(evb, e); free(e); @@ -473,7 +485,8 @@ VEV_Once(struct vev_root *evb) if (k < 0) retval = k; } + assert(progress > 0); } - AZ(i); + return (retval); } From dridi.boukelmoune at gmail.com Wed Jul 3 09:33:05 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 09:33:05 +0000 (UTC) Subject: [master] 99770c310 varnishtest: added flag indicating if varnish output logger is running Message-ID: <20240703093305.4A1B711C793@lists.varnish-cache.org> commit 99770c310d12270e0f554c0648cf220a21f79704 Author: Stephane Cance Date: Thu May 2 13:20:24 2024 +0200 varnishtest: added flag indicating if varnish output logger is running diff --git a/bin/varnishtest/vtc_varnish.c b/bin/varnishtest/vtc_varnish.c index 6f8223859..0564ec61a 100644 --- a/bin/varnishtest/vtc_varnish.c +++ b/bin/varnishtest/vtc_varnish.c @@ -71,6 +71,7 @@ struct varnish { pthread_t tp; pthread_t tp_vsl; + int tp_started; int expect_exit; @@ -473,6 +474,8 @@ varnish_launch(struct varnish *v) v->fds[0] = v->fds[2]; v->fds[2] = v->fds[3] = -1; VSB_destroy(&vsb); + AZ(v->tp_started); + v->tp_started = 1; PTOK(pthread_create(&v->tp, NULL, varnish_thread, v)); /* Wait for the varnish to call home */ @@ -682,8 +685,10 @@ varnish_cleanup(struct varnish *v) closefd(&v->fds[1]); /* Wait until STDOUT+STDERR closes */ + AN(v->tp_started); PTOK(pthread_join(v->tp, &p)); closefd(&v->fds[0]); + v->tp_started = 0; /* Pick up the VSL thread */ PTOK(pthread_join(v->tp_vsl, &p)); From dridi.boukelmoune at gmail.com Wed Jul 3 09:33:05 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 09:33:05 +0000 (UTC) Subject: [master] ce3e446f4 varnishtest: make varnish fatal error wait for output Message-ID: <20240703093305.66C3211C797@lists.varnish-cache.org> commit ce3e446f4e088059ee6e532758da3f6e10501b3d Author: Stephane Cance Date: Thu May 2 13:33:37 2024 +0200 varnishtest: make varnish fatal error wait for output diff --git 
a/bin/varnishtest/vtc_varnish.c b/bin/varnishtest/vtc_varnish.c index 0564ec61a..d72242196 100644 --- a/bin/varnishtest/vtc_varnish.c +++ b/bin/varnishtest/vtc_varnish.c @@ -97,6 +97,45 @@ struct varnish { static VTAILQ_HEAD(, varnish) varnishes = VTAILQ_HEAD_INITIALIZER(varnishes); +/********************************************************************** + * Fatal condition cleanup + * Invalid to call in any code path not followed by vtc_fatal(). + */ + +static void +varnish_fatal_cleanup(const struct varnish *v) +{ + struct pollfd fd[1]; + int n; + + if (!pthread_equal(pthread_self(), vtc_thread)) + return; + + if (!v->tp_started) + return; + + memset(fd, 0, sizeof(fd)); + fd[0].fd = v->fds[0]; + fd[0].events = POLLIN; + + do { + n = poll(fd, sizeof(fd)/sizeof(fd[0]), 10); + if (n == 1 && (fd[0].revents & (POLLHUP|POLLERR)) != 0) { + PTOK(pthread_join(v->tp, NULL)); + break; + } + + if (n == 1) + usleep(10000); + } while (n > 0); +} + +#define varnish_fatal(v, ...) \ + do { \ + varnish_fatal_cleanup((v)); \ + vtc_fatal((v)->vl, __VA_ARGS__); \ + } while (0) + /********************************************************************** * Ask a question over CLI */ @@ -112,16 +151,16 @@ varnish_ask_cli(const struct varnish *v, const char *cmd, char **repl) vtc_dump(v->vl, 4, "CLI TX", cmd, -1); i = write(v->cli_fd, cmd, strlen(cmd)); if (i != strlen(cmd) && !vtc_stop) - vtc_fatal(v->vl, "CLI write failed (%s) = %u %s", + varnish_fatal(v, "CLI write failed (%s) = %u %s", cmd, errno, strerror(errno)); i = write(v->cli_fd, "\n", 1); if (i != 1 && !vtc_stop) - vtc_fatal(v->vl, "CLI write failed (%s) = %u %s", + varnish_fatal(v, "CLI write failed (%s) = %u %s", cmd, errno, strerror(errno)); } i = VCLI_ReadResult(v->cli_fd, &retval, &r, vtc_maxdur); if (i != 0 && !vtc_stop) - vtc_fatal(v->vl, "CLI failed (%s) = %d %u %s", + varnish_fatal(v, "CLI failed (%s) = %d %u %s", cmd != NULL ? 
cmd : "NULL", i, retval, r); vtc_log(v->vl, 3, "CLI RX %u", retval); vtc_dump(v->vl, 4, "CLI RX", r, -1); @@ -146,7 +185,7 @@ wait_stopped(const struct varnish *v) while (1) { st = varnish_ask_cli(v, "status", &r); if (st != CLIS_OK) - vtc_fatal(v->vl, + varnish_fatal(v, "CLI status command failed: %u %s", st, r); if (!strcmp(r, "Child in state stopped")) { free(r); @@ -171,17 +210,17 @@ wait_running(const struct varnish *v) vtc_log(v->vl, 3, "wait-running"); st = varnish_ask_cli(v, "status", &r); if (st != CLIS_OK) - vtc_fatal(v->vl, + varnish_fatal(v, "CLI status command failed: %u %s", st, r); if (!strcmp(r, "Child in state stopped")) - vtc_fatal(v->vl, + varnish_fatal(v, "Child stopped before running: %u %s", st, r); if (!strcmp(r, "Child in state running")) { free(r); r = NULL; st = varnish_ask_cli(v, "debug.listen_address", &r); if (st != CLIS_OK) - vtc_fatal(v->vl, + varnish_fatal(v, "CLI status command failed: %u %s", st, r); free(r); break; @@ -399,7 +438,7 @@ varnish_launch(struct varnish *v) /* Create listener socket */ asock = VTCP_listen_on(default_listen_addr, NULL, 1, &err); if (err != NULL) - vtc_fatal(v->vl, "Create CLI listen socket failed: %s", err); + varnish_fatal(v, "Create CLI listen socket failed: %s", err); assert(asock > 0); VTCP_myname(asock, abuf, sizeof abuf, pbuf, sizeof pbuf); @@ -490,15 +529,15 @@ varnish_launch(struct varnish *v) vtc_log(v->vl, 4, "CLIPOLL %d 0x%x 0x%x 0x%x", i, fd[0].revents, fd[1].revents, fd[2].revents); if (i == 0) - vtc_fatal(v->vl, "FAIL timeout waiting for CLI connection"); + varnish_fatal(v, "FAIL timeout waiting for CLI connection"); if (fd[1].revents & POLLHUP) - vtc_fatal(v->vl, "FAIL debug pipe closed"); + varnish_fatal(v, "FAIL debug pipe closed"); if (!(fd[0].revents & POLLIN)) - vtc_fatal(v->vl, "FAIL CLI connection wait failure"); + varnish_fatal(v, "FAIL CLI connection wait failure"); nfd = accept(asock, NULL, NULL); closefd(&asock); if (nfd < 0) - vtc_fatal(v->vl, "FAIL no CLI connection accepted"); + varnish_fatal(v, "FAIL no CLI connection accepted"); v->cli_fd = nfd; @@ -510,7 +549,7 @@ varnish_launch(struct varnish *v) if (vtc_error) return; if (u != CLIS_AUTH) - vtc_fatal(v->vl, "CLI auth demand expected: %u %s", u, r); + varnish_fatal(v, "CLI auth demand expected: %u %s", u, r); bprintf(lbuf, "%s/_.secret", v->workdir); nfd = open(lbuf, O_RDONLY); @@ -528,7 +567,7 @@ varnish_launch(struct varnish *v) if (vtc_error) return; if (u != CLIS_OK) - vtc_fatal(v->vl, "CLI auth command failed: %u %s", u, r); + varnish_fatal(v, "CLI auth command failed: %u %s", u, r); free(r); v->vsm_vsc = VSM_New(); @@ -630,7 +669,7 @@ varnish_start(struct varnish *v) if (vtc_error) return; if (u != CLIS_OK) - vtc_fatal(v->vl, "CLI start command failed: %u %s", u, resp); + varnish_fatal(v, "CLI start command failed: %u %s", u, resp); wait_running(v); free(resp); resp = NULL; @@ -638,7 +677,7 @@ varnish_start(struct varnish *v) if (vtc_error) return; if (u != CLIS_OK) - vtc_fatal(v->vl, "CLI debug.xid command failed: %u %s", + varnish_fatal(v, "CLI debug.xid command failed: %u %s", u, resp); free(resp); resp = NULL; @@ -646,7 +685,7 @@ varnish_start(struct varnish *v) if (vtc_error) return; if (u != CLIS_OK) - vtc_fatal(v->vl, + varnish_fatal(v, "CLI debug.listen_address command failed: %u %s", u, resp); varnish_listen(v, resp); free(resp); @@ -718,7 +757,7 @@ varnish_wait(struct varnish *v) varnish_stop(v); if (varnish_ask_cli(v, "panic.show", NULL) != CLIS_CANT) - vtc_fatal(v->vl, "Unexpected panic"); + varnish_fatal(v, "Unexpected panic"); 
varnish_cleanup(v); } @@ -740,11 +779,11 @@ varnish_cli_json(struct varnish *v, const char *cli) u = varnish_ask_cli(v, cli, &resp); vtc_log(v->vl, 2, "CLI %03u <%s>", u, cli); if (u != CLIS_OK) - vtc_fatal(v->vl, + varnish_fatal(v, "FAIL CLI response %u expected %u", u, CLIS_OK); vj = vjsn_parse(resp, &errptr); if (vj == NULL) - vtc_fatal(v->vl, "FAIL CLI, not good JSON: %s", errptr); + varnish_fatal(v, "FAIL CLI, not good JSON: %s", errptr); vjsn_delete(&vj); free(resp); } @@ -770,14 +809,14 @@ varnish_cli(struct varnish *v, const char *cli, unsigned exp, const char *re) AZ(VRE_error(vsb, err)); AZ(VSB_finish(vsb)); VSB_fini(vsb); - vtc_fatal(v->vl, "Illegal regexp: %s (@%d)", + varnish_fatal(v, "Illegal regexp: %s (@%d)", errbuf, erroff); } } u = varnish_ask_cli(v, cli, &resp); vtc_log(v->vl, 2, "CLI %03u <%s>", u, cli); if (exp != 0 && exp != (unsigned)u) - vtc_fatal(v->vl, "FAIL CLI response %u expected %u", u, exp); + varnish_fatal(v, "FAIL CLI response %u expected %u", u, exp); if (vre != NULL) { err = VRE_match(vre, resp, 0, 0, NULL); if (err < 1) { @@ -785,7 +824,7 @@ varnish_cli(struct varnish *v, const char *cli, unsigned exp, const char *re) AZ(VRE_error(vsb, err)); AZ(VSB_finish(vsb)); VSB_fini(vsb); - vtc_fatal(v->vl, "Expect failed (%s)", errbuf); + varnish_fatal(v, "Expect failed (%s)", errbuf); } VRE_free(&vre); } @@ -819,10 +858,10 @@ varnish_vcl(struct varnish *v, const char *vcl, int fail, char **resp) } if (u == CLIS_OK && fail) { VSB_destroy(&vsb); - vtc_fatal(v->vl, "VCL compilation succeeded expected failure"); + varnish_fatal(v, "VCL compilation succeeded expected failure"); } else if (u != CLIS_OK && !fail) { VSB_destroy(&vsb); - vtc_fatal(v->vl, "VCL compilation failed expected success"); + varnish_fatal(v, "VCL compilation failed expected success"); } else if (fail) vtc_log(v->vl, 2, "VCL compilation failed (as expected)"); VSB_destroy(&vsb); @@ -859,7 +898,7 @@ varnish_vclbackend(struct varnish *v, const char *vcl) if (u != CLIS_OK) { VSB_destroy(&vsb); VSB_destroy(&vsb2); - vtc_fatal(v->vl, "FAIL VCL does not compile"); + varnish_fatal(v, "FAIL VCL does not compile"); } VSB_clear(vsb); VSB_printf(vsb, "vcl.use vcl%d", v->vcl_nbr); @@ -1011,7 +1050,7 @@ varnish_expect(struct varnish *v, char * const *av) continue; if (not) - vtc_fatal(v->vl, "Found (not expected): %s", l); + varnish_fatal(v, "Found (not expected): %s", l); good = -1; if (!strcmp(av[1], "==")) good = (sp.lhs.val == sp.rhs.val); @@ -1021,19 +1060,19 @@ varnish_expect(struct varnish *v, char * const *av) if (!strcmp(av[1], ">=")) good = (sp.lhs.val >= sp.rhs.val); if (!strcmp(av[1], "<=")) good = (sp.lhs.val <= sp.rhs.val); if (good == -1) - vtc_fatal(v->vl, "comparison %s unknown", av[1]); + varnish_fatal(v, "comparison %s unknown", av[1]); if (good) break; } if (good == -1) { - vtc_fatal(v->vl, "VSM error: %s", VSM_Error(v->vsm_vsc)); + varnish_fatal(v, "VSM error: %s", VSM_Error(v->vsm_vsc)); } if (good == -2) { if (not) { vtc_log(v->vl, 2, "not found (as expected): %s", l); return; } - vtc_fatal(v->vl, "stats field %s unknown", + varnish_fatal(v, "stats field %s unknown", sp.lhs.good ? 
sp.rhs.pattern : sp.lhs.pattern); } @@ -1041,7 +1080,7 @@ varnish_expect(struct varnish *v, char * const *av) vtc_log(v->vl, 2, "as expected: %s (%ju) %s %s (%ju)", av[0], sp.lhs.val, av[1], av[2], sp.rhs.val); } else { - vtc_fatal(v->vl, "Not true: %s (%ju) %s %s (%ju)", + varnish_fatal(v, "Not true: %s (%ju) %s %s (%ju)", av[0], sp.lhs.val, av[1], av[2], sp.rhs.val); } } @@ -1272,7 +1311,7 @@ cmd_varnish(CMD_ARGS) AN(av[2]); varnish_vcl(v, av[2], 1, &r); if (strstr(r, av[1]) == NULL) - vtc_fatal(v->vl, + varnish_fatal(v, "Did not find expected string: (\"%s\")", av[1]); else @@ -1356,7 +1395,7 @@ cmd_varnish(CMD_ARGS) vsl_catchup(v); continue; } - vtc_fatal(v->vl, "Unknown varnish argument: %s", *av); + varnish_fatal(v, "Unknown varnish argument: %s", *av); } } From dridi.boukelmoune at gmail.com Wed Jul 3 09:39:04 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 09:39:04 +0000 (UTC) Subject: [master] 40bd73b8f fetch: Apply the debug::flush_head flag to bereq Message-ID: <20240703093904.E9A1711CD6C@lists.varnish-cache.org> commit 40bd73b8fc08ae5d0880b1693da27e46f083e248 Author: Dridi Boukelmoune Date: Fri Apr 8 19:15:50 2022 +0200 fetch: Apply the debug::flush_head flag to bereq diff --git a/bin/varnishd/http1/cache_http1_fetch.c b/bin/varnishd/http1/cache_http1_fetch.c index 1a2ced1e8..5ea77b936 100644 --- a/bin/varnishd/http1/cache_http1_fetch.c +++ b/bin/varnishd/http1/cache_http1_fetch.c @@ -111,6 +111,8 @@ V1F_SendReq(struct worker *wrk, struct busyobj *bo, uint64_t *ctr_hdrbytes, bo, vbf_iter_req_body, 0); } else if (bo->req != NULL && bo->req->req_body_status != BS_NONE) { + if (DO_DEBUG(DBG_FLUSH_HEAD)) + (void)V1L_Flush(wrk); if (do_chunked) V1L_Chunked(wrk); i = VRB_Iterate(wrk, bo->vsl, bo->req, vbf_iter_req_body, bo); From dridi.boukelmoune at gmail.com Wed Jul 3 09:39:05 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 09:39:05 +0000 (UTC) Subject: [master] dd5b7056a fetch: New debug::slow_bereq flag Message-ID: <20240703093905.09CC611CD6F@lists.varnish-cache.org> commit dd5b7056a647f53708acd02d33ff03f3bb85d305 Author: Dridi Boukelmoune Date: Fri Apr 8 19:29:37 2022 +0200 fetch: New debug::slow_bereq flag It is otherwise very challenging to coordinate certain behaviors between a backend fetch and a VTC server for example. The slow_acceptor delay is 2s, which is probably more than needed for a bereq, hence the 1s delay. 
diff --git a/bin/varnishd/http1/cache_http1_fetch.c b/bin/varnishd/http1/cache_http1_fetch.c index 5ea77b936..4ea37e1d3 100644 --- a/bin/varnishd/http1/cache_http1_fetch.c +++ b/bin/varnishd/http1/cache_http1_fetch.c @@ -54,6 +54,8 @@ vbf_iter_req_body(void *priv, unsigned flush, const void *ptr, ssize_t l) CAST_OBJ_NOTNULL(bo, priv, BUSYOBJ_MAGIC); if (l > 0) { + if (DO_DEBUG(DBG_SLOW_BEREQ)) + VTIM_sleep(1.0); (void)V1L_Write(bo->wrk, ptr, l); if (flush && V1L_Flush(bo->wrk) != SC_NULL) return (-1); diff --git a/include/tbl/debug_bits.h b/include/tbl/debug_bits.h index 0e60d2547..869399a6f 100644 --- a/include/tbl/debug_bits.h +++ b/include/tbl/debug_bits.h @@ -52,6 +52,7 @@ DEBUG_BIT(PROCESSORS, processors, "Fetch/Deliver processors") DEBUG_BIT(PROTOCOL, protocol, "Protocol debugging") DEBUG_BIT(VCL_KEEP, vcl_keep, "Keep VCL C and so files") DEBUG_BIT(LCK, lck, "Additional lock statistics") +DEBUG_BIT(SLOW_BEREQ, slow_bereq, "Slow down bereq") #undef DEBUG_BIT /*lint -restore */ From dridi.boukelmoune at gmail.com Wed Jul 3 09:39:05 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 09:39:05 +0000 (UTC) Subject: [master] f4b045031 fetch: A beresp body cannot be BS_TAKEN Message-ID: <20240703093905.2321411CD72@lists.varnish-cache.org> commit f4b045031cbd343fabca4e7d8ef0a7e14c4dbb62 Author: Dridi Boukelmoune Date: Mon Jun 24 17:24:30 2024 +0200 fetch: A beresp body cannot be BS_TAKEN This state is only used to determine whether a req body can be reused for a VCL retry. diff --git a/bin/varnishd/cache/cache_fetch.c b/bin/varnishd/cache/cache_fetch.c index 0f86d0305..b78566a69 100644 --- a/bin/varnishd/cache/cache_fetch.c +++ b/bin/varnishd/cache/cache_fetch.c @@ -141,10 +141,11 @@ Bereq_Rollback(VRT_CTX) bo = ctx->bo; CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC); - if (bo->htc != NULL && - bo->htc->body_status != BS_NONE && - bo->htc->body_status != BS_TAKEN) - bo->htc->doclose = SC_RESP_CLOSE; + if (bo->htc != NULL) { + assert(bo->htc->body_status != BS_TAKEN); + if (bo->htc->body_status != BS_NONE) + bo->htc->doclose = SC_RESP_CLOSE; + } vbf_cleanup(bo); VCL_TaskLeave(ctx, bo->privs); From dridi.boukelmoune at gmail.com Wed Jul 3 09:39:05 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 09:39:05 +0000 (UTC) Subject: [master] dbd9bd12d vrb: Don't cache an empty body Message-ID: <20240703093905.3EABC11CD77@lists.varnish-cache.org> commit dbd9bd12d7e15bd364d23c0f32b1190154c7cc3f Author: Dridi Boukelmoune Date: Fri Apr 8 19:40:55 2022 +0200 vrb: Don't cache an empty body It's either guaranteed to fail a fetch or cache nothing, in the sense of accessing to the request body through a VMOD. It is still possible to end up with an empty body cached, if req.body turned out to be empty and couldn't be predicted. It is no longer possible to actively try caching an empty one. 
diff --git a/bin/varnishd/cache/cache_req_body.c b/bin/varnishd/cache/cache_req_body.c index 51d45471e..f2c894cc0 100644 --- a/bin/varnishd/cache/cache_req_body.c +++ b/bin/varnishd/cache/cache_req_body.c @@ -66,6 +66,7 @@ vrb_pull(struct req *req, ssize_t maxsize, objiterate_f *func, void *priv) CHECK_OBJ_NOTNULL(req->htc, HTTP_CONN_MAGIC); CHECK_OBJ_NOTNULL(req->vfc, VFP_CTX_MAGIC); vfc = req->vfc; + AN(maxsize); req->body_oc = HSH_Private(req->wrk); AN(req->body_oc); @@ -300,7 +301,11 @@ VRB_Cache(struct req *req, ssize_t maxsize) CHECK_OBJ_NOTNULL(req, REQ_MAGIC); assert (req->req_step == R_STP_RECV); - assert(maxsize >= 0); + + if (maxsize <= 0) { + VSLb(req->vsl, SLT_VCL_Error, "Cannot cache an empty req.body"); + return (-1); + } /* * We only allow caching to happen the first time through vcl_recv{} diff --git a/bin/varnishtest/tests/c00055.vtc b/bin/varnishtest/tests/c00055.vtc index 048421967..1656baa61 100644 --- a/bin/varnishtest/tests/c00055.vtc +++ b/bin/varnishtest/tests/c00055.vtc @@ -17,7 +17,8 @@ varnish v1 -vcl+backend { import std; sub vcl_recv { - set req.http.stored = std.cache_req_body(1KB); + set req.http.stored = std.cache_req_body( + std.bytes(req.http.cache, 1KB)); return (pass); } @@ -74,3 +75,45 @@ client c5 { txreq -req POST -hdr "Content-Length: 1025" expect_close } -run + +server s1 { + rxreq + expect req.body == chunked_body + txresp +} -start + +client c6 { + txreq -req POST -nolen -hdr "Transfer-Encoding: chunked" + chunked chunked + chunked _ + chunked body + chunkedlen 0 + rxresp + expect resp.http.stored == true +} -run + +server s1 { + rxreq + expect req.bodylen == 0 + txresp +} -start + +client c7 { + txreq -req POST -nolen -hdr "Transfer-Encoding: chunked" + chunkedlen 0 + rxresp + expect resp.http.stored == true +} -run + +server s1 { + rxreq + expect req.bodylen == 0 + txresp +} -start + +client c8 { + txreq -req POST -nolen -hdr "cache: 0B" -hdr "Transfer-Encoding: chunked" + chunkedlen 0 + rxresp + expect resp.http.stored == false +} -run From dridi.boukelmoune at gmail.com Wed Jul 3 09:39:05 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 09:39:05 +0000 (UTC) Subject: [master] 58ed5c9f6 vrb: Turn BS_CACHED into a request flag Message-ID: <20240703093905.5C35211CD7C@lists.varnish-cache.org> commit 58ed5c9f617d0c11b4cd96f9278e6b5babd833ec Author: Dridi Boukelmoune Date: Tue Apr 12 07:01:28 2022 +0200 vrb: Turn BS_CACHED into a request flag This way we can keep track of how we got the body (until it is taken by a busyobj) separately from the fact that we cached it. 
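Background sketch (outside the diff): the tbl/req_flags.h file touched below is an X-macro table. From memory of the tree, cache.h defines REQ_FLAG to emit a one-bit member and then includes the table, which is how the new entry becomes the req->req_body_cached bit tested in this diff. A generic example of the pattern, with hypothetical names, split across two files:

/* thing_flags.h -- hypothetical table file, one line per flag */
THING_FLAG(body_cached)
THING_FLAG(body_taken)
#undef THING_FLAG

/* thing.c -- each consumer defines THING_FLAG, then includes the table */
#include <stdio.h>

struct thing {
#define THING_FLAG(name)	unsigned	name:1;
#include "thing_flags.h"
};

static void
dump_flags(const struct thing *t)
{
#define THING_FLAG(name)	printf(#name " = %u\n", t->name);
#include "thing_flags.h"
}

int
main(void)
{
	struct thing t = { .body_cached = 1 };

	dump_flags(&t);
	return (0);
}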
diff --git a/bin/varnishd/cache/cache_fetch.c b/bin/varnishd/cache/cache_fetch.c index b78566a69..05dc40ce9 100644 --- a/bin/varnishd/cache/cache_fetch.c +++ b/bin/varnishd/cache/cache_fetch.c @@ -289,15 +289,15 @@ vbf_stp_mkbereq(struct worker *wrk, struct busyobj *bo) bo->ws_bo = WS_Snapshot(bo->ws); HTTP_Clone(bo->bereq, bo->bereq0); - if (bo->req->req_body_status->avail == 0) { - bo->req = NULL; - ObjSetState(bo->wrk, oc, BOS_REQ_DONE); - } else if (bo->req->req_body_status == BS_CACHED) { + if (bo->req->req_body_cached) { AN(bo->req->body_oc); bo->bereq_body = bo->req->body_oc; HSH_Ref(bo->bereq_body); bo->req = NULL; ObjSetState(bo->wrk, oc, BOS_REQ_DONE); + } else if (bo->req->req_body_status->avail == 0) { + bo->req = NULL; + ObjSetState(bo->wrk, oc, BOS_REQ_DONE); } return (F_STP_STARTFETCH); } diff --git a/bin/varnishd/cache/cache_req_body.c b/bin/varnishd/cache/cache_req_body.c index f2c894cc0..fa4b4062b 100644 --- a/bin/varnishd/cache/cache_req_body.c +++ b/bin/varnishd/cache/cache_req_body.c @@ -177,7 +177,7 @@ vrb_pull(struct req *req, ssize_t maxsize, objiterate_f *func, void *priv) (uintmax_t)req_bodybytes); } - req->req_body_status = BS_CACHED; + req->req_body_cached = 1; return (req_bodybytes); } @@ -199,7 +199,7 @@ VRB_Iterate(struct worker *wrk, struct vsl_log *vsl, CHECK_OBJ_NOTNULL(req, REQ_MAGIC); AN(func); - if (req->req_body_status == BS_CACHED) { + if (req->req_body_cached) { AN(req->body_oc); if (ObjIterate(wrk, req->body_oc, priv, func, 0)) return (-1); @@ -277,10 +277,13 @@ VRB_Free(struct req *req) CHECK_OBJ_NOTNULL(req, REQ_MAGIC); - if (req->body_oc == NULL) + if (req->body_oc == NULL) { + AZ(req->req_body_cached); return; + } r = HSH_DerefObjCore(req->wrk, &req->body_oc, 0); + req->req_body_cached = 0; // each busyobj may have gained a reference assert (r >= 0); @@ -312,13 +315,13 @@ VRB_Cache(struct req *req, ssize_t maxsize) * where we know we will have no competition or conflicts for the * updates to req.http.* etc. 
*/ - if (req->restarts > 0 && req->req_body_status != BS_CACHED) { + if (req->restarts > 0 && !req->req_body_cached) { VSLb(req->vsl, SLT_VCL_Error, "req.body must be cached before restarts"); return (-1); } - if (req->req_body_status == BS_CACHED) { + if (req->req_body_cached) { AZ(ObjGetU64(req->wrk, req->body_oc, OA_LEN, &u)); return (u); } diff --git a/bin/varnishd/cache/cache_req_fsm.c b/bin/varnishd/cache/cache_req_fsm.c index ed97a0110..31929e123 100644 --- a/bin/varnishd/cache/cache_req_fsm.c +++ b/bin/varnishd/cache/cache_req_fsm.c @@ -813,7 +813,7 @@ cnt_pipe(struct worker *wrk, struct req *req) bo->req = req; bo->wrk = wrk; /* Unless cached, reqbody is not our job */ - if (req->req_body_status != BS_CACHED) + if (!req->req_body_cached) req->req_body_status = BS_NONE; SES_Close(req->sp, VDI_Http1Pipe(req, bo)); nxt = REQ_FSM_DONE; @@ -900,6 +900,7 @@ cnt_recv_prep(struct req *req, const char *ci) req->client_identity = NULL; req->storage = NULL; req->trace = FEATURE(FEATURE_TRACE); + AZ(req->req_body_cached); } req->is_hit = 0; diff --git a/bin/varnishd/http1/cache_http1_fetch.c b/bin/varnishd/http1/cache_http1_fetch.c index 4ea37e1d3..b1918253d 100644 --- a/bin/varnishd/http1/cache_http1_fetch.c +++ b/bin/varnishd/http1/cache_http1_fetch.c @@ -119,7 +119,7 @@ V1F_SendReq(struct worker *wrk, struct busyobj *bo, uint64_t *ctr_hdrbytes, V1L_Chunked(wrk); i = VRB_Iterate(wrk, bo->vsl, bo->req, vbf_iter_req_body, bo); - if (bo->req->req_body_status != BS_CACHED) + if (!bo->req->req_body_cached) bo->no_retry = "req.body not cached"; if (bo->req->req_body_status == BS_ERROR) { diff --git a/include/tbl/body_status.h b/include/tbl/body_status.h index 108345c6c..39c45b4fb 100644 --- a/include/tbl/body_status.h +++ b/include/tbl/body_status.h @@ -39,7 +39,6 @@ BODYSTATUS(CHUNKED, chunked, 2, 1, 0) BODYSTATUS(LENGTH, length, 3, 1, 1) BODYSTATUS(EOF, eof, 4, 1, 0) BODYSTATUS(TAKEN, taken, 5, 0, 0) -BODYSTATUS(CACHED, cached, 6, 2, 1) #undef BODYSTATUS /*lint -restore */ diff --git a/include/tbl/req_flags.h b/include/tbl/req_flags.h index 6a9e6acbb..282bd92ea 100644 --- a/include/tbl/req_flags.h +++ b/include/tbl/req_flags.h @@ -41,6 +41,7 @@ REQ_FLAG(waitinglist, 0, 0, "") REQ_FLAG(want100cont, 0, 0, "") REQ_FLAG(late100cont, 0, 0, "") REQ_FLAG(req_reset, 0, 0, "") +REQ_FLAG(req_body_cached, 0, 0, "") #define REQ_BEREQ_FLAG(lower, vcl_r, vcl_w, doc) \ REQ_FLAG(lower, vcl_r, vcl_w, doc) #include "tbl/req_bereq_flags.h" From dridi.boukelmoune at gmail.com Wed Jul 3 09:39:05 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 09:39:05 +0000 (UTC) Subject: [master] f0e9df843 vrb: Turn BS_TAKEN into a request flag Message-ID: <20240703093905.76D7511CD81@lists.varnish-cache.org> commit f0e9df843d28bb55e9acea9b73ae4ccc658d033f Author: Dridi Boukelmoune Date: Mon Jun 24 17:53:24 2024 +0200 vrb: Turn BS_TAKEN into a request flag This body status would conflict with a BS_TRAILERS body status in the h2 case where the body is received and sent asynchronously by two different workers. 
diff --git a/bin/varnishd/cache/cache_fetch.c b/bin/varnishd/cache/cache_fetch.c index 05dc40ce9..914ce8010 100644 --- a/bin/varnishd/cache/cache_fetch.c +++ b/bin/varnishd/cache/cache_fetch.c @@ -141,11 +141,8 @@ Bereq_Rollback(VRT_CTX) bo = ctx->bo; CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC); - if (bo->htc != NULL) { - assert(bo->htc->body_status != BS_TAKEN); - if (bo->htc->body_status != BS_NONE) - bo->htc->doclose = SC_RESP_CLOSE; - } + if (bo->htc != NULL && bo->htc->body_status != BS_NONE) + bo->htc->doclose = SC_RESP_CLOSE; vbf_cleanup(bo); VCL_TaskLeave(ctx, bo->privs); diff --git a/bin/varnishd/cache/cache_req_body.c b/bin/varnishd/cache/cache_req_body.c index fa4b4062b..49179a9d9 100644 --- a/bin/varnishd/cache/cache_req_body.c +++ b/bin/varnishd/cache/cache_req_body.c @@ -207,7 +207,7 @@ VRB_Iterate(struct worker *wrk, struct vsl_log *vsl, } if (req->req_body_status == BS_NONE) return (0); - if (req->req_body_status == BS_TAKEN) { + if (req->req_body_taken) { VSLb(vsl, SLT_VCL_Error, "Uncached req.body can only be consumed once."); return (-1); @@ -219,7 +219,7 @@ VRB_Iterate(struct worker *wrk, struct vsl_log *vsl, } Lck_Lock(&req->sp->mtx); if (req->req_body_status->avail > 0) { - req->req_body_status = BS_TAKEN; + req->req_body_taken = 1; i = 0; } else i = -1; @@ -277,6 +277,7 @@ VRB_Free(struct req *req) CHECK_OBJ_NOTNULL(req, REQ_MAGIC); + req->req_body_taken = 0; if (req->body_oc == NULL) { AZ(req->req_body_cached); return; diff --git a/include/tbl/body_status.h b/include/tbl/body_status.h index 39c45b4fb..e9464381f 100644 --- a/include/tbl/body_status.h +++ b/include/tbl/body_status.h @@ -38,7 +38,6 @@ BODYSTATUS(ERROR, error, 1, -1, 0) BODYSTATUS(CHUNKED, chunked, 2, 1, 0) BODYSTATUS(LENGTH, length, 3, 1, 1) BODYSTATUS(EOF, eof, 4, 1, 0) -BODYSTATUS(TAKEN, taken, 5, 0, 0) #undef BODYSTATUS /*lint -restore */ diff --git a/include/tbl/req_flags.h b/include/tbl/req_flags.h index 282bd92ea..cee216749 100644 --- a/include/tbl/req_flags.h +++ b/include/tbl/req_flags.h @@ -42,6 +42,7 @@ REQ_FLAG(want100cont, 0, 0, "") REQ_FLAG(late100cont, 0, 0, "") REQ_FLAG(req_reset, 0, 0, "") REQ_FLAG(req_body_cached, 0, 0, "") +REQ_FLAG(req_body_taken, 0, 0, "") #define REQ_BEREQ_FLAG(lower, vcl_r, vcl_w, doc) \ REQ_FLAG(lower, vcl_r, vcl_w, doc) #include "tbl/req_bereq_flags.h" From dridi.boukelmoune at gmail.com Wed Jul 3 10:04:05 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 10:04:05 +0000 (UTC) Subject: [master] c79b4c116 vsc: Revamp vsc.c and VSC_Iter() Message-ID: <20240703100405.8984E11E14A@lists.varnish-cache.org> commit c79b4c11637f8332440b482e59b3746db158da7b Author: Martin Blix Grydeland Date: Mon Jun 3 15:10:31 2024 +0200 vsc: Revamp vsc.c and VSC_Iter() This patch polishes and fixes up the VSC parts of libvarnishapi. Main points are: 1) The merging of the VSM list and the VSC segment list is done in a separate loop from where the callbacks are executed. This prevents a VSM_Status() being executed in a callback from interfering with the update of the VSC segment list to match the VSM list. 2) Failures to map counters does not remove the counter from the VSC seg list. Previously we would remove VSC seg list elements on map failures, but doing so would then cause issues when attempting to consolidate the VSM list and the VSC seg list on the next call to VSC_Iter(). With this patch, map failures are recorded as a state without removing the VSC seg list, and map failures will be retried on subsequenct VSC_Iter() calls. 
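For context, a minimal counter reader on the public API that this rework serves (a sketch, not part of the commit). Per items 1) and 2) above, a VSC_iter_f callback may now call VSM_Status() without disturbing the internal segment list, and a segment that fails to map is retried on the next VSC_Iter() call rather than dropped. The VSM_*/VSC_* names are the stock libvarnishapi ones; the VSC_point member names and value type are assumed from vapi/vsc.h.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include "vapi/vsm.h"
#include "vapi/vsc.h"

static int
dump_point(void *priv, const struct VSC_point *const pt)
{
	struct vsm *vsm = priv;

	if (pt == NULL)		/* iteration bookkeeping, nothing to print */
		return (0);
	/* Calling VSM_Status() from inside the callback is the case item 1)
	 * above addresses; it no longer upsets the segment list walk. */
	(void)VSM_Status(vsm);
	printf("%s = %ju\n", pt->name,
	    (uintmax_t)*(const volatile uint64_t *)pt->ptr);
	return (0);		/* non-zero would stop VSC_Iter() */
}

int
main(void)
{
	struct vsm *vsm;
	struct vsc *vsc;

	vsm = VSM_New();
	if (vsm == NULL || VSM_Attach(vsm, 2))	/* 2: progress to stderr */
		exit(1);
	vsc = VSC_New();
	if (vsc == NULL)
		exit(1);

	/* Segments that fail to map are skipped here and retried on the
	 * next call, per item 2) above. */
	(void)VSC_Iter(vsc, vsm, dump_point, vsm);

	VSC_Destroy(&vsc, vsm);
	VSM_Destroy(&vsm);
	return (0);
}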
3) Handle failures to map the corresponding DOC seg during mapping of a counter seg gracefully. Previously this would cause an assert. Now it will cause a map failure of the counter seg. diff --git a/lib/libvarnishapi/vsc.c b/lib/libvarnishapi/vsc.c index 7bda424ce..26335a715 100644 --- a/lib/libvarnishapi/vsc.c +++ b/lib/libvarnishapi/vsc.c @@ -79,18 +79,26 @@ struct vsc_pt { char *name; }; +enum vsc_seg_type { + VSC_SEG_COUNTERS = 1, + VSC_SEG_DOCS, +}; + struct vsc_seg { unsigned magic; #define VSC_SEG_MAGIC 0x801177d4 + enum vsc_seg_type type; VTAILQ_ENTRY(vsc_seg) list; struct vsm_fantom fantom[1]; - struct vsc_head *head; - char *body; + const struct vsc_head *head; + const char *body; struct vjsn *vj; unsigned npoints; struct vsc_pt *points; + + int mapped; int exposed; }; @@ -307,36 +315,74 @@ vsc_fill_point(const struct vsc *vsc, const struct vsc_seg *seg, vt = vjsn_child(vv, "index"); AN(vt); - point->point.ptr = (volatile void*)(seg->body + atoi(vt->value)); + point->point.ptr = (volatile const void*)(seg->body + atoi(vt->value)); point->point.raw = vsc->raw; } static void -vsc_del_seg(const struct vsc *vsc, struct vsm *vsm, struct vsc_seg **spp) +vsc_del_seg(struct vsc_seg *sp) +{ + CHECK_OBJ_NOTNULL(sp, VSC_SEG_MAGIC); + AZ(sp->exposed); + AZ(sp->mapped); + FREE_OBJ(sp); +} + +static struct vsc_seg * +vsc_new_seg(const struct vsm_fantom *fp, enum vsc_seg_type type) +{ + struct vsc_seg *sp; + + ALLOC_OBJ(sp, VSC_SEG_MAGIC); + AN(sp); + *sp->fantom = *fp; + sp->type = type; + + return (sp); +} + +static void +vsc_unmap_seg(const struct vsc *vsc, struct vsm *vsm, struct vsc_seg *sp) { unsigned u; struct vsc_pt *pp; - struct vsc_seg *sp; CHECK_OBJ_NOTNULL(vsc, VSC_MAGIC); AN(vsm); - TAKE_OBJ_NOTNULL(sp, spp, VSC_SEG_MAGIC); - AZ(VSM_Unmap(vsm, sp->fantom)); - if (sp->vj != NULL) { - vjsn_delete(&sp->vj); - } else { + CHECK_OBJ_NOTNULL(sp, VSC_SEG_MAGIC); + + AZ(sp->exposed); + if (!sp->mapped) + return; + + if (sp->type == VSC_SEG_COUNTERS) { pp = sp->points; for (u = 0; u < sp->npoints; u++, pp++) vsc_clean_point(pp); free(sp->points); + sp->points = NULL; + sp->npoints = 0; + AZ(sp->vj); + } else if (sp->type == VSC_SEG_DOCS) { + if (sp->vj != NULL) + vjsn_delete(&sp->vj); + AZ(sp->vj); + AZ(sp->points); + } else { + WRONG("Invalid segment type"); } - FREE_OBJ(sp); + + AZ(VSM_Unmap(vsm, sp->fantom)); + sp->head = NULL; + sp->body = NULL; + sp->mapped = 0; } -static struct vsc_seg * -vsc_add_seg(const struct vsc *vsc, struct vsm *vsm, const struct vsm_fantom *fp) +static int +vsc_map_seg(const struct vsc *vsc, struct vsm *vsm, struct vsc_seg *sp) { - struct vsc_seg *sp, *spd; + const struct vsc_head *head; + struct vsc_seg *spd; const char *e; struct vjsn_val *vv, *vve; struct vsb *vsb; @@ -344,32 +390,56 @@ vsc_add_seg(const struct vsc *vsc, struct vsm *vsm, const struct vsm_fantom *fp) CHECK_OBJ_NOTNULL(vsc, VSC_MAGIC); AN(vsm); + CHECK_OBJ_NOTNULL(sp, VSC_SEG_MAGIC); - ALLOC_OBJ(sp, VSC_SEG_MAGIC); - AN(sp); - *sp->fantom = *fp; - if (VSM_Map(vsm, sp->fantom)) { - /* - * If the seg was removed between our call to VSM_Status() - * and now, we won't be able to map it. - */ - FREE_OBJ(sp); - return (NULL); - } - sp->head = sp->fantom->b; - if (sp->head->ready == 0) { - VRMB(); + if (sp->mapped) + return (0); + + AZ(sp->exposed); + + if (VSM_Map(vsm, sp->fantom)) + return (-1); + head = sp->fantom->b; + if (head->ready == 0) { + /* It isn't ready yet. Sleep and try again. If it still + * isn't ready, fail the mapping. 
The transitions inside + * varnishd that we are waiting for are just some memcpy() + * operations, so there is no reason to allow a long retry + * time. */ usleep(100000); + if (head->ready == 0) { + VSM_Unmap(vsm, sp->fantom); + return (-1); + } } - assert(sp->head->ready > 0); - sp->body = (char*)sp->fantom->b + sp->head->body_offset; - if (!strcmp(fp->category, VSC_CLASS)) { - VTAILQ_FOREACH(spd, &vsc->segs, list) + sp->head = head; + sp->body = (char*)sp->fantom->b + sp->head->body_offset; + sp->mapped = 1; + + if (sp->type == VSC_SEG_COUNTERS) { + /* Find the corresponding DOCS seg. We are not able to + * read and match on the doc_id until the DOCS section is + * mapped. Iterate over all the DOCS sections, attempt to + * map if needed, and then check the doc_id. */ + VTAILQ_FOREACH(spd, &vsc->segs, list) { + CHECK_OBJ_NOTNULL(spd, VSC_SEG_MAGIC); + if (spd->type != VSC_SEG_DOCS) + continue; + if (!spd->mapped && vsc_map_seg(vsc, vsm, spd)) + continue; /* Failed to map it */ + AN(spd->mapped); if (spd->head->doc_id == sp->head->doc_id) - break; - AN(spd); - // XXX: Refcount ? + break; /* We have a match */ + } + if (spd == NULL) { + /* Could not find the right DOCS seg. Leave this + * seg as unmapped. */ + vsc_unmap_seg(vsc, vsm, sp); + return (-1); + } + + /* Create the VSC points list */ vve = vjsn_child(spd->vj->value, "elements"); AN(vve); sp->npoints = strtoul(vve->value, NULL, 0); @@ -385,13 +455,16 @@ vsc_add_seg(const struct vsc *vsc, struct vsm *vsm, const struct vsm_fantom *fp) pp++; } VSB_destroy(&vsb); - return (sp); + } else if (sp->type == VSC_SEG_DOCS) { + /* Parse the DOCS json */ + sp->vj = vjsn_parse(sp->body, &e); + XXXAZ(e); + AN(sp->vj); + } else { + WRONG(""); } - assert(!strcmp(fp->category, VSC_DOC_CLASS)); - sp->vj = vjsn_parse(sp->body, &e); - XXXAZ(e); - AN(sp->vj); - return (sp); + + return (0); } /*-------------------------------------------------------------------- @@ -404,6 +477,11 @@ vsc_expose(const struct vsc *vsc, struct vsc_seg *sp, int del) unsigned u; int expose; + if (!sp->mapped) { + AZ(sp->exposed); + return; + } + if (vsc->fnew != NULL && !sp->exposed && !del && sp->head->ready == 1) expose = 1; @@ -450,52 +528,105 @@ vsc_iter_seg(const struct vsc *vsc, const struct vsc_seg *sp, int VSC_Iter(struct vsc *vsc, struct vsm *vsm, VSC_iter_f *fiter, void *priv) { + enum vsc_seg_type type; struct vsm_fantom ifantom; struct vsc_seg *sp, *sp2; + VTAILQ_HEAD(, vsc_seg) removed; int i = 0; CHECK_OBJ_NOTNULL(vsc, VSC_MAGIC); AN(vsm); + + /* First walk the VSM segment list and consolidate with the shadow + * VSC seg list. We avoid calling any of the callback functions + * while iterating the VSMs. This removes any headaches wrt to + * callbacks calling VSM_Status(). */ + VTAILQ_INIT(&removed); sp = VTAILQ_FIRST(&vsc->segs); VSM_FOREACH(&ifantom, vsm) { AN(ifantom.category); - if (strcmp(ifantom.category, VSC_CLASS) && - strcmp(ifantom.category, VSC_DOC_CLASS)) + if (!strcmp(ifantom.category, VSC_CLASS)) + type = VSC_SEG_COUNTERS; + else if (!strcmp(ifantom.category, VSC_DOC_CLASS)) + type = VSC_SEG_DOCS; + else { + /* Not one of the categories we care about */ continue; - while (sp != NULL && - (strcmp(ifantom.ident, sp->fantom->ident) || - VSM_StillValid(vsm, sp->fantom) != VSM_valid)) { + } + + while (sp != NULL) { + CHECK_OBJ_NOTNULL(sp, VSC_SEG_MAGIC); + if (VSM_StillValid(vsm, sp->fantom) == VSM_valid && + !strcmp(ifantom.ident, sp->fantom->ident)) { + /* sp matches the expected value */ + break; + } + + /* sp is no longer in the VSM list. 
Remove it from + * our list. */ sp2 = sp; sp = VTAILQ_NEXT(sp, list); VTAILQ_REMOVE(&vsc->segs, sp2, list); - vsc_expose(vsc, sp2, 1); - vsc_del_seg(vsc, vsm, &sp2); + VTAILQ_INSERT_TAIL(&removed, sp2, list); } + if (sp == NULL) { - sp = vsc_add_seg(vsc, vsm, &ifantom); - if (sp != NULL) { - VTAILQ_INSERT_TAIL(&vsc->segs, sp, list); - vsc_expose(vsc, sp, 0); - } - } else { - vsc_expose(vsc, sp, 0); - } - if (sp != NULL) { - if (fiter != NULL && sp->head->ready < 2) - i = vsc_iter_seg(vsc, sp, fiter, priv); - sp = VTAILQ_NEXT(sp, list); + /* New entries are always appended last in the VSM + * list. Since we have iterated past all the + * entries in our shadow list, the VSM entry is a + * new entry we have not seen before. */ + sp = vsc_new_seg(&ifantom, type); + AN(sp); + VTAILQ_INSERT_TAIL(&vsc->segs, sp, list); } - if (i) - break; + assert(sp->type == type); + sp = VTAILQ_NEXT(sp, list); } while (sp != NULL) { + /* Clean up the tail end of the shadow list. */ + CHECK_OBJ_NOTNULL(sp, VSC_SEG_MAGIC); sp2 = sp; sp = VTAILQ_NEXT(sp, list); + VTAILQ_REMOVE(&vsc->segs, sp2, list); - vsc_expose(vsc, sp2, 1); - vsc_del_seg(vsc, vsm, &sp2); + VTAILQ_INSERT_TAIL(&removed, sp2, list); + } + + /* Clean up any removed segs */ + while (!VTAILQ_EMPTY(&removed)) { + sp = VTAILQ_FIRST(&removed); + CHECK_OBJ_NOTNULL(sp, VSC_SEG_MAGIC); + VTAILQ_REMOVE(&removed, sp, list); + + vsc_expose(vsc, sp, 1); + vsc_unmap_seg(vsc, vsm, sp); + vsc_del_seg(sp); + } + + /* Iterate our shadow list, reporting on each pointer value */ + VTAILQ_FOREACH(sp, &vsc->segs, list) { + CHECK_OBJ_NOTNULL(sp, VSC_SEG_MAGIC); + + if (sp->type != VSC_SEG_COUNTERS) + continue; + + /* Attempt to map the VSM. This is a noop if it was + * already mapped. If we fail we skip this seg on this + * call to VSC_Iter(), but will attempt again the next + * time VSC_Iter() is called. */ + if (vsc_map_seg(vsc, vsm, sp)) + continue; + + /* Expose the counters if necessary */ + vsc_expose(vsc, sp, 0); + + if (fiter != NULL && sp->head->ready == 1) + i = vsc_iter_seg(vsc, sp, fiter, priv); + if (i) + break; } + return (i); } @@ -562,8 +693,8 @@ VSC_Destroy(struct vsc **vscp, struct vsm *vsm) VTAILQ_FOREACH_SAFE(sp, &vsc->segs, list, sp2) { VTAILQ_REMOVE(&vsc->segs, sp, list); vsc_expose(vsc, sp, 1); - vsc_del_seg(vsc, vsm, &sp); + vsc_unmap_seg(vsc, vsm, sp); + vsc_del_seg(sp); } FREE_OBJ(vsc); } - From dridi.boukelmoune at gmail.com Wed Jul 3 10:04:05 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 10:04:05 +0000 (UTC) Subject: [master] 733905cec vsc: More reactive readiness check after mapping Message-ID: <20240703100405.A193F11E14D@lists.varnish-cache.org> commit 733905ceca55cfc4905901cbbc80d8977e59efba Author: Dridi Boukelmoune Date: Mon Jun 3 22:07:14 2024 +0200 vsc: More reactive readiness check after mapping Better diff with the --ignore-all-space option. diff --git a/lib/libvarnishapi/vsc.c b/lib/libvarnishapi/vsc.c index 26335a715..6229b8d86 100644 --- a/lib/libvarnishapi/vsc.c +++ b/lib/libvarnishapi/vsc.c @@ -387,6 +387,7 @@ vsc_map_seg(const struct vsc *vsc, struct vsm *vsm, struct vsc_seg *sp) struct vjsn_val *vv, *vve; struct vsb *vsb; struct vsc_pt *pp; + int retry; CHECK_OBJ_NOTNULL(vsc, VSC_MAGIC); AN(vsm); @@ -400,17 +401,18 @@ vsc_map_seg(const struct vsc *vsc, struct vsm *vsm, struct vsc_seg *sp) if (VSM_Map(vsm, sp->fantom)) return (-1); head = sp->fantom->b; + + /* It isn't ready yet. Sleep and try again. If it still + * isn't ready, fail the mapping. 
The transitions inside + * varnishd that we are waiting for are just some memcpy() + * operations, so there is no reason to allow a long retry + * time. */ + for (retry = 10; retry > 0 && head->ready == 0; retry--) + usleep(10000); + if (head->ready == 0) { - /* It isn't ready yet. Sleep and try again. If it still - * isn't ready, fail the mapping. The transitions inside - * varnishd that we are waiting for are just some memcpy() - * operations, so there is no reason to allow a long retry - * time. */ - usleep(100000); - if (head->ready == 0) { - VSM_Unmap(vsm, sp->fantom); - return (-1); - } + VSM_Unmap(vsm, sp->fantom); + return (-1); } sp->head = head; From dridi.boukelmoune at gmail.com Wed Jul 3 10:04:05 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 10:04:05 +0000 (UTC) Subject: [master] 9ab658796 vsc: Flatten vsc_map_seg() Message-ID: <20240703100405.BFB5811E151@lists.varnish-cache.org> commit 9ab658796fd1eeea1aff4e06a82201c35e31a53e Author: Dridi Boukelmoune Date: Mon Jun 3 22:12:56 2024 +0200 vsc: Flatten vsc_map_seg() Better diff with the --ignore-all-space option. diff --git a/lib/libvarnishapi/vsc.c b/lib/libvarnishapi/vsc.c index 6229b8d86..c6de69656 100644 --- a/lib/libvarnishapi/vsc.c +++ b/lib/libvarnishapi/vsc.c @@ -419,53 +419,53 @@ vsc_map_seg(const struct vsc *vsc, struct vsm *vsm, struct vsc_seg *sp) sp->body = (char*)sp->fantom->b + sp->head->body_offset; sp->mapped = 1; - if (sp->type == VSC_SEG_COUNTERS) { - /* Find the corresponding DOCS seg. We are not able to - * read and match on the doc_id until the DOCS section is - * mapped. Iterate over all the DOCS sections, attempt to - * map if needed, and then check the doc_id. */ - VTAILQ_FOREACH(spd, &vsc->segs, list) { - CHECK_OBJ_NOTNULL(spd, VSC_SEG_MAGIC); - if (spd->type != VSC_SEG_DOCS) - continue; - if (!spd->mapped && vsc_map_seg(vsc, vsm, spd)) - continue; /* Failed to map it */ - AN(spd->mapped); - if (spd->head->doc_id == sp->head->doc_id) - break; /* We have a match */ - } - if (spd == NULL) { - /* Could not find the right DOCS seg. Leave this - * seg as unmapped. */ - vsc_unmap_seg(vsc, vsm, sp); - return (-1); - } - - /* Create the VSC points list */ - vve = vjsn_child(spd->vj->value, "elements"); - AN(vve); - sp->npoints = strtoul(vve->value, NULL, 0); - sp->points = calloc(sp->npoints, sizeof *sp->points); - AN(sp->points); - vsb = VSB_new_auto(); - AN(vsb); - vve = vjsn_child(spd->vj->value, "elem"); - AN(vve); - pp = sp->points; - VTAILQ_FOREACH(vv, &vve->children, list) { - vsc_fill_point(vsc, sp, vv, vsb, pp); - pp++; - } - VSB_destroy(&vsb); - } else if (sp->type == VSC_SEG_DOCS) { + if (sp->type == VSC_SEG_DOCS) { /* Parse the DOCS json */ sp->vj = vjsn_parse(sp->body, &e); XXXAZ(e); AN(sp->vj); - } else { - WRONG(""); + return (0); } + assert(sp->type == VSC_SEG_COUNTERS); + + /* Find the corresponding DOCS seg. We are not able to + * read and match on the doc_id until the DOCS section is + * mapped. Iterate over all the DOCS sections, attempt to + * map if needed, and then check the doc_id. */ + VTAILQ_FOREACH(spd, &vsc->segs, list) { + CHECK_OBJ_NOTNULL(spd, VSC_SEG_MAGIC); + if (spd->type != VSC_SEG_DOCS) + continue; + if (!spd->mapped && vsc_map_seg(vsc, vsm, spd)) + continue; /* Failed to map it */ + AN(spd->mapped); + if (spd->head->doc_id == sp->head->doc_id) + break; /* We have a match */ + } + if (spd == NULL) { + /* Could not find the right DOCS seg. Leave this + * seg as unmapped. 
*/ + vsc_unmap_seg(vsc, vsm, sp); + return (-1); + } + + /* Create the VSC points list */ + vve = vjsn_child(spd->vj->value, "elements"); + AN(vve); + sp->npoints = strtoul(vve->value, NULL, 0); + sp->points = calloc(sp->npoints, sizeof *sp->points); + AN(sp->points); + vsb = VSB_new_auto(); + AN(vsb); + vve = vjsn_child(spd->vj->value, "elem"); + AN(vve); + pp = sp->points; + VTAILQ_FOREACH(vv, &vve->children, list) { + vsc_fill_point(vsc, sp, vv, vsb, pp); + pp++; + } + VSB_destroy(&vsb); return (0); } From dridi.boukelmoune at gmail.com Wed Jul 3 10:04:05 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 10:04:05 +0000 (UTC) Subject: [master] b57d746e4 vsc: Delete lists of segments Message-ID: <20240703100405.E01C211E155@lists.varnish-cache.org> commit b57d746e4c7752a8fc404a839082ce93e9e6a20a Author: Dridi Boukelmoune Date: Mon Jun 3 22:41:51 2024 +0200 vsc: Delete lists of segments Co-authored-by: Martin Blix Grydeland diff --git a/lib/libvarnishapi/vsc.c b/lib/libvarnishapi/vsc.c index c6de69656..25f9680c0 100644 --- a/lib/libvarnishapi/vsc.c +++ b/lib/libvarnishapi/vsc.c @@ -101,6 +101,7 @@ struct vsc_seg { int mapped; int exposed; }; +VTAILQ_HEAD(vsc_seg_head, vsc_seg); struct vsc { unsigned magic; @@ -108,7 +109,7 @@ struct vsc { unsigned raw; struct vsc_sf_head sf_list; - VTAILQ_HEAD(,vsc_seg) segs; + struct vsc_seg_head segs; VSC_new_f *fnew; VSC_destroy_f *fdestroy; @@ -319,15 +320,6 @@ vsc_fill_point(const struct vsc *vsc, const struct vsc_seg *seg, point->point.raw = vsc->raw; } -static void -vsc_del_seg(struct vsc_seg *sp) -{ - CHECK_OBJ_NOTNULL(sp, VSC_SEG_MAGIC); - AZ(sp->exposed); - AZ(sp->mapped); - FREE_OBJ(sp); -} - static struct vsc_seg * vsc_new_seg(const struct vsm_fantom *fp, enum vsc_seg_type type) { @@ -505,6 +497,23 @@ vsc_expose(const struct vsc *vsc, struct vsc_seg *sp, int del) sp->exposed = expose; } +/*-------------------------------------------------------------------- + */ + +static void +vsc_del_segs(struct vsc *vsc, struct vsm *vsm, struct vsc_seg_head *head) +{ + struct vsc_seg *sp, *sp2; + + VTAILQ_FOREACH_SAFE(sp, head, list, sp2) { + CHECK_OBJ(sp, VSC_SEG_MAGIC); + VTAILQ_REMOVE(head, sp, list); + vsc_expose(vsc, sp, 1); + vsc_unmap_seg(vsc, vsm, sp); + FREE_OBJ(sp); + } +} + /*-------------------------------------------------------------------- */ @@ -533,7 +542,7 @@ VSC_Iter(struct vsc *vsc, struct vsm *vsm, VSC_iter_f *fiter, void *priv) enum vsc_seg_type type; struct vsm_fantom ifantom; struct vsc_seg *sp, *sp2; - VTAILQ_HEAD(, vsc_seg) removed; + struct vsc_seg_head removed; int i = 0; CHECK_OBJ_NOTNULL(vsc, VSC_MAGIC); @@ -595,16 +604,7 @@ VSC_Iter(struct vsc *vsc, struct vsm *vsm, VSC_iter_f *fiter, void *priv) VTAILQ_INSERT_TAIL(&removed, sp2, list); } - /* Clean up any removed segs */ - while (!VTAILQ_EMPTY(&removed)) { - sp = VTAILQ_FIRST(&removed); - CHECK_OBJ_NOTNULL(sp, VSC_SEG_MAGIC); - VTAILQ_REMOVE(&removed, sp, list); - - vsc_expose(vsc, sp, 1); - vsc_unmap_seg(vsc, vsm, sp); - vsc_del_seg(sp); - } + vsc_del_segs(vsc, vsm, &removed); /* Iterate our shadow list, reporting on each pointer value */ VTAILQ_FOREACH(sp, &vsc->segs, list) { @@ -682,7 +682,6 @@ VSC_Destroy(struct vsc **vscp, struct vsm *vsm) { struct vsc *vsc; struct vsc_sf *sf, *sf2; - struct vsc_seg *sp, *sp2; TAKE_OBJ_NOTNULL(vsc, vscp, VSC_MAGIC); @@ -692,11 +691,7 @@ VSC_Destroy(struct vsc **vscp, struct vsm *vsm) free(sf->pattern); FREE_OBJ(sf); } - VTAILQ_FOREACH_SAFE(sp, &vsc->segs, list, sp2) { - VTAILQ_REMOVE(&vsc->segs, sp, list); 
- vsc_expose(vsc, sp, 1); - vsc_unmap_seg(vsc, vsm, sp); - vsc_del_seg(sp); - } + + vsc_del_segs(vsc, vsm, &vsc->segs); FREE_OBJ(vsc); } From dridi.boukelmoune at gmail.com Wed Jul 3 10:04:06 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 10:04:06 +0000 (UTC) Subject: [master] cb339b61b vsc: Keep track of doc segments in an additional list Message-ID: <20240703100406.06B8511E159@lists.varnish-cache.org> commit cb339b61b59ee45fa72b137a5f6d2c088ef6ff49 Author: Dridi Boukelmoune Date: Tue Jun 4 14:33:14 2024 +0200 vsc: Keep track of doc segments in an additional list This avoids iterating over all segments when we are looking for a specific doc_id. diff --git a/lib/libvarnishapi/vsc.c b/lib/libvarnishapi/vsc.c index 25f9680c0..015872bf4 100644 --- a/lib/libvarnishapi/vsc.c +++ b/lib/libvarnishapi/vsc.c @@ -89,6 +89,7 @@ struct vsc_seg { #define VSC_SEG_MAGIC 0x801177d4 enum vsc_seg_type type; VTAILQ_ENTRY(vsc_seg) list; + VTAILQ_ENTRY(vsc_seg) doc_list; struct vsm_fantom fantom[1]; const struct vsc_head *head; const char *body; @@ -110,6 +111,7 @@ struct vsc { unsigned raw; struct vsc_sf_head sf_list; struct vsc_seg_head segs; + struct vsc_seg_head docs; VSC_new_f *fnew; VSC_destroy_f *fdestroy; @@ -144,6 +146,7 @@ VSC_New(void) return (vsc); VTAILQ_INIT(&vsc->sf_list); VTAILQ_INIT(&vsc->segs); + VTAILQ_INIT(&vsc->docs); return (vsc); } @@ -425,10 +428,9 @@ vsc_map_seg(const struct vsc *vsc, struct vsm *vsm, struct vsc_seg *sp) * read and match on the doc_id until the DOCS section is * mapped. Iterate over all the DOCS sections, attempt to * map if needed, and then check the doc_id. */ - VTAILQ_FOREACH(spd, &vsc->segs, list) { + VTAILQ_FOREACH(spd, &vsc->docs, doc_list) { CHECK_OBJ_NOTNULL(spd, VSC_SEG_MAGIC); - if (spd->type != VSC_SEG_DOCS) - continue; + assert(spd->type == VSC_SEG_DOCS); if (!spd->mapped && vsc_map_seg(vsc, vsm, spd)) continue; /* Failed to map it */ AN(spd->mapped); @@ -508,6 +510,8 @@ vsc_del_segs(struct vsc *vsc, struct vsm *vsm, struct vsc_seg_head *head) VTAILQ_FOREACH_SAFE(sp, head, list, sp2) { CHECK_OBJ(sp, VSC_SEG_MAGIC); VTAILQ_REMOVE(head, sp, list); + if (sp->type == VSC_SEG_DOCS) + VTAILQ_REMOVE(&vsc->docs, sp, doc_list); vsc_expose(vsc, sp, 1); vsc_unmap_seg(vsc, vsm, sp); FREE_OBJ(sp); @@ -589,6 +593,8 @@ VSC_Iter(struct vsc *vsc, struct vsm *vsm, VSC_iter_f *fiter, void *priv) sp = vsc_new_seg(&ifantom, type); AN(sp); VTAILQ_INSERT_TAIL(&vsc->segs, sp, list); + if (type == VSC_SEG_DOCS) + VTAILQ_INSERT_TAIL(&vsc->docs, sp, doc_list); } assert(sp->type == type); @@ -693,5 +699,6 @@ VSC_Destroy(struct vsc **vscp, struct vsm *vsm) } vsc_del_segs(vsc, vsm, &vsc->segs); + assert(VTAILQ_EMPTY(&vsc->docs)); FREE_OBJ(vsc); } From dridi.boukelmoune at gmail.com Wed Jul 3 10:04:06 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 10:04:06 +0000 (UTC) Subject: [master] 18388284b vsc: Remove unused vmb.h include Message-ID: <20240703100406.2516211E15D@lists.varnish-cache.org> commit 18388284b128d83182d29d3fa6a51a63a8827382 Author: Dridi Boukelmoune Date: Mon Jun 10 19:07:39 2024 +0200 vsc: Remove unused vmb.h include diff --git a/lib/libvarnishapi/vsc.c b/lib/libvarnishapi/vsc.c index 015872bf4..663d5c23a 100644 --- a/lib/libvarnishapi/vsc.c +++ b/lib/libvarnishapi/vsc.c @@ -48,7 +48,6 @@ #include "vjsn.h" #include "vsb.h" #include "vsc_priv.h" -#include "vmb.h" #include "vapi/vsc.h" #include "vapi/vsm.h" From dridi.boukelmoune at gmail.com Wed Jul 3 10:04:06 2024 From: dridi.boukelmoune at 
gmail.com (Dridi Boukelmoune) Date: Wed, 3 Jul 2024 10:04:06 +0000 (UTC) Subject: [master] 1cfa4e678 vsc: Missing check for VSM_Unmap()'s result Message-ID: <20240703100406.4552611E167@lists.varnish-cache.org> commit 1cfa4e67880721d6018d9fbde610dd9746ffddcd Author: Dridi Boukelmoune Date: Mon Jun 10 19:08:32 2024 +0200 vsc: Missing check for VSM_Unmap()'s result diff --git a/lib/libvarnishapi/vsc.c b/lib/libvarnishapi/vsc.c index 663d5c23a..668b0b676 100644 --- a/lib/libvarnishapi/vsc.c +++ b/lib/libvarnishapi/vsc.c @@ -405,7 +405,7 @@ vsc_map_seg(const struct vsc *vsc, struct vsm *vsm, struct vsc_seg *sp) usleep(10000); if (head->ready == 0) { - VSM_Unmap(vsm, sp->fantom); + AZ(VSM_Unmap(vsm, sp->fantom)); return (-1); } From nils.goroll at uplex.de Mon Jul 8 13:25:06 2024 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 8 Jul 2024 13:25:06 +0000 (UTC) Subject: [master] 78ac71a91 Add a parameter for the panic buffer size Message-ID: <20240708132506.811F4B1D48@lists.varnish-cache.org> commit 78ac71a91d5eb042ecd8e5abc0e6157265879bdd Author: Nils Goroll Date: Mon Jul 8 11:26:01 2024 +0200 Add a parameter for the panic buffer size 64k ought to be enough for everyone. /s diff --git a/bin/varnishd/mgt/mgt_shmem.c b/bin/varnishd/mgt/mgt_shmem.c index 858dfafdd..d67fca001 100644 --- a/bin/varnishd/mgt/mgt_shmem.c +++ b/bin/varnishd/mgt/mgt_shmem.c @@ -131,7 +131,7 @@ mgt_SHM_ChildNew(void) AN(heritage.param); *heritage.param = mgt_param; - heritage.panic_str_len = 64 * 1024; + heritage.panic_str_len = mgt_param.panic_buffer; heritage.panic_str = VSMW_Allocf(mgt_vsmw, NULL, "Panic", heritage.panic_str_len, ""); AN(heritage.panic_str); diff --git a/include/tbl/params.h b/include/tbl/params.h index 938b00f4b..68da7d742 100644 --- a/include/tbl/params.h +++ b/include/tbl/params.h @@ -732,6 +732,22 @@ PARAM_SIMPLE( /* flags */ EXPERIMENTAL ) +PARAM_SIMPLE( + /* name */ panic_buffer, + /* type */ bytes_u, + /* min */ "4k", + /* max */ "10m", + /* def */ "64k", + /* units */ "bytes", + /* descr */ + "Size of the panic message buffer.\n" + "The panic buffer is allocated in the working directory as memory " + "shared between the management and worker process, so sufficient " + "working directory space should be accounted for if this value is " + "adjusted. Panic messages are truncated to the configured size.", + /* flags */ MUST_RESTART +) + PARAM_SIMPLE( /* name */ pcre2_jit_compilation, /* type */ boolean, From dridi.boukelmoune at gmail.com Mon Jul 8 17:12:06 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Mon, 8 Jul 2024 17:12:06 +0000 (UTC) Subject: [master] 57c814de0 ws: Generalize WS_Pipeline() beyond req keep-alive Message-ID: <20240708171206.BB5F597A1@lists.varnish-cache.org> commit 57c814de07e875af8d8ab7d55a7e873b6b3e66f4 Author: Dridi Boukelmoune Date: Mon Jun 17 10:50:50 2024 +0200 ws: Generalize WS_Pipeline() beyond req keep-alive This puts the caller in charge of triggering the workspace rollback before moving pipelined bytes around. This will open the door to pipelining for req and beresp trailers. The workspace emulator version of WS_ReqPipeline() had a systematic rollback instead of a conditional one. The reason why this didn't pose a problem was that fetch tasks never need pipelining and always take the shortcut where the rollback was properly optional. This is a good occasion to simplify the emulated variant and document why it is so different from the original. 
Co-authored-by: Walid Boudebouda diff --git a/bin/varnishd/cache/cache_session.c b/bin/varnishd/cache/cache_session.c index 43d5ab60e..898242da3 100644 --- a/bin/varnishd/cache/cache_session.c +++ b/bin/varnishd/cache/cache_session.c @@ -258,12 +258,20 @@ HTC_Status(enum htc_status_e e, const char **name, const char **desc) void HTC_RxInit(struct http_conn *htc, struct ws *ws) { - unsigned l; + unsigned rollback; + int l; CHECK_OBJ_NOTNULL(htc, HTTP_CONN_MAGIC); htc->ws = ws; - l = WS_ReqPipeline(htc->ws, htc->pipeline_b, htc->pipeline_e); + /* NB: HTTP/1 keep-alive triggers a rollback, so does the first + * request of a session or an h2 request where the rollback is a + * no-op in terms of workspace usage. + */ + rollback = !strcasecmp(ws->id, "req") && htc->body_status == NULL; + l = WS_Pipeline(htc->ws, htc->pipeline_b, htc->pipeline_e, rollback); + xxxassert(l >= 0); + htc->rxbuf_b = WS_Reservation(ws); htc->rxbuf_e = htc->rxbuf_b + l; htc->pipeline_b = NULL; diff --git a/bin/varnishd/cache/cache_varnishd.h b/bin/varnishd/cache/cache_varnishd.h index 9bd5e2490..2469cdcec 100644 --- a/bin/varnishd/cache/cache_varnishd.h +++ b/bin/varnishd/cache/cache_varnishd.h @@ -555,7 +555,7 @@ WS_IsReserved(const struct ws *ws) void *WS_AtOffset(const struct ws *ws, unsigned off, unsigned len); unsigned WS_ReservationOffset(const struct ws *ws); -unsigned WS_ReqPipeline(struct ws *, const void *b, const void *e); +int WS_Pipeline(struct ws *, const void *b, const void *e, unsigned rollback); /* cache_ws_common.c */ void WS_Id(const struct ws *ws, char *id); diff --git a/bin/varnishd/cache/cache_ws.c b/bin/varnishd/cache/cache_ws.c index d6ff67370..3f2cc5309 100644 --- a/bin/varnishd/cache/cache_ws.c +++ b/bin/varnishd/cache/cache_ws.c @@ -135,17 +135,15 @@ WS_Reset(struct ws *ws, uintptr_t pp) * may not originate from the same workspace. */ -unsigned -WS_ReqPipeline(struct ws *ws, const void *b, const void *e) +int +WS_Pipeline(struct ws *ws, const void *b, const void *e, unsigned rollback) { unsigned r, l; WS_Assert(ws); - if (!strcasecmp(ws->id, "req")) + if (rollback) WS_Rollback(ws, 0); - else - AZ(b); r = WS_ReserveAll(ws); @@ -156,7 +154,8 @@ WS_ReqPipeline(struct ws *ws, const void *b, const void *e) AN(e); l = pdiff(b, e); - assert(l <= r); + if (l > r) + return (-1); memmove(ws->f, b, l); return (l); } diff --git a/bin/varnishd/cache/cache_ws_emu.c b/bin/varnishd/cache/cache_ws_emu.c index 830f8c450..767839d1e 100644 --- a/bin/varnishd/cache/cache_ws_emu.c +++ b/bin/varnishd/cache/cache_ws_emu.c @@ -221,45 +221,46 @@ WS_Reset(struct ws *ws, uintptr_t pp) WS_Assert(ws); } -unsigned -WS_ReqPipeline(struct ws *ws, const void *b, const void *e) +int +WS_Pipeline(struct ws *ws, const void *b, const void *e, unsigned rollback) { - struct ws_emu *we; - struct ws_alloc *wa; - unsigned l; + void *tmp; + unsigned r, l; WS_Assert(ws); AZ(ws->f); AZ(ws->r); - if (strcasecmp(ws->id, "req")) - AZ(b); - - if (b == NULL) { + /* NB: the pipeline cannot be moved if it comes from the same + * workspace because a rollback would free the memory. This is + * emulated with two copies instead. 
+ */ + + if (b != NULL) { + AN(e); + l = pdiff(b, e); + tmp = malloc(l); + AN(tmp); + memcpy(tmp, b, l); + } else { AZ(e); - if (!strcasecmp(ws->id, "req")) - WS_Rollback(ws, 0); - (void)WS_ReserveAll(ws); - return (0); + l = 0; + tmp = NULL; } - we = ws_emu(ws); - ALLOC_OBJ(wa, WS_ALLOC_MAGIC); - AN(wa); - wa->len = we->len; - wa->ptr = malloc(wa->len); - AN(wa->ptr); + if (rollback) + WS_Rollback(ws, 0); - AN(e); - l = pdiff(b, e); - assert(l <= wa->len); - memcpy(wa->ptr, b, l); + r = WS_ReserveAll(ws); - WS_Rollback(ws, 0); - ws->f = wa->ptr; - ws->r = ws->f + wa->len; - VTAILQ_INSERT_TAIL(&we->head, wa, list); - WS_Assert(ws); + if (l > r) { + free(tmp); + return (-1); + } + + if (l > 0) + memcpy(ws->f, tmp, l); + free(tmp); return (l); } From dridi.boukelmoune at gmail.com Mon Jul 8 17:12:06 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Mon, 8 Jul 2024 17:12:06 +0000 (UTC) Subject: [master] 0c8448e0e hpack: Track the workspace where headers are decoded Message-ID: <20240708171206.C4D0297A3@lists.varnish-cache.org> commit 0c8448e0ef2f9a1bb2e23eb1f6440b75e6c4d5a7 Author: Walid Boudebouda Date: Fri Jun 14 20:35:22 2024 +0200 hpack: Track the workspace where headers are decoded The reset pointer is effectively the beginning of the reservation, so in order to better generalize HEADERS frames processing between headers and trailers, we reference the workspace directly. diff --git a/bin/varnishd/http2/cache_http2.h b/bin/varnishd/http2/cache_http2.h index 89e32309c..db0afcb43 100644 --- a/bin/varnishd/http2/cache_http2.h +++ b/bin/varnishd/http2/cache_http2.h @@ -234,8 +234,8 @@ struct h2h_decode { unsigned has_scheme:1; h2_error error; enum vhd_ret_e vhd_ret; + struct ws *ws; char *out; - char *reset; int64_t limit; size_t out_l; size_t out_u; @@ -243,8 +243,8 @@ struct h2h_decode { struct vhd_decode vhd[1]; }; -void h2h_decode_init(const struct h2_sess *h2); -h2_error h2h_decode_fini(const struct h2_sess *h2); +void h2h_decode_hdr_init(const struct h2_sess *h2); +h2_error h2h_decode_hdr_fini(const struct h2_sess *h2); h2_error h2h_decode_bytes(struct h2_sess *h2, const uint8_t *ptr, size_t len); diff --git a/bin/varnishd/http2/cache_http2_hpack.c b/bin/varnishd/http2/cache_http2_hpack.c index c49f9d2b9..0bbedb27b 100644 --- a/bin/varnishd/http2/cache_http2_hpack.c +++ b/bin/varnishd/http2/cache_http2_hpack.c @@ -257,27 +257,25 @@ h2h_addhdr(struct http *hp, struct h2h_decode *d) return (0); } -void -h2h_decode_init(const struct h2_sess *h2) +static void +h2h_decode_init(const struct h2_sess *h2, struct ws *ws) { struct h2h_decode *d; CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC); - CHECK_OBJ_NOTNULL(h2->new_req, REQ_MAGIC); - CHECK_OBJ_NOTNULL(h2->new_req->http, HTTP_MAGIC); + CHECK_OBJ_NOTNULL(ws, WS_MAGIC); + AN(h2->decode); d = h2->decode; INIT_OBJ(d, H2H_DECODE_MAGIC); VHD_Init(d->vhd); - d->out_l = WS_ReserveSize(h2->new_req->http->ws, - cache_param->http_req_size); + d->out_l = WS_ReserveSize(ws, cache_param->http_req_size); /* * Can't do any work without any buffer * space. Require non-zero size. 
*/ XXXAN(d->out_l); - d->out = WS_Reservation(h2->new_req->http->ws); - d->reset = d->out; + d->out = WS_Reservation(ws); if (cache_param->h2_max_header_list_size == 0) d->limit = @@ -287,6 +285,18 @@ h2h_decode_init(const struct h2_sess *h2) if (d->limit < h2->local_settings.max_header_list_size) d->limit = INT64_MAX; + + d->ws = ws; +} + +void +h2h_decode_hdr_init(const struct h2_sess *h2) +{ + + CHECK_OBJ_NOTNULL(h2, H2_SESS_MAGIC); + CHECK_OBJ_NOTNULL(h2->new_req, REQ_MAGIC); + CHECK_OBJ_NOTNULL(h2->new_req->http, HTTP_MAGIC); + h2h_decode_init(h2, h2->new_req->ws); } /* Possible error returns: @@ -298,7 +308,7 @@ h2h_decode_init(const struct h2_sess *h2) * is a stream level error. */ h2_error -h2h_decode_fini(const struct h2_sess *h2) +h2h_decode_hdr_fini(const struct h2_sess *h2) { h2_error ret; struct h2h_decode *d; @@ -307,7 +317,7 @@ h2h_decode_fini(const struct h2_sess *h2) d = h2->decode; CHECK_OBJ_NOTNULL(h2->new_req, REQ_MAGIC); CHECK_OBJ_NOTNULL(d, H2H_DECODE_MAGIC); - WS_ReleaseP(h2->new_req->http->ws, d->out); + WS_ReleaseP(d->ws, d->out); if (d->vhd_ret != VHD_OK) { /* HPACK header block didn't finish at an instruction boundary */ @@ -347,12 +357,12 @@ h2h_decode_bytes(struct h2_sess *h2, const uint8_t *in, size_t in_l) CHECK_OBJ_NOTNULL(h2->new_req, REQ_MAGIC); hp = h2->new_req->http; CHECK_OBJ_NOTNULL(hp, HTTP_MAGIC); - CHECK_OBJ_NOTNULL(hp->ws, WS_MAGIC); - r = WS_Reservation(hp->ws); - AN(r); - e = r + WS_ReservationSize(hp->ws); d = h2->decode; CHECK_OBJ_NOTNULL(d, H2H_DECODE_MAGIC); + CHECK_OBJ_NOTNULL(d->ws, WS_MAGIC); + r = WS_Reservation(d->ws); + AN(r); + e = r + WS_ReservationSize(d->ws); /* Only H2E_ENHANCE_YOUR_CALM indicates that we should continue processing. Other errors should have been returned and handled @@ -427,7 +437,7 @@ h2h_decode_bytes(struct h2_sess *h2, const uint8_t *in, size_t in_l) } if (H2_ERROR_MATCH(d->error, H2SE_ENHANCE_YOUR_CALM)) { - d->out = d->reset; + d->out = WS_Reservation(d->ws); d->out_l = e - d->out; d->limit -= d->out_u; d->out_u = 0; @@ -444,7 +454,7 @@ h2h_decode_bytes(struct h2_sess *h2, const uint8_t *in, size_t in_l) } if (H2_ERROR_MATCH(d->error, H2SE_ENHANCE_YOUR_CALM)) { - /* Stream error, delay reporting until h2h_decode_fini so + /* Stream error, delay reporting until h2h_decode_hdr_fini so * that we can process the complete header block. 
*/ return (NULL); } diff --git a/bin/varnishd/http2/cache_http2_proto.c b/bin/varnishd/http2/cache_http2_proto.c index 5d0321160..25937cebe 100644 --- a/bin/varnishd/http2/cache_http2_proto.c +++ b/bin/varnishd/http2/cache_http2_proto.c @@ -234,7 +234,7 @@ h2_kill_req(struct worker *wrk, struct h2_sess *h2, r2 = NULL; } else { if (r2->state == H2_S_OPEN && h2->new_req == r2->req) - (void)h2h_decode_fini(h2); + (void)h2h_decode_hdr_fini(h2); } Lck_Unlock(&h2->sess->mtx); if (r2 != NULL) @@ -634,7 +634,7 @@ h2_end_headers(struct worker *wrk, struct h2_sess *h2, ASSERT_RXTHR(h2); assert(r2->state == H2_S_OPEN); - h2e = h2h_decode_fini(h2); + h2e = h2h_decode_hdr_fini(h2); h2->new_req = NULL; if (h2e != NULL) { Lck_Lock(&h2->sess->mtx); @@ -767,7 +767,7 @@ h2_rx_headers(struct worker *wrk, struct h2_sess *h2, struct h2_req *r2) HTTP_Setup(req->http, req->ws, req->vsl, SLT_ReqMethod); http_SetH(req->http, HTTP_HDR_PROTO, "HTTP/2.0"); - h2h_decode_init(h2); + h2h_decode_hdr_init(h2); p = h2->rxf_data; l = h2->rxf_len; @@ -788,7 +788,7 @@ h2_rx_headers(struct worker *wrk, struct h2_sess *h2, struct h2_req *r2) Lck_Lock(&h2->sess->mtx); VSLb(h2->vsl, SLT_Debug, "HPACK(hdr) %s", h2e->name); Lck_Unlock(&h2->sess->mtx); - (void)h2h_decode_fini(h2); + (void)h2h_decode_hdr_fini(h2); assert(!WS_IsReserved(r2->req->ws)); h2_del_req(wrk, r2); return (h2e); @@ -823,7 +823,7 @@ h2_rx_continuation(struct worker *wrk, struct h2_sess *h2, struct h2_req *r2) Lck_Lock(&h2->sess->mtx); VSLb(h2->vsl, SLT_Debug, "HPACK(cont) %s", h2e->name); Lck_Unlock(&h2->sess->mtx); - (void)h2h_decode_fini(h2); + (void)h2h_decode_hdr_fini(h2); assert(!WS_IsReserved(r2->req->ws)); h2_del_req(wrk, r2); return (h2e); From dridi.boukelmoune at gmail.com Tue Jul 9 16:49:06 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Tue, 9 Jul 2024 16:49:06 +0000 (UTC) Subject: [master] 40795e3b4 vcc_expr: Remove vcc_expr_tostring() fmt argument Message-ID: <20240709164906.9BEEB9488C@lists.varnish-cache.org> commit 40795e3b4b801e0d2ae20f5873d903dc770bb8c6 Author: Dridi Boukelmoune Date: Mon Apr 29 13:50:54 2024 +0200 vcc_expr: Remove vcc_expr_tostring() fmt argument All call sites pass the STRINGS type, so we can inline it directly. 
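The shape of this refactoring, sketched with generic names (fmt_kind, to_string_before, to_string_after; this is not the vcc code itself): when every call site passes the same constant for a parameter, the parameter can be dropped and the constant used directly, with the old invariant kept as an assertion.

#include <assert.h>

enum fmt_kind { FMT_STRINGS, FMT_OTHER };

/* before: every caller passed FMT_STRINGS for 'want' */
static void
to_string_before(enum fmt_kind *have, enum fmt_kind want)
{
    assert(want == FMT_STRINGS);
    assert(*have != want);
    *have = want;
}

/* after: 'want' is gone and FMT_STRINGS is used directly */
static void
to_string_after(enum fmt_kind *have)
{
    assert(*have != FMT_STRINGS);
    *have = FMT_STRINGS;
}
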
diff --git a/lib/libvcc/vcc_expr.c b/lib/libvcc/vcc_expr.c index 8cda6f12d..326e8b4b9 100644 --- a/lib/libvcc/vcc_expr.c +++ b/lib/libvcc/vcc_expr.c @@ -288,19 +288,18 @@ vcc_expr_tobool(struct vcc *tl, struct expr **e) */ static void -vcc_expr_tostring(struct vcc *tl, struct expr **e, vcc_type_t fmt) +vcc_expr_tostring(struct vcc *tl, struct expr **e) { const char *p; uint8_t constant = EXPR_VAR; CHECK_OBJ_NOTNULL(*e, EXPR_MAGIC); - assert(fmt == STRINGS || fmt->stringform); - assert(fmt != (*e)->fmt); + assert((*e)->fmt != STRINGS); p = (*e)->fmt->tostring; if (p != NULL) { AN(*p); - *e = vcc_expr_edit(tl, fmt, p, *e, NULL); + *e = vcc_expr_edit(tl, STRINGS, p, *e, NULL); (*e)->constant = constant; (*e)->nstr = 1; } else { @@ -816,7 +815,7 @@ vcc_expr5(struct vcc *tl, struct expr **e, vcc_type_t fmt) ERRCHK(tl); /* Unless asked for a HEADER, fold to string here */ if (*e && fmt != HEADER && (*e)->fmt == HEADER) { - vcc_expr_tostring(tl, e, STRINGS); + vcc_expr_tostring(tl, e); ERRCHK(tl); } return; @@ -1071,9 +1070,9 @@ vcc_expr_add(struct vcc *tl, struct expr **e, vcc_type_t fmt) } else if (tk->tok == '+' && ((*e)->fmt == STRINGS || fmt == STRINGS)) { if ((*e)->fmt != STRINGS) - vcc_expr_tostring(tl, e, STRINGS); + vcc_expr_tostring(tl, e); if (e2->fmt != STRINGS) - vcc_expr_tostring(tl, &e2, STRINGS); + vcc_expr_tostring(tl, &e2); if (vcc_islit(*e) && vcc_isconst(e2)) { lit = vcc_islit(e2); *e = vcc_expr_edit(tl, STRINGS, @@ -1420,7 +1419,7 @@ vcc_expr0(struct vcc *tl, struct expr **e, vcc_type_t fmt) return; if ((*e)->fmt != STRINGS && fmt->stringform) - vcc_expr_tostring(tl, e, STRINGS); + vcc_expr_tostring(tl, e); if ((*e)->fmt->stringform) { VSB_printf(tl->sb, "Cannot convert type %s(%s) to %s(%s)\n", @@ -1431,7 +1430,7 @@ vcc_expr0(struct vcc *tl, struct expr **e, vcc_type_t fmt) } if (fmt == BODY && !(*e)->fmt->bodyform) - vcc_expr_tostring(tl, e, STRINGS); + vcc_expr_tostring(tl, e); if (fmt == BODY && (*e)->fmt->bodyform) { if ((*e)->fmt == STRINGS) From dridi.boukelmoune at gmail.com Tue Jul 9 16:49:06 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Tue, 9 Jul 2024 16:49:06 +0000 (UTC) Subject: [master] 64b233606 vcc_expr: Break down conditions for cmp operators Message-ID: <20240709164906.B3C059488E@lists.varnish-cache.org> commit 64b233606905039acecc4be58b28a51f5713202c Author: Dridi Boukelmoune Date: Tue Jul 9 09:17:15 2024 +0200 vcc_expr: Break down conditions for cmp operators diff --git a/lib/libvcc/vcc_expr.c b/lib/libvcc/vcc_expr.c index 326e8b4b9..a0107474a 100644 --- a/lib/libvcc/vcc_expr.c +++ b/lib/libvcc/vcc_expr.c @@ -1261,11 +1261,13 @@ vcc_expr_cmp(struct vcc *tl, struct expr **e, vcc_type_t fmt) tk = tl->t; for (cp = vcc_cmps; cp->fmt != VOID; cp++) { - if ((*e)->fmt == cp->fmt && tl->t->tok == cp->token) { - AN(cp->func); - cp->func(tl, e, cp); - return; - } + if (tl->t->tok != cp->token) + continue; + if ((*e)->fmt != cp->fmt) + continue; + AN(cp->func); + cp->func(tl, e, cp); + return; } switch (tk->tok) { From dridi.boukelmoune at gmail.com Tue Jul 9 16:49:06 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Tue, 9 Jul 2024 16:49:06 +0000 (UTC) Subject: [master] b77b70951 vcc_expr: Allow STRING STRANDS comparison Message-ID: <20240709164906.DF35694891@lists.varnish-cache.org> commit b77b709510f1e1ff4792ac9bc9c3836d1a5249ec Author: Dridi Boukelmoune Date: Tue Jul 9 11:01:40 2024 +0200 vcc_expr: Allow STRING STRANDS comparison This otherwise fails on a technicality that suggests the original intent was to allow such a 
comparison: Comparison of different types: STRING '==' STRING It also fixes the STRANDS STRING comparison that failed with a different error message. diff --git a/bin/varnishtest/tests/v00019.vtc b/bin/varnishtest/tests/v00019.vtc index 24a23b51f..d7604419a 100644 --- a/bin/varnishtest/tests/v00019.vtc +++ b/bin/varnishtest/tests/v00019.vtc @@ -69,6 +69,46 @@ varnish v1 -errvcl {Comparison of different types: STRING '==' INT} { } } +varnish v1 -vcl { + import debug; + backend be none; + sub vcl_recv { + if ("string" == "string") { + return (fail("should compile")); + } + } +} + +varnish v1 -vcl { + import debug; + backend be none; + sub vcl_recv { + if ("string" == debug.return_strands("string")) { + return (fail("should compile")); + } + } +} + +varnish v1 -vcl { + import debug; + backend be none; + sub vcl_recv { + if (debug.return_strands("string") == "string") { + return (fail("should compile")); + } + } +} + +varnish v1 -vcl { + import debug; + backend be none; + sub vcl_recv { + if (debug.return_strands("string") == debug.return_strands("string")) { + return (fail("should compile")); + } + } +} + varnish v1 -errvcl {Symbol not found: 'req.http.req.http.foo'} { backend b { .host = "${localhost}"; } sub vcl_recv { diff --git a/lib/libvcc/vcc_expr.c b/lib/libvcc/vcc_expr.c index a0107474a..d1bb53ee6 100644 --- a/lib/libvcc/vcc_expr.c +++ b/lib/libvcc/vcc_expr.c @@ -1046,6 +1046,11 @@ vcc_expr_add(struct vcc *tl, struct expr **e, vcc_type_t fmt) vcc_expr_mul(tl, e, fmt); ERRCHK(tl); + if (tl->t->tok != '+' && (*e)->fmt->stringform) { + vcc_expr_tostring(tl, e); + ERRCHK(tl); + } + while (tl->t->tok == '+' || tl->t->tok == '-') { tk = tl->t; for (ap = vcc_adds; ap->op != EOI; ap++) From dridi.boukelmoune at gmail.com Wed Jul 10 08:44:08 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 10 Jul 2024 08:44:08 +0000 (UTC) Subject: [master] 42a7ecdd9 vcc_expr: Better spot to finalize string concatenation Message-ID: <20240710084408.533816863@lists.varnish-cache.org> commit 42a7ecdd97a65a3f60408809e726e4160857fb26 Author: Dridi Boukelmoune Date: Wed Jul 10 10:39:19 2024 +0200 vcc_expr: Better spot to finalize string concatenation diff --git a/lib/libvcc/vcc_expr.c b/lib/libvcc/vcc_expr.c index d1bb53ee6..e95a4de3e 100644 --- a/lib/libvcc/vcc_expr.c +++ b/lib/libvcc/vcc_expr.c @@ -1046,11 +1046,6 @@ vcc_expr_add(struct vcc *tl, struct expr **e, vcc_type_t fmt) vcc_expr_mul(tl, e, fmt); ERRCHK(tl); - if (tl->t->tok != '+' && (*e)->fmt->stringform) { - vcc_expr_tostring(tl, e); - ERRCHK(tl); - } - while (tl->t->tok == '+' || tl->t->tok == '-') { tk = tl->t; for (ap = vcc_adds; ap->op != EOI; ap++) @@ -1100,6 +1095,10 @@ vcc_expr_add(struct vcc *tl, struct expr **e, vcc_type_t fmt) return; } } + + /* No concatenation, finalize string. */ + if ((*e)->fmt->stringform) + vcc_expr_tostring(tl, e); } /*-------------------------------------------------------------------- From dridi.boukelmoune at gmail.com Wed Jul 10 08:58:05 2024 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Wed, 10 Jul 2024 08:58:05 +0000 (UTC) Subject: [master] 062e71ebd build: Hide comments we don't need in Makefiles Message-ID: <20240710085805.750FB72C1@lists.varnish-cache.org> commit 062e71ebdf6dd3735848355ae24b62db8e50887f Author: Dridi Boukelmoune Date: Wed Jul 10 10:45:27 2024 +0200 build: Hide comments we don't need in Makefiles A comment starting with ## is omitted in Makefile.in and in the final Makefile. 
diff --git a/lib/libvcc/vmodtool.py b/lib/libvcc/vmodtool.py index c2fda1423..64e56d7f9 100755 --- a/lib/libvcc/vmodtool.py +++ b/lib/libvcc/vmodtool.py @@ -49,7 +49,7 @@ import sys import time AMBOILERPLATE = '''\ -# Generated by vmodtool.py --boilerplate. +## Generated by vmodtool.py --boilerplate. vmod_XXX_vcc ?= $(srcdir)/VCC diff --git a/vmod/automake_boilerplate_blob.am b/vmod/automake_boilerplate_blob.am index a90e66013..1d732af00 100644 --- a/vmod/automake_boilerplate_blob.am +++ b/vmod/automake_boilerplate_blob.am @@ -1,4 +1,4 @@ -# Generated by vmodtool.py --boilerplate. +## Generated by vmodtool.py --boilerplate. vmod_blob_vcc ?= $(srcdir)/vmod_blob.vcc diff --git a/vmod/automake_boilerplate_cookie.am b/vmod/automake_boilerplate_cookie.am index 18ba8f244..156bb4a72 100644 --- a/vmod/automake_boilerplate_cookie.am +++ b/vmod/automake_boilerplate_cookie.am @@ -1,4 +1,4 @@ -# Generated by vmodtool.py --boilerplate. +## Generated by vmodtool.py --boilerplate. vmod_cookie_vcc ?= $(srcdir)/vmod_cookie.vcc diff --git a/vmod/automake_boilerplate_debug.am b/vmod/automake_boilerplate_debug.am index 926ca74a4..abda2454e 100644 --- a/vmod/automake_boilerplate_debug.am +++ b/vmod/automake_boilerplate_debug.am @@ -1,4 +1,4 @@ -# Generated by vmodtool.py --boilerplate. +## Generated by vmodtool.py --boilerplate. vmod_debug_vcc ?= $(srcdir)/vmod_debug.vcc diff --git a/vmod/automake_boilerplate_directors.am b/vmod/automake_boilerplate_directors.am index 3a35af278..a2e5a6ce4 100644 --- a/vmod/automake_boilerplate_directors.am +++ b/vmod/automake_boilerplate_directors.am @@ -1,4 +1,4 @@ -# Generated by vmodtool.py --boilerplate. +## Generated by vmodtool.py --boilerplate. vmod_directors_vcc ?= $(srcdir)/vmod_directors.vcc diff --git a/vmod/automake_boilerplate_h2.am b/vmod/automake_boilerplate_h2.am index fac5fd517..30fa5e2b9 100644 --- a/vmod/automake_boilerplate_h2.am +++ b/vmod/automake_boilerplate_h2.am @@ -1,4 +1,4 @@ -# Generated by vmodtool.py --boilerplate. +## Generated by vmodtool.py --boilerplate. vmod_h2_vcc ?= $(srcdir)/vmod_h2.vcc diff --git a/vmod/automake_boilerplate_proxy.am b/vmod/automake_boilerplate_proxy.am index 7849b193f..480d82b83 100644 --- a/vmod/automake_boilerplate_proxy.am +++ b/vmod/automake_boilerplate_proxy.am @@ -1,4 +1,4 @@ -# Generated by vmodtool.py --boilerplate. +## Generated by vmodtool.py --boilerplate. vmod_proxy_vcc ?= $(srcdir)/vmod_proxy.vcc diff --git a/vmod/automake_boilerplate_purge.am b/vmod/automake_boilerplate_purge.am index 4f82e77cc..00656497e 100644 --- a/vmod/automake_boilerplate_purge.am +++ b/vmod/automake_boilerplate_purge.am @@ -1,4 +1,4 @@ -# Generated by vmodtool.py --boilerplate. +## Generated by vmodtool.py --boilerplate. vmod_purge_vcc ?= $(srcdir)/vmod_purge.vcc diff --git a/vmod/automake_boilerplate_std.am b/vmod/automake_boilerplate_std.am index 25c2d0797..738e97d9b 100644 --- a/vmod/automake_boilerplate_std.am +++ b/vmod/automake_boilerplate_std.am @@ -1,4 +1,4 @@ -# Generated by vmodtool.py --boilerplate. +## Generated by vmodtool.py --boilerplate. vmod_std_vcc ?= $(srcdir)/vmod_std.vcc diff --git a/vmod/automake_boilerplate_unix.am b/vmod/automake_boilerplate_unix.am index c7d47f074..ab5e982f7 100644 --- a/vmod/automake_boilerplate_unix.am +++ b/vmod/automake_boilerplate_unix.am @@ -1,4 +1,4 @@ -# Generated by vmodtool.py --boilerplate. +## Generated by vmodtool.py --boilerplate. 
vmod_unix_vcc ?= $(srcdir)/vmod_unix.vcc diff --git a/vmod/automake_boilerplate_vtc.am b/vmod/automake_boilerplate_vtc.am index 51d81c18f..0a2bd1228 100644 --- a/vmod/automake_boilerplate_vtc.am +++ b/vmod/automake_boilerplate_vtc.am @@ -1,4 +1,4 @@ -# Generated by vmodtool.py --boilerplate. +## Generated by vmodtool.py --boilerplate. vmod_vtc_vcc ?= $(srcdir)/vmod_vtc.vcc diff --git a/vsc.am b/vsc.am index b3d7c4bdb..b9f269c4d 100644 --- a/vsc.am +++ b/vsc.am @@ -1,8 +1,8 @@ -# Generic rule to generate C code from VSC files. VSC files must be listed -# in the $(VSC_SRC) variable. The $(VSCTOOL) variable must point to the -# location of vsctool.py, normally set up by varnish.m4 at configure time. -# The resulting $(VSC_GEN) variable must be added to $(BUILT_SOURCES). The -# $(VSC_RST) variable references RST file names for manual pages includes. +## Generic rule to generate C code from VSC files. VSC files must be listed +## in the $(VSC_SRC) variable. The $(VSCTOOL) variable must point to the +## location of vsctool.py, normally set up by varnish.m4 at configure time. +## The resulting $(VSC_GEN) variable must be added to $(BUILT_SOURCES). The +## $(VSC_RST) variable references RST file names for manual pages includes. VSC_GEN = $(VSC_SRC:.vsc=.c) $(VSC_SRC:.vsc=.h) diff --git a/vtc.am b/vtc.am index 0994f833f..db9837e48 100644 --- a/vtc.am +++ b/vtc.am @@ -1,12 +1,12 @@ -# This file helps set up a varnishtest execution without a traditional -# pre-defined list of tests. Test cases are expected to be located in a -# tests/ directory relative to the Makefile's source directory. -# -# When a difference is detected, a refresh is triggered by calling the -# vtc-refresh-tests target that must be defined in the including Makefile. -# -# The current assumption is that all the tests for the Makefile are VTCs -# and may be revisited if needed. +## This file helps set up a varnishtest execution without a traditional +## pre-defined list of tests. Test cases are expected to be located in a +## tests/ directory relative to the Makefile's source directory. +## +## When a difference is detected, a refresh is triggered by calling the +## vtc-refresh-tests target that must be defined in the including Makefile. +## +## The current assumption is that all the tests for the Makefile are VTCs +## and may be revisited if needed. VTC_LOG_COMPILER = $(top_builddir)/bin/varnishtest/varnishtest -v -i TEST_EXTENSIONS = .vtc From nils.goroll at uplex.de Mon Jul 15 18:29:03 2024 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 15 Jul 2024 18:29:03 +0000 (UTC) Subject: [master] 895045a93 vbp: Introduce a backend probe state & document fsm Message-ID: <20240715182903.1DB4410DADB@lists.varnish-cache.org> commit 895045a932ae67017b6516586b6681f69cff9e80 Author: Nils Goroll Date: Mon Jun 3 17:42:33 2024 +0200 vbp: Introduce a backend probe state & document fsm The state is not used yet other than for assertions. 
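To make the pattern concrete before the diff: each probe state is a one-element array of a struct that only carries its name, so states compare by pointer identity and a failing assertion or panic can print a readable state name. The following is a minimal standalone sketch under that assumption; struct probe, probe_state_cold and probe_state_scheduled are made-up names, not the Varnish sources.

#include <assert.h>
#include <stdio.h>

struct probe_state {
    const char *name;
};

#define PROBE_STATE(n) \
    static const struct probe_state probe_state_ ## n[1] = {{ .name = #n }}
PROBE_STATE(cold);
PROBE_STATE(scheduled);
#undef PROBE_STATE

struct probe {
    const struct probe_state *state;
};

int
main(void)
{
    struct probe p = { probe_state_cold };

    assert(p.state == probe_state_cold);        /* compared by pointer */
    p.state = probe_state_scheduled;            /* a state transition */
    printf("probe is now %s\n", p.state->name); /* readable in diagnostics */
    return (0);
}

The array-of-one declaration is what lets each state be referenced as a pointer without taking its address explicitly, matching the style of the diff below.
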
diff --git a/bin/varnishd/cache/cache_backend_probe.c b/bin/varnishd/cache/cache_backend_probe.c index 6de4eeb89..a0b47c4a3 100644 --- a/bin/varnishd/cache/cache_backend_probe.c +++ b/bin/varnishd/cache/cache_backend_probe.c @@ -56,6 +56,17 @@ #include "VSC_vbe.h" +struct vbp_state { + const char *name; +}; + +#define VBP_STATE(n) const struct vbp_state vbp_state_ ## n [1] = {{ .name = #n }} +VBP_STATE(scheduled); +VBP_STATE(running); +VBP_STATE(cold); +VBP_STATE(deleted); +#undef VBP_STATE + /* Default averaging rate, we want something pretty responsive */ #define AVG_RATE 4 @@ -83,6 +94,7 @@ struct vbp_target { double rate; vtim_real due; + const struct vbp_state *state; int running; int heap_idx; struct pool_task task[1]; @@ -449,15 +461,18 @@ vbp_task(struct worker *wrk, void *priv) Lck_Lock(&vbp_mtx); if (vt->running < 0) { + assert(vt->state == vbp_state_deleted); assert(vt->heap_idx == VBH_NOIDX); vbp_delete(vt); } else { + assert(vt->state == vbp_state_running); vt->running = 0; if (vt->heap_idx != VBH_NOIDX) { vt->due = VTIM_real() + vt->interval; VBH_delete(vbp_heap, vt->heap_idx); vbp_heap_insert(vt); } + vt->state = vbp_state_scheduled; } Lck_Unlock(&vbp_mtx); } @@ -490,15 +505,22 @@ vbp_thread(struct worker *wrk, void *priv) vt->due = now + vt->interval; VBH_insert(vbp_heap, vt); if (!vt->running) { + assert(vt->state == vbp_state_scheduled); + vt->state = vbp_state_running; vt->running = 1; vt->task->func = vbp_task; vt->task->priv = vt; Lck_Unlock(&vbp_mtx); r = Pool_Task_Any(vt->task, TASK_QUEUE_REQ); Lck_Lock(&vbp_mtx); - if (r) + if (r) { vt->running = 0; + vt->state = vbp_state_scheduled; + } } + else + assert(vt->state == vbp_state_running); + } } NEEDLESS(Lck_Unlock(&vbp_mtx)); @@ -658,12 +680,18 @@ VBP_Control(const struct backend *be, int enable) Lck_Lock(&vbp_mtx); if (enable) { + // XXX next two assertions are WRONG, see #4108 - WIP + assert(vt->state == vbp_state_cold); assert(vt->heap_idx == VBH_NOIDX); vt->due = VTIM_real(); vbp_heap_insert(vt); + vt->state = vbp_state_scheduled; } else { + assert(vt->state == vbp_state_scheduled || + vt->state == vbp_state_running); assert(vt->heap_idx != VBH_NOIDX); VBH_delete(vbp_heap, vt->heap_idx); + vt->state = vbp_state_cold; } Lck_Unlock(&vbp_mtx); } @@ -686,6 +714,7 @@ VBP_Insert(struct backend *b, const struct vrt_backend_probe *vp, ALLOC_OBJ(vt, VBP_TARGET_MAGIC); XXXAN(vt); + vt->state = vbp_state_cold; vt->conn_pool = tp; VCP_AddRef(vt->conn_pool); vt->backend = b; @@ -711,13 +740,17 @@ VBP_Remove(struct backend *be) be->probe = NULL; vt->backend = NULL; if (vt->running) { + assert(vt->state == vbp_state_running); // task scheduled, it calls vbp_delete() vt->running = -1; vt = NULL; + vt->state = vbp_state_deleted; } else if (vt->heap_idx != VBH_NOIDX) { + assert(vt->state == vbp_state_scheduled); // task done, not yet rescheduled VBH_delete(vbp_heap, vt->heap_idx); - } + } else + assert(vt->state == vbp_state_cold); Lck_Unlock(&vbp_mtx); if (vt != NULL) { assert(vt->heap_idx == VBH_NOIDX); diff --git a/doc/graphviz/cache_backend_probe.dot b/doc/graphviz/cache_backend_probe.dot new file mode 100644 index 000000000..937eb6c0f --- /dev/null +++ b/doc/graphviz/cache_backend_probe.dot @@ -0,0 +1,35 @@ +# cache_backend_probe struct vbp_state + +digraph cache_backend_probe { + ALLOC + scheduled + running + cold + deleted + FREE + + edge [fontname=Courier] + + edge [label="vbp_task()"] + deleted -> FREE + running -> scheduled + + edge [label="vbp_thread()"] + scheduled -> running + + edge [label="vbp_thread() error"] + 
scheduled -> scheduled + + edge [label="VBP_Control()"] + cold -> scheduled + scheduled -> cold + running -> cold + + edge [label="VBP_Insert()"] + ALLOC -> cold + + edge [label="VBP_Remove()"] + running -> deleted # should not happen. we should go through some cool first + scheduled -> FREE # This should not happen. VBP_Control should have set cold + cold -> FREE +} From nils.goroll at uplex.de Mon Jul 15 18:29:03 2024 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 15 Jul 2024 18:29:03 +0000 (UTC) Subject: [master] 8495028eb vbp: move VBH_NOIDX assertion to vbp_delete() Message-ID: <20240715182903.33C3010DADE@lists.varnish-cache.org> commit 8495028ebf51ffe9013096908c60037a6370bb56 Author: Nils Goroll Date: Mon Jun 3 18:11:34 2024 +0200 vbp: move VBH_NOIDX assertion to vbp_delete() diff --git a/bin/varnishd/cache/cache_backend_probe.c b/bin/varnishd/cache/cache_backend_probe.c index a0b47c4a3..556d318c0 100644 --- a/bin/varnishd/cache/cache_backend_probe.c +++ b/bin/varnishd/cache/cache_backend_probe.c @@ -116,6 +116,8 @@ vbp_delete(struct vbp_target *vt) { CHECK_OBJ_NOTNULL(vt, VBP_TARGET_MAGIC); + assert(vt->heap_idx == VBH_NOIDX); + #define DN(x) /**/ VRT_BACKEND_PROBE_HANDLE(); #undef DN @@ -462,7 +464,6 @@ vbp_task(struct worker *wrk, void *priv) Lck_Lock(&vbp_mtx); if (vt->running < 0) { assert(vt->state == vbp_state_deleted); - assert(vt->heap_idx == VBH_NOIDX); vbp_delete(vt); } else { assert(vt->state == vbp_state_running); @@ -752,10 +753,8 @@ VBP_Remove(struct backend *be) } else assert(vt->state == vbp_state_cold); Lck_Unlock(&vbp_mtx); - if (vt != NULL) { - assert(vt->heap_idx == VBH_NOIDX); + if (vt != NULL) vbp_delete(vt); - } } /*-------------------------------------------------------------------*/ From nils.goroll at uplex.de Mon Jul 15 18:29:03 2024 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 15 Jul 2024 18:29:03 +0000 (UTC) Subject: [master] ab21f4a2c vbp: Rework probe state management Message-ID: <20240715182903.567D910DAE2@lists.varnish-cache.org> commit ab21f4a2c9f580814bd0626371e755ea539c665e Author: Nils Goroll Date: Mon Jun 3 20:00:04 2024 +0200 vbp: Rework probe state management Every time I looked at the probe code, my mind ended up twisted and confused. A probe could change the "enabled" state (tracking the temperature) and be removed at any time (unless the mtx is held), yet the code did not seem to reflect this. We un-twist my mind by completing the transition to probe states and adding a chain of two states for the case that a probe is controlled/deleted while its task is running: cooling: running probe disabled deleted: running probe removed (while cooling only) With this new scheme, we can now have (I think) a clean state diagram (see dot file): - a probe begins in the cold state - from cold, it can either get removed or scheduled via VBP_Control() - from scheduled, it can go back to cold (via VBP_Control()) or be picked up by vbp_thread() to change to running (aka task started) - once the task finishes, it normally goes back to scheduled, but in the meantime it could have changed to cooling or deleted, so vbp_task_comple() hadles these cases and either transitions to cold or deletes the probe - if the task can not be scheduled, the same handling happens We now also remove running probes from the binheap to remove complexity. 
scheduled -> scheduled + + edge [label="VBP_Control()"] + cold -> scheduled + scheduled -> cold + running -> cold + + edge [label="VBP_Insert()"] + ALLOC -> cold + + edge [label="VBP_Remove()"] + running -> deleted # should not happen. we should go through some cool first + scheduled -> FREE # This should not happen. VBP_Control should have set cold + cold -> FREE +} From nils.goroll at uplex.de Mon Jul 15 18:29:03 2024 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 15 Jul 2024 18:29:03 +0000 (UTC) Subject: [master] 8495028eb vbp: move VBH_NOIDX assertion to vbp_delete() Message-ID: <20240715182903.33C3010DADE@lists.varnish-cache.org> commit 8495028ebf51ffe9013096908c60037a6370bb56 Author: Nils Goroll Date: Mon Jun 3 18:11:34 2024 +0200 vbp: move VBH_NOIDX assertion to vbp_delete() diff --git a/bin/varnishd/cache/cache_backend_probe.c b/bin/varnishd/cache/cache_backend_probe.c index a0b47c4a3..556d318c0 100644 --- a/bin/varnishd/cache/cache_backend_probe.c +++ b/bin/varnishd/cache/cache_backend_probe.c @@ -116,6 +116,8 @@ vbp_delete(struct vbp_target *vt) { CHECK_OBJ_NOTNULL(vt, VBP_TARGET_MAGIC); + assert(vt->heap_idx == VBH_NOIDX); + #define DN(x) /**/ VRT_BACKEND_PROBE_HANDLE(); #undef DN @@ -462,7 +464,6 @@ vbp_task(struct worker *wrk, void *priv) Lck_Lock(&vbp_mtx); if (vt->running < 0) { assert(vt->state == vbp_state_deleted); - assert(vt->heap_idx == VBH_NOIDX); vbp_delete(vt); } else { assert(vt->state == vbp_state_running); @@ -752,10 +753,8 @@ VBP_Remove(struct backend *be) } else assert(vt->state == vbp_state_cold); Lck_Unlock(&vbp_mtx); - if (vt != NULL) { - assert(vt->heap_idx == VBH_NOIDX); + if (vt != NULL) vbp_delete(vt); - } } /*-------------------------------------------------------------------*/ From nils.goroll at uplex.de Mon Jul 15 18:29:03 2024 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 15 Jul 2024 18:29:03 +0000 (UTC) Subject: [master] ab21f4a2c vbp: Rework probe state management Message-ID: <20240715182903.567D910DAE2@lists.varnish-cache.org> commit ab21f4a2c9f580814bd0626371e755ea539c665e Author: Nils Goroll Date: Mon Jun 3 20:00:04 2024 +0200 vbp: Rework probe state management Every time I looked at the probe code, my mind ended up twisted and confused. A probe could change the "enabled" state (tracking the temperature) and be removed at any time (unless the mtx is held), yet the code did not seem to reflect this. We un-twist my mind by completing the transition to probe states and adding a chain of two states for the case that a probe is controlled/deleted while its task is running: cooling: running probe disabled deleted: running probe removed (while cooling only) With this new scheme, we can now have (I think) a clean state diagram (see dot file): - a probe begins in the cold state - from cold, it can either get removed or scheduled via VBP_Control() - from scheduled, it can go back to cold (via VBP_Control()) or be picked up by vbp_thread() to change to running (aka task started) - once the task finishes, it normally goes back to scheduled, but in the meantime it could have changed to cooling or deleted, so vbp_task_complete() handles these cases and either transitions to cold or deletes the probe - if the task cannot be scheduled, the same handling happens We now also remove running probes from the binheap to remove complexity. 
Fixes #4108 for good diff --git a/bin/varnishd/cache/cache_backend_probe.c b/bin/varnishd/cache/cache_backend_probe.c index 556d318c0..feb9b91a4 100644 --- a/bin/varnishd/cache/cache_backend_probe.c +++ b/bin/varnishd/cache/cache_backend_probe.c @@ -64,6 +64,7 @@ struct vbp_state { VBP_STATE(scheduled); VBP_STATE(running); VBP_STATE(cold); +VBP_STATE(cooling); VBP_STATE(deleted); #undef VBP_STATE @@ -95,7 +96,6 @@ struct vbp_target { vtim_real due; const struct vbp_state *state; - int running; int heap_idx; struct pool_task task[1]; }; @@ -444,6 +444,33 @@ vbp_heap_insert(struct vbp_target *vt) /*-------------------------------------------------------------------- */ +/* + * called when a task was successful or could not get scheduled + * returns non-NULL if target is to be deleted (outside mtx) + */ +static struct vbp_target * +vbp_task_complete(struct vbp_target *vt) +{ + CHECK_OBJ_NOTNULL(vt, VBP_TARGET_MAGIC); + + Lck_AssertHeld(&vbp_mtx); + + assert(vt->heap_idx == VBH_NOIDX); + + if (vt->state == vbp_state_running) { + vt->state = vbp_state_scheduled; + vt->due = VTIM_real() + vt->interval; + vbp_heap_insert(vt); + vt = NULL; + } else if (vt->state == vbp_state_cooling) { + vt->state = vbp_state_cold; + vt = NULL; + } else if (vt->state != vbp_state_deleted) { + WRONG(vt->state->name); + } + return (vt); +} + static void v_matchproto_(task_func_t) vbp_task(struct worker *wrk, void *priv) { @@ -452,7 +479,6 @@ vbp_task(struct worker *wrk, void *priv) CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); CAST_OBJ_NOTNULL(vt, priv, VBP_TARGET_MAGIC); - AN(vt->running); AN(vt->req); assert(vt->req_len > 0); @@ -462,20 +488,11 @@ vbp_task(struct worker *wrk, void *priv) VBP_Update_Backend(vt); Lck_Lock(&vbp_mtx); - if (vt->running < 0) { - assert(vt->state == vbp_state_deleted); - vbp_delete(vt); - } else { - assert(vt->state == vbp_state_running); - vt->running = 0; - if (vt->heap_idx != VBH_NOIDX) { - vt->due = VTIM_real() + vt->interval; - VBH_delete(vbp_heap, vt->heap_idx); - vbp_heap_insert(vt); - } - vt->state = vbp_state_scheduled; - } + vt = vbp_task_complete(vt); Lck_Unlock(&vbp_mtx); + if (vt == NULL) + return; + vbp_delete(vt); } /*-------------------------------------------------------------------- @@ -502,26 +519,26 @@ vbp_thread(struct worker *wrk, void *priv) vt = NULL; (void)Lck_CondWaitUntil(&vbp_cond, &vbp_mtx, nxt); } else { + assert(vt->state == vbp_state_scheduled); VBH_delete(vbp_heap, vt->heap_idx); - vt->due = now + vt->interval; - VBH_insert(vbp_heap, vt); - if (!vt->running) { - assert(vt->state == vbp_state_scheduled); - vt->state = vbp_state_running; - vt->running = 1; - vt->task->func = vbp_task; - vt->task->priv = vt; - Lck_Unlock(&vbp_mtx); - r = Pool_Task_Any(vt->task, TASK_QUEUE_REQ); - Lck_Lock(&vbp_mtx); - if (r) { - vt->running = 0; - vt->state = vbp_state_scheduled; - } - } - else - assert(vt->state == vbp_state_running); + vt->state = vbp_state_running; + vt->task->func = vbp_task; + vt->task->priv = vt; + Lck_Unlock(&vbp_mtx); + r = Pool_Task_Any(vt->task, TASK_QUEUE_REQ); + + Lck_Lock(&vbp_mtx); + if (r == 0) + continue; + vt = vbp_task_complete(vt); + if (vt == NULL) + continue; + Lck_Unlock(&vbp_mtx); + + vbp_delete(vt); + + Lck_Lock(&vbp_mtx); } } NEEDLESS(Lck_Unlock(&vbp_mtx)); @@ -681,18 +698,24 @@ VBP_Control(const struct backend *be, int enable) Lck_Lock(&vbp_mtx); if (enable) { - // XXX next two assertions are WRONG, see #4108 - WIP - assert(vt->state == vbp_state_cold); - assert(vt->heap_idx == VBH_NOIDX); - vt->due = VTIM_real(); - vbp_heap_insert(vt); - 
vt->state = vbp_state_scheduled; + if (vt->state == vbp_state_cooling) { + vt->state = vbp_state_running; + } else if (vt->state == vbp_state_cold) { + assert(vt->heap_idx == VBH_NOIDX); + vt->due = VTIM_real(); + vbp_heap_insert(vt); + vt->state = vbp_state_scheduled; + } else + WRONG(vt->state->name); } else { - assert(vt->state == vbp_state_scheduled || - vt->state == vbp_state_running); - assert(vt->heap_idx != VBH_NOIDX); - VBH_delete(vbp_heap, vt->heap_idx); - vt->state = vbp_state_cold; + if (vt->state == vbp_state_running) { + vt->state = vbp_state_cooling; + } else if (vt->state == vbp_state_scheduled) { + assert(vt->heap_idx != VBH_NOIDX); + VBH_delete(vbp_heap, vt->heap_idx); + vt->state = vbp_state_cold; + } else + WRONG(vt->state->name); } Lck_Unlock(&vbp_mtx); } @@ -740,16 +763,9 @@ VBP_Remove(struct backend *be) be->sick = 1; be->probe = NULL; vt->backend = NULL; - if (vt->running) { - assert(vt->state == vbp_state_running); - // task scheduled, it calls vbp_delete() - vt->running = -1; - vt = NULL; + if (vt->state == vbp_state_cooling) { vt->state = vbp_state_deleted; - } else if (vt->heap_idx != VBH_NOIDX) { - assert(vt->state == vbp_state_scheduled); - // task done, not yet rescheduled - VBH_delete(vbp_heap, vt->heap_idx); + vt = NULL; } else assert(vt->state == vbp_state_cold); Lck_Unlock(&vbp_mtx); @@ -763,18 +779,11 @@ static int v_matchproto_(vbh_cmp_t) vbp_cmp(void *priv, const void *a, const void *b) { const struct vbp_target *aa, *bb; - int ar, br; AZ(priv); CAST_OBJ_NOTNULL(aa, a, VBP_TARGET_MAGIC); CAST_OBJ_NOTNULL(bb, b, VBP_TARGET_MAGIC); - ar = aa->running == 0; - br = bb->running == 0; - - if (ar != br) - return (ar); - return (aa->due < bb->due); } diff --git a/doc/graphviz/cache_backend_probe.dot b/doc/graphviz/cache_backend_probe.dot index 937eb6c0f..9f289b162 100644 --- a/doc/graphviz/cache_backend_probe.dot +++ b/doc/graphviz/cache_backend_probe.dot @@ -5,31 +5,33 @@ digraph cache_backend_probe { scheduled running cold - deleted + cooling # going cold while task runs + deleted # from cooling, removed while task runs FREE edge [fontname=Courier] - edge [label="vbp_task()"] - deleted -> FREE + # via vbp_task() or vbp_thread() scheduling error + edge [label="vbp_task_complete()"] running -> scheduled + cooling -> cold + deleted -> FREE edge [label="vbp_thread()"] scheduled -> running - edge [label="vbp_thread() error"] - scheduled -> scheduled - - edge [label="VBP_Control()"] + edge [label="VBP_Control(enable)"] + cooling -> running cold -> scheduled + + edge [label="VBP_Control(disable)"] + running -> cooling scheduled -> cold - running -> cold edge [label="VBP_Insert()"] ALLOC -> cold edge [label="VBP_Remove()"] - running -> deleted # should not happen. we should go through some cool first - scheduled -> FREE # This should not happen. VBP_Control should have set cold + cooling -> deleted cold -> FREE } From nils.goroll at uplex.de Mon Jul 15 18:29:03 2024 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 15 Jul 2024 18:29:03 +0000 (UTC) Subject: [master] fc2445bf7 vbp: Rename vbp_thread to vbp_scheduler for clarity Message-ID: <20240715182903.6F58F10DAE6@lists.varnish-cache.org> commit fc2445bf7107e85ff977f7db49067d695d71ebf9 Author: Nils Goroll Date: Tue Jul 9 15:52:50 2024 +0200 vbp: Rename vbp_thread to vbp_scheduler for clarity This background thread does not run the actual probes, but schedules tasks which do (vbp_task). Rename suggested by phk. 
diff --git a/bin/varnishd/cache/cache_backend_probe.c b/bin/varnishd/cache/cache_backend_probe.c index feb9b91a4..ee9f98bfc 100644 --- a/bin/varnishd/cache/cache_backend_probe.c +++ b/bin/varnishd/cache/cache_backend_probe.c @@ -499,7 +499,7 @@ vbp_task(struct worker *wrk, void *priv) */ static void * v_matchproto_(bgthread_t) -vbp_thread(struct worker *wrk, void *priv) +vbp_scheduler(struct worker *wrk, void *priv) { vtim_real now, nxt; struct vbp_target *vt; @@ -808,5 +808,5 @@ VBP_Init(void) vbp_heap = VBH_new(NULL, vbp_cmp, vbp_update); AN(vbp_heap); PTOK(pthread_cond_init(&vbp_cond, NULL)); - WRK_BgThread(&thr, "backend-poller", vbp_thread, NULL); + WRK_BgThread(&thr, "backend-probe-scheduler", vbp_scheduler, NULL); } From nils.goroll at uplex.de Wed Jul 17 11:25:05 2024 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 17 Jul 2024 11:25:05 +0000 (UTC) Subject: [master] 32850ee59 cache_fetch: remove superfluous assignment Message-ID: <20240717112505.96678115063@lists.varnish-cache.org> commit 32850ee59cdfd6e35b6ca64367c5f6e05fbac8d2 Author: Nils Goroll Date: Wed Jul 17 13:22:27 2024 +0200 cache_fetch: remove superfluous assignment oc already gets assigned at the top of the function. Ref b6d1f73b232133c6d4731644e6d2d606ea3a17f2 diff --git a/bin/varnishd/cache/cache_fetch.c b/bin/varnishd/cache/cache_fetch.c index 914ce8010..34fbb3ae4 100644 --- a/bin/varnishd/cache/cache_fetch.c +++ b/bin/varnishd/cache/cache_fetch.c @@ -597,7 +597,6 @@ vbf_stp_fetchbody(struct worker *wrk, struct busyobj *bo) } AZ(vfc->failed); l = est; - oc = bo->fetch_objcore; if (oc->boc->transit_buffer > 0) l = vmin_t(ssize_t, l, oc->boc->transit_buffer); assert(l >= 0); From nils.goroll at uplex.de Mon Jul 22 14:01:12 2024 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 22 Jul 2024 14:01:12 +0000 (UTC) Subject: [master] 7292f923d Move implementation of transit_buffer to the storage engine Message-ID: <20240722140112.B1004119979@lists.varnish-cache.org> commit 7292f923dc731292226782a898a804f880567707 Author: Nils Goroll Date: Wed Jul 17 14:56:42 2024 +0200 Move implementation of transit_buffer to the storage engine By the same argument as #4056, storage engines might be able to make a better decision on the layout of object bodies if they know the expected length upfront; thus the transit buffer limit should be implemented at the storage layer.
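As a rough illustration of where the clamp now lives (simplified, with hypothetical names such as boc_lite and storage_getspace; this is not the actual storage API): the storage engine sees the already-clamped request and can therefore take the expected remaining length into account when laying out the object body, instead of the fetch loop shrinking the request up front.

#include <sys/types.h>

#define MIN_SSZ(a, b) ((a) < (b) ? (a) : (b))

struct boc_lite {
    ssize_t transit_buffer;     /* 0 means "no limit" */
};

/* storage-side allocation: the clamp happens here, not in the fetch loop */
static ssize_t
storage_getspace(const struct boc_lite *boc, ssize_t want, ssize_t chunksize)
{
    if (want == 0)
        want = chunksize;       /* fall back to the default chunk size */
    if (boc->transit_buffer > 0)
        want = MIN_SSZ(want, boc->transit_buffer);
    return (want);  /* a real engine would hand back a buffer of this size */
}
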
diff --git a/bin/varnishd/cache/cache_fetch.c b/bin/varnishd/cache/cache_fetch.c index 34fbb3ae4..ef11def8f 100644 --- a/bin/varnishd/cache/cache_fetch.c +++ b/bin/varnishd/cache/cache_fetch.c @@ -597,8 +597,6 @@ vbf_stp_fetchbody(struct worker *wrk, struct busyobj *bo) } AZ(vfc->failed); l = est; - if (oc->boc->transit_buffer > 0) - l = vmin_t(ssize_t, l, oc->boc->transit_buffer); assert(l >= 0); if (VFP_GetStorage(vfc, &l, &ptr) != VFP_OK) { bo->htc->doclose = SC_RX_BODY; diff --git a/bin/varnishd/storage/storage_simple.c b/bin/varnishd/storage/storage_simple.c index 68c780552..9c720aeb4 100644 --- a/bin/varnishd/storage/storage_simple.c +++ b/bin/varnishd/storage/storage_simple.c @@ -473,11 +473,15 @@ sml_getspace(struct worker *wrk, struct objcore *oc, ssize_t *sz, struct storage *st; CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); + CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); + CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC); AN(sz); AN(ptr); if (*sz == 0) *sz = cache_param->fetch_chunksize; assert(*sz > 0); + if (oc->boc->transit_buffer > 0) + *sz = vmin_t(ssize_t, *sz, oc->boc->transit_buffer); o = sml_getobj(wrk, oc); CHECK_OBJ_NOTNULL(o, OBJECT_MAGIC); From nils.goroll at uplex.de Mon Jul 29 16:42:07 2024 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 29 Jul 2024 16:42:07 +0000 (UTC) Subject: [master] d43e225d7 Size pool name buffer for the worst case Message-ID: <20240729164207.423521062A1@lists.varnish-cache.org> commit d43e225d7216ab7f90cd8c2dc7b189a69a7e73e4 Author: Nils Goroll Date: Mon Jul 29 18:39:10 2024 +0200 Size pool name buffer for the worst case "999 pools ought to be enough for everyone" yet the actual maximum is UINT_MAX. Noticed while staring at #4138 diff --git a/bin/varnishd/cache/cache_session.c b/bin/varnishd/cache/cache_session.c index 898242da3..2a8229e5d 100644 --- a/bin/varnishd/cache/cache_session.c +++ b/bin/varnishd/cache/cache_session.c @@ -702,7 +702,7 @@ SES_Rel(struct sess *sp) void SES_NewPool(struct pool *pp, unsigned pool_no) { - char nb[8]; + char nb[4 /* "sess" */ + 10 /* "%u" */ + 1]; CHECK_OBJ_NOTNULL(pp, POOL_MAGIC); bprintf(nb, "req%u", pool_no); From nils.goroll at uplex.de Wed Jul 31 12:51:05 2024 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 31 Jul 2024 12:51:05 +0000 (UTC) Subject: [master] 4e2c50da9 Serialize VCL temperature transitions with VRT_AddDirector() Message-ID: <20240731125105.AD97110ED86@lists.varnish-cache.org> commit 4e2c50da9b4b66a39191dcc7ea11bc4e3d11ab6c Author: Nils Goroll Date: Wed Jul 31 14:20:30 2024 +0200 Serialize VCL temperature transitions with VRT_AddDirector() VRT_AddDirector() reads the temperature under vcl_mtx and only warms up a newly created director for a warm VCL. vcl_set_state, when transitioning to a cold temperature, implicitly assumes that all directors are warm, as it sends a cold event after changing the temperature. Before this commit, we had no guarantee which temperature VRT_AddDirector() would read while racing with vcl_set_state; it could really be any temperature except VCL_TEMP_INIT. By adding this serialization on vcl_mtx, we make sure that the critical region of VRT_AddDirector executes either before the temperature change or after. If before, a warm event is generated for the newly added director, followed by a cold event from vcl_set_state. If after, VRT_AddDirector() should find the vcl temperature as COOLING and not add a backend (background: VRT_AddDirector() should only be called while holding a vcl reference via VRT_VCL_Prevent_{Cold,Discard}()). 
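Sketching the serialization argument with a plain pthread mutex and made-up names (temp_sketch, set_cooling_sketch, add_director_sketch; these are not the actual Varnish structures): because both the temperature flip and the add-director check run inside the same critical section, the check observes the state either entirely before or entirely after the transition, never halfway through.

#include <pthread.h>

enum temp_sketch { TEMP_WARM, TEMP_COOLING, TEMP_COLD };

static pthread_mutex_t mtx_sketch = PTHREAD_MUTEX_INITIALIZER;
static enum temp_sketch vcl_temp_sketch = TEMP_WARM;
static int n_directors_sketch;

/* vcl_set_state() side: flip the temperature under the lock ... */
static void
set_cooling_sketch(void)
{
    pthread_mutex_lock(&mtx_sketch);
    vcl_temp_sketch = TEMP_COOLING;
    pthread_mutex_unlock(&mtx_sketch);
    /* ... then send the cold event to the directors known so far */
}

/* VRT_AddDirector() side: only register (and later warm) for a warm VCL */
static int
add_director_sketch(void)
{
    int ok;

    pthread_mutex_lock(&mtx_sketch);
    ok = (vcl_temp_sketch == TEMP_WARM);
    if (ok)
        n_directors_sketch++;
    pthread_mutex_unlock(&mtx_sketch);
    return (ok);
}
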
The second change, to also momentarily hold vcl_mtx for the transition to a WARM vcl, is not to prevent a known race, but rather to ensure visibility of the new temperature across all threads. I _suspect_ that this change might also fix https://github.com/nigoroll/libvmod-dynamic/issues/117 , but due to lack of a reproducer, this is speculative. Besides the visibility issue, another potential reason is that vcl_set_state could also race VCL_Rel() (likely called via VRT_VCL_Allow_{Cold,Discard}()). diff --git a/bin/varnishd/cache/cache_vcl.c b/bin/varnishd/cache/cache_vcl.c index 897563b09..262b48538 100644 --- a/bin/varnishd/cache/cache_vcl.c +++ b/bin/varnishd/cache/cache_vcl.c @@ -602,8 +602,10 @@ vcl_set_state(struct vcl *vcl, const char *state, struct vsb **msg) if (vcl->temp == VCL_TEMP_COLD) break; if (vcl->busy == 0 && vcl->temp->is_warm) { + Lck_Lock(&vcl_mtx); vcl->temp = VTAILQ_EMPTY(&vcl->ref_list) ? VCL_TEMP_COLD : VCL_TEMP_COOLING; + Lck_Unlock(&vcl_mtx); vcl_BackendEvent(vcl, VCL_EVENT_COLD); AZ(vcl_send_event(vcl, VCL_EVENT_COLD, msg)); AZ(*msg); @@ -627,7 +629,9 @@ vcl_set_state(struct vcl *vcl, const char *state, struct vsb **msg) i = -1; } else { + Lck_Lock(&vcl_mtx); vcl->temp = VCL_TEMP_WARM; + Lck_Unlock(&vcl_mtx); i = vcl_send_event(vcl, VCL_EVENT_WARM, msg); if (i == 0) { vcl_BackendEvent(vcl, VCL_EVENT_WARM); From nils.goroll at uplex.de Wed Jul 31 13:38:04 2024 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 31 Jul 2024 13:38:04 +0000 (UTC) Subject: [master] a19aeb455 Forgotten static declaration Message-ID: <20240731133804.A9CA1110613@lists.varnish-cache.org> commit a19aeb455c5cc46f0c73348f71f4499bb2379425 Author: Nils Goroll Date: Wed Jul 31 15:37:18 2024 +0200 Forgotten static declaration found by Flexelint diff --git a/bin/varnishd/cache/cache_backend_probe.c b/bin/varnishd/cache/cache_backend_probe.c index ee9f98bfc..04cbc4bfe 100644 --- a/bin/varnishd/cache/cache_backend_probe.c +++ b/bin/varnishd/cache/cache_backend_probe.c @@ -60,7 +60,7 @@ struct vbp_state { const char *name; }; -#define VBP_STATE(n) const struct vbp_state vbp_state_ ## n [1] = {{ .name = #n }} +#define VBP_STATE(n) static const struct vbp_state vbp_state_ ## n [1] = {{ .name = #n }} VBP_STATE(scheduled); VBP_STATE(running); VBP_STATE(cold); From nils.goroll at uplex.de Wed Jul 31 13:48:04 2024 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 31 Jul 2024 13:48:04 +0000 (UTC) Subject: [master] bf3785143 Flexelint: enum instead of int Message-ID: <20240731134804.CD862110CE5@lists.varnish-cache.org> commit bf3785143b14d50d2b4b275426eb97c7e49c80b5 Author: Nils Goroll Date: Wed Jul 31 15:46:44 2024 +0200 Flexelint: enum instead of int Ref b37b3c3d33d063560105be47bed71cf84beccc55 diff --git a/bin/varnishd/cache/cache_esi_fetch.c b/bin/varnishd/cache/cache_esi_fetch.c index e80fee3d0..465b432bd 100644 --- a/bin/varnishd/cache/cache_esi_fetch.c +++ b/bin/varnishd/cache/cache_esi_fetch.c @@ -68,7 +68,7 @@ vfp_vep_callback(struct vfp_ctx *vc, void *priv, ssize_t l, enum vgz_flag flg) ssize_t dl; const void *dp; uint8_t *ptr; - int i; + enum vgzret_e i; CHECK_OBJ_NOTNULL(vc, VFP_CTX_MAGIC); CAST_OBJ_NOTNULL(vef, priv, VEF_MAGIC); From nils.goroll at uplex.de Wed Jul 31 17:22:05 2024 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 31 Jul 2024 17:22:05 +0000 (UTC) Subject: [master] 8cf35474e Mark debug flags which should be implemented differently Message-ID: <20240731172205.960E9117853@lists.varnish-cache.org> commit 8cf35474e393912af1aecfae35bb4f660f9a4a1c Author: Nils Goroll 
Date: Wed Jul 31 19:17:31 2024 +0200 Mark debug flags which should be implemented differently I somehow missed the additional SLOW_BEREQ getting in (could be that this was during my vacation). I think we should avoid adding behavioural changes by special-cased debug flags whenever we have better options, and in this case the better option is filters, which would come with #4035, a PR which ironically now needs additional attention just because of these changes. diff --git a/include/tbl/debug_bits.h b/include/tbl/debug_bits.h index 869399a6f..cbe11a487 100644 --- a/include/tbl/debug_bits.h +++ b/include/tbl/debug_bits.h @@ -41,7 +41,7 @@ DEBUG_BIT(HASHEDGE, hashedge, "Edge cases in Hash") DEBUG_BIT(VCLREL, vclrel, "Rapid VCL release") DEBUG_BIT(LURKER, lurker, "VSL Ban lurker") DEBUG_BIT(ESI_CHOP, esi_chop, "Chop ESI fetch to bits") -DEBUG_BIT(FLUSH_HEAD, flush_head, "Flush after http1 head") +DEBUG_BIT(FLUSH_HEAD, flush_head, "Flush after http1 head") // XXX -> filter DEBUG_BIT(VTC_MODE, vtc_mode, "Varnishtest Mode") DEBUG_BIT(WITNESS, witness, "Emit WITNESS lock records") DEBUG_BIT(VSM_KEEP, vsm_keep, "Keep the VSM file on restart") @@ -52,7 +52,7 @@ DEBUG_BIT(PROCESSORS, processors, "Fetch/Deliver processors") DEBUG_BIT(PROTOCOL, protocol, "Protocol debugging") DEBUG_BIT(VCL_KEEP, vcl_keep, "Keep VCL C and so files") DEBUG_BIT(LCK, lck, "Additional lock statistics") -DEBUG_BIT(SLOW_BEREQ, slow_bereq, "Slow down bereq") +DEBUG_BIT(SLOW_BEREQ, slow_bereq, "Slow down bereq") // XXX -> filter #undef DEBUG_BIT /*lint -restore */