From nils.goroll at uplex.de Wed Jul 2 07:43:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 2 Jul 2025 07:43:05 +0000 (UTC) Subject: [master] 1da146080 Update vtest2 Message-ID: <20250702074305.A9B9911C96A@lists.varnish-cache.org> commit 1da14608086131c56ea5c52c24191f91cbfd936d Author: Nils Goroll Date: Wed Jul 2 09:42:22 2025 +0200 Update vtest2 diff --git a/bin/varnishtest/vtest2 b/bin/varnishtest/vtest2 index 4cffb5a3c..63b53d697 160000 --- a/bin/varnishtest/vtest2 +++ b/bin/varnishtest/vtest2 @@ -1 +1 @@ -Subproject commit 4cffb5a3c49e270305c20fc9824a213193ef26af +Subproject commit 63b53d697f264754792845fa1974cb1be55e3344 From nils.goroll at uplex.de Wed Jul 2 08:50:04 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 2 Jul 2025 08:50:04 +0000 (UTC) Subject: [master] 337e95a1d vmod_std: remove type2type functions Message-ID: <20250702085004.EEB1A11EE14@lists.varnish-cache.org> commit 337e95a1de25884cfc135a9447048c918ff4bd63 Author: Nils Goroll Date: Wed Jul 2 10:48:38 2025 +0200 vmod_std: remove type2type functions Ref: #3352 diff --git a/bin/varnishtest/tests/r01532.vtc b/bin/varnishtest/tests/r01532.vtc index 271e51e26..5bb4b743b 100644 --- a/bin/varnishtest/tests/r01532.vtc +++ b/bin/varnishtest/tests/r01532.vtc @@ -9,7 +9,7 @@ varnish v1 -vcl+backend { import std; sub vcl_deliver { - set resp.http.x-foo = std.real2time(1140618699.00, now); + set resp.http.x-foo = std.time(real=1140618699.00, fallback=now); } } -start diff --git a/bin/varnishtest/tests/r03308.vtc b/bin/varnishtest/tests/r03308.vtc index 83355f935..dc98ca904 100644 --- a/bin/varnishtest/tests/r03308.vtc +++ b/bin/varnishtest/tests/r03308.vtc @@ -11,10 +11,9 @@ varnish v1 -vcl+backend { import std; sub vcl_deliver { - set resp.http.ts = std.real2time( - std.real("999999999999.999", 0) * - std.real("999999999999.999", 0), - now); + set resp.http.ts = std.time( + real=std.real("999999999999.999", 0) * std.real("999999999999.999", 0), + fallback=now); } } -start diff --git a/doc/changes.rst b/doc/changes.rst index 6ee0ed3ff..ca0c5ff58 100644 --- a/doc/changes.rst +++ b/doc/changes.rst @@ -41,6 +41,28 @@ Varnish Cache NEXT (8.0, 2025-09-15) .. PLEASE keep this roughly in commit order as shown by git-log / tig (new to old) +* The VMOD functions ``std.real2integer()``, ``std.real2time()``, + ``std.time2integer()`` and ``std.time2real()`` have been removed. They had + been marked deprecated since Varnish Cache release 6.2.0 (2019-03-15). + + The plug-in replacements for these functions are: + + * ``std.real2integer()``:: + + std.integer(real=std.round(...), fallback=...) + + * ``std.real2time()``:: + + std.time(real=std.round(...), fallback=...) + + * ``std.time2integer()``:: + + std.integer(time=..., fallback=...) + + * ``std.time2real()``:: + + std.real(time=..., fallback=...) + * The bundled varnishtest sources have now been replaced with the separate VTest2 repository. 
diff --git a/vmod/tests/std_b00009.vtc b/vmod/tests/std_b00009.vtc index 47ee311fe..eea455a12 100644 --- a/vmod/tests/std_b00009.vtc +++ b/vmod/tests/std_b00009.vtc @@ -1,4 +1,4 @@ -varnishtest "Test real2integer, real2time, time2integer and time2real in std" +varnishtest "Test conversion functions in std" server s1 { rxreq @@ -10,14 +10,16 @@ varnish v1 -vcl+backend { sub vcl_deliver { set resp.http.x-foo = std.integer(req.http.foo, 0); - set resp.http.x-bar = std.time2integer(std.real2time( - std.real(resp.http.x-foo, 0.0), now), 1); + set resp.http.x-bar = std.integer(time=std.time( + real=std.real(resp.http.x-foo, 0.0), fallback=now), + fallback=1); - set resp.http.x-baz = std.time2real(std.real2time( - std.real(resp.http.x-foo, 0.0), now), 1.0); + set resp.http.x-baz = std.real(time=std.time( + real=std.real(resp.http.x-foo, 0.0), fallback=now), + fallback=1.0); - set resp.http.x-qux = std.real2integer( - std.real(req.http.foo, 2.0), 2); + set resp.http.x-qux = std.integer( + real=std.real(req.http.foo, 2.0), fallback=2); set resp.http.x-xyzzy1 = std.integer( s=std.real(req.http.foo, 2.0)); @@ -25,7 +27,9 @@ varnish v1 -vcl+backend { set resp.http.x-xyzzy2 = std.integer( real=std.real(req.http.foo, 2.0)); - set resp.http.x-int-fallback = std.real2integer(123456789012.345 * 1000.0 * 10, 2); + set resp.http.x-int-fallback = std.integer( + real=123456789012.345 * 1000.0 * 10, + fallback=2); } } -start diff --git a/vmod/vmod_std.vcc b/vmod/vmod_std.vcc index 2fb65f7bf..87978f615 100644 --- a/vmod/vmod_std.vcc +++ b/vmod/vmod_std.vcc @@ -696,74 +696,6 @@ $Function DURATION timed_call(SUB) Call the given SUB and return a high precision measurement of the execution time. -DEPRECATED functions -==================== - -$Function INT real2integer(REAL r, INT fallback) - -**DEPRECATED**: This function will be removed in a future version of -varnish, use `std.integer()`_ with a *real* argument and the -`std.round()`_ function instead, for example:: - - std.integer(real=std.round(...), fallback=...) - -Rounds the real *r* to the nearest integer, but round halfway cases -away from zero (see `round(3)`). If conversion fails, *fallback* will -be returned. - -Examples:: - - set req.http.integer = std.real2integer(1140618699.00, 0); - set req.http.posone = real2integer( 0.5, 0); # = 1.0 - set req.http.negone = real2integer(-0.5, 0); # = -1.0 - -$Function TIME real2time(REAL r, TIME fallback) - -**DEPRECATED**: This function will be removed in a future version of -varnish, use `std.time()`_ with a *real* argument and the -`std.round()`_ function instead, for example:: - - std.time(real=std.round(...), fallback=...) - -Rounds the real *r* to the nearest integer (see -`std.real2integer()`_) and returns the corresponding time when -interpreted as a unix epoch. If conversion fails, *fallback* will be -returned. - -Example:: - - set req.http.time = std.real2time(1140618699.00, now); - -$Function INT time2integer(TIME t, INT fallback) - -**DEPRECATED**: This function will be removed in a future version of -varnish, use `std.integer()`_ with a *time* argument instead, for -example:: - - std.integer(time=..., fallback=...) - -Converts the time *t* to a integer. If conversion fails, -*fallback* will be returned. - -Example:: - - set req.http.int = std.time2integer(now, 0); - -$Function REAL time2real(TIME t, REAL fallback) - -**DEPRECATED**: This function will be removed in a future version of -varnish, use `std.real()`_ with a *time* argument instead, for -example:: - - std.real(time=..., fallback=...) 
- -Converts the time *t* to a real. If conversion fails, *fallback* will -be returned. - -Example:: - - set req.http.real = std.time2real(now, 1.0); - SEE ALSO ======== diff --git a/vmod/vmod_std_conversions.c b/vmod/vmod_std_conversions.c index f8ef8d936..2965f24c2 100644 --- a/vmod/vmod_std_conversions.c +++ b/vmod/vmod_std_conversions.c @@ -353,55 +353,3 @@ vmod_strftime(VRT_CTX, VCL_TIME t, VCL_STRING fmt) WS_Release(ctx->ws, r); return (s); } - -/* These functions are deprecated as of 2019-03-15 release */ - -VCL_INT v_matchproto_(td_std_real2integer) -vmod_real2integer(VRT_CTX, VCL_REAL r, VCL_INT i) -{ - VCL_INT retval; - - CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); - - if (!VRT_REAL_is_valid(r)) - return (i); - retval = (VCL_INT)round(r); - if (!VRT_INT_is_valid(retval)) - return (i); - return (retval); -} - -VCL_TIME v_matchproto_(td_std_real2time) -vmod_real2time(VRT_CTX, VCL_REAL r, VCL_TIME t) -{ - CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); - - if (!isfinite(r)) - return (t); - - return (r); -} - -VCL_INT v_matchproto_(td_std_time2integer) -vmod_time2integer(VRT_CTX, VCL_TIME t, VCL_INT i) -{ - CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); - - if (!isfinite(t)) - return (i); - t = round(t); - if (t > VRT_INTEGER_MAX || t < VRT_INTEGER_MIN) - return (i); - return ((VCL_INT)t); -} - -VCL_REAL v_matchproto_(td_std_time2real) -vmod_time2real(VRT_CTX, VCL_TIME t, VCL_REAL r) -{ - CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); - - if (!isfinite(t)) - return (r); - - return (t); -} From nils.goroll at uplex.de Fri Jul 4 17:04:03 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:03 +0000 (UTC) Subject: [master] ce9e52123 cache_obj: refactor: pull out extension signal to client Message-ID: <20250704170403.4790711A909@lists.varnish-cache.org> commit ce9e5212325050bc1f4e2c6eef42fde3ace48613 Author: Nils Goroll Date: Sun Sep 29 10:44:06 2024 +0200 cache_obj: refactor: pull out extension signal to client diff --git a/bin/varnishd/cache/cache_obj.c b/bin/varnishd/cache/cache_obj.c index 9ffa373f3..aa4ef2ac4 100644 --- a/bin/varnishd/cache/cache_obj.c +++ b/bin/varnishd/cache/cache_obj.c @@ -258,6 +258,17 @@ ObjExtend(struct worker *wrk, struct objcore *oc, ssize_t l, int final) /*==================================================================== */ +static inline void +objSignalFetchLocked(const struct objcore *oc, uint64_t l) +{ + if (oc->boc->transit_buffer > 0) { + assert(oc->flags & OC_F_TRANSIENT); + /* Signal the new client position */ + oc->boc->delivered_so_far = l; + PTOK(pthread_cond_signal(&oc->boc->cond)); + } +} + uint64_t ObjWaitExtend(const struct worker *wrk, const struct objcore *oc, uint64_t l, enum boc_state_e *statep) @@ -272,13 +283,8 @@ ObjWaitExtend(const struct worker *wrk, const struct objcore *oc, uint64_t l, while (1) { rv = oc->boc->fetched_so_far; assert(l <= rv || oc->boc->state == BOS_FAILED); - if (oc->boc->transit_buffer > 0) { - assert(oc->flags & OC_F_TRANSIENT); - /* Signal the new client position */ - oc->boc->delivered_so_far = l; - PTOK(pthread_cond_signal(&oc->boc->cond)); - } state = oc->boc->state; + objSignalFetchLocked(oc, l); if (rv > l || state >= BOS_FINISHED) break; (void)Lck_CondWait(&oc->boc->cond, &oc->boc->mtx); From nils.goroll at uplex.de Fri Jul 4 17:04:03 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:03 +0000 (UTC) Subject: [master] 3ed720f74 cache_obj: Add a more generic boc extension notification facility Message-ID: <20250704170403.A8DAC11A90C@lists.varnish-cache.org> commit 
3ed720f74e50e24c561ce5bcd604785e4bf18063 Author: Nils Goroll Date: Sun Sep 29 10:47:13 2024 +0200 cache_obj: Add a more generic boc extension notification facility This commit prepares a more generic busy object extension notification facility for the asynchronous iteration facility introduced with the next commit. It makes more sense when looked at in the context of that commit, but the changes constitute a fairly independent part and thus have been separated. Background To support streaming of busy objects (delivery to a client while the body is being fetched), the Object API provides ObjWaitExtend(), which is called by storage iterators to learn the available amount of body data and to wait for more if all available data has been processed (= sent to the client, usually). The other end of the facility is ObjExtend(), which is called by the fetch side of storage to update the available amount of body data and wake up any clients blocking in ObjWaitExtend(). This facility was recently extended with a blocking operation in the other direction, where the writing side blocks if the amount of unsent data exceeds the amount configured via the transit_buffer. Why this change? The existing facility is based on the model of blocking threads. In order to support asynchronous iterators, where a single thread may serve multiple requests, we need a different, non-blocking model with notifications. Implementation The basic implementation idea is to introduce a variant of ObjWaitExtend() which, rather than blocking on a condition variable, registers a callback function to be called when the condition variable is signalled. This is ObjVAIGetExtend(): It returns the updated extension, if available, _or_ registers the callback. To implement the actual callback, we add to struct boc a queue (struct vai_q_head), whose elements are basically the notification callback with two pointers: a private pointer for the caller, and vai_hdl, an opaque handle owned by storage. ObjExtend() now also walks the list of registered callbacks. ObjVAICancel() removes a callback when the caller is no longer interested or needs to reclaim the queue entry. 
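A minimal caller-side sketch of this facility, assuming a hypothetical my_notify callback and my_state struct (wrk, oc and my_hdl stand in for the usual context); the declarations match the diff below:

    struct my_state {
        unsigned hasmore;               /* hypothetical caller state */
    };

    static void
    my_notify(vai_hdl hdl, void *priv)
    {
        struct my_state *s = priv;

        /* runs in an arbitrary thread WITH the boc mtx held,
         * so do no more than flag that new data may be available */
        (void)hdl;
        s->hasmore = 1;
    }

    struct my_state mys = { 0 };
    struct vai_qe qe;
    enum boc_state_e state;
    uint64_t avail, returned = 0;

    /* one-time setup of the queue entry */
    qe.magic = VAI_Q_MAGIC;
    qe.cb = my_notify;
    qe.hdl = my_hdl;                    /* opaque handle owned by storage */
    qe.priv = &mys;

    /* returns the current extension, or registers qe so that my_notify()
     * gets called when more data arrives or the boc state changes */
    avail = ObjVAIGetExtend(wrk, oc, returned, &state, &qe);

    /* when giving up before the notification fired, withdraw the entry */
    ObjVAICancel(wrk, oc->boc, &qe);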
diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h index 3b01a7ac7..e92959f43 100644 --- a/bin/varnishd/cache/cache.h +++ b/bin/varnishd/cache/cache.h @@ -282,6 +282,10 @@ enum boc_state_e { #include "tbl/boc_state.h" }; +// cache_obj.h vai notify +struct vai_qe; +VSLIST_HEAD(vai_q_head, vai_qe); + struct boc { unsigned magic; #define BOC_MAGIC 0x70c98476 @@ -294,6 +298,7 @@ struct boc { uint64_t fetched_so_far; uint64_t delivered_so_far; uint64_t transit_buffer; + struct vai_q_head vai_q_head; }; /* Object core structure --------------------------------------------- @@ -761,6 +766,15 @@ int ObjGetDouble(struct worker *, struct objcore *, enum obj_attr, double *); int ObjGetU64(struct worker *, struct objcore *, enum obj_attr, uint64_t *); int ObjCheckFlag(struct worker *, struct objcore *, enum obj_flags of); +/*==================================================================== + * ObjVAI...(): Asynchronous Iteration + * + * see comments in cache_obj.c for usage + */ + +typedef void *vai_hdl; +typedef void vai_notify_cb(vai_hdl, void *priv); + /* cache_req_body.c */ ssize_t VRB_Iterate(struct worker *, struct vsl_log *, struct req *, objiterate_f *func, void *priv); diff --git a/bin/varnishd/cache/cache_obj.c b/bin/varnishd/cache/cache_obj.c index aa4ef2ac4..c5f2e54fc 100644 --- a/bin/varnishd/cache/cache_obj.c +++ b/bin/varnishd/cache/cache_obj.c @@ -231,6 +231,29 @@ obj_extend_condwait(const struct objcore *oc) (void)Lck_CondWait(&oc->boc->cond, &oc->boc->mtx); } +// notify of an extension of the boc or state change +//lint -sem(obj_boc_notify_Unlock, thread_unlock) + +static void +obj_boc_notify_Unlock(struct boc *boc) +{ + struct vai_qe *qe, *next; + + PTOK(pthread_cond_broadcast(&boc->cond)); + qe = VSLIST_FIRST(&boc->vai_q_head); + VSLIST_FIRST(&boc->vai_q_head) = NULL; + while (qe != NULL) { + CHECK_OBJ(qe, VAI_Q_MAGIC); + AN(qe->flags & VAI_QF_INQUEUE); + qe->flags &= ~VAI_QF_INQUEUE; + next = VSLIST_NEXT(qe, list); + VSLIST_NEXT(qe, list) = NULL; + qe->cb(qe->hdl, qe->priv); + qe = next; + } + Lck_Unlock(&boc->mtx); +} + void ObjExtend(struct worker *wrk, struct objcore *oc, ssize_t l, int final) { @@ -241,14 +264,13 @@ ObjExtend(struct worker *wrk, struct objcore *oc, ssize_t l, int final) AN(om->objextend); assert(l >= 0); - Lck_Lock(&oc->boc->mtx); if (l > 0) { + Lck_Lock(&oc->boc->mtx); obj_extend_condwait(oc); om->objextend(wrk, oc, l); oc->boc->fetched_so_far += l; - PTOK(pthread_cond_broadcast(&oc->boc->cond)); + obj_boc_notify_Unlock(oc->boc); } - Lck_Unlock(&oc->boc->mtx); assert(oc->boc->state < BOS_FINISHED); if (final && om->objtrimstore != NULL) @@ -294,6 +316,51 @@ ObjWaitExtend(const struct worker *wrk, const struct objcore *oc, uint64_t l, *statep = state; return (rv); } + +// get a new extension _or_ register a notification +uint64_t +ObjVAIGetExtend(struct worker *wrk, const struct objcore *oc, uint64_t l, + enum boc_state_e *statep, struct vai_qe *qe) +{ + enum boc_state_e state; + uint64_t rv; + + (void) wrk; + CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); + CHECK_OBJ_NOTNULL(oc->boc, BOC_MAGIC); + CHECK_OBJ_NOTNULL(qe, VAI_Q_MAGIC); + Lck_Lock(&oc->boc->mtx); + rv = oc->boc->fetched_so_far; + assert(l <= rv || oc->boc->state == BOS_FAILED); + state = oc->boc->state; + objSignalFetchLocked(oc, l); + if (l == rv && state < BOS_FINISHED && + (qe->flags & VAI_QF_INQUEUE) == 0) { + qe->flags |= VAI_QF_INQUEUE; + VSLIST_INSERT_HEAD(&oc->boc->vai_q_head, qe, list); + } + Lck_Unlock(&oc->boc->mtx); + if (statep != NULL) + *statep = state; + return (rv); 
+} + +void +ObjVAICancel(struct worker *wrk, struct boc *boc, struct vai_qe *qe) +{ + + (void) wrk; + CHECK_OBJ_NOTNULL(boc, BOC_MAGIC); + CHECK_OBJ_NOTNULL(qe, VAI_Q_MAGIC); + + Lck_Lock(&boc->mtx); + // inefficient, but should be rare + if ((qe->flags & VAI_QF_INQUEUE) != 0) + VSLIST_REMOVE(&boc->vai_q_head, qe, vai_qe, list); + qe->flags = 0; + Lck_Unlock(&boc->mtx); +} + /*==================================================================== */ @@ -319,8 +386,7 @@ ObjSetState(struct worker *wrk, const struct objcore *oc, Lck_Lock(&oc->boc->mtx); oc->boc->state = next; - PTOK(pthread_cond_broadcast(&oc->boc->cond)); - Lck_Unlock(&oc->boc->mtx); + obj_boc_notify_Unlock(oc->boc); } /*==================================================================== diff --git a/bin/varnishd/cache/cache_obj.h b/bin/varnishd/cache/cache_obj.h index 1f936a534..f6ee8618e 100644 --- a/bin/varnishd/cache/cache_obj.h +++ b/bin/varnishd/cache/cache_obj.h @@ -50,6 +50,26 @@ typedef void *objsetattr_f(struct worker *, struct objcore *, enum obj_attr attr, ssize_t len, const void *ptr); typedef void objtouch_f(struct worker *, struct objcore *, vtim_real now); +/* called by Obj/storage to notify that the lease function (vai_lease_f) can be + * called again after a -EAGAIN / -ENOBUFS return value + * NOTE: + * - the callback gets executed by an arbitrary thread + * - WITH the boc mtx held + * so it should never block and be efficient + */ + +/* notify entry added to struct boc::vai_q_head */ +struct vai_qe { + unsigned magic; +#define VAI_Q_MAGIC 0x573e27eb + unsigned flags; +#define VAI_QF_INQUEUE (1U<<0) + VSLIST_ENTRY(vai_qe) list; + vai_notify_cb *cb; + vai_hdl hdl; + void *priv; +}; + struct obj_methods { /* required */ objfree_f *objfree; diff --git a/bin/varnishd/cache/cache_varnishd.h b/bin/varnishd/cache/cache_varnishd.h index c4dc02a48..6d73c04fd 100644 --- a/bin/varnishd/cache/cache_varnishd.h +++ b/bin/varnishd/cache/cache_varnishd.h @@ -348,6 +348,10 @@ void *ObjSetAttr(struct worker *, struct objcore *, enum obj_attr, int ObjCopyAttr(struct worker *, struct objcore *, struct objcore *, enum obj_attr attr); void ObjBocDone(struct worker *, struct objcore *, struct boc **); +// VAI +uint64_t ObjVAIGetExtend(struct worker *, const struct objcore *, uint64_t, + enum boc_state_e *, struct vai_qe *); +void ObjVAICancel(struct worker *, struct boc *, struct vai_qe *); int ObjSetDouble(struct worker *, struct objcore *, enum obj_attr, double); int ObjSetU64(struct worker *, struct objcore *, enum obj_attr, uint64_t); From nils.goroll at uplex.de Fri Jul 4 17:04:03 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:03 +0000 (UTC) Subject: [master] 36cad5428 cache_obj: Add an asynchronous iteration API Message-ID: <20250704170403.CFAC011A910@lists.varnish-cache.org> commit 36cad542831457edf2fb939a886ad0142ef88b7b Author: Nils Goroll Date: Mon Jan 6 22:02:26 2025 +0100 cache_obj: Add an asynchronous iteration API This commit adds a new object iteration API to support asynchronous IO. Background To process object bodies, the Object API so far provides ObjIterate(), which calls a storage-specific iterator function. It in turn calls a caller-provided objiterate_f function on individual, contiguous segments of data (extents). In turn, objiterate_f gets called with either no flags, or one of OBJ_ITER_FLUSH and OBJ_ITER_END. 
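Under that contract, a caller-provided objiterate_f might look like this sketch (not from the commit; the my_sink state and helpers are hypothetical):

    static int v_matchproto_(objiterate_f)
    my_deliver(void *priv, unsigned flush, const void *ptr, ssize_t len)
    {
        struct my_sink *s = priv;       /* hypothetical caller state */

        /* the extent remains valid until a later call carries a flag,
         * so it may be recorded for batched processing */
        if (len > 0 && my_sink_append(s, ptr, len))
            return (-1);                /* non-zero aborts the iteration */
        if (flush & OBJ_ITER_FLUSH)     /* stop using recorded extents */
            return (my_sink_flush(s));
        if (flush & OBJ_ITER_END)       /* no more data will follow */
            return (my_sink_finish(s));
        return (0);
    }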
The storage iterator uses these flags to signal the lifetime of the provided extents: They remain valid until a flag is present, so the caller may delay using them until an extent arrives with a flag set. Or, seen from the other end, objiterate_f needs to ensure it does not use any previously received extent when a flag is seen. objiterate_f can not make any assumption as to if or when it is going to be called: if the storage iterator function needs time to retrieve data or a streaming fetch is in progress, then so be it; objiterate_f may eventually get called, or not. Or, again seen from the other end, the storage iterator function assumes it is called from a dedicated thread and may block at any time. Why this change? The model described above is fundamentally incompatible with asynchronous, event-driven IO models, where a single thread might serve multiple requests in parallel to benefit from efficiency gains, and thus no called function must ever block. This additional API is intended to provide an interface suitable for such asynchronous models. As before, the asynchronous iterator is owned by a storage-specific implementation, but now, instead of using a thread for its state, that state exists in a data structure opaque to the caller. Batching with scatter arrays (VSCARAB) As recapitulated above, the existing objiterate_f works on one buffer at a time, yet even before asynchronous I/O, issuing one system call for each buffer would be inefficient. So, for the case of HTTP/1, the V1L layer collects buffers into an array of io vectors (struct iovec), which are handed over to the kernel using writev(). These arrays of io vectors seem to have no established name even after decades of existence; elsewhere they are called siov or sarray, so in this API, we are going to call them scatter arrays. With the new API, we use scatter arrays for all the processing steps: The goal is that storage fills a scatter array, which then gets processed and maybe replaced by filters, until finally some transport hands many I/Os at once to the kernel. Established interfaces follow the signature of writev(): they have a pointer to an array of struct iovec and a count (struct iovec *iov, int iovcnt). Yet for our plans, we want to have something which can be passed around in a single unit, to ensure that the array is always used with the right count, something which can vary in size and live on the heap or the stack. This is the VSCARAB (struct vscarab), the Varnish SCatter ARAy of Buffers, basically a container struct with a flexible array member (fam). The VSCARAB has a capacity, a used count, and is annotated with v_counted_by_() such that, when support for bounds checking is improved by compilers, we get additional sanity checks (and possibly optimizations). The flags member of struct vscarab has one used bit so far, VSCARAB_F_END, which is to signal "EOF", like VDP_END. It should be set together with the last bits of data, but can also be set later. We add macros to work on VSCARABs for (bounds) allocation (on the stack and heap), initialization, checking (magic and limits), iterating, and adding elements. VSCARET and VFLA Managing scatter arrays is one side of the coin; when we are done using buffers, we need to return them to storage, such that storage can do LRU things or reuse memory. As before, we want to batch these operations for efficiency. As an easy-to-use, flexible data structure, we add VSCARAB's sibling, VSCARET. And, because both are basically the same, we generalize the macros as VFLA, Varnish Flexible Arrays. 
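A compressed sketch of the resulting usage pattern, built only on the macros added in this commit (the capacity of 4 and the single buffer are illustrative):

    static uint64_t
    my_vscarab_demo(void *buf, size_t len)
    {
        /* stack-allocated VSCARAB with room for 4 viovs */
        VSCARAB_LOCAL(scarab, 4);
        struct viov *v;
        uint64_t total = 0;

        /* VSCARAB_GET() hands out the next free slot, NULL when full */
        v = VSCARAB_GET(scarab);
        AN(v);
        v->iov.iov_base = buf;
        v->iov.iov_len = len;
        v->lease = VAI_LEASE_NORET;     /* static data, not owned by storage */
        scarab->flags |= VSCARAB_F_END; /* no more data will be added */

        VSCARAB_FOREACH(v, scarab)
            total += v->iov.iov_len;
        return (total);
    }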
API Usage The basic model for the API is that the storage engine "leases" to the caller a number of extents, which the caller is then free to use until it returns the leases to the storage engine. The storage engine can also signal to the caller that it can not return more extents unless some are returned or that it simply can not return any at this time for other reasons (for example, because it is waiting for data on a streaming fetch). In both cases, the storage engine promises to call the caller's notification function when it is ready to provide more extents or iteration has ended. The API consists of four functions: - ObjVAIinit() requests an asynchronous iteration on an object. The caller provides an optional workspace for the storage engine to use for its state, and the notification callback / private pointer introduced with the previous commit. Its use is explained below. ObjVAIinit() returns either an opaque handle owned jointly by the Object layer in Varnish-Cache and the storage engine, or NULL if the storage engine does not provide asynchronous iteration. All other API functions work on the handle returned by ObjVAIinit(): - ObjVAIlease() returns the next extents from the object body in a caller-provided VSCARAB. Each extent is a struct viov, which contains a struct iovec (see iovec(3type) / readv(2)) with the actual extent, and an integer identifying the lease. For the VSCARAB containing the last extent and/or any later call (for which the return value is 0), VSCARAB_F_END is set in flags. The "lease" integer (uint64_t) of each viov is opaque to the caller and needs to be returned as-is later, but is guaranteed by storage to be a multiple of 8. This can be used by the caller to temporarily stash a tiny amount of additional state into the lower bits of the lease. ObjVAIlease() either returns a positive number of available leases, zero if no more leases can be provided, or a negative integer for "call again later" and error conditions: -EAGAIN signals that no more data is available at this point, and the storage engine will call the notification function when the condition changes. -ENOBUFS behaves identically, but also requires the caller to return more leases. -EPIPE mirrors BOS_FAILED on the busy object. Any other -(errno) can be used by the storage engine to signal other error conditions. To summarize, the return value is either negative for errors or the number of extents _added_ to the VSCARAB. To determine EOF, callers must only check the flags member of the VSCARAB for VSCARAB_F_END. - ObjVAIreturn() returns a VSCARET of leases to the storage when the caller is done with them. For efficiency, leases of extents which are no longer in use should be collected in a VSCARET and returned using ObjVAIreturn() before any blocking condition. They must be returned when ObjVAIlease() requests it by returning -ENOBUFS and, naturally, when iteration over the object body ends. - ObjVAIfini() finalizes iteration. The handle must not be used thereafter. Implementation One particular aspect of the implementation is that the storage engine returns the "lease", "return" and "fini" functions to be used with the handle. This allows the storage engine to provide functions tailored to the attributes of the storage object; for example, streaming fetches require more elaborate handling than settled storage objects. 
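Condensed into a sketch, a consumer following the convention above would look roughly like this (my_notify, my_wait and process() stand in for a real notification mechanism and data sink; error handling trimmed):

    VSCARAB_LOCAL(scarab, 16);
    VSCARET_LOCAL(scaret, 16);
    struct viov *v;
    vai_hdl hdl;
    int n;

    hdl = ObjVAIinit(wrk, oc, NULL, my_notify, &mys);
    AN(hdl);                            /* NULL: no async support here */

    while (1) {
        n = ObjVAIlease(wrk, hdl, scarab);
        if (n == -EAGAIN || n == -ENOBUFS) {
            /* return collected leases before blocking,
             * mandatory for -ENOBUFS */
            ObjVAIreturn(wrk, hdl, scaret);
            my_wait(&mys);              /* until my_notify() fires */
            continue;
        }
        if (n < 0)
            break;                      /* -EPIPE or other fatal error */
        VSCARAB_FOREACH(v, scarab) {
            process(v->iov.iov_base, v->iov.iov_len);
            VSCARET_ADD(scaret, v->lease);      /* done with this extent */
        }
        ObjVAIreturn(wrk, hdl, scaret);
        if (scarab->flags & VSCARAB_F_END)      /* the only EOF signal */
            break;
        VSCARAB_INIT(scarab, scarab->capacity);
    }
    ObjVAIreturn(wrk, hdl, scaret);
    ObjVAIfini(wrk, &hdl);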
Consequently, the vai_hdl which is, by design, opaque to the caller, is not entirely opaque to the object layer: The implementation requires it to start with a struct vai_hdl_preamble containing the function pointers to be called by ObjVAIlease(), ObjVAIreturn() and ObjVAIfini(). More details about the implementation will become clear with the next commit, which implements SML's synchronous iterator using the new API. diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h index e92959f43..118f65a34 100644 --- a/bin/varnishd/cache/cache.h +++ b/bin/varnishd/cache/cache.h @@ -42,6 +42,7 @@ #include #include #include +#include #include "vdef.h" #include "vrt.h" @@ -775,6 +776,152 @@ int ObjCheckFlag(struct worker *, struct objcore *, enum obj_flags of); typedef void *vai_hdl; typedef void vai_notify_cb(vai_hdl, void *priv); + +/* + * VSCARAB: Varnish SCatter ARAy of Buffers: + * + * an array of viovs, elsewhere also called an siov or sarray + */ +struct viov { + uint64_t lease; + struct iovec iov; +}; + +struct vscarab { + unsigned magic; +#define VSCARAB_MAGIC 0x05ca7ab0 + unsigned flags; +#define VSCARAB_F_END 1 // last viov is last overall + unsigned capacity; + unsigned used; + struct viov s[] v_counted_by_(capacity); +}; + +// VFLA: starting generic container-with-flexible-array-member macros +// aka "struct hack" +// +// type : struct name +// name : a pointer to struct type +// mag : the magic value for this VFLA +// cptr : pointer to container struct (aka "head") +// fam : member name of the flexible array member +// cap : capacity +// +// common properties of all VFLAs: +// - are a miniobj (have magic as the first element) +// - capacity member is the fam capacity +// - used member is the number of fam elements used +// +// VFLA_SIZE ignores the cap == 0 case, we assert in _INIT +// offsetoff ref: https://gustedt.wordpress.com/2011/03/14/flexible-array-member/ +//lint -emacro(413, VFLA_SIZE) +#define VFLA_SIZE(type, fam, cap) (offsetof(struct type, fam) + \ + (cap) * sizeof(((struct type *)0)->fam[0])) +#define VFLA_INIT_(type, cptr, mag, fam, cap, save) do { \ + unsigned save = (cap); \ + AN(save); \ + memset((cptr), 0, VFLA_SIZE(type, fam, save)); \ + (cptr)->magic = (mag); \ + (cptr)->capacity = (save); \ +} while (0) +#define VFLA_INIT(type, cptr, mag, fam, cap) \ + VFLA_INIT_(type, cptr, mag, fam, cap, VUNIQ_NAME(save)) +// declare, allocate and initialize a local VFLA +// the additional VLA buf declaration avoids +// "Variable-sized object may not be initialized" +#define VFLA_LOCAL_(type, name, mag, fam, cap, bufname) \ + char bufname[VFLA_SIZE(type, fam, cap)]; \ + struct type *name = (void *)bufname; \ + VFLA_INIT(type, name, mag, fam, cap) +#define VFLA_LOCAL(type, name, mag, fam, cap) \ + VFLA_LOCAL_(type, name, mag, fam, cap, VUNIQ_NAME(buf)) +// malloc and initialize a VFLA +#define VFLA_ALLOC(type, name, mag, fam, cap) do { \ + (name) = malloc(VFLA_SIZE(type, fam, cap)); \ + if ((name) != NULL) \ + VFLA_INIT(type, name, mag, fam, cap); \ +} while(0) +#define VFLA_FOREACH(var, cptr, fam) \ + for (var = &(cptr)->fam[0]; var < &(cptr)->fam[(cptr)->used]; var++) +// continue iterating after a break of a _FOREACH +#define VFLA_FOREACH_RESUME(var, cptr, fam) \ + for (; var != NULL && var < &(cptr)->fam[(cptr)->used]; var++) +#define VFLA_GET(cptr, fam) ((cptr)->used < (cptr)->capacity ? 
&(cptr)->fam[(cptr)->used++] : NULL) +// asserts sufficient capacity +#define VFLA_ADD(cptr, fam, val) do { \ + assert((cptr)->used < (cptr)->capacity); \ + (cptr)->fam[(cptr)->used++] = (val); \ +} while(0) + +#define VSCARAB_SIZE(cap) VFLA_SIZE(vscarab, s, cap) +#define VSCARAB_INIT(scarab, cap) VFLA_INIT(vscarab, scarab, VSCARAB_MAGIC, s, cap) +#define VSCARAB_LOCAL(scarab, cap) VFLA_LOCAL(vscarab, scarab, VSCARAB_MAGIC, s, cap) +#define VSCARAB_ALLOC(scarab, cap) VFLA_ALLOC(vscarab, scarab, VSCARAB_MAGIC, s, cap) +#define VSCARAB_FOREACH(var, scarab) VFLA_FOREACH(var, scarab, s) +#define VSCARAB_FOREACH_RESUME(var, scarab) VFLA_FOREACH_RESUME(var, scarab, s) +#define VSCARAB_GET(scarab) VFLA_GET(scarab, s) +#define VSCARAB_ADD(scarab, val) VFLA_ADD(scarab, s, val) +//lint -emacro(64, VSCARAB_ADD_IOV_NORET) weird flexelint bug? +#define VSCARAB_ADD_IOV_NORET(scarab, vec) \ + VSCARAB_ADD(scarab, ((struct viov){.lease = VAI_LEASE_NORET, .iov = (vec)})) +#define VSCARAB_LAST(scarab) (&(scarab)->s[(scarab)->used - 1]) + +#define VSCARAB_CHECK(scarab) do { \ + CHECK_OBJ(scarab, VSCARAB_MAGIC); \ + assert(scarab->used <= scarab->capacity); \ +} while(0) + +#define VSCARAB_CHECK_NOTNULL(scarab) do { \ + AN(scarab); \ + VSCARAB_CHECK(scarab); \ +} while(0) + +/* + * VSCARET: Varnish SCatter Array Return + * + * an array of leases obtained from a vscarab + */ + +struct vscaret { + unsigned magic; +#define VSCARET_MAGIC 0x9c1f3d7b + unsigned capacity; + unsigned used; + uint64_t lease[] v_counted_by_(capacity); +}; + +#define VSCARET_SIZE(cap) VFLA_SIZE(vscaret, lease, cap) +#define VSCARET_INIT(scaret, cap) VFLA_INIT(vscaret, scaret, VSCARET_MAGIC, lease, cap) +#define VSCARET_LOCAL(scaret, cap) VFLA_LOCAL(vscaret, scaret, VSCARET_MAGIC, lease, cap) +#define VSCARET_ALLOC(scaret, cap) VFLA_ALLOC(vscaret, scaret, VSCARET_MAGIC, lease, cap) +#define VSCARET_FOREACH(var, scaret) VFLA_FOREACH(var, scaret, lease) +#define VSCARET_GET(scaret) VFLA_GET(scaret, lease) +#define VSCARET_ADD(scaret, val) VFLA_ADD(scaret, lease, val) + +#define VSCARET_CHECK(scaret) do { \ + CHECK_OBJ(scaret, VSCARET_MAGIC); \ + assert(scaret->used <= scaret->capacity); \ +} while(0) + +#define VSCARET_CHECK_NOTNULL(scaret) do { \ + AN(scaret); \ + VSCARET_CHECK(scaret); \ +} while(0) + +/* + * VSCARABs can contain leases which are not to be returned to storage, for + * example static data or fragments of larger leases to be returned later. For + * these cases, use this magic value as the lease. This is deliberately not 0 to + * catch oversights. 
+ */ +#define VAI_LEASE_NORET ((uint64_t)0x8) + +vai_hdl ObjVAIinit(struct worker *, struct objcore *, struct ws *, + vai_notify_cb *, void *); +int ObjVAIlease(struct worker *, vai_hdl, struct vscarab *); +void ObjVAIreturn(struct worker *, vai_hdl, struct vscaret *); +void ObjVAIfini(struct worker *, vai_hdl *); + /* cache_req_body.c */ ssize_t VRB_Iterate(struct worker *, struct vsl_log *, struct req *, objiterate_f *func, void *priv); diff --git a/bin/varnishd/cache/cache_main.c b/bin/varnishd/cache/cache_main.c index 32a44e3ea..31b94829f 100644 --- a/bin/varnishd/cache/cache_main.c +++ b/bin/varnishd/cache/cache_main.c @@ -405,9 +405,55 @@ static struct cli_proto child_cmds[] = { { NULL } }; +#define CAP 17U + +static void +t_vscarab1(struct vscarab *scarab) +{ + struct viov *v; + uint64_t sum; + + VSCARAB_CHECK_NOTNULL(scarab); + AZ(scarab->used); + + v = VSCARAB_GET(scarab); + AN(v); + v->lease = 12; + + VSCARAB_ADD(scarab, (struct viov){.lease = 30}); + + sum = 0; + VSCARAB_FOREACH(v, scarab) + sum += v->lease; + + assert(sum == 42); +} + +static void +t_vscarab(void) +{ + char testbuf[VSCARAB_SIZE(CAP)]; + struct vscarab *frombuf = (void *)testbuf; + VSCARAB_INIT(frombuf, CAP); + t_vscarab1(frombuf); + + // --- + + VSCARAB_LOCAL(scarab, CAP); + t_vscarab1(scarab); + + // --- + + struct vscarab *heap; + VSCARAB_ALLOC(heap, CAP); + t_vscarab1(heap); + free(heap); +} + void child_main(int sigmagic, size_t altstksz) { + t_vscarab(); if (sigmagic) child_sigmagic(altstksz); diff --git a/bin/varnishd/cache/cache_obj.c b/bin/varnishd/cache/cache_obj.c index c5f2e54fc..6fe72f448 100644 --- a/bin/varnishd/cache/cache_obj.c +++ b/bin/varnishd/cache/cache_obj.c @@ -183,6 +183,101 @@ ObjIterate(struct worker *wrk, struct objcore *oc, return (om->objiterator(wrk, oc, priv, func, final)); } +/*==================================================================== + * ObjVAI...(): Asynchronous Iteration + * + * + * ObjVAIinit() returns an opaque handle, or NULL if not supported + * + * A VAI handle must not be used concurrently + * + * the vai_notify_cb(priv) will be called asynchronously by the storage + * engine when a -EAGAIN / -ENOBUFS condition is over and ObjVAIlease() + * can be called again. + * + * Note: + * - the callback gets executed by an arbitrary thread + * - WITH the boc mtx held + * so it should never block and only do minimal work + * + * ObjVAIlease() fills the vscarab with leases. returns: + * + * -EAGAIN: nothing available at the moment, storage will notify, no use to + * call again until notification + * -ENOBUFS: caller needs to return leases, storage will notify + * -EPIPE: BOS_FAILED for busy object + * -(errno): other problem, fatal + * + * >= 0: number of viovs added (== scarab->capacity - scarab->used) + * + * struct vscarab: + * + * the leases can be used by the caller until returned with + * ObjVAIreturn(). The storage guarantees that the lease member is a + * multiple of 8 (that is, the lower three bits are zero). These can be + * used by the caller between lease and return, but must be cleared to + * zero before returning. 
+ * + * ObjVAIreturn() returns leases collected in a struct vscaret + * + * it must be called with a vscaret, which holds an array of lease values from viovs + * received when the caller can guarantee that they are no longer accessed + * + * ObjVAIfini() finalizes iteration + * + * it must be called when iteration is done, irrespective of error status + */ + +vai_hdl +ObjVAIinit(struct worker *wrk, struct objcore *oc, struct ws *ws, + vai_notify_cb *cb, void *cb_priv) +{ + const struct obj_methods *om = obj_getmethods(oc); + + CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); + + if (om->vai_init == NULL) + return (NULL); + return (om->vai_init(wrk, oc, ws, cb, cb_priv)); +} + +int +ObjVAIlease(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) +{ + struct vai_hdl_preamble *vaip = vhdl; + + AN(vaip); + assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); + AN(vaip->vai_lease); + return (vaip->vai_lease(wrk, vhdl, scarab)); +} + +void +ObjVAIreturn(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) +{ + struct vai_hdl_preamble *vaip = vhdl; + + AN(vaip); + assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); + /* vai_return is optional */ + if (vaip->vai_return != NULL) + vaip->vai_return(wrk, vhdl, scaret); + else + VSCARET_INIT(scaret, scaret->capacity); +} + +void +ObjVAIfini(struct worker *wrk, vai_hdl *vhdlp) +{ + AN(vhdlp); + struct vai_hdl_preamble *vaip = *vhdlp; + + AN(vaip); + assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); + AN(vaip->vai_lease); + vaip->vai_fini(wrk, vhdlp); +} + /*==================================================================== * ObjGetSpace() * diff --git a/bin/varnishd/cache/cache_obj.h b/bin/varnishd/cache/cache_obj.h index f6ee8618e..0aff7c8b2 100644 --- a/bin/varnishd/cache/cache_obj.h +++ b/bin/varnishd/cache/cache_obj.h @@ -70,6 +70,78 @@ struct vai_qe { void *priv; }; +#define VAI_ASSERT_LEASE(x) AZ((x) & 0x7) + +/* + * start an iteration. the ws can be used (reserved) by storage + * the void * will be passed as the second argument to vai_notify_cb + */ +typedef vai_hdl vai_init_f(struct worker *, struct objcore *, struct ws *, + vai_notify_cb *, void *); + +/* + * lease io vectors from storage + * + * vai_hdl is from vai_init_f + * the vscarab is provided by the caller to return leases + * + * return: + * -EAGAIN: nothing available at the moment, storage will notify, no use to + * call again until notification + * -ENOBUFS: caller needs to return leases, storage will notify + * -EPIPE: BOS_FAILED for busy object + * -(errno): other problem, fatal + * >= 0: number of viovs added + */ +typedef int vai_lease_f(struct worker *, vai_hdl, struct vscarab *); + +/* + * return leases + */ +typedef void vai_return_f(struct worker *, vai_hdl, struct vscaret *); + +/* + * finish iteration, vai_return_f must have been called on all leases + */ +typedef void vai_fini_f(struct worker *, vai_hdl *); + +/* + * vai_hdl must start with this preamble such that when cast to it, cache_obj.c + * has access to the methods. 
+ * + * The first magic is owned by storage, the second magic is owned by cache_obj.c + * and must be initialized to VAI_HDL_PREAMBLE_MAGIC2 + * + */ + +//lint -esym(768, vai_hdl_preamble::reserve) +struct vai_hdl_preamble { + unsigned magic; // owned by storage + unsigned magic2; +#define VAI_HDL_PREAMBLE_MAGIC2 0x7a15d162 + vai_lease_f *vai_lease; + vai_return_f *vai_return; // optional + uintptr_t reserve[4]; // abi fwd compat + vai_fini_f *vai_fini; +}; + +#define INIT_VAI_HDL(to, x) do { \ + (void)memset(to, 0, sizeof *(to)); \ + (to)->preamble.magic = (x); \ + (to)->preamble.magic2 = VAI_HDL_PREAMBLE_MAGIC2; \ +} while (0) + +#define CHECK_VAI_HDL(obj, x) do { \ + assert(obj->preamble.magic == (x)); \ + assert(obj->preamble.magic2 == VAI_HDL_PREAMBLE_MAGIC2);\ +} while (0) + +#define CAST_VAI_HDL_NOTNULL(obj, ptr, x) do { \ + AN(ptr); \ + (obj) = (ptr); \ + CHECK_VAI_HDL(obj, x); \ +} while (0) + struct obj_methods { /* required */ objfree_f *objfree; @@ -84,5 +156,6 @@ struct obj_methods { objslim_f *objslim; objtouch_f *objtouch; objsetstate_f *objsetstate; + /* async iteration (VAI) */ + vai_init_f *vai_init; }; - From nils.goroll at uplex.de Fri Jul 4 17:04:04 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:04 +0000 (UTC) Subject: [master] 9f65997fa storage_simple: Implement asynchronous iteration and use it for the iterator Message-ID: <20250704170404.10CCF11A919@lists.varnish-cache.org> commit 9f65997fa11aaa1d3287856749a6b856f8efba62 Author: Nils Goroll Date: Mon Jan 6 22:02:36 2025 +0100 storage_simple: Implement asynchronous iteration and use it for the iterator This commit implements, for the simple storage layer, the asynchronous iteration API defined and described in the previous commits, and reimplements the synchronous iterator with it. This commit message does not provide background information; please refer to the two previous commits. Implementation sml_ai_init() initializes the handle and chooses either a simple or more elaborate "boc" lease function depending on whether or not a streaming fetch is ongoing (boc present). sml_ai_lease_simple() is just that, dead simple. It iterates the storage segment list and fills the VSCARAB provided by the caller. It is a good entry point into the implementation. sml_ai_lease_boc() handles the busy case and is more elaborate due to the nature of streaming fetches. It first calls ObjVAIGetExtend() to get the current extent. If no data is available, it returns the appropriate value. Other than that, it basically does the same things as sml_ai_lease_simple() with these exceptions: It also needs to return partial extents ("fragments") and it needs to handle the case where the last available segment is reached, in which case there is no successor to store for the next invocation. sml_ai_return() is only used for the "boc" case. It removes returned full segments from the list and then frees them outside the boc mtx. It also adds special handling for the last segment still needed by sml_ai_lease_boc() to resume. This segment is retained on the VSCARET. sml_ai_fini() is straightforward and should not need explanation. Implementation of sml_iterator() using the new API To reimplement the existing synchronous iterator based on the new API, we first need a little facility to block waiting for a notification. This is struct sml_notify with the four sml_notify* functions. sml_notify() is the callback, sml_notify_wait() blocks waiting for a notification to arrive. 
Until it runs out of work, the iterator performs these steps: ObjVAIlease() is called repeatedly until either the VSCARAB is full or a negative value is returned. This allows the rest of the code to react to the next condition appropriately by sending an OBJ_ITER_FLUSH with the last lease only. Calling func() on each extent is trivial, the complications only come from handling OBJ_ITER_FLUSH, "just in time" returns and error handling. diff --git a/bin/varnishd/storage/storage_persistent.c b/bin/varnishd/storage/storage_persistent.c index f193ae976..5eceb7e32 100644 --- a/bin/varnishd/storage/storage_persistent.c +++ b/bin/varnishd/storage/storage_persistent.c @@ -696,6 +696,7 @@ smp_init(void) smp_oc_realmethods.objsetattr = SML_methods.objsetattr; smp_oc_realmethods.objtouch = LRU_Touch; smp_oc_realmethods.objfree = smp_oc_objfree; + smp_oc_realmethods.vai_init = SML_methods.vai_init; } /*-------------------------------------------------------------------- diff --git a/bin/varnishd/storage/storage_simple.c b/bin/varnishd/storage/storage_simple.c index 8c81c85ee..ce185fd1b 100644 --- a/bin/varnishd/storage/storage_simple.c +++ b/bin/varnishd/storage/storage_simple.c @@ -31,6 +31,8 @@ #include "config.h" +#include + #include "cache/cache_varnishd.h" #include "cache/cache_obj.h" @@ -306,130 +308,434 @@ sml_objfree(struct worker *wrk, struct objcore *oc) wrk->stats->n_object--; } +// kept for reviewers - XXX remove later +#undef VAI_DBG + +struct sml_hdl { + struct vai_hdl_preamble preamble; +#define SML_HDL_MAGIC 0x37dfd996 + struct vai_qe qe; + struct ws *ws; // NULL is malloc() + struct objcore *oc; + struct object *obj; + const struct stevedore *stv; + struct boc *boc; + + struct storage *st; // updated by _lease() + + // only for _lease_boc() + uint64_t st_off; // already returned fragment of current st + uint64_t avail, returned; + struct storage *last; // to resume, held back by _return() +}; + +static inline void +sml_ai_viov_fill(struct viov *viov, struct storage *st) +{ + viov->iov.iov_base = st->ptr; + viov->iov.iov_len = st->len; + viov->lease = (uintptr_t)st; + VAI_ASSERT_LEASE(viov->lease); +} + +static int +sml_ai_lease_simple(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) +{ + struct storage *st; + struct sml_hdl *hdl; + struct viov *viov; + int r = 0; + + (void) wrk; + CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC); + VSCARAB_CHECK_NOTNULL(scarab); + + AZ(hdl->st_off); + st = hdl->st; + while (st != NULL && (viov = VSCARAB_GET(scarab)) != NULL) { + CHECK_OBJ(st, STORAGE_MAGIC); + sml_ai_viov_fill(viov, st); + r++; + st = VTAILQ_PREV(st, storagehead, list); + } + hdl->st = st; + if (st == NULL) + scarab->flags |= VSCARAB_F_END; + return (r); +} + +/* + * on leases while streaming (with a boc): + * + * SML uses the lease return facility to implement the "free behind" for + * OC_F_TRANSIENT objects. When streaming, we also return leases on + * fragments of sts, but we must only "free behind" when we are done with the + * last fragment. 
+ * + * So we use a magic lease to signal "this is only a fragment", which we ignore + * on returns + */ + +static int +sml_ai_lease_boc(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) +{ + enum boc_state_e state = BOS_INVALID; + struct storage *next; + struct sml_hdl *hdl; + struct viov *viov; + int r = 0; + + CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC); + VSCARAB_CHECK_NOTNULL(scarab); + + if (hdl->avail == hdl->returned) { + hdl->avail = ObjVAIGetExtend(wrk, hdl->oc, hdl->returned, + &state, &hdl->qe); + if (state == BOS_FAILED) { + hdl->last = NULL; + return (-EPIPE); + } + else if (state == BOS_FINISHED) + (void)0; + else if (hdl->avail == hdl->returned) { + // ObjVAIGetExtend() has scheduled a notification + if (hdl->boc->transit_buffer > 0) + return (-ENOBUFS); + else + return (-EAGAIN); + } + else + assert(state < BOS_FINISHED); + } + Lck_Lock(&hdl->boc->mtx); + if (hdl->st == NULL && hdl->last != NULL) { + /* when the "last" st completed, we did not yet have a next, so + * resume from there. Because "last" might have been returned and + * deleted, we can not just use the pointer, but rather need to + * iterate the st list. + * if we can not find "last", it also has been returned and + * deleted, and the current write head (VTAILQ_LAST) is our next + * st, which can also be null if we are done. + */ + VTAILQ_FOREACH_REVERSE(next, &hdl->obj->list, storagehead, list) { + if (next == hdl->last) { + hdl->st = VTAILQ_PREV(next, storagehead, list); + break; + } + } + } + hdl->last = NULL; + if (hdl->st == NULL) { + assert(hdl->returned == 0 || hdl->avail == hdl->returned); + hdl->st = VTAILQ_LAST(&hdl->obj->list, storagehead); + } + if (hdl->st == NULL) + assert(hdl->avail == hdl->returned); + + while (hdl->avail > hdl->returned && (viov = VSCARAB_GET(scarab)) != NULL) { + CHECK_OBJ_NOTNULL(hdl->st, STORAGE_MAGIC); // ObjVAIGetExtend ensures + assert(hdl->st_off <= hdl->st->space); + size_t av = hdl->avail - hdl->returned; + size_t l = hdl->st->space - hdl->st_off; + AN(l); + if (l > av) + l = av; + viov->iov.iov_base = hdl->st->ptr + hdl->st_off; + viov->iov.iov_len = l; + if (hdl->st_off + l == hdl->st->space) { + next = VTAILQ_PREV(hdl->st, storagehead, list); + AZ(hdl->last); + if (next == NULL) + hdl->last = hdl->st; + else + CHECK_OBJ(next, STORAGE_MAGIC); +#ifdef VAI_DBG + VSLb(wrk->vsl, SLT_Debug, "off %zu + l %zu == space st %p next st %p stvprv %p", + hdl->st_off, l, hdl->st, next, hdl->boc->stevedore_priv); +#endif + viov->lease = (uintptr_t)hdl->st; + hdl->st_off = 0; + hdl->st = next; + } + else { + viov->lease = VAI_LEASE_NORET; + hdl->st_off += l; + } + hdl->returned += l; + VAI_ASSERT_LEASE(viov->lease); + r++; + } + + Lck_Unlock(&hdl->boc->mtx); + if (state != BOS_FINISHED && hdl->avail == hdl->returned) { + hdl->avail = ObjVAIGetExtend(wrk, hdl->oc, hdl->returned, + &state, &hdl->qe); + } + if (state == BOS_FINISHED && hdl->avail == hdl->returned) + scarab->flags |= VSCARAB_F_END; + return (r); +} + +static void v_matchproto_(vai_return_f) +sml_ai_return(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) +{ + struct storage *st; + struct sml_hdl *hdl; + uint64_t *p; + + (void) wrk; + CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC); + VSCARET_CHECK_NOTNULL(scaret); + if (scaret->used == 0) + return; + + // callback is only registered if needed + assert(hdl->boc != NULL && (hdl->oc->flags & OC_F_TRANSIENT) != 0); + + // filter noret and last + VSCARET_LOCAL(todo, scaret->used); + VSCARET_FOREACH(p, scaret) { + if (*p == VAI_LEASE_NORET) + continue; + 
CAST_OBJ_NOTNULL(st, (void *)*p, STORAGE_MAGIC); + if (st == hdl->last) + continue; + VSCARET_ADD(todo, *p); + } + VSCARET_INIT(scaret, scaret->capacity); + + Lck_Lock(&hdl->boc->mtx); + VSCARET_FOREACH(p, todo) { + CAST_OBJ_NOTNULL(st, (void *)*p, STORAGE_MAGIC); + VTAILQ_REMOVE(&hdl->obj->list, st, list); + if (st == hdl->boc->stevedore_priv) + hdl->boc->stevedore_priv = trim_once; + } + Lck_Unlock(&hdl->boc->mtx); + + VSCARET_FOREACH(p, todo) { + CAST_OBJ_NOTNULL(st, (void *)*p, STORAGE_MAGIC); + sml_stv_free(hdl->stv, st); + } +} + +static void v_matchproto_(vai_fini_f) +sml_ai_fini(struct worker *wrk, vai_hdl *vai_hdlp) +{ + struct sml_hdl *hdl; + + AN(vai_hdlp); + CAST_VAI_HDL_NOTNULL(hdl, *vai_hdlp, SML_HDL_MAGIC); + *vai_hdlp = NULL; + + if (hdl->boc != NULL) { + ObjVAICancel(wrk, hdl->boc, &hdl->qe); + HSH_DerefBoc(wrk, hdl->oc); + hdl->boc = NULL; + } + + if (hdl->ws != NULL) + WS_Release(hdl->ws, 0); + else + free(hdl); +} + +static vai_hdl v_matchproto_(vai_init_f) +sml_ai_init(struct worker *wrk, struct objcore *oc, struct ws *ws, + vai_notify_cb *notify, void *notify_priv) +{ + struct sml_hdl *hdl; + const size_t sz = sizeof *hdl; + + if (ws != NULL && WS_ReserveSize(ws, (unsigned)sz)) + hdl = WS_Reservation(ws); + else { + hdl = malloc(sz); + ws = NULL; + } + + AN(hdl); + INIT_VAI_HDL(hdl, SML_HDL_MAGIC); + hdl->preamble.vai_lease = sml_ai_lease_simple; + hdl->preamble.vai_fini = sml_ai_fini; + hdl->ws = ws; + + hdl->oc = oc; + hdl->obj = sml_getobj(wrk, oc); + CHECK_OBJ_NOTNULL(hdl->obj, OBJECT_MAGIC); + hdl->stv = oc->stobj->stevedore; + CHECK_OBJ_NOTNULL(hdl->stv, STEVEDORE_MAGIC); + + hdl->st = VTAILQ_LAST(&hdl->obj->list, storagehead); + CHECK_OBJ_ORNULL(hdl->st, STORAGE_MAGIC); + + hdl->boc = HSH_RefBoc(oc); + if (hdl->boc == NULL) + return (hdl); + /* we only initialize notifications if we have a boc, so + * any wrong attempt triggers magic checks. 
+ */ + hdl->preamble.vai_lease = sml_ai_lease_boc; + if ((hdl->oc->flags & OC_F_TRANSIENT) != 0) + hdl->preamble.vai_return = sml_ai_return; + hdl->qe.magic = VAI_Q_MAGIC; + hdl->qe.cb = notify; + hdl->qe.hdl = hdl; + hdl->qe.priv = notify_priv; + return (hdl); +} + +/* + * trivial notification to allow the iterator to simply block + */ +struct sml_notify { + unsigned magic; +#define SML_NOTIFY_MAGIC 0x4589af31 + unsigned hasmore; + pthread_mutex_t mtx; + pthread_cond_t cond; +}; + +static void +sml_notify_init(struct sml_notify *sn) +{ + + INIT_OBJ(sn, SML_NOTIFY_MAGIC); + AZ(pthread_mutex_init(&sn->mtx, NULL)); + AZ(pthread_cond_init(&sn->cond, NULL)); +} + +static void +sml_notify_fini(struct sml_notify *sn) +{ + + CHECK_OBJ_NOTNULL(sn, SML_NOTIFY_MAGIC); + AZ(pthread_mutex_destroy(&sn->mtx)); + AZ(pthread_cond_destroy(&sn->cond)); +} + +static void v_matchproto_(vai_notify_cb) +sml_notify(vai_hdl hdl, void *priv) +{ + struct sml_notify *sn; + + (void) hdl; + CAST_OBJ_NOTNULL(sn, priv, SML_NOTIFY_MAGIC); + AZ(pthread_mutex_lock(&sn->mtx)); + sn->hasmore = 1; + AZ(pthread_cond_signal(&sn->cond)); + AZ(pthread_mutex_unlock(&sn->mtx)); + +} + +static void +sml_notify_wait(struct sml_notify *sn) +{ + + CHECK_OBJ_NOTNULL(sn, SML_NOTIFY_MAGIC); + AZ(pthread_mutex_lock(&sn->mtx)); + while (sn->hasmore == 0) + AZ(pthread_cond_wait(&sn->cond, &sn->mtx)); + AN(sn->hasmore); + sn->hasmore = 0; + AZ(pthread_mutex_unlock(&sn->mtx)); +} + static int v_matchproto_(objiterator_f) sml_iterator(struct worker *wrk, struct objcore *oc, void *priv, objiterate_f *func, int final) { - struct boc *boc; - enum boc_state_e state; - struct object *obj; - struct storage *st; - struct storage *checkpoint = NULL; - const struct stevedore *stv; - ssize_t checkpoint_len = 0; - ssize_t len = 0; - int ret = 0, ret2; - ssize_t ol; - ssize_t nl; - ssize_t sl; - void *p; - ssize_t l; - unsigned u; - - obj = sml_getobj(wrk, oc); - CHECK_OBJ_NOTNULL(obj, OBJECT_MAGIC); - stv = oc->stobj->stevedore; - CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC); + struct sml_notify sn; + struct viov *vio, *last; + unsigned u, uu; + vai_hdl hdl; + int nn, r, r2, islast; - boc = HSH_RefBoc(oc); + VSCARAB_LOCAL(scarab, 16); + VSCARET_LOCAL(scaret, 16); - if (boc == NULL) { - VTAILQ_FOREACH_REVERSE_SAFE( - st, &obj->list, storagehead, list, checkpoint) { + (void) final; // phase out? 
+ sml_notify_init(&sn); + hdl = ObjVAIinit(wrk, oc, NULL, sml_notify, &sn); + AN(hdl); - u = 0; - if (VTAILQ_PREV(st, storagehead, list) == NULL) + r = u = 0; + + do { + do { + nn = ObjVAIlease(wrk, hdl, scarab); + if (nn <= 0 || scarab->flags & VSCARAB_F_END) + break; + } while (scarab->used < scarab->capacity); + + /* + * nn is the wait/return action or 0 + * nn tells us if to flush + */ + uu = u; + last = VSCARAB_LAST(scarab); + VSCARAB_FOREACH(vio, scarab) { + islast = vio == last; + AZ(u & OBJ_ITER_END); + if (islast && scarab->flags & VSCARAB_F_END) u |= OBJ_ITER_END; - if (final) - u |= OBJ_ITER_FLUSH; - if (ret == 0 && st->len > 0) - ret = func(priv, u, st->ptr, st->len); - if (final) { - VTAILQ_REMOVE(&obj->list, st, list); - sml_stv_free(stv, st); - } else if (ret) + + // flush if it is the scarab's last IOV and we will block next + // or if we need space in the return leases array + uu = u; + if ((islast && nn < 0) || scaret->used == scaret->capacity - 1) + uu |= OBJ_ITER_FLUSH; + r = func(priv, uu, vio->iov.iov_base, vio->iov.iov_len); + if (r != 0) break; - } - return (ret); - } - p = NULL; - l = 0; + // sufficient space ensured by capacity check above + VSCARET_ADD(scaret, vio->lease); - u = 0; - if (boc->fetched_so_far == 0) { - ret = func(priv, OBJ_ITER_FLUSH, NULL, 0); - if (ret) - return (ret); - } - while (1) { - ol = len; - nl = ObjWaitExtend(wrk, oc, ol, &state); - if (state == BOS_FAILED) { - ret = -1; - break; + // whenever we have flushed, return leases + if ((uu & OBJ_ITER_FLUSH) && scaret->used > 0) + ObjVAIreturn(wrk, hdl, scaret); } - if (nl == ol) { - assert(state == BOS_FINISHED); - break; + + // return leases which we did not use if error (break) + VSCARAB_FOREACH_RESUME(vio, scarab) { + if (scaret->used == scaret->capacity) + ObjVAIreturn(wrk, hdl, scaret); + VSCARET_ADD(scaret, vio->lease); } - assert(nl > ol); - Lck_Lock(&boc->mtx); - AZ(VTAILQ_EMPTY(&obj->list)); - if (checkpoint == NULL) { - st = VTAILQ_LAST(&obj->list, storagehead); - sl = 0; - } else { - st = checkpoint; - sl = checkpoint_len; - ol -= checkpoint_len; + + // we have now completed the scarab + VSCARAB_INIT(scarab, scarab->capacity); + + // flush before blocking if we did not already + if (r == 0 && (nn == -ENOBUFS || nn == -EAGAIN) && + (uu & OBJ_ITER_FLUSH) == 0) { + r = func(priv, OBJ_ITER_FLUSH, NULL, 0); + if (scaret->used > 0) + ObjVAIreturn(wrk, hdl, scaret); } - while (st != NULL) { - if (st->len > ol) { - p = st->ptr + ol; - l = st->len - ol; - len += l; - break; - } - ol -= st->len; - assert(ol >= 0); - nl -= st->len; - assert(nl > 0); - sl += st->len; - st = VTAILQ_PREV(st, storagehead, list); - if (final && checkpoint != NULL) { - if (checkpoint == boc->stevedore_priv) - boc->stevedore_priv = trim_once; - else - VTAILQ_REMOVE(&obj->list, checkpoint, list); - sml_stv_free(stv, checkpoint); - } - checkpoint = st; - checkpoint_len = sl; + + if (r == 0 && (nn == -ENOBUFS || nn == -EAGAIN)) { + assert(scaret->used <= 1); + sml_notify_wait(&sn); } - CHECK_OBJ_NOTNULL(obj, OBJECT_MAGIC); - CHECK_OBJ_NOTNULL(st, STORAGE_MAGIC); - st = VTAILQ_PREV(st, storagehead, list); - if (st != NULL && st->len == 0) - st = NULL; - Lck_Unlock(&boc->mtx); - assert(l > 0 || state == BOS_FINISHED); - u = 0; - if (st == NULL || final) - u |= OBJ_ITER_FLUSH; - if (st == NULL && state == BOS_FINISHED) - u |= OBJ_ITER_END; - ret = func(priv, u, p, l); - if (ret) - break; - } - HSH_DerefBoc(wrk, oc); + else if (r == 0 && nn < 0) + r = -1; + } while (nn != 0 && r == 0); + if ((u & OBJ_ITER_END) == 0) { - 
ret2 = func(priv, OBJ_ITER_END, NULL, 0); - if (ret == 0) - ret = ret2; + r2 = func(priv, OBJ_ITER_END, NULL, 0); + if (r == 0) + r = r2; } - return (ret); + + if (scaret->used > 0) + ObjVAIreturn(wrk, hdl, scaret); + + ObjVAIfini(wrk, &hdl); + sml_notify_fini(&sn); + + return (r); } /*-------------------------------------------------------------------- @@ -736,6 +1042,7 @@ const struct obj_methods SML_methods = { .objgetattr = sml_getattr, .objsetattr = sml_setattr, .objtouch = LRU_Touch, + .vai_init = sml_ai_init }; static void diff --git a/bin/varnishtest/tests/c00111.vtc b/bin/varnishtest/tests/c00111.vtc index 706ee7041..996d5d258 100644 --- a/bin/varnishtest/tests/c00111.vtc +++ b/bin/varnishtest/tests/c00111.vtc @@ -15,7 +15,8 @@ client c1 { } -run varnish v1 -vsl_catchup -varnish v1 -expect fetch_failed == 1 +# with vai, this no longer fails systematically (which is good) +varnish v1 -expect fetch_failed <= 1 varnish v1 -cliok "param.set transit_buffer 4k" @@ -26,4 +27,4 @@ client c2 { varnish v1 -vsl_catchup varnish v1 -expect s_fetch == 2 -varnish v1 -expect fetch_failed == 1 +varnish v1 -expect fetch_failed <= 1 From nils.goroll at uplex.de Fri Jul 4 17:04:04 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:04 +0000 (UTC) Subject: [master] dcabe3497 Who said they wanted to keep 32bit alive as an option? 🤔 Message-ID: <20250704170404.2B9F111A91C@lists.varnish-cache.org> commit dcabe34978c974ac715baf1ec231e685cf57a48d Author: Nils Goroll Date: Sat Oct 12 23:45:49 2024 +0200 Who said they wanted to keep 32bit alive as an option? 🤔 diff --git a/bin/varnishd/storage/storage_simple.c b/bin/varnishd/storage/storage_simple.c index ce185fd1b..cf10dd696 100644 --- a/bin/varnishd/storage/storage_simple.c +++ b/bin/varnishd/storage/storage_simple.c @@ -329,12 +329,33 @@ struct sml_hdl { struct storage *last; // to resume, held back by _return() }; +static inline uint64_t +st2lease(const struct storage *st) +{ + uint64_t r = (uintptr_t)st; + + if (sizeof(void *) < 8) //lint !e506 !e774 + r <<= 1; + + return (r); +} + +static inline struct storage * +lease2st(uint64_t l) +{ + + if (sizeof(void *) < 8) //lint !e506 !e774 + l >>= 1; + + return ((void *)l); +} + static inline void sml_ai_viov_fill(struct viov *viov, struct storage *st) { viov->iov.iov_base = st->ptr; viov->iov.iov_len = st->len; - viov->lease = (uintptr_t)st; + viov->lease = st2lease(st); VAI_ASSERT_LEASE(viov->lease); } @@ -453,7 +474,7 @@ sml_ai_lease_boc(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) VSLb(wrk->vsl, SLT_Debug, "off %zu + l %zu == space st %p next st %p stvprv %p", hdl->st_off, l, hdl->st, next, hdl->boc->stevedore_priv); #endif - viov->lease = (uintptr_t)hdl->st; + viov->lease = st2lease(hdl->st); hdl->st_off = 0; hdl->st = next; } @@ -497,7 +518,7 @@ sml_ai_return(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) VSCARET_FOREACH(p, scaret) { if (*p == VAI_LEASE_NORET) continue; - CAST_OBJ_NOTNULL(st, (void *)*p, STORAGE_MAGIC); + CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC); if (st == hdl->last) continue; VSCARET_ADD(todo, *p); @@ -506,7 +527,7 @@ sml_ai_return(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) Lck_Lock(&hdl->boc->mtx); VSCARET_FOREACH(p, todo) { - CAST_OBJ_NOTNULL(st, (void *)*p, STORAGE_MAGIC); + CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC); VTAILQ_REMOVE(&hdl->obj->list, st, list); if (st == hdl->boc->stevedore_priv) hdl->boc->stevedore_priv = trim_once; @@ -514,7 +535,7 @@ sml_ai_return(struct worker *wrk, vai_hdl vhdl, 
struct vscaret *scaret) Lck_Unlock(&hdl->boc->mtx); VSCARET_FOREACH(p, todo) { - CAST_OBJ_NOTNULL(st, (void *)*p, STORAGE_MAGIC); + CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC); sml_stv_free(hdl->stv, st); } } From nils.goroll at uplex.de Fri Jul 4 17:04:04 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:04 +0000 (UTC) Subject: [master] a5c1d3a3e vai: add support to allocate & return buffers and implement for sml Message-ID: <20250704170404.52AAB11A929@lists.varnish-cache.org> commit a5c1d3a3ea5722d962ee99bc8588720ebed8c37d Author: Nils Goroll Date: Mon Jan 6 22:02:44 2025 +0100 vai: add support to allocate & return buffers and implement for sml To bring VAI to filters, we are going to need buffer allocations all over the place, because any new data created by filters needs to survive after the filter function returns. So we add ObjVAIbuffer() to fill a VSCARAB with buffers and teach ObjVAIreturn() to return any kind of lease. We add an implementation for SML. diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h index 118f65a34..55553e707 100644 --- a/bin/varnishd/cache/cache.h +++ b/bin/varnishd/cache/cache.h @@ -919,6 +919,7 @@ struct vscaret { vai_hdl ObjVAIinit(struct worker *, struct objcore *, struct ws *, vai_notify_cb *, void *); int ObjVAIlease(struct worker *, vai_hdl, struct vscarab *); +int ObjVAIbuffer(struct worker *, vai_hdl, struct vscarab *); void ObjVAIreturn(struct worker *, vai_hdl, struct vscaret *); void ObjVAIfini(struct worker *, vai_hdl *); diff --git a/bin/varnishd/cache/cache_obj.c b/bin/varnishd/cache/cache_obj.c index 6fe72f448..dcbb991b6 100644 --- a/bin/varnishd/cache/cache_obj.c +++ b/bin/varnishd/cache/cache_obj.c @@ -218,10 +218,31 @@ ObjIterate(struct worker *wrk, struct objcore *oc, * used by the caller between lease and return, but must be cleared to * zero before returning. * + * ObjVAIbuffer() allocates temporary buffers, returns: + * + * -EAGAIN: allocation can not be fulfilled immediately, storage will notify, + * no use to call again until notification + * -EINVAL: size larger than UINT_MAX requested + * -(errno): other problem, fatal + * n: n > 0, number of viovs filled + * + * The struct vscarab is used on the way in and out: On the way in, the + * iov.iov_len members contain the sizes the caller requests, all other + * members of the struct viovs are expected to be zero initialized. + * + * The maximum size to be requested is UINT_MAX. + * + * ObjVAIbuffer() may return sizes larger than requested. The returned n + * might be smaller than requested. + * * ObjVAIreturn() returns leases collected in a struct vscaret * - * it must be called with a vscaret, which holds an array of lease values from viovs - * received when the caller can guarantee that they are no longer accessed + * it must be called with a vscaret, which holds an array of lease values + * received via ObjVAIlease() or ObjVAIbuffer() when the caller can + * guarantee that they are no longer accessed. + * + * ObjVAIreturn() may retain leases in the vscaret if the implementation + * still requires them, iow, the vscaret might not be empty upon return. 
* * ObjVAIfini() finalizes iteration * @@ -252,6 +273,17 @@ ObjVAIlease(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) return (vaip->vai_lease(wrk, vhdl, scarab)); } +int +ObjVAIbuffer(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) +{ + struct vai_hdl_preamble *vaip = vhdl; + + AN(vaip); + assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); + AN(vaip->vai_buffer); + return (vaip->vai_buffer(wrk, vhdl, scarab)); +} + void ObjVAIreturn(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) { @@ -259,11 +291,8 @@ ObjVAIreturn(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) AN(vaip); assert(vaip->magic2 == VAI_HDL_PREAMBLE_MAGIC2); - /* vai_return is optional */ - if (vaip->vai_return != NULL) - vaip->vai_return(wrk, vhdl, scaret); - else - VSCARET_INIT(scaret, scaret->capacity); + AN(vaip->vai_return); + vaip->vai_return(wrk, vhdl, scaret); } void diff --git a/bin/varnishd/cache/cache_obj.h b/bin/varnishd/cache/cache_obj.h index 0aff7c8b2..9de3c383d 100644 --- a/bin/varnishd/cache/cache_obj.h +++ b/bin/varnishd/cache/cache_obj.h @@ -50,8 +50,9 @@ typedef void *objsetattr_f(struct worker *, struct objcore *, enum obj_attr attr, ssize_t len, const void *ptr); typedef void objtouch_f(struct worker *, struct objcore *, vtim_real now); -/* called by Obj/storage to notify that the lease function (vai_lease_f) can be - * called again after a -EAGAIN / -ENOBUFS return value +/* called by Obj/storage to notify that the lease function (vai_lease_f) or + * buffer function (vai_buffer_f) can be called again after return of + * -EAGAIN or -ENOBUFS * NOTE: * - the callback gets executed by an arbitrary thread * - WITH the boc mtx held @@ -96,7 +97,26 @@ typedef vai_hdl vai_init_f(struct worker *, struct objcore *, struct ws *, typedef int vai_lease_f(struct worker *, vai_hdl, struct vscarab *); /* - * return leases + * get io vectors with temporary buffers from storage + * + * vai_hdl is from vai_init_f + * the vscarab needs to be initialized with the number of requested elements + * and each iov.iov_len contains the requested sizes. All iov_base need to be + * zero.
+ * + * after return, the vscarab can be smaller than requested if only some + * allocation requests could be fulfilled + * + * return: + * -EAGAIN: allocation can not be fulfilled immediately, storage will notify, + * no use to call again until notification + * -(errno): other problem, fatal + * n: n > 0, number of viovs filled + */ +typedef int vai_buffer_f(struct worker *, vai_hdl, struct vscarab *); + +/* + * return leases from vai_lease_f or vai_buffer_f */ typedef void vai_return_f(struct worker *, vai_hdl, struct vscaret *); @@ -120,7 +140,8 @@ struct vai_hdl_preamble { unsigned magic2; #define VAI_HDL_PREAMBLE_MAGIC2 0x7a15d162 vai_lease_f *vai_lease; - vai_return_f *vai_return; // optional + vai_buffer_f *vai_buffer; + vai_return_f *vai_return; uintptr_t reserve[4]; // abi fwd compat vai_fini_f *vai_fini; }; diff --git a/bin/varnishd/storage/storage_simple.c b/bin/varnishd/storage/storage_simple.c index cf10dd696..5bd92b5c8 100644 --- a/bin/varnishd/storage/storage_simple.c +++ b/bin/varnishd/storage/storage_simple.c @@ -315,6 +315,7 @@ struct sml_hdl { struct vai_hdl_preamble preamble; #define SML_HDL_MAGIC 0x37dfd996 struct vai_qe qe; + struct pool_task task; // unfortunate struct ws *ws; // NULL is malloc() struct objcore *oc; struct object *obj; @@ -359,6 +360,72 @@ sml_ai_viov_fill(struct viov *viov, struct storage *st) VAI_ASSERT_LEASE(viov->lease); } +// sml has no mechanism to notify "I got free space again now" +// (we could add that, but because storage.h is used in mgt, a first attempt +// looks at least like this would cause some include spill for vai_q_head or +// something similar) +// +// So anyway, to get ahead we just implement a pretty stupid "call the notify +// some time later" on a thread +static void +sml_ai_later_task(struct worker *wrk, void *priv) +{ + struct sml_hdl *hdl; + const vtim_dur dur = 0.0042; + + (void)wrk; + VTIM_sleep(dur); + CAST_VAI_HDL_NOTNULL(hdl, priv, SML_HDL_MAGIC); + memset(&hdl->task, 0, sizeof hdl->task); + hdl->qe.cb(hdl, hdl->qe.priv); +} +static void +sml_ai_later(struct worker *wrk, struct sml_hdl *hdl) +{ + AZ(hdl->task.func); + AZ(hdl->task.priv); + hdl->task.func = sml_ai_later_task; + hdl->task.priv = hdl; + AZ(Pool_Task(wrk->pool, &hdl->task, TASK_QUEUE_BG)); +} + + +static int +sml_ai_buffer(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) +{ + const struct stevedore *stv; + struct sml_hdl *hdl; + struct storage *st; + struct viov *vio; + int r = 0; + + (void) wrk; + CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC); + stv = hdl->stv; + CHECK_OBJ_NOTNULL(stv, STEVEDORE_MAGIC); + + VSCARAB_FOREACH(vio, scarab) + if (vio->iov.iov_len > UINT_MAX) + return (-EINVAL); + + VSCARAB_FOREACH(vio, scarab) { + st = objallocwithnuke(wrk, stv, vio->iov.iov_len, 0); + if (st == NULL) + break; + assert(st->space >= vio->iov.iov_len); + st->flags = STORAGE_F_BUFFER; + st->len = st->space; + + sml_ai_viov_fill(vio, st); + r++; + } + if (r == 0) { + sml_ai_later(wrk, hdl); + r = -EAGAIN; + } + return (r); +} + static int sml_ai_lease_simple(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) { @@ -497,6 +564,29 @@ sml_ai_lease_boc(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) return (r); } +// return only buffers, used if object is not streaming +static void v_matchproto_(vai_return_f) +sml_ai_return_buffers(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) +{ + struct storage *st; + struct sml_hdl *hdl; + uint64_t *p; + + (void) wrk; + CAST_VAI_HDL_NOTNULL(hdl, vhdl, SML_HDL_MAGIC); + + VSCARET_FOREACH(p, 
scaret) { + if (*p == VAI_LEASE_NORET) + continue; + CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC); + if ((st->flags & STORAGE_F_BUFFER) == 0) + continue; + sml_stv_free(hdl->stv, st); + } + VSCARET_INIT(scaret, scaret->capacity); +} + +// generic return for buffers and object leases, used when streaming static void v_matchproto_(vai_return_f) sml_ai_return(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) { @@ -528,6 +618,8 @@ sml_ai_return(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) Lck_Lock(&hdl->boc->mtx); VSCARET_FOREACH(p, todo) { CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC); + if ((st->flags & STORAGE_F_BUFFER) != 0) + continue; VTAILQ_REMOVE(&hdl->obj->list, st, list); if (st == hdl->boc->stevedore_priv) hdl->boc->stevedore_priv = trim_once; @@ -578,6 +670,8 @@ sml_ai_init(struct worker *wrk, struct objcore *oc, struct ws *ws, AN(hdl); INIT_VAI_HDL(hdl, SML_HDL_MAGIC); hdl->preamble.vai_lease = sml_ai_lease_simple; + hdl->preamble.vai_buffer = sml_ai_buffer; + hdl->preamble.vai_return = sml_ai_return_buffers; hdl->preamble.vai_fini = sml_ai_fini; hdl->ws = ws; @@ -590,6 +684,11 @@ sml_ai_init(struct worker *wrk, struct objcore *oc, struct ws *ws, hdl->st = VTAILQ_LAST(&hdl->obj->list, storagehead); CHECK_OBJ_ORNULL(hdl->st, STORAGE_MAGIC); + hdl->qe.magic = VAI_Q_MAGIC; + hdl->qe.cb = notify; + hdl->qe.hdl = hdl; + hdl->qe.priv = notify_priv; + hdl->boc = HSH_RefBoc(oc); if (hdl->boc == NULL) return (hdl); @@ -599,10 +698,6 @@ sml_ai_init(struct worker *wrk, struct objcore *oc, struct ws *ws, hdl->preamble.vai_lease = sml_ai_lease_boc; if ((hdl->oc->flags & OC_F_TRANSIENT) != 0) hdl->preamble.vai_return = sml_ai_return; - hdl->qe.magic = VAI_Q_MAGIC; - hdl->qe.cb = notify; - hdl->qe.hdl = hdl; - hdl->qe.priv = notify_priv; return (hdl); } From nils.goroll at uplex.de Fri Jul 4 17:04:04 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:04 +0000 (UTC) Subject: [master] 67f7647dd vmod_debug: Copy transport_reembarking_http1 to transport_vai Message-ID: <20250704170404.9EADF11A949@lists.varnish-cache.org> commit 67f7647dd5fe600d69eee3551b5ca823e71355f5 Author: Nils Goroll Date: Tue Jan 14 18:45:05 2025 +0100 vmod_debug: Copy transport_reembarking_http1 to transport_vai diff --git a/bin/varnishtest/tests/m00061.vtc b/bin/varnishtest/tests/m00061.vtc new file mode 100644 index 000000000..64295775c --- /dev/null +++ b/bin/varnishtest/tests/m00061.vtc @@ -0,0 +1,44 @@ +varnishtest "VMOD debug vai transport" + +server s1 { + rxreq + txresp -gziplen 13107 +} -start + +varnish v1 \ + -arg "-p fetch_chunksize=4k" \ + -vcl+backend { + import debug; + + sub vcl_hash { + hash_data(""); + return (lookup); + } + + sub vcl_deliver { + if (req.url == "/chunked") { + set resp.filters += " debug.chunked"; + } + debug.use_vai_http1(); + set resp.http.filters = resp.filters; + } +} -start + +varnish v1 -cliok "param.set debug +syncvsl" +varnish v1 -cliok "param.set debug +req_state" + +client c1 -repeat 16 -keepalive { + txreq + rxresp + expect resp.bodylen == 13107 +} -start + +client c2 -repeat 16 -keepalive { + txreq -url "/chunked" + rxresp + expect resp.http.Content-Length == + expect resp.bodylen == 13107 +} -start + +client c1 -wait +client c2 -wait diff --git a/vmod/automake_boilerplate_debug.am b/vmod/automake_boilerplate_debug.am index 5eb8eadc4..50d418fa8 100644 --- a/vmod/automake_boilerplate_debug.am +++ b/vmod/automake_boilerplate_debug.am @@ -10,7 +10,8 @@ libvmod_debug_la_SOURCES = \ vmod_debug_dyn.c \ vmod_debug_filters.c \ 
vmod_debug_obj.c \ - vmod_debug_transport_reembarking_http1.c + vmod_debug_transport_reembarking_http1.c \ + vmod_debug_transport_vai.c libvmod_debug_la_CFLAGS = diff --git a/vmod/vmod_debug.c b/vmod/vmod_debug.c index ecdd41155..5fac83db3 100644 --- a/vmod/vmod_debug.c +++ b/vmod/vmod_debug.c @@ -333,6 +333,7 @@ event_load(VRT_CTX, struct vmod_priv *priv) debug_add_filters(ctx); debug_transport_reembarking_http1_init(); + debug_transport_vai_init(); return (0); } @@ -1289,6 +1290,12 @@ xyzzy_use_reembarking_http1(VRT_CTX) debug_transport_reembarking_http1_use(ctx); } +VCL_VOID +xyzzy_use_vai_http1(VRT_CTX) +{ + debug_transport_vai_use(ctx); +} + static int in_oc(struct worker *wrk, struct objcore *oc, const char *p) { diff --git a/vmod/vmod_debug.h b/vmod/vmod_debug.h index be09c46cc..641148bfd 100644 --- a/vmod/vmod_debug.h +++ b/vmod/vmod_debug.h @@ -39,3 +39,9 @@ void debug_transport_reembarking_http1_use(VRT_CTX); void debug_transport_reembarking_http1_init(void); + +/* vmod_debug_transport_vai.c */ +void +debug_transport_vai_use(VRT_CTX); +void +debug_transport_vai_init(void); diff --git a/vmod/vmod_debug.vcc b/vmod/vmod_debug.vcc index bf69c6e34..d752c1eba 100644 --- a/vmod/vmod_debug.vcc +++ b/vmod/vmod_debug.vcc @@ -491,6 +491,13 @@ Example:: Debug c prefix[0]: (ws) 0x7fe69ef80420 abcd... +$Function VOID use_vai_http1() + +$Restrict vcl_deliver + +Switch to the VAI http1 debug transport. Calling it from any other +transport than http1 results in VCL failure. + DEPRECATED ========== diff --git a/vmod/vmod_debug_transport_vai.c b/vmod/vmod_debug_transport_vai.c new file mode 100644 index 000000000..ca1902804 --- /dev/null +++ b/vmod/vmod_debug_transport_vai.c @@ -0,0 +1,227 @@ +/*- + * Copyright (c) 2006 Verdens Gang AS + * Copyright (c) 2006-2015 Varnish Software AS + * Copyright 2024 UPLEX - Nils Goroll Systemoptimierung + * All rights reserved. + * + * Authors: Poul-Henning Kamp + * Nils Goroll + * + * SPDX-License-Identifier: BSD-2-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include "config.h" + +#include "cache/cache_varnishd.h" + +#include "cache/cache_filter.h" +#include "cache/cache_transport.h" +#include "http1/cache_http1.h" + +#include "vmod_debug.h" + +static void +dbg_vai_error(struct req *req, struct v1l **v1lp, const char *msg) +{ + + (void)req; + (void)v1lp; + (void)msg; + INCOMPL(); +} + +static void dbg_vai_deliver_finish(struct req *req, struct v1l **v1lp, int err); +static void dbg_vai_sendbody(struct worker *wrk, void *arg); + +static task_func_t *hack_http1_req = NULL; + +// copied from cache_http_deliver.c, then split & modified +static enum vtr_deliver_e v_matchproto_(vtr_deliver_f) +dbg_vai_deliver(struct req *req, int sendbody) +{ + struct vrt_ctx ctx[1]; + struct v1l *v1l; + + CHECK_OBJ_NOTNULL(req, REQ_MAGIC); + CHECK_OBJ_ORNULL(req->boc, BOC_MAGIC); + CHECK_OBJ_NOTNULL(req->objcore, OBJCORE_MAGIC); + + if (req->doclose == SC_NULL && + http_HdrIs(req->resp, H_Connection, "close")) { + req->doclose = SC_RESP_CLOSE; + } else if (req->doclose != SC_NULL) { + if (!http_HdrIs(req->resp, H_Connection, "close")) { + http_Unset(req->resp, H_Connection); + http_SetHeader(req->resp, "Connection: close"); + } + } else if (!http_GetHdr(req->resp, H_Connection, NULL)) + http_SetHeader(req->resp, "Connection: keep-alive"); + + CHECK_OBJ_NOTNULL(req->wrk, WORKER_MAGIC); + + v1l = V1L_Open(req->ws, &req->sp->fd, req->vsl, + req->t_prev + SESS_TMO(req->sp, send_timeout), + cache_param->http1_iovs); + + if (v1l == NULL) { + dbg_vai_error(req, &v1l, "Failure to init v1d (workspace_thread overflow)"); + return (VTR_D_DONE); + } + + // Do not roll back req->ws upon V1L_Close() + V1L_NoRollback(v1l); + + if (sendbody) { + if (!http_GetHdr(req->resp, H_Content_Length, NULL)) { + if (req->http->protover == 11) { + http_SetHeader(req->resp, + "Transfer-Encoding: chunked"); + } else { + req->doclose = SC_TX_EOF; + } + } + INIT_OBJ(ctx, VRT_CTX_MAGIC); + VCL_Req2Ctx(ctx, req); + if (VDP_Push(ctx, req->vdc, req->ws, VDP_v1l, v1l)) { + dbg_vai_error(req, &v1l, "Failure to push v1d processor"); + return (VTR_D_DONE); + } + } + + if (WS_Overflowed(req->ws)) { + dbg_vai_error(req, &v1l, "workspace_client overflow"); + return (VTR_D_DONE); + } + + if (WS_Overflowed(req->sp->ws)) { + dbg_vai_error(req, &v1l, "workspace_session overflow"); + return (VTR_D_DONE); + } + + if (WS_Overflowed(req->wrk->aws)) { + dbg_vai_error(req, &v1l, "workspace_thread overflow"); + return (VTR_D_DONE); + } + + req->acct.resp_hdrbytes += HTTP1_Write(v1l, req->resp, HTTP1_Resp); + + if (! sendbody) { + dbg_vai_deliver_finish(req, &v1l, 0); + return (VTR_D_DONE); + } + + (void)V1L_Flush(v1l); + + if (hack_http1_req == NULL) + hack_http1_req = req->task->func; + AN(hack_http1_req); + + VSLb(req->vsl, SLT_Debug, "w=%p scheduling dbg_vai_sendbody", req->wrk); + + req->task->func = dbg_vai_sendbody; + req->task->priv = req; + + req->wrk = NULL; + req->vdc->wrk = NULL; + req->transport_priv = v1l; + + AZ(Pool_Task(req->sp->pool, req->task, TASK_QUEUE_RUSH)); + return (VTR_D_DISEMBARK); +} + +static void v_matchproto_(task_func_t) +dbg_vai_sendbody(struct worker *wrk, void *arg) +{ + struct req *req; + struct v1l *v1l; + const char *p; + int err, chunked; + + CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); + CAST_OBJ_NOTNULL(req, arg, REQ_MAGIC); + v1l = req->transport_priv; + req->transport_priv = NULL; + AN(v1l); + + THR_SetRequest(req); + VSLb(req->vsl, SLT_Debug, "w=%p enter dbg_vai_sendbody", wrk); + AZ(req->wrk); + CNT_Embark(wrk, req); + req->vdc->wrk = wrk; // move to CNT_Embark? 
+ + chunked = http_GetHdr(req->resp, H_Transfer_Encoding, &p) && strcmp(p, "chunked") == 0; + if (chunked) + V1L_Chunked(v1l); + err = VDP_DeliverObj(req->vdc, req->objcore); + if (!err && chunked) + V1L_EndChunk(v1l); + dbg_vai_deliver_finish(req, &v1l, err); + + VSLb(req->vsl, SLT_Debug, "w=%p resuming http1_req", wrk); + wrk->task->func = hack_http1_req; + wrk->task->priv = req; +} + +static void +dbg_vai_deliver_finish(struct req *req, struct v1l **v1lp, int err) +{ + stream_close_t sc; + uint64_t bytes; + + sc = V1L_Close(v1lp, &bytes); + + req->acct.resp_bodybytes += VDP_Close(req->vdc, req->objcore, req->boc); + + if (sc == SC_NULL && err && req->sp->fd >= 0) + sc = SC_REM_CLOSE; + if (sc != SC_NULL) + Req_Fail(req, sc); +} + +static struct transport DBG_transport; + +void +debug_transport_vai_init(void) +{ + DBG_transport = HTTP1_transport; + DBG_transport.name = "DBG VAI"; + DBG_transport.deliver = dbg_vai_deliver; +} + +void +debug_transport_vai_use(VRT_CTX) +{ + struct req *req; + + CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); + req = ctx->req; + CHECK_OBJ_NOTNULL(req, REQ_MAGIC); + + if (req->transport != &HTTP1_transport) { + VRT_fail(ctx, "Only works on built-in http1 transport"); + return; + } + AZ(req->transport_priv); + req->transport = &DBG_transport; +} From nils.goroll at uplex.de Fri Jul 4 17:04:04 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:04 +0000 (UTC) Subject: [master] 3b3173449 cache_deliver_proc: Add VDPIO for VAI-based delivery processing (filters) Message-ID: <20250704170404.BE46911A95E@lists.varnish-cache.org> commit 3b3173449b2b9097eb8f5d3b09cfca3306e47096 Author: Nils Goroll Date: Wed Jan 8 09:43:31 2025 +0100 cache_deliver_proc: Add VDPIO for VAI-based delivery processing (filters) Context: Please read the message of previous commit "cache_obj: Add an asynchronous iteration API" before continuing here. Why? The VAI interface laid the groundwork for asynchronous iteration, but did not yet address filters. The existing VDP model is a "push" design: VDP_ObjIterate() calls VDP_bytes() with data from storage. VDP_bytes() calls the first VDP, which does its processing, calls VDP_bytes() for the next VDP etc. until the last VDP sends data out somewhere. This is a good model for our existing "synchronous I/O from threads" design, but it is, again, fundamentally incompatible with async I/O (see Context): Any filter in the chain can assume that VDP_bytes(..., VDP_FLUSH) will, when it returns, be done with the buffer, such that the filter may issue the next VDP_bytes(), potentially on the same buffer. For async I/O, we need a model where buffers are handed out and explicitly returned when done with. Discarded prior work Before ending up at the model in this patch, a "push" design had been attempted where buffers would be handed from filter to filter, with the option for each filter to say "first I need more input" or "I have more output after this". This model turned out overly complicated, so it was discarded. How? The model in this patch takes VDP from "push" to "pull": The top level delivery code asks the filter chain for buffers like it would ask the ObjVAI API for data. An example is coming up in patch "vmod_debug: Switch transport_vai to VDPIO Upgrade", it basically looks like this: do { nbufs = vdpio_pull(req->vdc, NULL, scarab); send_data(scarab); } while ((scarab->flags & VSCARAB_F_END) == 0) Similarly to VDP_bytes(), vdpio_pull() calls into the VDP layer, but this time in the other direction, from last VDP to first to storage.
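Slightly fleshed out, such a consumer loop could look as follows. This is only a sketch under assumptions, not code from this commit: send_iovecs() and wait_for_notify() are hypothetical stand-ins for the transport's output path and for the VAI notify plumbing, and cap is the io-vector capacity obtained when setting up the filter chain:

	static int
	pull_and_send(struct vdp_ctx *vdc, int cap)
	{
		VSCARAB_LOCAL(scarab, cap);	// stack-allocated io-vector batch
		unsigned flags;
		int r;

		do {
			// ask the chain (last VDP .. storage) for leases
			r = vdpio_pull(vdc, NULL, scarab);
			flags = scarab->flags;	// save before the scarab is reset
			send_iovecs(scarab);	// stand-in: write out scarab->s[]
			vdpio_return_vscarab(vdc, scarab);	// hand leases back
			if (r == -ENOBUFS || r == -EAGAIN)
				wait_for_notify();	// stand-in: block for the notify cb
			else if (r < 0)
				return (r);	// hard error
		} while ((flags & VSCARAB_F_END) == 0);
		return (0);
	}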
Each VDP now returns buffers it has ready, and when it needs more data, it calls vdpio_pull() to get more input buffers from the next layer and ultimately from the storage engine. Different to VDP_bytes(), vdpio_pull() has a tail call to the next layer and can be inlined, which shifts some responsibility, more on that in the next section. API Usage The VDP filter API is similar to the existing API in that it consists of an initializer, a finalizer and a "bytes" function, which is now called "lease" to match the lease concept introduced with VAI. The lease function is called from vdpio_pull(). It is responsible for vdpio_pull()'ing data from the previous layer, processing it and putting buffers into a provided output vscarab. The lease function return values are the same as for ObjVAIlease(): negative for "call again later" and errors, otherwise the number of extents added. The lease function SHOULD signal end-of-data by setting the vscarab flag VSCARAB_F_END in the scarab which contains the last chunk of data. If it can not do this, it MUST set the vscarab flag VSCARAB_F_END for all subsequent calls (for which 0 is returned). The lease function MUST accept a partially filled vscarab. If it can not add anything to it, because the minimum capacity is not available, it MUST return zero. The lease function is now responsible for maintaining the .calls and .bytes_in members of its struct vdp_entry. Any data which the VDP creates needs to either be put into buffers allocated from storage via ObjVAIbuffer(), or be guaranteed to remain valid until the end of delivery (like static and workspace pointers) and carry the lease VAI_LEASE_NORET. Any buffers which the VDP receives from a previous layer and does not emit in the output vscarab need to be returned with ObjVAIreturn(). To batch these returns, a VSCARET is kept in the VDP context and managed by these helpers: - vdpio_return_lease() to return a single lease to the batch - vdpio_return_vscarab() to return a full vscarab to the batch - vdpio_consolidate_vscarab() to return all leases with a zero size, where the zero size marks them being consumed. This is intended to facilitate input vscarabs. Naturally, this API is likely to still evolve. VDPIO management & transitional interface The basic model for VDPIO is kept from VDP: There is VDPIO_Push() to match VDP_Push() and VDPIO_Close() to match VDP_Close(). Yet, for the time being, we need to have VDP and VDPIO co-exist: Not all filters will be ready for VDPIO and there will be bugs, so we will want the option to go back to the old interface. This is where VDPIO_Upgrade() is used: it works on an already initialized VDP filter list and returns whether it can be upgraded to VDPIO. To do so, it calls the vdpio_upgrade function of each VDP. If a vdpio_upgrade function is missing for any filter, the whole upgrade fails and the caller is expected to fall back to traditional VDP. VDPIO_Push() can be used to push additional VDPIO-enabled VDPs after a successful upgrade, or if only VDPIO-enabled VDPs are used. diff --git a/bin/varnishd/cache/cache_deliver_proc.c b/bin/varnishd/cache/cache_deliver_proc.c index 83ee48e61..7b1fd0ab9 100644 --- a/bin/varnishd/cache/cache_deliver_proc.c +++ b/bin/varnishd/cache/cache_deliver_proc.c @@ -244,6 +244,139 @@ VDP_Close(struct vdp_ctx *vdc, struct objcore *oc, struct boc *boc) /*--------------------------------------------------------------------*/ +/* + * Push a VDPIO vdp.
This can only be used with only vdpio-enabled VDPs or + * after a successful upgrade + */ +int +VDPIO_Push(VRT_CTX, struct vdp_ctx *vdc, struct ws *ws, const struct vdp *vdp, + void *priv) +{ + struct vdp_entry *vdpe; + int r; + + CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); + CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); + CHECK_OBJ_ORNULL(vdc->oc, OBJCORE_MAGIC); + CHECK_OBJ_NOTNULL(vdc->hp, HTTP_MAGIC); + AN(vdc->clen); + assert(*vdc->clen >= -1); + AN(ws); + AN(vdp); + AN(vdp->name); + + if (vdc->retval < 0) + return (vdc->retval); + + AN(vdp->io_init); + + // the first VDP (which leases from storage) only gets the minimum + // capacity requirement of 1 + if (vdc->retval == 0) { + assert(VTAILQ_EMPTY(&vdc->vdp)); + vdc->retval = 1; + } + + if (DO_DEBUG(DBG_PROCESSORS)) + VSLb(vdc->vsl, SLT_Debug, "VDPIO_push(%s)", vdp->name); + + vdpe = WS_Alloc(ws, sizeof *vdpe); + if (vdpe == NULL) { + vdc->retval = -ENOMEM; + return (vdc->retval); + } + INIT_OBJ(vdpe, VDP_ENTRY_MAGIC); + vdpe->vdp = vdp; + vdpe->priv = priv; + VTAILQ_INSERT_TAIL(&vdc->vdp, vdpe, list); + vdc->nxt = VTAILQ_FIRST(&vdc->vdp); + + assert(vdc->retval > 0); + if (vdpe->vdp->io_init != NULL) { + r = vdpe->vdp->io_init(ctx, vdc, &vdpe->priv, vdc->retval); + if (r <= 0) { + VTAILQ_REMOVE(&vdc->vdp, vdpe, list); + vdc->nxt = VTAILQ_FIRST(&vdc->vdp); + } + else + AN(vdp->io_lease); + if (r != 0) + vdc->retval = r; + } + vdc->oc = NULL; + return (vdc->retval); +} + +/* + * upgrade an already initialized VDP filter chain to VDPIO, if possible + * returns: + * > 0 cap + * -ENOTSUP io_upgrade missing for at least one filter + * vdc->retval if < 0 + */ +int +VDPIO_Upgrade(VRT_CTX, struct vdp_ctx *vdc) +{ + struct vdp_entry *vdpe; + int cap, r; + + CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); + CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); + + VTAILQ_FOREACH(vdpe, &vdc->vdp, list) + if (vdpe->vdp->io_upgrade == NULL) + return (-ENOTSUP); + + if (vdc->retval < 0) + return (vdc->retval); + + // minimum capacity requirement for the first filter (after storage) + r = cap = 1; + VTAILQ_FOREACH(vdpe, &vdc->vdp, list) { + r = vdpe->vdp->io_upgrade(ctx, vdc, &vdpe->priv, cap); + if (DO_DEBUG(DBG_PROCESSORS)) { + VSLb(vdc->vsl, SLT_Debug, "VDPIO_Upgrade " + "%d = %s(cap = %d)", + r, vdpe->vdp->name, cap); + } + if (r < 0) + return ((vdc->retval = r)); + // XXX remove if filter does not want to be pushed? 
+ assert(r != 0); + cap = r; + } + return ((vdc->retval = r)); +} + +uint64_t +VDPIO_Close(struct vdp_ctx *vdc, struct objcore *oc, struct boc *boc) +{ + struct vdp_entry *vdpe; + uint64_t rv = 0; + + CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); + CHECK_OBJ_NOTNULL(vdc->wrk, WORKER_MAGIC); + CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC); + CHECK_OBJ_ORNULL(boc, BOC_MAGIC); + + while ((vdpe = VTAILQ_FIRST(&vdc->vdp)) != NULL) { + CHECK_OBJ(vdpe, VDP_ENTRY_MAGIC); + rv = vdpe->bytes_in; + VSLb(vdc->vsl, SLT_VdpAcct, "%s %ju %ju", vdpe->vdp->name, + (uintmax_t)vdpe->calls, (uintmax_t)rv); + if (vdpe->vdp->io_fini != NULL) + vdpe->vdp->io_fini(vdc, &vdpe->priv); + AZ(vdpe->priv); + VTAILQ_REMOVE(&vdc->vdp, vdpe, list); + vdc->nxt = VTAILQ_FIRST(&vdc->vdp); + } + + if (oc != NULL) + HSH_Cancel(vdc->wrk, oc, boc); + return (rv); +} + +/*--------------------------------------------------------------------*/ int v_matchproto_(objiterate_f) VDP_ObjIterate(void *priv, unsigned flush, const void *ptr, ssize_t len) { diff --git a/bin/varnishd/cache/cache_filter.h b/bin/varnishd/cache/cache_filter.h index 329ab9982..ce3cdabc2 100644 --- a/bin/varnishd/cache/cache_filter.h +++ b/bin/varnishd/cache/cache_filter.h @@ -33,6 +33,7 @@ struct req; struct vfp_entry; struct vfp_ctx; struct vdp_ctx; +struct vdp_entry; /* Fetch processors --------------------------------------------------*/ @@ -125,12 +126,55 @@ typedef int vdp_fini_f(struct vdp_ctx *, void **priv); typedef int vdp_bytes_f(struct vdp_ctx *, enum vdp_action, void **priv, const void *ptr, ssize_t len); +/* + * ============================================================ + * vdpio io-vector interface + */ +typedef int vdpio_init_f(VRT_CTX, struct vdp_ctx *, void **priv, int capacity); +/* + * the vdpio_init_f() functions are called front (object iterator) to back (consumer). + * + * each init function returns the minimum number of io vectors (vscarab + * capacity) that it requires the next filter to accept. This capacity is + * passed to the next init function such that it can allocate sufficient + * space to fulfil the requirement of the previous filter. + * + * Return values: + * < 0 : Error + * == 0 : NOOP, do not push this filter + * >= 1 : capacity requirement + * + * typedef is shared with upgrade + */ + +typedef int vdpio_lease_f(struct vdp_ctx *, struct vdp_entry *, struct vscarab *scarab); +/* + * vdpio_lease_f() returns leases provided by this filter layer in the vscarab + * provided by the caller. + * + * called via vdpio_pull(): the last filter is called first by delivery. Each + * filter calls the previous layer for leases. The first filter calls storage.
+ * + * return values are as for ObjVAIlease() + * + * Other notable differences to vdp_bytes_f: + * - responsible for updating (struct vdp_entry).bytes_in and .calls + * + */ + +typedef void vdpio_fini_f(struct vdp_ctx *, void **priv); + struct vdp { const char *name; vdp_init_f *init; vdp_bytes_f *bytes; vdp_fini_f *fini; const void *priv1; + + vdpio_init_f *io_init; + vdpio_init_f *io_upgrade; + vdpio_lease_f *io_lease; + vdpio_fini_f *io_fini; }; struct vdp_entry { @@ -149,10 +193,10 @@ VTAILQ_HEAD(vdp_entry_s, vdp_entry); struct vdp_ctx { unsigned magic; #define VDP_CTX_MAGIC 0xee501df7 - int retval; - uint64_t bytes_done; + int retval; // vdpio: error or capacity + uint64_t bytes_done; // not used with vdpio struct vdp_entry_s vdp; - struct vdp_entry *nxt; + struct vdp_entry *nxt; // not needed for vdpio struct worker *wrk; struct vsl_log *vsl; // NULL'ed after the first filter has been pushed @@ -160,10 +204,119 @@ struct vdp_ctx { // NULL'ed for delivery struct http *hp; intmax_t *clen; + // only for vdpio + vai_hdl vai_hdl; + struct vscaret *scaret; }; int VDP_bytes(struct vdp_ctx *, enum vdp_action act, const void *, ssize_t); +/* + * vdpe == NULL: get leases from the last layer + * vdpe != NULL: get leases from the previous layer or storage + * + * conversely to VDP_bytes, vdpio calls happen back (delivery) to front (storage) + * + * ends up in a tail call to the previous layer to save stack space + */ +static inline int +vdpio_pull(struct vdp_ctx *vdc, struct vdp_entry *vdpe, struct vscarab *scarab) +{ + + CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); + + if (vdpe == NULL) + vdpe = VTAILQ_LAST(&vdc->vdp, vdp_entry_s); + else { + CHECK_OBJ(vdpe, VDP_ENTRY_MAGIC); + vdpe = VTAILQ_PREV(vdpe, vdp_entry_s, list); + } + + if (vdpe != NULL) + return (vdpe->vdp->io_lease(vdc, vdpe, scarab)); + else + return (ObjVAIlease(vdc->wrk, vdc->vai_hdl, scarab)); } + +/* + * ============================================================ + * VDPIO helpers + */ + +/* + * l bytes have been written to buf.
save these to out and checkpoint buf for + * the remaining free space + */ +static inline void +iovec_collect(struct iovec *buf, struct iovec *out, size_t l) +{ + if (out->iov_base == NULL) + out->iov_base = buf->iov_base; + assert((char *)out->iov_base + out->iov_len == buf->iov_base); + out->iov_len += l; + buf->iov_base = (char *)buf->iov_base + l; + buf->iov_len -= l; +} + +/* + * return a single lease via the vdc vscaret + */ +static inline +void vdpio_return_lease(const struct vdp_ctx *vdc, uint64_t lease) +{ + struct vscaret *scaret; + + CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); + scaret = vdc->scaret; + VSCARET_CHECK_NOTNULL(scaret); + + if (scaret->used == scaret->capacity) + ObjVAIreturn(vdc->wrk, vdc->vai_hdl, scaret); + VSCARET_ADD(scaret, lease); +} + +/* + * add all leases from the vscarab to the vscaret + */ +static inline +void vdpio_return_vscarab(const struct vdp_ctx *vdc, struct vscarab *scarab) +{ + struct viov *v; + + VSCARAB_CHECK_NOTNULL(scarab); + VSCARAB_FOREACH(v, scarab) + vdpio_return_lease(vdc, v->lease); + VSCARAB_INIT(scarab, scarab->capacity); +} + +/* + * return used up iovs (len == 0) + * move remaining to the beginning of the scarab + */ +static inline void +vdpio_consolidate_vscarab(const struct vdp_ctx *vdc, struct vscarab *scarab) +{ + struct viov *v, *f = NULL; + + VSCARAB_CHECK_NOTNULL(scarab); + VSCARAB_FOREACH(v, scarab) { + if (v->iov.iov_len == 0) { + AN(v->iov.iov_base); + vdpio_return_lease(vdc, v->lease); + if (f == NULL) + f = v; + continue; + } + else if (f == NULL) + continue; + memmove(f, v, scarab->used - (v - scarab->s) * sizeof (*v)); + break; + } + if (f != NULL) + scarab->used = f - scarab->s; +} + + void v_deprecated_ VRT_AddVDP(VRT_CTX, const struct vdp *); void v_deprecated_ VRT_RemoveVDP(VRT_CTX, const struct vdp *); diff --git a/bin/varnishd/cache/cache_varnishd.h b/bin/varnishd/cache/cache_varnishd.h index 6d73c04fd..2892ef188 100644 --- a/bin/varnishd/cache/cache_varnishd.h +++ b/bin/varnishd/cache/cache_varnishd.h @@ -199,6 +199,11 @@ extern const struct vdp VDP_gunzip; extern const struct vdp VDP_esi; extern const struct vdp VDP_range; +uint64_t VDPIO_Close(struct vdp_ctx *, struct objcore *, struct boc *); +int VDPIO_Upgrade(VRT_CTX, struct vdp_ctx *vdc); +int VDPIO_Push(VRT_CTX, struct vdp_ctx *, struct ws *, const struct vdp *, + void *priv); + /* cache_exp.c */ vtim_real EXP_Ttl(const struct req *, const struct objcore *); From nils.goroll at uplex.de Fri Jul 4 17:04:04 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:04 +0000 (UTC) Subject: [master] b1f659e20 cache_gzip: split out common init code Message-ID: <20250704170404.E1AC111A974@lists.varnish-cache.org> commit b1f659e20e3e92f1dda92fc4461f8f7130e352eb Author: Nils Goroll Date: Wed Jan 8 09:50:42 2025 +0100 cache_gzip: split out common init code diff --git a/bin/varnishd/cache/cache_gzip.c b/bin/varnishd/cache/cache_gzip.c index 4f7205c92..bb3afcb0f 100644 --- a/bin/varnishd/cache/cache_gzip.c +++ b/bin/varnishd/cache/cache_gzip.c @@ -300,15 +300,13 @@ VGZ_Gzip(struct vgz *vg, const void **pptr, ssize_t *plen, enum vgz_flag flags) * VDP for gunzip'ing */ +// common for traditional interface and vdpio +static int vdp_gunzip_init_common(struct vdp_ctx *vdc); + static int v_matchproto_(vdp_init_f) vdp_gunzip_init(VRT_CTX, struct vdp_ctx *vdc, void **priv) { struct vgz *vg; - struct boc *boc; - enum boc_state_e bos; - const char *p; - ssize_t dl; - uint64_t u; CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); @@ 
-326,7 +324,17 @@ vdp_gunzip_init(VRT_CTX, struct vdp_ctx *vdc, void **priv) VGZ_Obuf(vg, vg->m_buf, vg->m_sz); *priv = vg; + return (vdp_gunzip_init_common(vdc)); +} +static int +vdp_gunzip_init_common(struct vdp_ctx *vdc) +{ + struct boc *boc; + enum boc_state_e bos; + const char *p; + ssize_t dl; + uint64_t u; http_Unset(vdc->hp, H_Content_Encoding); *vdc->clen = -1; From nils.goroll at uplex.de Fri Jul 4 17:04:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:05 +0000 (UTC) Subject: [master] 6052da363 cache_gzip: Add VDPIO support Message-ID: <20250704170405.077B911A980@lists.varnish-cache.org> commit 6052da3631b17771ebf66454c6af7fd4e7b82646 Author: Nils Goroll Date: Wed Jan 8 10:15:22 2025 +0100 cache_gzip: Add VDPIO support diff --git a/bin/varnishd/cache/cache_gzip.c b/bin/varnishd/cache/cache_gzip.c index bb3afcb0f..0b2e948db 100644 --- a/bin/varnishd/cache/cache_gzip.c +++ b/bin/varnishd/cache/cache_gzip.c @@ -255,6 +255,44 @@ VGZ_Gunzip(struct vgz *vg, const void **pptr, ssize_t *plen) return (VGZ_ERROR); } +/* set vz pointers for in and out iovecs */ +static inline void +vgz_iovec_update(struct vgz *vg, const struct iovec *in, const struct iovec *buf) +{ + /* in: either fully consumed or the same */ + assert(vg->vz.avail_in == 0 || vg->vz.next_in == in->iov_base); + vg->vz.next_in = in->iov_base; + vg->vz.avail_in = in->iov_len; + vg->vz.next_out = buf->iov_base; + vg->vz.avail_out = buf->iov_len; +} + +static enum vgzret_e +vgz_gunzip_iovec(struct vgz *vg, struct iovec *in, struct iovec *buf, struct iovec *out) +{ + int i; + + CHECK_OBJ_NOTNULL(vg, VGZ_MAGIC); + AN(in && buf && out); + vgz_iovec_update(vg, in, buf); + + i = inflate(&vg->vz, 0); + if (i == Z_OK || i == Z_STREAM_END) { + iovec_collect(buf, out, pdiff(buf->iov_base, vg->vz.next_out)); + in->iov_base = vg->vz.next_in; + in->iov_len = vg->vz.avail_in; + } + vg->last_i = i; + if (i == Z_OK) + return (VGZ_OK); + if (i == Z_STREAM_END) + return (VGZ_END); + if (i == Z_BUF_ERROR) + return (VGZ_STUCK); + VSLb(vg->vsl, SLT_Gzip, "Gunzip error: %d (%s)", i, vgz_msg(vg)); + return (VGZ_ERROR); +} + /*--------------------------------------------------------------------*/ enum vgzret_e @@ -404,6 +442,7 @@ vdp_gunzip_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv, vg->m_len += dl; if (vr < VGZ_OK) return (-1); + // END or STUCK if (vg->m_len == vg->m_sz || vr != VGZ_OK) { if (VDP_bytes(vdc, vr == VGZ_END ? VDP_END : VDP_FLUSH, vg->m_buf, vg->m_len)) @@ -416,11 +455,179 @@ vdp_gunzip_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv, return (0); } +#ifdef LATER +/* + * XXX does it make sense to work on more than one buffer? 
+ */ +static int v_matchproto_(vdpio_init_f) +vdpio_gunzip_init(VRT_CTX, struct vdp_ctx *vdc, void **priv, int capacity) +{ + struct vgz *vg; + struct vscarab *in; + + CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); + CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); + CHECK_OBJ_ORNULL(vdc->oc, OBJCORE_MAGIC); + CHECK_OBJ_NOTNULL(vdc->hp, HTTP_MAGIC); + AN(vdc->clen); + AN(priv); + assert(capacity >= 1); + + if (capacity < 4) + capacity = 4; + + in = WS_Alloc(ctx->ws, VSCARAB_SIZE(capacity)); + if (in == NULL) + return (-1); + + vg = VGZ_NewGunzip(vdc->vsl, "U D -"); + if (vg == NULL) + return (-1); + + AZ(vg->m_buf); + vg->m_buf = (void *)in; + + *priv = vg; + AZ(vdp_gunzip_init_common(vdc)); + return (1); +} +#endif + +static int v_matchproto_(vdpio_init_f) +vdpio_gunzip_upgrade(VRT_CTX, struct vdp_ctx *vdc, void **priv, int capacity) +{ + struct vgz *vg; + struct vscarab *in; + + CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); + CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); + AN(priv); + assert(capacity >= 1); + + if (capacity < 4) + capacity = 4; + + /* done in vdp_gunzip_init */ + CAST_OBJ_NOTNULL(vg, *priv, VGZ_MAGIC); + + in = WS_Alloc(ctx->ws, VSCARAB_SIZE(capacity)); + if (in == NULL) + return (-1); + VSCARAB_INIT(in, capacity); + + // XXX duplicate work - remove when completing transition to VAI + AN(vg->m_buf); + AN(vg->stvbuf); + STV_FreeBuf(vdc->wrk, &vg->stvbuf); + vg->stvbuf = NULL; + vg->m_buf = (void *)in; + + return (1); +} + +static int v_matchproto_(vdpio_lease_f) +vdpio_gunzip_lease(struct vdp_ctx *vdc, struct vdp_entry *this, struct vscarab *out) +{ + struct vscarab *in; + enum vgzret_e vr; + struct vgz *vg; + struct viov *v, *b, *o; + int r; + + CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); + CHECK_OBJ_NOTNULL(this, VDP_ENTRY_MAGIC); + CAST_OBJ_NOTNULL(vg, this->priv, VGZ_MAGIC); + CAST_OBJ_NOTNULL(in, (void*)vg->m_buf, VSCARAB_MAGIC); + + this->calls++; + + if (out->used == out->capacity) + return (0); + + if (in->used < in->capacity && (in->flags & VSCARAB_F_END) == 0) + r = vdpio_pull(vdc, this, in); + else + r = 0; + + if (in->used == 0) { + out->flags = in->flags & VSCARAB_F_END; + return (r); + } + + // XXX more than one buffer? + VSCARAB_LOCAL(buf, 1); + b = VSCARAB_GET(buf); + AN(b); + b->iov.iov_len = cache_param->gzip_buffer; + r = ObjVAIbuffer(vdc->wrk, vdc->vai_hdl, buf); + if (r < 0) + return (out->used ? 
0 : r); + + o = VSCARAB_GET(out); + AN(o); + r = 0; + + VSCARAB_FOREACH(v, in) { + this->bytes_in += v->iov.iov_len; + vr = vgz_gunzip_iovec(vg, &v->iov, &b->iov, &o->iov); + if (vr == VGZ_END && v->iov.iov_len > 0) { + VSLb(vg->vsl, SLT_Gzip, "G(un)zip error: %d (%s)", + vr, "junk after VGZ_END"); + r = -EMSGSIZE; + break; + } + if (vr < VGZ_OK) + break; + + if (b->iov.iov_len == 0 || vr != VGZ_OK) { + r = 1; + break; + } + } + + if (r <= 0) { + o->iov.iov_base = NULL; + o->iov.iov_len = 0; + vdpio_return_lease(vdc, b->lease); + return (r); + } + + o->lease = b->lease; + b->lease = 0; + + vdpio_consolidate_vscarab(vdc, in); + if (in->used == 0) + out->flags = in->flags & VSCARAB_F_END; + + return (r); +} + +static void v_matchproto_(vdpio_fini_f) +vdpio_gunzip_fini(struct vdp_ctx *vdc, void **priv) +{ + struct vscarab *in; + struct vgz *vg; + + (void)vdc; + TAKE_OBJ_NOTNULL(vg, priv, VGZ_MAGIC); + CAST_OBJ_NOTNULL(in, (void *)vg->m_buf, VSCARAB_MAGIC); + vg->m_buf = NULL; + + (void)VGZ_Destroy(vdc->wrk, &vg); +} + const struct vdp VDP_gunzip = { .name = "gunzip", .init = vdp_gunzip_init, .bytes = vdp_gunzip_bytes, .fini = vdp_gunzip_fini, + +#ifdef LATER + .io_init = vdpio_gunzip_init, +#endif + .io_upgrade = vdpio_gunzip_upgrade, + .io_lease = vdpio_gunzip_lease, + .io_fini = vdpio_gunzip_fini, }; /*--------------------------------------------------------------------*/ From nils.goroll at uplex.de Fri Jul 4 17:04:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:05 +0000 (UTC) Subject: [master] 5ead061b0 cache_http1_line: Add VDPIO Support Message-ID: <20250704170405.30E1E11A98F@lists.varnish-cache.org> commit 5ead061b041eb247de4d63ddeab35612d530645d Author: Nils Goroll Date: Wed Jan 15 09:54:01 2025 +0100 cache_http1_line: Add VDPIO Support diff --git a/bin/varnishd/http1/cache_http1_line.c b/bin/varnishd/http1/cache_http1_line.c index abf853ae2..fca1f6e21 100644 --- a/bin/varnishd/http1/cache_http1_line.c +++ b/bin/varnishd/http1/cache_http1_line.c @@ -393,8 +393,78 @@ v1l_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv, return (0); } +/*-------------------------------------------------------------------- + * VDPIO using V1L + * + * this is deliberately half-baked to reduce work in progress while heading + * towards VAI/VDPIO: we update the v1l with the scarab, which we + * return unmodified. + * + */ + +/* remember priv pointer for V1L_Close() to clear */ +static int v_matchproto_(vpio_init_f) +v1l_io_init(VRT_CTX, struct vdp_ctx *vdc, void **priv, int capacity) +{ + struct v1l *v1l; + + (void) ctx; + (void) vdc; + AN(priv); + + CAST_OBJ_NOTNULL(v1l, *priv, V1L_MAGIC); + + v1l->vdp_priv = priv; + return (capacity); +} + +static int v_matchproto_(vpio_init_f) +v1l_io_upgrade(VRT_CTX, struct vdp_ctx *vdc, void **priv, int capacity) +{ + return (v1l_io_init(ctx, vdc, priv, capacity)); +} + +/* + * API note + * + * this VDP is special in that it does not transform data, but prepares + * the write. From the perspective of VDPIO, its current state is only + * transitional. + * + * Because the VDP prepares the actual writes, but the caller needs + * to return the scarab's leases, the caller in this case is + * required to empty the scarab after V1L_Flush()'ing.
+ */ + +static int v_matchproto_(vdpio_lease_f) +v1l_io_lease(struct vdp_ctx *vdc, struct vdp_entry *this, struct vscarab *scarab) +{ + struct v1l *v1l; + struct viov *v; + int r; + + CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); + CHECK_OBJ_NOTNULL(this, VDP_ENTRY_MAGIC); + CAST_OBJ_NOTNULL(v1l, this->priv, V1L_MAGIC); + VSCARAB_CHECK(scarab); + AZ(scarab->used); // see note above + this->calls++; + r = vdpio_pull(vdc, this, scarab); + if (r < 0) + return (r); + VSCARAB_FOREACH(v, scarab) + this->bytes_in += V1L_Write(v1l, v->iov.iov_base, v->iov.iov_len); + return (r); +} + const struct vdp * const VDP_v1l = &(struct vdp){ .name = "V1B", .init = v1l_init, .bytes = v1l_bytes, + +#ifdef LATER + .io_init = v1l_io_init, +#endif + .io_upgrade = v1l_io_upgrade, + .io_lease = v1l_io_lease, }; From nils.goroll at uplex.de Fri Jul 4 17:04:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:05 +0000 (UTC) Subject: [master] adc810c2f vmod_debug: Switch transport_vai to VDPIO Upgrade Message-ID: <20250704170405.4C95F11A99E@lists.varnish-cache.org> commit adc810c2f7ac7620bbe2014bb510f170188116a9 Author: Nils Goroll Date: Wed Feb 19 10:21:25 2025 +0100 vmod_debug: Switch transport_vai to VDPIO Upgrade diff --git a/bin/varnishtest/tests/m00061.vtc b/bin/varnishtest/tests/m00061.vtc index 64295775c..1031c36e6 100644 --- a/bin/varnishtest/tests/m00061.vtc +++ b/bin/varnishtest/tests/m00061.vtc @@ -24,21 +24,42 @@ varnish v1 \ } } -start +logexpect l1 -v v1 -g raw { + fail add * Debug "scheduling dbg_vai_deliverobj" + expect * * Debug "scheduling dbg_vai_lease" + expect * * ReqHeader "Last: Request" + fail clear +} -start + varnish v1 -cliok "param.set debug +syncvsl" varnish v1 -cliok "param.set debug +req_state" +varnish v1 -cliok "param.set debug +processors" -client c1 -repeat 16 -keepalive { - txreq +client c0 -repeat 16 -keepalive { + txreq -hdr "Accept-Encoding: gzip" rxresp - expect resp.bodylen == 13107 } -start -client c2 -repeat 16 -keepalive { - txreq -url "/chunked" +client c1 -repeat 16 -keepalive { + txreq rxresp - expect resp.http.Content-Length == expect resp.bodylen == 13107 } -start +#client c2 -repeat 16 -keepalive { +# txreq -url "/chunked" +# rxresp +# expect resp.http.Content-Length == +# expect resp.bodylen == 13107 +#} -start + +client c0 -wait client c1 -wait -client c2 -wait +#client c2 -wait + +client c0 { + txreq -hdr "Accept-Encoding: gzip" -hdr "Last: Request" + rxresp +} -run + +logexpect l1 -wait diff --git a/vmod/vmod_debug_transport_vai.c b/vmod/vmod_debug_transport_vai.c index ca1902804..da5f8a60b 100644 --- a/vmod/vmod_debug_transport_vai.c +++ b/vmod/vmod_debug_transport_vai.c @@ -52,7 +52,8 @@ dbg_vai_error(struct req *req, struct v1l **v1lp, const char *msg) } static void dbg_vai_deliver_finish(struct req *req, struct v1l **v1lp, int err); -static void dbg_vai_sendbody(struct worker *wrk, void *arg); +static void dbg_vai_deliverobj(struct worker *wrk, void *arg); +static void dbg_vai_lease(struct worker *wrk, void *arg); static task_func_t *hack_http1_req = NULL; @@ -62,6 +63,7 @@ dbg_vai_deliver(struct req *req, int sendbody) { struct vrt_ctx ctx[1]; struct v1l *v1l; + int cap = 0; CHECK_OBJ_NOTNULL(req, REQ_MAGIC); CHECK_OBJ_ORNULL(req->boc, BOC_MAGIC); @@ -107,6 +109,7 @@ dbg_vai_deliver(struct req *req, int sendbody) dbg_vai_error(req, &v1l, "Failure to push v1d processor"); return (VTR_D_DONE); } + cap = VDPIO_Upgrade(ctx, req->vdc); } if (WS_Overflowed(req->ws)) { @@ -137,9 +140,14 @@ dbg_vai_deliver(struct req *req, int sendbody) 
hack_http1_req = req->task->func; AN(hack_http1_req); - VSLb(req->vsl, SLT_Debug, "w=%p scheduling dbg_vai_sendbody", req->wrk); - - req->task->func = dbg_vai_sendbody; + if (cap > 0) { + VSLb(req->vsl, SLT_Debug, "w=%p scheduling dbg_vai_lease cap %d", req->wrk, cap); + req->task->func = dbg_vai_lease; + } + else { + VSLb(req->vsl, SLT_Debug, "w=%p scheduling dbg_vai_deliverobj", req->wrk); + req->task->func = dbg_vai_deliverobj; + } req->task->priv = req; req->wrk = NULL; @@ -151,7 +159,7 @@ dbg_vai_deliver(struct req *req, int sendbody) } static void v_matchproto_(task_func_t) -dbg_vai_sendbody(struct worker *wrk, void *arg) +dbg_vai_deliverobj(struct worker *wrk, void *arg) { struct req *req; struct v1l *v1l; @@ -165,7 +173,7 @@ dbg_vai_sendbody(struct worker *wrk, void *arg) AN(v1l); THR_SetRequest(req); - VSLb(req->vsl, SLT_Debug, "w=%p enter dbg_vai_sendbody", wrk); + VSLb(req->vsl, SLT_Debug, "w=%p enter dbg_vai_deliverobj", wrk); AZ(req->wrk); CNT_Embark(wrk, req); req->vdc->wrk = wrk; // move to CNT_Embark? @@ -183,6 +191,133 @@ dbg_vai_sendbody(struct worker *wrk, void *arg) wrk->task->priv = req; } +/* + * copied from sml_notify + */ +struct dbg_vai_notify { + unsigned magic; +#define DBG_VAI_NOTIFY_MAGIC 0xa0154ed5 + unsigned hasmore; + pthread_mutex_t mtx; + pthread_cond_t cond; +}; + +static void +dbg_vai_notify_init(struct dbg_vai_notify *sn) +{ + + INIT_OBJ(sn, DBG_VAI_NOTIFY_MAGIC); + AZ(pthread_mutex_init(&sn->mtx, NULL)); + AZ(pthread_cond_init(&sn->cond, NULL)); +} + +static void +dbg_vai_notify_fini(struct dbg_vai_notify *sn) +{ + + CHECK_OBJ_NOTNULL(sn, DBG_VAI_NOTIFY_MAGIC); + AZ(pthread_mutex_destroy(&sn->mtx)); + AZ(pthread_cond_destroy(&sn->cond)); +} + +static void v_matchproto_(vai_notify_cb) +dbg_vai_notify(vai_hdl hdl, void *priv) +{ + struct dbg_vai_notify *sn; + + (void) hdl; + CAST_OBJ_NOTNULL(sn, priv, DBG_VAI_NOTIFY_MAGIC); + AZ(pthread_mutex_lock(&sn->mtx)); + sn->hasmore = 1; + AZ(pthread_cond_signal(&sn->cond)); + AZ(pthread_mutex_unlock(&sn->mtx)); + +} + +static void +dbg_vai_notify_wait(struct dbg_vai_notify *sn) +{ + + CHECK_OBJ_NOTNULL(sn, DBG_VAI_NOTIFY_MAGIC); + AZ(pthread_mutex_lock(&sn->mtx)); + while (sn->hasmore == 0) + AZ(pthread_cond_wait(&sn->cond, &sn->mtx)); + AN(sn->hasmore); + sn->hasmore = 0; + AZ(pthread_mutex_unlock(&sn->mtx)); +} + +static void v_matchproto_(task_func_t) +dbg_vai_lease(struct worker *wrk, void *arg) +{ + struct req *req; + struct v1l *v1l; + const char *p; + unsigned flags = 0; + int r, cap, err, chunked; + + CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); + CAST_OBJ_NOTNULL(req, arg, REQ_MAGIC); + v1l = req->transport_priv; + req->transport_priv = NULL; + AN(v1l); + + THR_SetRequest(req); + VSLb(req->vsl, SLT_Debug, "w=%p enter dbg_vai_lease", wrk); + AZ(req->wrk); + CNT_Embark(wrk, req); + req->vdc->wrk = wrk; // move to CNT_Embark?
+ + cap = req->vdc->retval; + req->vdc->retval = 0; + assert(cap > 0); + + chunked = http_GetHdr(req->resp, H_Transfer_Encoding, &p) && strcmp(p, "chunked") == 0; + if (chunked) + V1L_Chunked(v1l); + + struct dbg_vai_notify notify; + dbg_vai_notify_init(&notify); + req->vdc->vai_hdl = ObjVAIinit(wrk, req->objcore, req->ws, dbg_vai_notify, &notify); + AN(req->vdc->vai_hdl); + + VSCARAB_LOCAL(scarab, cap); + VSCARET_LOCAL(scaret, cap); + req->vdc->scaret = scaret; + + err = 0; + do { + r = vdpio_pull(req->vdc, NULL, scarab); + flags = scarab->flags; // because vdpio_return_vscarab + VSLb(req->vsl, SLT_Debug, "%d = vdpio_pull()", r); + (void)V1L_Flush(v1l); + vdpio_return_vscarab(req->vdc, scarab); + + if (r == -ENOBUFS || r == -EAGAIN) { + ObjVAIreturn(wrk, req->vdc->vai_hdl, scaret); + dbg_vai_notify_wait(&notify); + } + else if (r < 0) { + err = r; + break; + } + } while ((flags & VSCARAB_F_END) == 0); + + vdpio_return_vscarab(req->vdc, scarab); + ObjVAIreturn(wrk, req->vdc->vai_hdl, scaret); + + req->vdc->scaret = NULL; + if (!err && chunked) + V1L_EndChunk(v1l); + dbg_vai_deliver_finish(req, &v1l, err); + ObjVAIfini(wrk, &req->vdc->vai_hdl); + dbg_vai_notify_fini(&notify); + + VSLb(req->vsl, SLT_Debug, "w=%p resuming http1_req", wrk); + wrk->task->func = hack_http1_req; + wrk->task->priv = req; +} + static void dbg_vai_deliver_finish(struct req *req, struct v1l **v1lp, int err) { @@ -191,6 +326,8 @@ dbg_vai_deliver_finish(struct req *req, struct v1l **v1lp, int err) sc = V1L_Close(v1lp, &bytes); + if (req->vdc->vai_hdl != NULL) + req->acct.resp_bodybytes += VDPIO_Close(req->vdc, req->objcore, req->boc); req->acct.resp_bodybytes += VDP_Close(req->vdc, req->objcore, req->boc); if (sc == SC_NULL && err && req->sp->fd >= 0) From nils.goroll at uplex.de Fri Jul 4 17:04:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:05 +0000 (UTC) Subject: [master] 37306d264 cache_deliver_proc: Add VDPIO_Close1() Message-ID: <20250704170405.6C3FB11A9A8@lists.varnish-cache.org> commit 37306d2641230a6e49dd044565179469b7d2e2bb Author: Nils Goroll Date: Mon Mar 3 10:01:14 2025 +0100 cache_deliver_proc: Add VDPIO_Close1() Some VDPs might reach a point where they are done with their work, such that they would only pass data on. Avoid unnecessary overhead by allowing them to remove themselves from the filter chain. The added test brings with it some changes to vmod_debug_transport_vai.c, which have the nice side effect of also testing VDP_Push() after a failed VDPIO_Upgrade now.
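In miniature, the self-removal pattern looks like this. This is a sketch only, closely mirroring the vdp_hello test filter in the diff below; the function name and the one-byte constant are illustrative:

	static int v_matchproto_(vdpio_lease_f)
	oneshot_lease(struct vdp_ctx *vdc, struct vdp_entry *this,
	    struct vscarab *scarab)
	{
		int r;

		if (scarab->used == scarab->capacity)
			return (0);	// no room for our extent yet
		// emit one constant extent; it outlives delivery, hence NORET
		VSCARAB_ADD_IOV_NORET(scarab, ((struct iovec)
		    {.iov_base = TRUST_ME("x"), .iov_len = 1}));
		r = vdpio_pull(vdc, this, scarab);	// fill rest from below
		(void)VDPIO_Close1(vdc, this);	// done for good: unlink self
		// our own viov counts even if the pull reported an error
		return (r < 0 ? 1 : r + 1);
	}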
diff --git a/bin/varnishd/cache/cache_deliver_proc.c b/bin/varnishd/cache/cache_deliver_proc.c index 7b1fd0ab9..037aeda49 100644 --- a/bin/varnishd/cache/cache_deliver_proc.c +++ b/bin/varnishd/cache/cache_deliver_proc.c @@ -348,6 +348,23 @@ VDPIO_Upgrade(VRT_CTX, struct vdp_ctx *vdc) return ((vdc->retval = r)); } +uint64_t +VDPIO_Close1(struct vdp_ctx *vdc, struct vdp_entry *vdpe) +{ + uint64_t rv; + + CHECK_OBJ_NOTNULL(vdpe, VDP_ENTRY_MAGIC); + rv = vdpe->bytes_in; + VSLb(vdc->vsl, SLT_VdpAcct, "%s %ju %ju", vdpe->vdp->name, + (uintmax_t)vdpe->calls, (uintmax_t)rv); + if (vdpe->vdp->io_fini != NULL) + vdpe->vdp->io_fini(vdc, &vdpe->priv); + AZ(vdpe->priv); + VTAILQ_REMOVE(&vdc->vdp, vdpe, list); + vdc->nxt = VTAILQ_FIRST(&vdc->vdp); + return (rv); +} + uint64_t VDPIO_Close(struct vdp_ctx *vdc, struct objcore *oc, struct boc *boc) { @@ -359,17 +376,8 @@ VDPIO_Close(struct vdp_ctx *vdc, struct objcore *oc, struct boc *boc) CHECK_OBJ_ORNULL(oc, OBJCORE_MAGIC); CHECK_OBJ_ORNULL(boc, BOC_MAGIC); - while ((vdpe = VTAILQ_FIRST(&vdc->vdp)) != NULL) { - CHECK_OBJ(vdpe, VDP_ENTRY_MAGIC); - rv = vdpe->bytes_in; - VSLb(vdc->vsl, SLT_VdpAcct, "%s %ju %ju", vdpe->vdp->name, - (uintmax_t)vdpe->calls, (uintmax_t)rv); - if (vdpe->vdp->io_fini != NULL) - vdpe->vdp->io_fini(vdc, &vdpe->priv); - AZ(vdpe->priv); - VTAILQ_REMOVE(&vdc->vdp, vdpe, list); - vdc->nxt = VTAILQ_FIRST(&vdc->vdp); - } + while ((vdpe = VTAILQ_FIRST(&vdc->vdp)) != NULL) + rv = VDPIO_Close1(vdc, vdpe); if (oc != NULL) HSH_Cancel(vdc->wrk, oc, boc); diff --git a/bin/varnishd/cache/cache_filter.h b/bin/varnishd/cache/cache_filter.h index ce3cdabc2..4e1abe93e 100644 --- a/bin/varnishd/cache/cache_filter.h +++ b/bin/varnishd/cache/cache_filter.h @@ -238,6 +238,8 @@ vdpio_pull(struct vdp_ctx *vdc, struct vdp_entry *vdpe, struct vscarab *scarab) return (ObjVAIlease(vdc->wrk, vdc->vai_hdl, scarab)); } +uint64_t VDPIO_Close1(struct vdp_ctx *, struct vdp_entry *vdpe); + /* * ============================================================ * VDPIO helpers diff --git a/bin/varnishd/http1/cache_http1_line.c b/bin/varnishd/http1/cache_http1_line.c index fca1f6e21..09c9d2eaa 100644 --- a/bin/varnishd/http1/cache_http1_line.c +++ b/bin/varnishd/http1/cache_http1_line.c @@ -462,9 +462,7 @@ const struct vdp * const VDP_v1l = &(struct vdp){ .init = v1l_init, .bytes = v1l_bytes, -#ifdef LATER .io_init = v1l_io_init, -#endif .io_upgrade = v1l_io_upgrade, .io_lease = v1l_io_lease, }; diff --git a/bin/varnishtest/tests/m00061.vtc b/bin/varnishtest/tests/m00061.vtc index 1031c36e6..04f3489b4 100644 --- a/bin/varnishtest/tests/m00061.vtc +++ b/bin/varnishtest/tests/m00061.vtc @@ -43,7 +43,8 @@ client c0 -repeat 16 -keepalive { client c1 -repeat 16 -keepalive { txreq rxresp - expect resp.bodylen == 13107 + expect resp.bodylen == 13113 + expect req.body ~ "^hello " } -start #client c2 -repeat 16 -keepalive { diff --git a/vmod/vmod_debug_transport_vai.c b/vmod/vmod_debug_transport_vai.c index da5f8a60b..3ca5496b3 100644 --- a/vmod/vmod_debug_transport_vai.c +++ b/vmod/vmod_debug_transport_vai.c @@ -41,6 +41,58 @@ #include "vmod_debug.h" +#define HELLO "hello " + +static int v_matchproto_(vdpio_init_f) +vdpio_hello_init(VRT_CTX, struct vdp_ctx *vdc, void **priv, int capacity) +{ + + (void)ctx; + (void)priv; + + CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); + AN(vdc->clen); + + if (*vdc->clen < 0) + return (capacity); + + *vdc->clen += strlen(HELLO); + http_Unset(vdc->hp, H_Content_Length); + http_PrintfHeader(vdc->hp, "Content-Length: %zd", *vdc->clen); + return (capacity); 
+} + +static int v_matchproto_(vdpio_lease_f) +vdpio_hello_lease(struct vdp_ctx *vdc, struct vdp_entry *this, + struct vscarab *scarab) +{ + int r; + + VSCARAB_CHECK_NOTNULL(scarab); + if (scarab->used == scarab->capacity) + return (0); + //lint -e{446} side effects in initializer - uh? + VSCARAB_ADD_IOV_NORET(scarab, ((struct iovec) + {.iov_base = TRUST_ME(HELLO), .iov_len = strlen(HELLO)})); + r = vdpio_pull(vdc, this, scarab); + + (void) VDPIO_Close1(vdc, this); + + // return error from pull + if (r < 0) + r = 1; + else + r += 1; + + return (r); +} + +static const struct vdp vdp_hello = { + .name = "hello", + .io_init = vdpio_hello_init, + .io_lease = vdpio_hello_lease +}; + static void dbg_vai_error(struct req *req, struct v1l **v1lp, const char *msg) { @@ -87,14 +139,15 @@ dbg_vai_deliver(struct req *req, int sendbody) cache_param->http1_iovs); if (v1l == NULL) { - dbg_vai_error(req, &v1l, "Failure to init v1d (workspace_thread overflow)"); + dbg_vai_error(req, &v1l, "Failure to init v1d " + "(workspace_thread overflow)"); return (VTR_D_DONE); } // Do not roll back req->ws upon V1L_Close() V1L_NoRollback(v1l); - if (sendbody) { + while (sendbody) { if (!http_GetHdr(req->resp, H_Content_Length, NULL)) { if (req->http->protover == 11) { http_SetHeader(req->resp, @@ -105,11 +158,25 @@ dbg_vai_deliver(struct req *req, int sendbody) } INIT_OBJ(ctx, VRT_CTX_MAGIC); VCL_Req2Ctx(ctx, req); - if (VDP_Push(ctx, req->vdc, req->ws, VDP_v1l, v1l)) { - dbg_vai_error(req, &v1l, "Failure to push v1d processor"); + cap = VDPIO_Upgrade(ctx, req->vdc); + if (cap <= 0) { + if (VDP_Push(ctx, req->vdc, req->ws, VDP_v1l, v1l)) { + dbg_vai_error(req, &v1l, "Failure to push v1d"); + return (VTR_D_DONE); + } + break; + } + cap = VDPIO_Push(ctx, req->vdc, req->ws, &vdp_hello, NULL); + if (cap < 1) { + dbg_vai_error(req, &v1l, "Failure to push hello"); return (VTR_D_DONE); } - cap = VDPIO_Upgrade(ctx, req->vdc); + cap = VDPIO_Push(ctx, req->vdc, req->ws, VDP_v1l, v1l); + if (cap < 1) { + dbg_vai_error(req, &v1l, "Failure to push v1d (vdpio)"); + return (VTR_D_DONE); + } + break; } if (WS_Overflowed(req->ws)) { From nils.goroll at uplex.de Fri Jul 4 17:04:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:05 +0000 (UTC) Subject: [master] 95dc46857 cache_deliver_proc: Wrap VAI management for VDPIO Message-ID: <20250704170405.8A96D11A9B9@lists.varnish-cache.org> commit 95dc46857a2120976918c4fcc0d5d4612735860a Author: Nils Goroll Date: Tue Mar 4 12:03:34 2025 +0100 cache_deliver_proc: Wrap VAI management for VDPIO Transports should not need to talk to the VAI Object interface directly, because its state is kept in the vdp_ctx and the lease interface is already wrapped through vdpio_pull(). 
So we add wrappers which manage vdc->vai_hdl and vdc->scaret: VDPIO_Init(), VDPIO_Return() and VDPIO_Fini() diff --git a/bin/varnishd/cache/cache_deliver_proc.c b/bin/varnishd/cache/cache_deliver_proc.c index 037aeda49..36fb20cbf 100644 --- a/bin/varnishd/cache/cache_deliver_proc.c +++ b/bin/varnishd/cache/cache_deliver_proc.c @@ -384,6 +384,52 @@ VDPIO_Close(struct vdp_ctx *vdc, struct objcore *oc, struct boc *boc) return (rv); } +/* + * ============================================================ + * VDPIO helpers: VAI management + * + * Transports should not need to talk to the VAI Object interface directly, + * because its state is kept in the vdp_ctx + * + * So we wrap init, return and fini + */ + +// return true if error +int +VDPIO_Init(struct vdp_ctx *vdc, struct objcore *oc, struct ws *ws, + vai_notify_cb *notify_cb, void *notify_priv, struct vscaret *scaret) +{ + CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); + VSCARET_CHECK_NOTNULL(scaret); + AN(scaret->capacity); + AZ(scaret->used); + + AZ(vdc->vai_hdl); + vdc->vai_hdl = ObjVAIinit(vdc->wrk, oc, ws, notify_cb, notify_priv); + if (vdc->vai_hdl == NULL) + return (1); + vdc->scaret = scaret; + return (0); +} + +// return leases stashed in scaret +void +VDPIO_Return(const struct vdp_ctx *vdc) +{ + CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); + + ObjVAIreturn(vdc->wrk, vdc->vai_hdl, vdc->scaret); +} + +void +VDPIO_Fini(struct vdp_ctx *vdc) +{ + CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); + + VDPIO_Return(vdc); + ObjVAIfini(vdc->wrk, &vdc->vai_hdl); +} + /*--------------------------------------------------------------------*/ int v_matchproto_(objiterate_f) VDP_ObjIterate(void *priv, unsigned flush, const void *ptr, ssize_t len) diff --git a/bin/varnishd/cache/cache_filter.h b/bin/varnishd/cache/cache_filter.h index 4e1abe93e..a24d669fc 100644 --- a/bin/varnishd/cache/cache_filter.h +++ b/bin/varnishd/cache/cache_filter.h @@ -318,6 +318,11 @@ vdpio_consolidate_vscarab(const struct vdp_ctx *vdc, struct vscarab *scarab) scarab->used = f - scarab->s; } +// Lifecycle management in cache_deliver_proc.c +int VDPIO_Init(struct vdp_ctx *vdc, struct objcore *oc, struct ws *ws, + vai_notify_cb *notify_cb, void *notify_priv, struct vscaret *scaret); +void VDPIO_Return(const struct vdp_ctx *vdc); +void VDPIO_Fini(struct vdp_ctx *vdc); void v_deprecated_ VRT_AddVDP(VRT_CTX, const struct vdp *); void v_deprecated_ VRT_RemoveVDP(VRT_CTX, const struct vdp *); diff --git a/vmod/vmod_debug_transport_vai.c b/vmod/vmod_debug_transport_vai.c index 3ca5496b3..0dfd32a06 100644 --- a/vmod/vmod_debug_transport_vai.c +++ b/vmod/vmod_debug_transport_vai.c @@ -314,6 +314,14 @@ dbg_vai_notify_wait(struct dbg_vai_notify *sn) AZ(pthread_mutex_unlock(&sn->mtx)); } +static void +dbg_vai_lease_done(struct worker *wrk, struct req *req) +{ + VSLb(req->vsl, SLT_Debug, "w=%p resuming http1_req", wrk); + wrk->task->func = hack_http1_req; + wrk->task->priv = req; +} + static void v_matchproto_(task_func_t) dbg_vai_lease(struct worker *wrk, void *arg) { @@ -339,18 +347,22 @@ dbg_vai_lease(struct worker *wrk, void *arg) req->vdc->retval = 0; assert(cap > 0); + VSCARAB_LOCAL(scarab, cap); + VSCARET_LOCAL(scaret, cap); + chunked = http_GetHdr(req->resp, H_Transfer_Encoding, &p) && strcmp(p, "chunked") == 0; if (chunked) V1L_Chunked(v1l); struct dbg_vai_notify notify; dbg_vai_notify_init(¬ify); - req->vdc->vai_hdl = ObjVAIinit(wrk, req->objcore, req->ws, dbg_vai_notify, ¬ify); - AN(req->vdc->vai_hdl); - VSCARAB_LOCAL(scarab, cap); - VSCARET_LOCAL(scaret, cap); - req->vdc->scaret = scaret; + if 
(VDPIO_Init(req->vdc, req->objcore, req->ws, dbg_vai_notify, ¬ify, scaret)) { + dbg_vai_notify_fini(¬ify); + dbg_vai_deliver_finish(req, &v1l, 1); + dbg_vai_lease_done(wrk, req); + return; + } err = 0; do { @@ -361,7 +373,7 @@ dbg_vai_lease(struct worker *wrk, void *arg) vdpio_return_vscarab(req->vdc, scarab); if (r == -ENOBUFS || r == -EAGAIN) { - ObjVAIreturn(wrk, req->vdc->vai_hdl, scaret); + VDPIO_Return(req->vdc); dbg_vai_notify_wait(¬ify); } else if (r < 0) { @@ -370,19 +382,12 @@ dbg_vai_lease(struct worker *wrk, void *arg) } } while ((flags & VSCARAB_F_END) == 0); - vdpio_return_vscarab(req->vdc, scarab); - ObjVAIreturn(wrk, req->vdc->vai_hdl, scaret); - - req->vdc->scaret = NULL; if (!err && chunked) V1L_EndChunk(v1l); dbg_vai_deliver_finish(req, &v1l, err); - ObjVAIfini(wrk, &req->vdc->vai_hdl); + VDPIO_Fini(req->vdc); dbg_vai_notify_fini(¬ify); - - VSLb(req->vsl, SLT_Debug, "w=%p resuming http1_req", wrk); - wrk->task->func = hack_http1_req; - wrk->task->priv = req; + dbg_vai_lease_done(wrk, req); } static void From nils.goroll at uplex.de Fri Jul 4 17:04:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:04:05 +0000 (UTC) Subject: [master] 773fd2397 cache_range: Add VDPIO Support Message-ID: <20250704170405.AB23311A9C9@lists.varnish-cache.org> commit 773fd239762b0c5236e23b0dea9b934be60d8b49 Author: Nils Goroll Date: Tue Mar 11 12:11:20 2025 +0100 cache_range: Add VDPIO Support diff --git a/bin/varnishd/cache/cache_range.c b/bin/varnishd/cache/cache_range.c index 2a60fa03b..73e7cc1a4 100644 --- a/bin/varnishd/cache/cache_range.c +++ b/bin/varnishd/cache/cache_range.c @@ -233,11 +233,111 @@ vrg_range_init(VRT_CTX, struct vdp_ctx *vdc, void **priv) return (1); } +static int v_matchproto_(vdpio_init_f) +vrg_range_io_upgrade(VRT_CTX, struct vdp_ctx *vdc, void **priv, int capacity) +{ + + (void)ctx; + (void)vdc; + (void)priv; + + return (capacity); +} + +static int v_matchproto_(vdpio_lease_f) +vrg_range_io_lease(struct vdp_ctx *vdc, struct vdp_entry *this, struct vscarab *out) +{ + struct vrg_priv *vrg_priv; + struct viov *v; + ssize_t l, ll; + int r; + + CHECK_OBJ_NOTNULL(vdc, VDP_CTX_MAGIC); + CHECK_OBJ_NOTNULL(this, VDP_ENTRY_MAGIC); + VSCARAB_CHECK(out); + + CAST_OBJ_NOTNULL(vrg_priv, this->priv, VRG_PRIV_MAGIC); + + if (vrg_priv->range_off >= vrg_priv->range_high) { + out->flags |= VSCARAB_F_END; + return (0); + } + if (out->capacity == out->used) + return (0); + + // ensure we do not pull more than we can return + VSCARAB_LOCAL(in, out->capacity - out->used); + + while (vrg_priv->range_off < vrg_priv->range_low) { + r = vdpio_pull(vdc, this, in); + out->flags |= in->flags; + if (r <= 0) + return (r); + l = vrg_priv->range_low - vrg_priv->range_off; + VSCARAB_FOREACH(v, in) { + ll = vmin(l, (ssize_t)v->iov.iov_len); + v->iov.iov_base = (char *)v->iov.iov_base + ll; + v->iov.iov_len -= ll; + l -= ll; + if (l == 0) + break; + } + vrg_priv->range_off = vrg_priv->range_low - l; + + vdpio_consolidate_vscarab(vdc, in); + + if (l != 0) + AZ(in->used); + else + assert(vrg_priv->range_off == vrg_priv->range_low); + } + + assert(vrg_priv->range_off >= vrg_priv->range_low); + assert(vrg_priv->range_off <= vrg_priv->range_high); + + if (in->used == 0) { + r = vdpio_pull(vdc, this, in); + out->flags |= in->flags; + if (r <= 0) + return (r); + } + + AN(in->used); + + r = 0; + l = vrg_priv->range_high - vrg_priv->range_off; + VSCARAB_FOREACH(v, in) { + vrg_priv->range_off += (ssize_t)v->iov.iov_len; + ll = vmin(l, (ssize_t)v->iov.iov_len); + v->iov.iov_len = ll; + if 
(ll == 0) + vdpio_return_lease(vdc, v->lease); + else { + VSCARAB_ADD(out, *v); + l -= ll; + r++; + } + } + if (vrg_priv->range_off >= vrg_priv->range_high) + out->flags |= VSCARAB_F_END; + return (r); +} + +static void v_matchproto_(vdpio_fini_f) +vrg_range_io_fini(struct vdp_ctx *vdc, void **priv) +{ + AZ(vrg_range_fini(vdc, priv)); +} + const struct vdp VDP_range = { .name = "range", .init = vrg_range_init, .bytes = vrg_range_bytes, .fini = vrg_range_fini, + + .io_upgrade = vrg_range_io_upgrade, + .io_lease = vrg_range_io_lease, + .io_fini = vrg_range_io_fini, }; /*--------------------------------------------------------------------*/ From nils.goroll at uplex.de Fri Jul 4 17:31:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:31:05 +0000 (UTC) Subject: [master] 36225c0e3 doc: remove now invalid references Message-ID: <20250704173105.115F311CB71@lists.varnish-cache.org> commit 36225c0e3a4f48bab10ebb93e6a183b9203a242b Author: Nils Goroll Date: Fri Jul 4 19:23:23 2025 +0200 doc: remove now invalid references Ref 337e95a1de25884cfc135a9447048c918ff4bd63 diff --git a/doc/sphinx/whats-new/upgrading-6.2.rst b/doc/sphinx/whats-new/upgrading-6.2.rst index 84f64fb17..e01f32b25 100644 --- a/doc/sphinx/whats-new/upgrading-6.2.rst +++ b/doc/sphinx/whats-new/upgrading-6.2.rst @@ -86,10 +86,10 @@ but the following differences should be noted: The following functions are deprecated and should be replaced by the new conversion functions: -* :ref:`std.real2integer()` -* :ref:`std.real2time()` -* :ref:`std.time2integer()` -* :ref:`std.time2real()` +* ``std.real2integer()`` +* ``std.real2time()`` +* ``std.time2integer()`` +* ``std.time2real()`` They will be removed in a future version of Varnish. From nils.goroll at uplex.de Fri Jul 4 17:31:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:31:05 +0000 (UTC) Subject: [master] 9611d7f0d Flexelint silencing Message-ID: <20250704173105.2775E11CB74@lists.varnish-cache.org> commit 9611d7f0de67403f9c452ee258e1b197cac80b63 Author: Nils Goroll Date: Fri Jul 4 19:28:52 2025 +0200 Flexelint silencing I had not seen this on linux, and I suspect the reason is that on the bsd running the CI-flexelint, offsetof() is defined as some pointer difference using the & operator. #... ave -e413 */(offsetof(struct vscarab, s) + (save2910) * sizeof(((struct v #... arab), 0, VFLA_SIZE(vscarab, s, save2910)); (scarab)->magic = (0x05ca7ab0) #... 0, s, scarab->capacity, VUNIQ_NAME(save)) #... 
arab, VSCARAB_MAGIC, s, scarab->capacity) VSCARAB_INIT(scarab, scarab->capacity); Warning 545: Suspicious use of & diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h index 55553e707..8885721d6 100644 --- a/bin/varnishd/cache/cache.h +++ b/bin/varnishd/cache/cache.h @@ -815,6 +815,7 @@ struct vscarab { // VFLA_SIZE ignores the cap == 0 case, we assert in _INIT // offsetoff ref: https://gustedt.wordpress.com/2011/03/14/flexible-array-member/ //lint -emacro(413, VFLA_SIZE) +//lint -emacro(545, VFLA_SIZE) bsd offsetof() seems to be using & #define VFLA_SIZE(type, fam, cap) (offsetof(struct type, fam) + \ (cap) * sizeof(((struct type *)0)->fam[0])) #define VFLA_INIT_(type, cptr, mag, fam, cap, save) do { \ From nils.goroll at uplex.de Fri Jul 4 17:37:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:37:05 +0000 (UTC) Subject: [master] 3ba772c44 vmod_debug: Fix format specifier Message-ID: <20250704173705.445EF11D12E@lists.varnish-cache.org> commit 3ba772c4441c7a9075f02aec28787204546e9a9d Author: Nils Goroll Date: Fri Jul 4 19:36:13 2025 +0200 vmod_debug: Fix format specifier diff --git a/vmod/vmod_debug_transport_vai.c b/vmod/vmod_debug_transport_vai.c index 0dfd32a06..6578940ba 100644 --- a/vmod/vmod_debug_transport_vai.c +++ b/vmod/vmod_debug_transport_vai.c @@ -58,7 +58,7 @@ vdpio_hello_init(VRT_CTX, struct vdp_ctx *vdc, void **priv, int capacity) *vdc->clen += strlen(HELLO); http_Unset(vdc->hp, H_Content_Length); - http_PrintfHeader(vdc->hp, "Content-Length: %zd", *vdc->clen); + http_PrintfHeader(vdc->hp, "Content-Length: %jd", *vdc->clen); return (capacity); } From nils.goroll at uplex.de Fri Jul 4 17:46:04 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:46:04 +0000 (UTC) Subject: [master] 70d14f0ff cache_gzip: Address SunCC complaint Message-ID: <20250704174605.0E2C911D70A@lists.varnish-cache.org> commit 70d14f0ff7fe3c4d9f0b27439a3a9df08419c148 Author: Nils Goroll Date: Fri Jul 4 19:42:17 2025 +0200 cache_gzip: Address SunCC complaint "cache/cache_gzip.c", line 263: operands have incompatible pointer types: op "==" (E_OPERANDS_INCOMPAT_PTR_TYPES) diff --git a/bin/varnishd/cache/cache_gzip.c b/bin/varnishd/cache/cache_gzip.c index 0b2e948db..5d303b64c 100644 --- a/bin/varnishd/cache/cache_gzip.c +++ b/bin/varnishd/cache/cache_gzip.c @@ -260,7 +260,7 @@ static inline void vgz_iovec_update(struct vgz *vg, const struct iovec *in, const struct iovec *buf) { /* in: either fully consumed or the same */ - assert(vg->vz.avail_in == 0 || vg->vz.next_in == in->iov_base); + assert(vg->vz.avail_in == 0 || vg->vz.next_in == TRUST_ME(in->iov_base)); vg->vz.next_in = in->iov_base; vg->vz.avail_in = in->iov_len; vg->vz.next_out = buf->iov_base; From nils.goroll at uplex.de Fri Jul 4 17:46:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:46:05 +0000 (UTC) Subject: [master] 1c7db798e cache_gzip: Address old gcc's complaints Message-ID: <20250704174605.3108E11D70F@lists.varnish-cache.org> commit 1c7db798e71ff844650eb740dd371c47d4ba1d8d Author: Nils Goroll Date: Fri Jul 4 19:44:47 2025 +0200 cache_gzip: Address old gcc's complaints cache/cache_gzip.c:264:17: error: pointer targets in assignment differ in signedness [-Werror=pointer-sign] cache/cache_gzip.c:266:18: error: pointer targets in assignment differ in signedness [-Werror=pointer-sign] cache/cache_gzip.c:282:16: error: pointer targets in assignment differ in signedness [-Werror=pointer-sign] diff --git a/bin/varnishd/cache/cache_esi_deliver.c 
b/bin/varnishd/cache/cache_esi_deliver.c index ccb44d32d..c020fb8bd 100644 --- a/bin/varnishd/cache/cache_esi_deliver.c +++ b/bin/varnishd/cache/cache_esi_deliver.c @@ -311,6 +311,9 @@ ved_vdp_esi_fini(struct vdp_ctx *vdc, void **priv) return (0); } +static void v_matchproto_(vdpio_fini_f) + + static int v_matchproto_(vdp_bytes_f) ved_vdp_esi_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv, const void *ptr, ssize_t len) @@ -485,6 +488,13 @@ const struct vdp VDP_esi = { .init = ved_vdp_esi_init, .bytes = ved_vdp_esi_bytes, .fini = ved_vdp_esi_fini, + +#ifdef LATER + .io_init = ved_vdpio_esi_init, +#endif + .io_upgrade = ved_vdpio_esi_upgrade, + .io_lease = ved_vdpio_esi_lease, + .io_fini = ved_vdpio_esi_fini, }; /* diff --git a/bin/varnishd/cache/cache_gzip.c b/bin/varnishd/cache/cache_gzip.c index 5d303b64c..e7fab9fac 100644 --- a/bin/varnishd/cache/cache_gzip.c +++ b/bin/varnishd/cache/cache_gzip.c @@ -261,9 +261,9 @@ vgz_iovec_update(struct vgz *vg, const struct iovec *in, const struct iovec *buf { /* in: either fully consumed or the same */ assert(vg->vz.avail_in == 0 || vg->vz.next_in == TRUST_ME(in->iov_base)); - vg->vz.next_in = in->iov_base; + vg->vz.next_in = TRUST_ME(in->iov_base); vg->vz.avail_in = in->iov_len; - vg->vz.next_out = buf->iov_base; + vg->vz.next_out = TRUST_ME(buf->iov_base); vg->vz.avail_out = buf->iov_len; } @@ -279,7 +279,7 @@ vgz_gunzip_iovec(struct vgz *vg, struct iovec *in, struct iovec *buf, struct iov i = inflate(&vg->vz, 0); if (i == Z_OK || i == Z_STREAM_END) { iovec_collect(buf, out, pdiff(buf->iov_base, vg->vz.next_out)); - in->iov_base = vg->vz.next_in; + in->iov_base = TRUST_ME(vg->vz.next_in); in->iov_len = vg->vz.avail_in; } vg->last_i = i; From nils.goroll at uplex.de Fri Jul 4 17:58:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:58:05 +0000 (UTC) Subject: [master] 66ce409b7 Flexelint silencing for BSD Message-ID: <20250704175805.925ED11DF88@lists.varnish-cache.org> commit 66ce409b7633b164748728a5ee599860c36befdf Author: Nils Goroll Date: Fri Jul 4 19:54:31 2025 +0200 Flexelint silencing for BSD No idea when this popped up, but I suppose it must have come with an update of the system flexelint runs on File /usr/include/sys/queue.h, Line 425 _ "not head's first field address", \ Warning 607: Parameter 'head' of macro found within string (and others) diff --git a/bin/varnishd/flint.lnt b/bin/varnishd/flint.lnt index 6e3051b88..302c532ae 100644 --- a/bin/varnishd/flint.lnt +++ b/bin/varnishd/flint.lnt @@ -121,6 +121,8 @@ ////////////// +libh netinet/tcp.h -elib(46) ++libh sys/queue.h +-elib(607) ////////////// +libh mgt_event.h From nils.goroll at uplex.de Fri Jul 4 17:58:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 17:58:05 +0000 (UTC) Subject: [master] 53a8ab2df Flexelint silencing of deliberate conflict Message-ID: <20250704175805.A4F6D11DF8C@lists.varnish-cache.org> commit 53a8ab2dfef5b21979fd1ae8615b583553dbd254 Author: Nils Goroll Date: Fri Jul 4 19:57:40 2025 +0200 Flexelint silencing of deliberate conflict Ref 37040b4d83e865718a1ec1429c46d1f97bc9efbc diff --git a/bin/varnishd/mgt/mgt.h b/bin/varnishd/mgt/mgt.h index 146ed69a6..3f20777c7 100644 --- a/bin/varnishd/mgt/mgt.h +++ b/bin/varnishd/mgt/mgt.h @@ -251,6 +251,7 @@ extern const char *mgt_vmod_path; #error "Keep pthreads out of the manager process" #endif +//lint -e{401} deliberate conflict triggers flexelint static inline void pthread_create(void) { From nils.goroll at uplex.de Fri Jul 4 18:01:05 2025 From: nils.goroll 
at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 18:01:05 +0000 (UTC) Subject: [master] 505cc2d2b Fix "cache_gzip: Address old gcc's complaints" Message-ID: <20250704180105.BA27211E3E6@lists.varnish-cache.org> commit 505cc2d2b443a734eaae4e73e85fe40aca62a9b8 Author: Nils Goroll Date: Fri Jul 4 19:59:08 2025 +0200 Fix "cache_gzip: Address old gcc's complaints" I had accidentally committed some WIP which should have been on the stash This reverts parts of commit 1c7db798e71ff844650eb740dd371c47d4ba1d8d. diff --git a/bin/varnishd/cache/cache_esi_deliver.c b/bin/varnishd/cache/cache_esi_deliver.c index c020fb8bd..ccb44d32d 100644 --- a/bin/varnishd/cache/cache_esi_deliver.c +++ b/bin/varnishd/cache/cache_esi_deliver.c @@ -311,9 +311,6 @@ ved_vdp_esi_fini(struct vdp_ctx *vdc, void **priv) return (0); } -static void v_matchproto_(vdpio_fini_f) - - static int v_matchproto_(vdp_bytes_f) ved_vdp_esi_bytes(struct vdp_ctx *vdc, enum vdp_action act, void **priv, const void *ptr, ssize_t len) @@ -488,13 +485,6 @@ const struct vdp VDP_esi = { .init = ved_vdp_esi_init, .bytes = ved_vdp_esi_bytes, .fini = ved_vdp_esi_fini, - -#ifdef LATER - .io_init = ved_vdpio_esi_init, -#endif - .io_upgrade = ved_vdpio_esi_upgrade, - .io_lease = ved_vdpio_esi_lease, - .io_fini = ved_vdpio_esi_fini, }; /* From nils.goroll at uplex.de Fri Jul 4 18:50:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 18:50:05 +0000 (UTC) Subject: [master] 5d54f8d7a vai: More adjustments for 32 bit Message-ID: <20250704185005.3B5E511FE85@lists.varnish-cache.org> commit 5d54f8d7a56c0647b35dfe6146303af36e4b127e Author: Nils Goroll Date: Fri Jul 4 20:12:04 2025 +0200 vai: More adjustments for 32 bit Truncate the uint64_t interpreted as a pointer first, then cast to an actual pointer. So yes, 32bit still keeps us real... 
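As an illustration (not part of the commit; the helper name lease2ptr_sketch is made up here, while the one-bit shift convention and the double cast are taken from the lease2st() hunk below), a minimal sketch of why the intermediate cast matters: on an ILP32 target, converting a uint64_t lease directly to a pointer narrows the value implicitly, which stricter compilers reject, whereas truncating to uintptr_t first makes the narrowing explicit:

    #include <stdint.h>

    /* hedged sketch: mirrors lease2st() under the assumptions above */
    static inline void *
    lease2ptr_sketch(uint64_t l)
    {
        if (sizeof(void *) < 8)   /* 32-bit: leases hold the pointer shifted left by one bit */
            l >>= 1;
        /* truncate the 64-bit value first, then cast the result to a pointer */
        return ((void *)(uintptr_t)l);
    }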
diff --git a/bin/varnishd/storage/storage_simple.c b/bin/varnishd/storage/storage_simple.c index 5bd92b5c8..26b8306fb 100644 --- a/bin/varnishd/storage/storage_simple.c +++ b/bin/varnishd/storage/storage_simple.c @@ -348,7 +348,7 @@ lease2st(uint64_t l) if (sizeof(void *) < 8) //lint !e506 !e774 l >>= 1; - return ((void *)l); + return ((void *)(uintptr_t)l); } static inline void From nils.goroll at uplex.de Fri Jul 4 18:50:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 18:50:05 +0000 (UTC) Subject: [master] 7fdf9d884 vai: Address old gcc's complaints Message-ID: <20250704185005.5452511FE88@lists.varnish-cache.org> commit 7fdf9d8849dc2fa02599d61d681bd59c621d0a50 Author: Nils Goroll Date: Fri Jul 4 20:14:14 2025 +0200 vai: Address old gcc's complaints diff --git a/bin/varnishd/storage/storage_simple.c b/bin/varnishd/storage/storage_simple.c index 26b8306fb..4a47f5560 100644 --- a/bin/varnishd/storage/storage_simple.c +++ b/bin/varnishd/storage/storage_simple.c @@ -354,7 +354,7 @@ lease2st(uint64_t l) static inline void sml_ai_viov_fill(struct viov *viov, struct storage *st) { - viov->iov.iov_base = st->ptr; + viov->iov.iov_base = TRUST_ME(st->ptr); viov->iov.iov_len = st->len; viov->lease = st2lease(st); VAI_ASSERT_LEASE(viov->lease); @@ -528,7 +528,7 @@ sml_ai_lease_boc(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) AN(l); if (l > av) l = av; - viov->iov.iov_base = hdl->st->ptr + hdl->st_off; + viov->iov.iov_base = TRUST_ME(hdl->st->ptr + hdl->st_off); viov->iov.iov_len = l; if (hdl->st_off + l == hdl->st->space) { next = VTAILQ_PREV(hdl->st, storagehead, list); From nils.goroll at uplex.de Fri Jul 4 18:50:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 18:50:05 +0000 (UTC) Subject: [master] 33d85cfcb esi_parse_fuzzer: more pro-forma functions Message-ID: <20250704185005.6B0C411FE8B@lists.varnish-cache.org> commit 33d85cfcb657457d8f078d53569bf6350611ef68 Author: Nils Goroll Date: Fri Jul 4 20:25:51 2025 +0200 esi_parse_fuzzer: more pro-forma functions diff --git a/bin/varnishd/fuzzers/esi_parse_fuzzer.c b/bin/varnishd/fuzzers/esi_parse_fuzzer.c index 95b25c391..e9c11dffd 100644 --- a/bin/varnishd/fuzzers/esi_parse_fuzzer.c +++ b/bin/varnishd/fuzzers/esi_parse_fuzzer.c @@ -40,6 +40,7 @@ #include "cache/cache_vgz.h" /* enum vgz_flag */ #include "cache/cache_esi.h" #include "cache/cache_filter.h" /* struct vfp_ctx */ +#include "cache/cache_obj.h" /* cache_filter.h uses ObjVAI */ #include "vfil.h" @@ -89,6 +90,24 @@ VSLb_ts(struct vsl_log *l, const char *event, vtim_real first, vtim_real *pprev, (void)now; } +int +ObjVAIlease(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) +{ + + (void)wrk; + (void)vhdl; + (void)scarab; + return (0); +} + +void +ObjVAIreturn(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) +{ + (void)wrk; + (void)vhdl; + (void)scaret; +} + void WRK_Log(enum VSL_tag_e tag, const char *fmt, ...) 
{ From nils.goroll at uplex.de Fri Jul 4 18:50:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 18:50:05 +0000 (UTC) Subject: [master] eab881fc7 Flexelint: Adjust to vtest2 Message-ID: <20250704185005.864EE11FE90@lists.varnish-cache.org> commit eab881fc738631ae69e169d34435204931fceb4f Author: Nils Goroll Date: Fri Jul 4 20:27:55 2025 +0200 Flexelint: Adjust to vtest2 diff --git a/bin/varnishtest/flint.lnt b/bin/varnishtest/flint.lnt index 3d087918f..332dbf6a6 100644 --- a/bin/varnishtest/flint.lnt +++ b/bin/varnishtest/flint.lnt @@ -3,7 +3,7 @@ // See LICENSE file for full text of license -+libh(teken.h) ++libh(vtest2/src/teken.h) // Tell FlexeLint when these don't return -function(exit, vtc_fatal) From nils.goroll at uplex.de Fri Jul 4 18:50:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 18:50:05 +0000 (UTC) Subject: [master] bae4b19cb Flexelint: Allow goto in vtest2 Message-ID: <20250704185005.9E7A911FE94@lists.varnish-cache.org> commit bae4b19cb61ea9e0c180fba5b91260d0f8621bbf Author: Nils Goroll Date: Fri Jul 4 20:30:14 2025 +0200 Flexelint: Allow goto in vtest2 I personally think we should allow it in general for the error handling pattern, but if anything it should be ok in vtest2 diff --git a/bin/varnishtest/flint.lnt b/bin/varnishtest/flint.lnt index 332dbf6a6..dfcf259b8 100644 --- a/bin/varnishtest/flint.lnt +++ b/bin/varnishtest/flint.lnt @@ -44,6 +44,7 @@ -emacro(835, STRTOU32_CHECK) // A zero has been given as ___ argument to operator '___' -e788 // enum value not used in defaulted switch +-e801 // goto -efile(451, cmds.h) -efile(451, vmods.h) From nils.goroll at uplex.de Fri Jul 4 18:50:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 18:50:05 +0000 (UTC) Subject: [master] facd9a09e Update vtest2 / adjust to vudp.[ch] Message-ID: <20250704185005.B897111FE98@lists.varnish-cache.org> commit facd9a09e956528158bd6ea1bd7fcbe19d3da508 Author: Nils Goroll Date: Fri Jul 4 20:33:36 2025 +0200 Update vtest2 / adjust to vudp.[ch] This is now the first time that we use lib sources from vtest2 which are not in varnish-cache. 
Background: vudp has been moved into its own source files from vtc_syslog.c diff --git a/bin/varnishtest/Makefile.am b/bin/varnishtest/Makefile.am index a1d5b31fb..260044ebf 100644 --- a/bin/varnishtest/Makefile.am +++ b/bin/varnishtest/Makefile.am @@ -10,7 +10,8 @@ AM_CPPFLAGS = \ -I$(top_srcdir)/include \ -I$(top_builddir)/include \ -I$(top_builddir) \ - -I$(top_srcdir)/lib/libvgz + -I$(top_srcdir)/lib/libvgz \ + -Ivtest2/lib bin_PROGRAMS = varnishtest @@ -63,7 +64,8 @@ varnishtest_SOURCES = \ vtest2/src/vtc_syslog.c \ vtest2/src/vtc_tunnel.c \ vtest2/src/vtc_varnish.c \ - vtest2/src/vtc_vsm.c + vtest2/src/vtc_vsm.c \ + vtest2/lib/vudp.c varnishtest_LDADD = \ $(top_builddir)/lib/libvarnishapi/libvarnishapi.la \ diff --git a/bin/varnishtest/vtest2 b/bin/varnishtest/vtest2 index 63b53d697..36b037561 160000 --- a/bin/varnishtest/vtest2 +++ b/bin/varnishtest/vtest2 @@ -1 +1 @@ -Subproject commit 63b53d697f264754792845fa1974cb1be55e3344 +Subproject commit 36b03756196cde1088a18e4108e13d8b25a82988 From nils.goroll at uplex.de Fri Jul 4 19:15:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 19:15:05 +0000 (UTC) Subject: [master] ca236b6d5 Update vtest2: fix distcheck Message-ID: <20250704191505.516B41212F8@lists.varnish-cache.org> commit ca236b6d5b6d99a3d191cace96f02d9cea452eae Author: Nils Goroll Date: Fri Jul 4 21:07:08 2025 +0200 Update vtest2: fix distcheck diff --git a/bin/varnishtest/Makefile.am b/bin/varnishtest/Makefile.am index 260044ebf..f001eb215 100644 --- a/bin/varnishtest/Makefile.am +++ b/bin/varnishtest/Makefile.am @@ -11,7 +11,7 @@ AM_CPPFLAGS = \ -I$(top_builddir)/include \ -I$(top_builddir) \ -I$(top_srcdir)/lib/libvgz \ - -Ivtest2/lib + -I$(srcdir)/vtest2/lib bin_PROGRAMS = varnishtest @@ -65,6 +65,7 @@ varnishtest_SOURCES = \ vtest2/src/vtc_tunnel.c \ vtest2/src/vtc_varnish.c \ vtest2/src/vtc_vsm.c \ + vtest2/lib/vudp.h \ vtest2/lib/vudp.c varnishtest_LDADD = \ From nils.goroll at uplex.de Fri Jul 4 19:22:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Fri, 4 Jul 2025 19:22:05 +0000 (UTC) Subject: [master] a561e72f9 Update vtest2 Message-ID: <20250704192205.B431C1217F7@lists.varnish-cache.org> commit a561e72f92910dcdf4b6995c5db7bf4fd96021db Author: Nils Goroll Date: Fri Jul 4 21:21:35 2025 +0200 Update vtest2 diff --git a/bin/varnishtest/vtest2 b/bin/varnishtest/vtest2 index 36b037561..dc46782ef 160000 --- a/bin/varnishtest/vtest2 +++ b/bin/varnishtest/vtest2 @@ -1 +1 @@ -Subproject commit 36b03756196cde1088a18e4108e13d8b25a82988 +Subproject commit dc46782efa2adb08e91968efbf154c73acdee67f From nils.goroll at uplex.de Sat Jul 5 12:39:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Sat, 5 Jul 2025 12:39:05 +0000 (UTC) Subject: [master] 6b4562869 vtest.sh: update submodules when looping Message-ID: <20250705123905.7DB1E11C10A@lists.varnish-cache.org> commit 6b456286908c192b7fb18dd7c65e397b0d86d27f Author: Nils Goroll Date: Sat Jul 5 14:38:08 2025 +0200 vtest.sh: update submodules when looping diff --git a/tools/vtest.sh b/tools/vtest.sh index 5dfad996a..7526f40a1 100755 --- a/tools/vtest.sh +++ b/tools/vtest.sh @@ -233,7 +233,7 @@ do rm -rf varnish-trunk > /dev/null 2>&1 || true git reset --hard > /dev/null 2>&1 || true git clean -df > /dev/null 2>&1 || true - git pull > /dev/null 2>&1 || true + git pull --recurse-submodules=yes > /dev/null 2>&1 || true ) rev=`cd "${SRCDIR}" && git show -s --pretty=format:%H` if [ "x${rev}" != "x${orev}" ] ; then From nils.goroll at uplex.de Sun Jul 6 14:48:04 2025 From: nils.goroll at uplex.de (Nils 
Goroll) Date: Sun, 6 Jul 2025 14:48:04 +0000 (UTC) Subject: [master] bc28f0c92 vai: Hunting c00111 failure in vtest Message-ID: <20250706144804.F0DCE1009B4@lists.varnish-cache.org> commit bc28f0c92a02bfb1ad5b6f7a30d7ae353a79eaeb Author: Nils Goroll Date: Sun Jul 6 14:21:33 2025 +0200 vai: Hunting c00111 failure in vtest diff --git a/bin/varnishd/storage/storage_simple.c b/bin/varnishd/storage/storage_simple.c index 4a47f5560..23b6d2564 100644 --- a/bin/varnishd/storage/storage_simple.c +++ b/bin/varnishd/storage/storage_simple.c @@ -628,6 +628,9 @@ sml_ai_return(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) VSCARET_FOREACH(p, todo) { CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC); +#ifdef VAI_DBG + VSLb(wrk->vsl, SLT_Debug, "ret %p", st); +#endif sml_stv_free(hdl->stv, st); } } @@ -808,6 +811,11 @@ sml_iterator(struct worker *wrk, struct objcore *oc, // sufficient space ensured by capacity check above VSCARET_ADD(scaret, vio->lease); +#ifdef VAI_DBG + VSLb(wrk->vsl, SLT_Debug, "len %zu scaret %u uu %u", + vio->iov.iov_len, scaret->used, uu); +#endif + // whenever we have flushed, return leases if ((uu & OBJ_ITER_FLUSH) && scaret->used > 0) ObjVAIreturn(wrk, hdl, scaret); @@ -823,6 +831,11 @@ sml_iterator(struct worker *wrk, struct objcore *oc, // we have now completed the scarab VSCARAB_INIT(scarab, scarab->capacity); +#ifdef VAI_DBG + VSLb(wrk->vsl, SLT_Debug, "r %d nn %d uu %u", + r, nn, uu); +#endif + // flush before blocking if we did not already if (r == 0 && (nn == -ENOBUFS || nn == -EAGAIN) && (uu & OBJ_ITER_FLUSH) == 0) { diff --git a/bin/varnishtest/tests/c00111.vtc b/bin/varnishtest/tests/c00111.vtc index 996d5d258..50ebd606d 100644 --- a/bin/varnishtest/tests/c00111.vtc +++ b/bin/varnishtest/tests/c00111.vtc @@ -23,6 +23,7 @@ varnish v1 -cliok "param.set transit_buffer 4k" client c2 { txreq -method POST rxresp + expect resp.bodylen == 1850000 } -run varnish v1 -vsl_catchup From nils.goroll at uplex.de Sun Jul 6 14:48:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Sun, 6 Jul 2025 14:48:05 +0000 (UTC) Subject: [master] 84d1fc92f vai: fix lease leak Message-ID: <20250706144805.164CC1009B7@lists.varnish-cache.org> commit 84d1fc92f436e9c94d127ec45e779b36f0f7c2a7 Author: Nils Goroll Date: Sun Jul 6 15:33:56 2025 +0200 vai: fix lease leak Since the merge, the great diversity of our vtest machines has exposed a vai lease leak via c00111.vtc: In storage_simple.c, we cannot free the last struct storage in the storage list before we have potentially advanced to the next. The previous code was failing to return it. We now use a null_iov pointer with a zero-length viov to pass the lease to the iterator for the sole purpose of returning it. diff --git a/bin/varnishd/storage/storage_simple.c b/bin/varnishd/storage/storage_simple.c index 23b6d2564..6ea4821f8 100644 --- a/bin/varnishd/storage/storage_simple.c +++ b/bin/varnishd/storage/storage_simple.c @@ -496,23 +496,16 @@ sml_ai_lease_boc(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) assert(state < BOS_FINISHED); } Lck_Lock(&hdl->boc->mtx); - if (hdl->st == NULL && hdl->last != NULL) { - /* when the "last" st completed, we did not yet have a next, so - * resume from there.
Because "last" might have been returned and - * deleted, we can not just use the pointer, but rather need to - * iterate the st list. - * if we can not find "last", it also has been returned and - * deleted, and the current write head (VTAILQ_LAST) is our next - * st, which can also be null if we are done. - */ - VTAILQ_FOREACH_REVERSE(next, &hdl->obj->list, storagehead, list) { - if (next == hdl->last) { - hdl->st = VTAILQ_PREV(next, storagehead, list); - break; - } - } + if (hdl->st == NULL && hdl->last != NULL) + hdl->st = VTAILQ_PREV(hdl->last, storagehead, list); + if (hdl->last != NULL && state < BOS_FINISHED) { + viov = VSCARAB_GET(scarab); + AN(viov); + viov->iov.iov_base = null_iov; + viov->iov.iov_len = 0; + viov->lease = st2lease(hdl->last); + hdl->last = NULL; } - hdl->last = NULL; if (hdl->st == NULL) { assert(hdl->returned == 0 || hdl->avail == hdl->returned); hdl->st = VTAILQ_LAST(&hdl->obj->list, storagehead); @@ -533,15 +528,18 @@ sml_ai_lease_boc(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) if (hdl->st_off + l == hdl->st->space) { next = VTAILQ_PREV(hdl->st, storagehead, list); AZ(hdl->last); - if (next == NULL) + if (next == NULL) { hdl->last = hdl->st; - else + viov->lease = VAI_LEASE_NORET; + } + else { CHECK_OBJ(next, STORAGE_MAGIC); + viov->lease = st2lease(hdl->st); + } #ifdef VAI_DBG VSLb(wrk->vsl, SLT_Debug, "off %zu + l %zu == space st %p next st %p stvprv %p", hdl->st_off, l, hdl->st, next, hdl->boc->stevedore_priv); #endif - viov->lease = st2lease(hdl->st); hdl->st_off = 0; hdl->st = next; } @@ -609,8 +607,6 @@ sml_ai_return(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) if (*p == VAI_LEASE_NORET) continue; CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC); - if (st == hdl->last) - continue; VSCARET_ADD(todo, *p); } VSCARET_INIT(scaret, scaret->capacity); @@ -804,7 +800,13 @@ sml_iterator(struct worker *wrk, struct objcore *oc, uu = u; if ((islast && nn < 0) || scaret->used == scaret->capacity - 1) uu |= OBJ_ITER_FLUSH; - r = func(priv, uu, vio->iov.iov_base, vio->iov.iov_len); + + // null iov with the only purpose to return the resume ptr lease + // exception needed because assert(len > 0) in VDP_bytes() + if (vio->iov.iov_base == null_iov) + r = 0; + else + r = func(priv, uu, vio->iov.iov_base, vio->iov.iov_len); if (r != 0) break; From dridi.boukelmoune at gmail.com Mon Jul 7 08:08:06 2025 From: dridi.boukelmoune at gmail.com (Dridi Boukelmoune) Date: Mon, 7 Jul 2025 08:08:06 +0000 (UTC) Subject: [master] ae83488d9 obj: Extract unlock step from boc notification Message-ID: <20250707080806.46AE399AE@lists.varnish-cache.org> commit ae83488d909cab23f201b706a530d79a2fbc4fd9 Author: Dridi Boukelmoune Date: Mon Jul 7 10:03:44 2025 +0200 obj: Extract unlock step from boc notification The current usage of obj_boc_notify_Unlock() was always following a call to Lck_Lock(), and never in a context where the BOC mutex was already held. Conflating the broadcast and unlock operations prevents conditional notifications from the #4073 candidate. 
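A hedged sketch of the calling pattern this separation enables (the want_notify condition is hypothetical and not from the commit; obj_boc_notify(), Lck_Lock()/Lck_Unlock() and the boc fields appear in the diff below): with the broadcast split from the unlock, a caller can decide under the lock whether to notify at all:

    /* illustrative fragment only; boc, l and want_notify are assumed context */
    Lck_Lock(&boc->mtx);
    boc->fetched_so_far += l;
    if (want_notify)             /* hypothetical condition */
        obj_boc_notify(boc);     /* broadcast while the mutex stays held */
    Lck_Unlock(&boc->mtx);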
diff --git a/bin/varnishd/cache/cache_obj.c b/bin/varnishd/cache/cache_obj.c index dcbb991b6..7e1625582 100644 --- a/bin/varnishd/cache/cache_obj.c +++ b/bin/varnishd/cache/cache_obj.c @@ -356,10 +356,9 @@ obj_extend_condwait(const struct objcore *oc) } // notify of an extension of the boc or state change -//lint -sem(obj_boc_notify_Unlock, thread_unlock) static void -obj_boc_notify_Unlock(struct boc *boc) +obj_boc_notify(struct boc *boc) { struct vai_qe *qe, *next; @@ -375,7 +374,6 @@ obj_boc_notify_Unlock(struct boc *boc) qe->cb(qe->hdl, qe->priv); qe = next; } - Lck_Unlock(&boc->mtx); } void @@ -393,7 +391,8 @@ ObjExtend(struct worker *wrk, struct objcore *oc, ssize_t l, int final) obj_extend_condwait(oc); om->objextend(wrk, oc, l); oc->boc->fetched_so_far += l; - obj_boc_notify_Unlock(oc->boc); + obj_boc_notify(oc->boc); + Lck_Unlock(&oc->boc->mtx); } assert(oc->boc->state < BOS_FINISHED); @@ -510,7 +509,8 @@ ObjSetState(struct worker *wrk, const struct objcore *oc, Lck_Lock(&oc->boc->mtx); oc->boc->state = next; - obj_boc_notify_Unlock(oc->boc); + obj_boc_notify(oc->boc); + Lck_Unlock(&oc->boc->mtx); } /*==================================================================== From nils.goroll at uplex.de Mon Jul 7 13:17:06 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 7 Jul 2025 13:17:06 +0000 (UTC) Subject: [master] 01b73e018 vai: fix glitch from 84d1fc92f436e9c94d127ec45e779b36f0f7c2a7 Message-ID: <20250707131706.12F6D10598B@lists.varnish-cache.org> commit 01b73e0182de168514becb15dede383311810a0e Author: Nils Goroll Date: Mon Jul 7 15:06:59 2025 +0200 vai: fix glitch from 84d1fc92f436e9c94d127ec45e779b36f0f7c2a7 Even if we do not hand hdl->last to the caller to return because we are in a finished or error bos state, we are still done with it and need to clear it for re-use.
Fixes #4356 diff --git a/bin/varnishd/storage/storage_simple.c b/bin/varnishd/storage/storage_simple.c index 6ea4821f8..f4a5ec67f 100644 --- a/bin/varnishd/storage/storage_simple.c +++ b/bin/varnishd/storage/storage_simple.c @@ -506,8 +506,9 @@ sml_ai_lease_boc(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) viov->iov.iov_base = null_iov; viov->iov.iov_len = 0; viov->lease = st2lease(hdl->last); - hdl->last = NULL; } + if (hdl->last != NULL) + hdl->last = NULL; if (hdl->st == NULL) { assert(hdl->returned == 0 || hdl->avail == hdl->returned); hdl->st = VTAILQ_LAST(&hdl->obj->list, storagehead); From phk at FreeBSD.org Mon Jul 7 14:30:06 2025 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Mon, 7 Jul 2025 14:30:06 +0000 (UTC) Subject: [master] 7a59b745a Make flint.sh find vudp.h Message-ID: <20250707143006.366F6108AE5@lists.varnish-cache.org> commit 7a59b745a517c17eda77ef65442dc6bd921d7a0a Author: Poul-Henning Kamp Date: Mon Jul 7 14:28:38 2025 +0000 Make flint.sh find vudp.h diff --git a/bin/varnishtest/flint.sh b/bin/varnishtest/flint.sh index 7089a4ff1..a961c60dd 100644 --- a/bin/varnishtest/flint.sh +++ b/bin/varnishtest/flint.sh @@ -9,5 +9,6 @@ FLOPS=' -DVTEST_WITH_VTC_VARNISH -DTOP_BUILDDIR="foo" -I../../lib/libvgz - *.c vtest2/src/*.c -' ../../tools/flint_skel.sh $* + -Ivtest2/lib + vtest2/src/*.c +' ../../tools/flint_skel.sh From phk at FreeBSD.org Tue Jul 8 06:36:05 2025 From: phk at FreeBSD.org (Poul-Henning Kamp) Date: Tue, 8 Jul 2025 06:36:05 +0000 (UTC) Subject: [master] 9d5fad66d Magic gcov rule for vudp.c Message-ID: <20250708063605.66AD2102C30@lists.varnish-cache.org> commit 9d5fad66d6fda5b7aa7204fe5f987d364d9e45a3 Author: Poul-Henning Kamp Date: Tue Jul 8 06:35:09 2025 +0000 Magic gcov rule for vudp.c diff --git a/tools/gcov_digest.py b/tools/gcov_digest.py index 0e59043ab..529b103db 100644 --- a/tools/gcov_digest.py +++ b/tools/gcov_digest.py @@ -162,6 +162,10 @@ def run_gcov(prog, subdir): subdir = root.split("/")[-1] cmd = ["cd " + root + "/.. && " + "exec " + prog + " " + subdir + "/" + fn] rpath = "/../" + elif "vudp" in fn: + cmd = ["cd " + root + "/../.. && " + "exec " + prog + " vtest2/lib/" + fn] + rpath = "/../../" + #print("VT2.LIB") elif "vtest2" in root: cmd = ["cd " + root + "/../.. && " + "exec " + prog + " vtest2/src/" + fn] rpath = "/../../" From nils.goroll at uplex.de Tue Jul 8 06:54:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Tue, 8 Jul 2025 06:54:05 +0000 (UTC) Subject: [master] 7da5a4b33 vai: Avoid potential out-of-bounds access in VSCARAB_LAST Message-ID: <20250708065405.689D11036CF@lists.varnish-cache.org> commit 7da5a4b331a6f1a73f2b5031ad0ea6418e5abcba Author: Nils Goroll Date: Tue Jul 8 08:52:59 2025 +0200 vai: Avoid potential out-of-bounds access in VSCARAB_LAST Fixes #4357 diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h index 8885721d6..707447cd0 100644 --- a/bin/varnishd/cache/cache.h +++ b/bin/varnishd/cache/cache.h @@ -865,7 +865,8 @@ struct vscarab { //lint -emacro(64, VSCARAB_ADD_IOV_NORET) weird flexelint bug? #define VSCARAB_ADD_IOV_NORET(scarab, vec) \ VSCARAB_ADD(scarab, ((struct viov){.lease = VAI_LEASE_NORET, .iov = (vec)})) -#define VSCARAB_LAST(scarab) (&(scarab)->s[(scarab)->used - 1]) +#define VSCARAB_LAST(scarab) ((scarab)->used > 0 ? 
\ + &(scarab)->s[(scarab)->used - 1] : NULL) #define VSCARAB_CHECK(scarab) do { \ CHECK_OBJ(scarab, VSCARAB_MAGIC); \ From nils.goroll at uplex.de Tue Jul 8 07:14:04 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Tue, 8 Jul 2025 07:14:04 +0000 (UTC) Subject: [master] bf75bb690 flexelint: exclude teken.c from vtest2 again Message-ID: <20250708071404.E9116104317@lists.varnish-cache.org> commit bf75bb690ec24e56d5546abb0de6fa0625d8df63 Author: Nils Goroll Date: Tue Jul 8 09:12:30 2025 +0200 flexelint: exclude teken.c from vtest2 again Since 362c24c3339fb778dd29829337d756729fa365bc we did not flexelint the teken code, and accidentally brought it back with 48e21c3c5f5e73ae02912f83d96198bc140dcecf because in vtest{,2} it no longer lives in a subdirectory. diff --git a/bin/varnishtest/flint.sh b/bin/varnishtest/flint.sh index a961c60dd..f1a34f748 100644 --- a/bin/varnishtest/flint.sh +++ b/bin/varnishtest/flint.sh @@ -4,11 +4,11 @@ # SPDX-License-Identifier: BSD-2-Clause # See LICENSE file for full text of license -FLOPS=' +FLOPS=" -DVTEST_WITH_VTC_LOGEXPECT -DVTEST_WITH_VTC_VARNISH -DTOP_BUILDDIR="foo" -I../../lib/libvgz -Ivtest2/lib - vtest2/src/*.c -' ../../tools/flint_skel.sh + $(ls vtest2/src/*.c| grep -v /teken.) +" ../../tools/flint_skel.sh From nils.goroll at uplex.de Tue Jul 8 07:38:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Tue, 8 Jul 2025 07:38:05 +0000 (UTC) Subject: [master] 3f23fb434 Revert "Flexelint: Adjust to vtest2" Message-ID: <20250708073805.53AF61050B6@lists.varnish-cache.org> commit 3f23fb4342d6e6a0f381cf298214a93ae7fba473 Author: Nils Goroll Date: Tue Jul 8 09:34:11 2025 +0200 Revert "Flexelint: Adjust to vtest2" This reverts commit eab881fc738631ae69e169d34435204931fceb4f. Adding the path apparently was wrong, I now suspect that the actual cause was the one addressed by bf75bb690ec24e56d5546abb0de6fa0625d8df63 diff --git a/bin/varnishtest/flint.lnt b/bin/varnishtest/flint.lnt index dfcf259b8..d8f8159f2 100644 --- a/bin/varnishtest/flint.lnt +++ b/bin/varnishtest/flint.lnt @@ -3,7 +3,7 @@ // See LICENSE file for full text of license -+libh(vtest2/src/teken.h) ++libh(teken.h) // Tell FlexeLint when these don't return -function(exit, vtc_fatal) From nils.goroll at uplex.de Wed Jul 9 07:34:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 9 Jul 2025 07:34:05 +0000 (UTC) Subject: [master] d9d38175a vmod_vtc: Insignificant polish Message-ID: <20250709073405.1A94D110637@lists.varnish-cache.org> commit d9d38175a7c3b9e8ab3568e37d4751d37df0814f Author: Nils Goroll Date: Wed Jul 9 09:29:01 2025 +0200 vmod_vtc: Insignificant polish diff --git a/vmod/vmod_vtc.c b/vmod/vmod_vtc.c index 191282b2b..f43995813 100644 --- a/vmod/vmod_vtc.c +++ b/vmod/vmod_vtc.c @@ -477,9 +477,10 @@ vsl_line(VRT_CTX, char *str) return; str = strtok_r(NULL, "\r\n", &save); - s = TOSTRAND(str); if (str == NULL) s = vrt_null_strands; + else + s = TOSTRAND(str); vmod_vsl(ctx, id, tag, side, s); } From nils.goroll at uplex.de Wed Jul 9 13:46:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 9 Jul 2025 13:46:05 +0000 (UTC) Subject: [master] d97c4e339 vcc_acl: Turn on folding by default Message-ID: <20250709134605.9A76711C83A@lists.varnish-cache.org> commit d97c4e3392ebe57dbe11c7f0c4e1cd621270dfe6 Author: Nils Goroll Date: Wed Jul 2 10:21:16 2025 +0200 vcc_acl: Turn on folding by default diff --git a/bin/varnishtest/tests/c00005.vtc b/bin/varnishtest/tests/c00005.vtc index 8854329a4..0a6e90517 100644 --- a/bin/varnishtest/tests/c00005.vtc +++ 
b/bin/varnishtest/tests/c00005.vtc @@ -72,7 +72,7 @@ varnish v1 -vcl { backend dummy None; - acl acl1 +log -pedantic { + acl acl1 +log -fold -pedantic { # bad notation (confusing) "1.2.3.4"/24; "1.2.3.66"/26; @@ -206,7 +206,8 @@ varnish v1 -vcl { backend dummy None; - acl acl1 +log +pedantic +fold { + // +fold and +pedantic are default + acl acl1 +log { # bad notation (confusing) "1.2.3.0"/24; "1.2.3.64"/26; diff --git a/doc/changes.rst b/doc/changes.rst index ca0c5ff58..94b9de0e0 100644 --- a/doc/changes.rst +++ b/doc/changes.rst @@ -41,6 +41,11 @@ Varnish Cache NEXT (8.0, 2025-09-15) .. PLEASE keep this roughly in commit order as shown by git-log / tig (new to old) +* The ACL option ``+fold`` is now default. This means that ACL entries will + automatically be merged for adjacent networks and subnets will be removed in + the presence of supernets. This affects logging. The old default behavior can + be restored by adding the ``-fold`` option to ACLs. + * The VMOD functions ``std.real2integer()``, ``std.real2time()``, ``std.time2integer()`` and ``std.time2real()`` have been removed. They had been marked deprecated since Varnish Cache release 6.2.0 (2019-03-15). diff --git a/doc/sphinx/reference/vcl.rst b/doc/sphinx/reference/vcl.rst index 4c069eb8f..3df2d4d0d 100644 --- a/doc/sphinx/reference/vcl.rst +++ b/doc/sphinx/reference/vcl.rst @@ -344,13 +344,13 @@ individually: However, if the name resolves to both IPv4 and IPv6 you will still get an error. -* `+fold` - Fold ACL supernets and adjacent networks. +* `-fold` - Do not fold ACL supernets and adjacent networks. - With this parameter set to on, ACLs are optimized in that subnets - contained in other entries are skipped (e.g. if 1.2.3.0/24 is part - of the ACL, an entry for 1.2.3.128/25 will not be added) and - adjacent entries get folded (e.g. if both 1.2.3.0/25 and - 1.2.3.128/25 are added, they will be folded to 1.2.3.0/24). + With this parameter set to on (default), ACLs are optimized in that subnets + contained in other entries are skipped (e.g. if 1.2.3.0/24 is part of the + ACL, an entry for 1.2.3.128/25 will not be added) and adjacent entries get + folded (e.g. if both 1.2.3.0/25 and 1.2.3.128/25 are added, they will be + folded to 1.2.3.0/24). Skip and fold operations on VCL entries are output as warnings during VCL compilation as entries from the VCL are processed in diff --git a/lib/libvcc/vcc_acl.c b/lib/libvcc/vcc_acl.c index b360fa682..188e63c6d 100644 --- a/lib/libvcc/vcc_acl.c +++ b/lib/libvcc/vcc_acl.c @@ -813,6 +813,7 @@ vcc_ParseAcl(struct vcc *tl) INIT_OBJ(acl, VCC_ACL_MAGIC); tl->acl = acl; acl->flag_pedantic = 1; + acl->flag_fold = 1; vcc_NextToken(tl); VRBT_INIT(&acl->acl_tree); From nils.goroll at uplex.de Wed Jul 9 13:53:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 9 Jul 2025 13:53:05 +0000 (UTC) Subject: [master] 39ee90b5b vrt: Remove VRT_{Add,Remove}_{VDP,VFP} Message-ID: <20250709135305.4189C11CD65@lists.varnish-cache.org> commit 39ee90b5b8d0188a541ef627c694b4cc8bf3a05c Author: Nils Goroll Date: Wed Jul 9 15:48:12 2025 +0200 vrt: Remove VRT_{Add,Remove}_{VDP,VFP} Ref #3352 diff --git a/bin/varnishd/cache/cache_filter.h b/bin/varnishd/cache/cache_filter.h index a24d669fc..d9f7141f6 100644 --- a/bin/varnishd/cache/cache_filter.h +++ b/bin/varnishd/cache/cache_filter.h @@ -102,9 +102,6 @@ enum vfp_status VFP_Suck(struct vfp_ctx *, void *p, ssize_t *lp); enum vfp_status VFP_Error(struct vfp_ctx *, const char *fmt, ...) 
v_printflike_(2, 3); -void v_deprecated_ VRT_AddVFP(VRT_CTX, const struct vfp *); -void v_deprecated_ VRT_RemoveVFP(VRT_CTX, const struct vfp *); - /* Deliver processors ------------------------------------------------*/ enum vdp_action { @@ -324,9 +321,6 @@ int VDPIO_Init(struct vdp_ctx *vdc, struct objcore *oc, struct ws *ws, void VDPIO_Return(const struct vdp_ctx *vdc); void VDPIO_Fini(struct vdp_ctx *vdc); -void v_deprecated_ VRT_AddVDP(VRT_CTX, const struct vdp *); -void v_deprecated_ VRT_RemoveVDP(VRT_CTX, const struct vdp *); - /* Registry functions -------------------------------------------------*/ const char *VRT_AddFilter(VRT_CTX, const struct vfp *, const struct vdp *); void VRT_RemoveFilter(VRT_CTX, const struct vfp *, const struct vdp *); diff --git a/bin/varnishd/cache/cache_vrt_filter.c b/bin/varnishd/cache/cache_vrt_filter.c index 2ebe3183d..025a1f57d 100644 --- a/bin/varnishd/cache/cache_vrt_filter.c +++ b/bin/varnishd/cache/cache_vrt_filter.c @@ -135,18 +135,6 @@ VRT_AddFilter(VRT_CTX, const struct vfp *vfp, const struct vdp *vdp) return (vrt_addfilter(ctx, vfp, vdp)); } -void -VRT_AddVFP(VRT_CTX, const struct vfp *filter) -{ - AZ(VRT_AddFilter(ctx, filter, NULL)); -} - -void -VRT_AddVDP(VRT_CTX, const struct vdp *filter) -{ - AZ(VRT_AddFilter(ctx, NULL, filter)); -} - void VRT_RemoveFilter(VRT_CTX, const struct vfp *vfp, const struct vdp *vdp) { @@ -174,20 +162,6 @@ VRT_RemoveFilter(VRT_CTX, const struct vfp *vfp, const struct vdp *vdp) FREE_OBJ(vp); } -void -VRT_RemoveVFP(VRT_CTX, const struct vfp *filter) -{ - - VRT_RemoveFilter(ctx, filter, NULL); -} - -void -VRT_RemoveVDP(VRT_CTX, const struct vdp *filter) -{ - - VRT_RemoveFilter(ctx, NULL, filter); -} - static const struct vfilter vfilter_error[1] = {{0}}; static const struct vfilter * diff --git a/include/vrt.h b/include/vrt.h index 0c17c956f..dabc33c47 100644 --- a/include/vrt.h +++ b/include/vrt.h @@ -82,6 +82,10 @@ * VRT_r_param_timeout_idle() added * VRT_r_param_transit_buffer() added * VRT_r_param_uncacheable_ttl() added + * VRT_AddVFP() removed + * VRT_AddVDP() removed + * VRT_RemoveVFP() removed + * VRT_RemoveVDP() removed * 21.0 (2025-03-17) * VRT_u_req_grace() added * VRT_u_req_ttl() added From nils.goroll at uplex.de Wed Jul 9 13:59:04 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 9 Jul 2025 13:59:04 +0000 (UTC) Subject: [master] 3321dc20c vmod_cookie: remove cookie.format_rfc1123 Message-ID: <20250709135904.DB31E11D204@lists.varnish-cache.org> commit 3321dc20cc10664e0fdbe681105a3bf28bc3120d Author: Nils Goroll Date: Wed Jul 9 15:58:15 2025 +0200 vmod_cookie: remove cookie.format_rfc1123 diff --git a/doc/changes.rst b/doc/changes.rst index 94b9de0e0..06e8d9036 100644 --- a/doc/changes.rst +++ b/doc/changes.rst @@ -41,6 +41,9 @@ Varnish Cache NEXT (8.0, 2025-09-15) .. PLEASE keep this roughly in commit order as shown by git-log / tig (new to old) +* The VMOD function ``cookie.format_rfc1123()`` is now removed. It had been + renamed to ``cookie.format_date()``. + * The ACL option ``+fold`` is now default. This means that ACL entries will automatically be merged for adjacent networks and subnets will be removed in the presence of supernets. This affects logging. 
The old default behavior can diff --git a/vmod/tests/cookie_b00010.vtc b/vmod/tests/cookie_b00010.vtc index fc8cbdd35..f67469c4e 100644 --- a/vmod/tests/cookie_b00010.vtc +++ b/vmod/tests/cookie_b00010.vtc @@ -5,7 +5,7 @@ varnish v1 -vcl { backend be none; sub vcl_recv { return (synth(200)); } sub vcl_synth { - set resp.http.x-date = cookie.format_rfc1123(now, 1d); + set resp.http.x-date = cookie.format_date(now, 1d); } } -start diff --git a/vmod/vmod_cookie.vcc b/vmod/vmod_cookie.vcc index 69f167d02..cbc133e47 100644 --- a/vmod/vmod_cookie.vcc +++ b/vmod/vmod_cookie.vcc @@ -250,8 +250,3 @@ Example:: cookie.set("cookie1", "value1"); std.log("cookie1 value is: " + cookie.get("cookie1")); } - -DEPRECATED -========== - -$Alias format_rfc1123 format_date From walid.boudebouda at gmail.com Wed Jul 9 14:35:05 2025 From: walid.boudebouda at gmail.com (Walid Boudebouda) Date: Wed, 9 Jul 2025 14:35:05 +0000 (UTC) Subject: [master] 52b0d2594 param: Make ban_any_variant default to 0 Message-ID: <20250709143505.9632911E663@lists.varnish-cache.org> commit 52b0d2594ccab339be6fc6e82c9e9af592a09aef Author: Walid Boudebouda Date: Wed Jul 9 16:33:08 2025 +0200 param: Make ban_any_variant default to 0 diff --git a/bin/varnishtest/tests/c00133.vtc b/bin/varnishtest/tests/c00133.vtc index a52174867..9e94827b6 100644 --- a/bin/varnishtest/tests/c00133.vtc +++ b/bin/varnishtest/tests/c00133.vtc @@ -11,7 +11,7 @@ server s0 { txresp -hdr "vary: version" -body "New variant B" } -start -varnish v1 -arg "-p ban_any_variant=0" -vcl+backend {} -start +varnish v1 -vcl+backend {} -start client c1 { txreq -hdr "version: a" diff --git a/bin/varnishtest/tests/c00134.vtc b/bin/varnishtest/tests/c00134.vtc index 67de581bc..f88c4ba88 100644 --- a/bin/varnishtest/tests/c00134.vtc +++ b/bin/varnishtest/tests/c00134.vtc @@ -28,7 +28,7 @@ server s1 { } -start -varnish v1 -arg "-p ban_any_variant=0" -vcl+backend { +varnish v1 -vcl+backend { sub vcl_backend_response { set beresp.http.url = bereq.url; } diff --git a/include/tbl/params.h b/include/tbl/params.h index c613d86c8..df7415536 100644 --- a/include/tbl/params.h +++ b/include/tbl/params.h @@ -762,7 +762,7 @@ PARAM_SIMPLE( /* type */ uint, /* min */ "0", /* max */ NULL, - /* def */ "10000", + /* def */ "0", /* units */ "checks", /* descr */ "Maximum number of possibly non matching variants that we evaluate " From nils.goroll at uplex.de Wed Jul 9 14:39:38 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 9 Jul 2025 16:39:38 +0200 Subject: [master] 52b0d2594 param: Make ban_any_variant default to 0 In-Reply-To: <20250709143505.9632911E663@lists.varnish-cache.org> References: <20250709143505.9632911E663@lists.varnish-cache.org> Message-ID: thx. Can you please write the changelog entry also with the big red flag?
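For operators who want the previous behavior back, a hedged example (the parameter name and the old default of 10000 are taken from the params.h hunk above; how best to deploy the setting is site-specific): either start varnishd with -p ban_any_variant=10000, or set it at runtime via the CLI:

    varnishadm param.set ban_any_variant 10000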
From walid.boudebouda at varnish-software.com Wed Jul 9 14:43:18 2025 From: walid.boudebouda at varnish-software.com (Walid Boudebouda) Date: Wed, 9 Jul 2025 16:43:18 +0200 Subject: Re: [master] d9d38175a vmod_vtc: Insignificant polish In-Reply-To: <20250709073405.1A94D110637@lists.varnish-cache.org> References: <20250709073405.1A94D110637@lists.varnish-cache.org> Message-ID: This has triggered a new CI failure: vmod_vtc.c:485:9: error: dangling pointer 's' to an unnamed temporary may be used [-Werror=dangling-pointer=] 485 | vmod_vsl(ctx, id, tag, side, s); On Wed, Jul 9, 2025 at 9:34 AM Nils Goroll wrote: > > commit d9d38175a7c3b9e8ab3568e37d4751d37df0814f > Author: Nils Goroll > Date: Wed Jul 9 09:29:01 2025 +0200 > > vmod_vtc: Insignificant polish > > diff --git a/vmod/vmod_vtc.c b/vmod/vmod_vtc.c > index 191282b2b..f43995813 100644 > --- a/vmod/vmod_vtc.c > +++ b/vmod/vmod_vtc.c > @@ -477,9 +477,10 @@ vsl_line(VRT_CTX, char *str) > return; > > str = strtok_r(NULL, "\r\n", &save); > - s = TOSTRAND(str); > if (str == NULL) > s = vrt_null_strands; > + else > + s = TOSTRAND(str); > > vmod_vsl(ctx, id, tag, side, s); > } From walid.boudebouda at gmail.com Wed Jul 9 15:47:06 2025 From: walid.boudebouda at gmail.com (Walid Boudebouda) Date: Wed, 9 Jul 2025 15:47:06 +0000 (UTC) Subject: [master] 2c820c6c0 Changes: Mention ban_any_variant new default value Message-ID: <20250709154706.1E0101212E5@lists.varnish-cache.org> commit 2c820c6c0a2679f657adb2b9c6dcc531f113be81 Author: Walid Boudebouda Date: Wed Jul 9 17:45:01 2025 +0200 Changes: Mention ban_any_variant new default value Refs #3352 diff --git a/doc/changes.rst b/doc/changes.rst index 06e8d9036..a882f69ad 100644 --- a/doc/changes.rst +++ b/doc/changes.rst @@ -41,6 +41,15 @@ Varnish Cache NEXT (8.0, 2025-09-15) .. PLEASE keep this roughly in commit order as shown by git-log / tig (new to old) +* The default value for ``ban_any_variant`` is now ``0``. This means that + during a lookup, only the matching variants of an object will be evaluated + against the ban list. + + As a side effect, variants that are rarely requested may never get a chance + to be tested against ``req`` based bans, which can lead to an accumulation + of bans over time. In such cases, it is recommended to set + ``ban_any_variant`` to a higher value. + * The VMOD function ``cookie.format_rfc1123()`` is now removed. It had been renamed to ``cookie.format_date()``. From walid.boudebouda at varnish-software.com Wed Jul 9 15:48:56 2025 From: walid.boudebouda at varnish-software.com (Walid Boudebouda) Date: Wed, 9 Jul 2025 17:48:56 +0200 Subject: Re: [master] 52b0d2594 param: Make ban_any_variant default to 0 In-Reply-To: References: <20250709143505.9632911E663@lists.varnish-cache.org> Message-ID: https://github.com/varnishcache/varnish-cache/commit/2c820c6c0a2679f657adb2b9c6dcc531f113be81 Feel free to adjust if I missed anything :-)
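A hedged illustration of the kind of ban the new changelog entry is about (the URL pattern is made up):

    varnishadm ban req.url '~' '^/old/'

A ban that tests req properties can only be evaluated during a lookup, never by the background ban lurker, so a variant that no client ever requests again will never be checked against it; that is the accumulation the entry warns about.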
From nils.goroll at uplex.de Wed Jul 9 16:58:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 9 Jul 2025 16:58:05 +0000 (UTC) Subject: [master] 233f5ab00 Revert "vmod_vtc: Insignificant polish" Message-ID: <20250709165805.7C3E34B2F@lists.varnish-cache.org> commit 233f5ab0094bdeae58a1bd941494aad598811d42 Author: Nils Goroll Date: Wed Jul 9 18:55:29 2025 +0200 Revert "vmod_vtc: Insignificant polish" d-oh, I undid a fix which I applied and explained myself before in 1157dfdc61211b95375a3ed10f076f2d7875439c This reverts commit d9d38175a7c3b9e8ab3568e37d4751d37df0814f. diff --git a/vmod/vmod_vtc.c b/vmod/vmod_vtc.c index f43995813..ace8728c3 100644 --- a/vmod/vmod_vtc.c +++ b/vmod/vmod_vtc.c @@ -477,10 +477,10 @@ vsl_line(VRT_CTX, char *str) return; str = strtok_r(NULL, "\r\n", &save); + // needs to be assigned here because of the compound literal lifetime + s = TOSTRAND(str); if (str == NULL) s = vrt_null_strands; - else - s = TOSTRAND(str); vmod_vsl(ctx, id, tag, side, s); } From nils.goroll at uplex.de Wed Jul 9 19:42:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 9 Jul 2025 19:42:05 +0000 (UTC) Subject: [master] d48043e72 vai: add missing parenthesis in VSCARAB_CHECK Message-ID: <20250709194205.B8E286EE53@lists.varnish-cache.org> commit d48043e72e10be54d8c9cd9ab4771c8ba5367dba Author: Nils Goroll Date: Wed Jul 9 21:41:40 2025 +0200 vai: add missing parenthesis in VSCARAB_CHECK diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h index 707447cd0..439390065 100644 --- a/bin/varnishd/cache/cache.h +++ b/bin/varnishd/cache/cache.h @@ -870,7 +870,7 @@ struct vscarab { #define VSCARAB_CHECK(scarab) do { \ CHECK_OBJ(scarab, VSCARAB_MAGIC); \ - assert(scarab->used <= scarab->capacity); \ + assert((scarab)->used <= (scarab)->capacity); \ } while(0) #define VSCARAB_CHECK_NOTNULL(scarab) do { \ From nils.goroll at uplex.de Wed Jul 9 20:42:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Wed, 9 Jul 2025 20:42:05 +0000 (UTC) Subject: [master] c97c0d663 vai: Improve Flexarray foreachs (VFLA_FOREACH): NULL var after loop Message-ID: <20250709204205.48EDA1020E1@lists.varnish-cache.org> commit c97c0d66371fa35c99b699e6840e428ccf5eb6b6 Author: Nils Goroll Date: Wed Jul 9 22:19:03 2025 +0200 vai: Improve Flexarray foreachs (VFLA_FOREACH): NULL var after loop When the loop completes, the loop variable is now set to NULL. This makes it easy to differentiate between a completion and a break, avoids an out-of-bounds value after the loop and matches the vqueue macros. diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h index 439390065..c94615395 100644 --- a/bin/varnishd/cache/cache.h +++ b/bin/varnishd/cache/cache.h @@ -843,10 +843,15 @@ struct vscarab { VFLA_INIT(type, name, mag, fam, cap); \ } while(0) #define VFLA_FOREACH(var, cptr, fam) \ - for (var = &(cptr)->fam[0]; var < &(cptr)->fam[(cptr)->used]; var++) + for (var = &(cptr)->fam[0]; \ + (var = (var < &(cptr)->fam[(cptr)->used] ? var : NULL)) != NULL; \ + var++) // continue iterating after a break of a _FOREACH #define VFLA_FOREACH_RESUME(var, cptr, fam) \ - for (; var != NULL && var < &(cptr)->fam[(cptr)->used]; var++) + for (; \ + var != NULL && \ + (var = (var < &(cptr)->fam[(cptr)->used] ? var : NULL)) != NULL; \ + var++) #define VFLA_GET(cptr, fam) ((cptr)->used < (cptr)->capacity ?
&(cptr)->fam[(cptr)->used++] : NULL) // asserts sufficient capacity #define VFLA_ADD(cptr, fam, val) do { \ diff --git a/bin/varnishd/cache/cache_filter.h b/bin/varnishd/cache/cache_filter.h index d9f7141f6..60c182330 100644 --- a/bin/varnishd/cache/cache_filter.h +++ b/bin/varnishd/cache/cache_filter.h @@ -285,6 +285,7 @@ void vdpio_return_vscarab(const struct vdp_ctx *vdc, struct vscarab *scarab) VSCARAB_CHECK_NOTNULL(scarab); VSCARAB_FOREACH(v, scarab) vdpio_return_lease(vdc, v->lease); + AZ(v); VSCARAB_INIT(scarab, scarab->capacity); } From nils.goroll at uplex.de Thu Jul 10 06:48:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Thu, 10 Jul 2025 06:48:05 +0000 (UTC) Subject: [master] 8cbf914a1 vai: generalize pointer / lease conversion Message-ID: <20250710064805.6F188117FA2@lists.varnish-cache.org> commit 8cbf914a106e19bbec12f0fcd5c78348b2f52828 Author: Nils Goroll Date: Thu Jul 10 08:46:09 2025 +0200 vai: generalize pointer / lease conversion Likely every storage engine needs it diff --git a/bin/varnishd/storage/storage.h b/bin/varnishd/storage/storage.h index c4ff0af5d..b030f2c54 100644 --- a/bin/varnishd/storage/storage.h +++ b/bin/varnishd/storage/storage.h @@ -75,6 +75,29 @@ typedef void sml_free_f(struct storage *); typedef ct stv_var_##nm(const struct stevedore *); #include "tbl/vrt_stv_var.h" +/* VAI helpers -------------------------------------------------------*/ + +static inline uint64_t +ptr2lease(const void *ptr) +{ + uint64_t r = (uintptr_t)ptr; + + if (sizeof(void *) < 8) //lint !e506 !e774 + r <<= 1; + + return (r); +} + +static inline void * +lease2ptr(uint64_t l) +{ + + if (sizeof(void *) < 8) //lint !e506 !e774 + l >>= 1; + + return ((void *)(uintptr_t)l); +} + /*--------------------------------------------------------------------*/ struct stevedore { diff --git a/bin/varnishd/storage/storage_simple.c b/bin/varnishd/storage/storage_simple.c index f4a5ec67f..4ddab3590 100644 --- a/bin/varnishd/storage/storage_simple.c +++ b/bin/varnishd/storage/storage_simple.c @@ -332,33 +332,12 @@ struct sml_hdl { struct storage *last; // to resume, held back by _return() }; -static inline uint64_t -st2lease(const struct storage *st) -{ - uint64_t r = (uintptr_t)st; - - if (sizeof(void *) < 8) //lint !e506 !e774 - r <<= 1; - - return (r); -} - -static inline struct storage * -lease2st(uint64_t l) -{ - - if (sizeof(void *) < 8) //lint !e506 !e774 - l >>= 1; - - return ((void *)(uintptr_t)l); -} - static inline void sml_ai_viov_fill(struct viov *viov, struct storage *st) { viov->iov.iov_base = TRUST_ME(st->ptr); viov->iov.iov_len = st->len; - viov->lease = st2lease(st); + viov->lease = ptr2lease(st); VAI_ASSERT_LEASE(viov->lease); } @@ -505,7 +484,7 @@ sml_ai_lease_boc(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) AN(viov); viov->iov.iov_base = null_iov; viov->iov.iov_len = 0; - viov->lease = st2lease(hdl->last); + viov->lease = ptr2lease(hdl->last); } if (hdl->last != NULL) hdl->last = NULL; @@ -535,7 +514,7 @@ sml_ai_lease_boc(struct worker *wrk, vai_hdl vhdl, struct vscarab *scarab) } else { CHECK_OBJ(next, STORAGE_MAGIC); - viov->lease = st2lease(hdl->st); + viov->lease = ptr2lease(hdl->st); } #ifdef VAI_DBG VSLb(wrk->vsl, SLT_Debug, "off %zu + l %zu == space st %p next st %p stvprv %p", @@ -577,7 +556,7 @@ sml_ai_return_buffers(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) VSCARET_FOREACH(p, scaret) { if (*p == VAI_LEASE_NORET) continue; - CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC); + CAST_OBJ_NOTNULL(st, lease2ptr(*p), STORAGE_MAGIC); if 
((st->flags & STORAGE_F_BUFFER) == 0) continue; sml_stv_free(hdl->stv, st); @@ -607,14 +586,14 @@ sml_ai_return(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) VSCARET_FOREACH(p, scaret) { if (*p == VAI_LEASE_NORET) continue; - CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC); + CAST_OBJ_NOTNULL(st, lease2ptr(*p), STORAGE_MAGIC); VSCARET_ADD(todo, *p); } VSCARET_INIT(scaret, scaret->capacity); Lck_Lock(&hdl->boc->mtx); VSCARET_FOREACH(p, todo) { - CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC); + CAST_OBJ_NOTNULL(st, lease2ptr(*p), STORAGE_MAGIC); if ((st->flags & STORAGE_F_BUFFER) != 0) continue; VTAILQ_REMOVE(&hdl->obj->list, st, list); @@ -624,7 +603,7 @@ sml_ai_return(struct worker *wrk, vai_hdl vhdl, struct vscaret *scaret) Lck_Unlock(&hdl->boc->mtx); VSCARET_FOREACH(p, todo) { - CAST_OBJ_NOTNULL(st, lease2st(*p), STORAGE_MAGIC); + CAST_OBJ_NOTNULL(st, lease2ptr(*p), STORAGE_MAGIC); #ifdef VAI_DBG VSLb(wrk->vsl, SLT_Debug, "ret %p", st); #endif From nils.goroll at uplex.de Sun Jul 13 14:43:06 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Sun, 13 Jul 2025 14:43:06 +0000 (UTC) Subject: [master] 56b1415d4 cache.h: defrag struct vsl_log Message-ID: <20250713144306.85B4F120323@lists.varnish-cache.org> commit 56b1415d4029a114d6d88824959acc17cf6d0c2d Author: Nils Goroll Date: Sun Jul 13 16:40:26 2025 +0200 cache.h: defrag struct vsl_log There was a hole in it since we made vxid_t 64bit (gdb) ptype /o struct vsl_log /* offset | size */ type = struct vsl_log { /* 0 | 8 */ uint32_t *wlb; /* 8 | 8 */ uint32_t *wlp; /* 16 | 8 */ uint32_t *wle; /* 24 | 4 */ unsigned int wlr; /* XXX 4-byte hole */ /* 32 | 8 */ vxid_t wid; /* total size (bytes): 40 */ } diff --git a/bin/varnishd/cache/cache.h b/bin/varnishd/cache/cache.h index c94615395..fd6cdfab8 100644 --- a/bin/varnishd/cache/cache.h +++ b/bin/varnishd/cache/cache.h @@ -193,8 +193,8 @@ struct acct_bereq { struct vsl_log { uint32_t *wlb, *wlp, *wle; - unsigned wlr; vxid_t wid; + unsigned wlr; }; /*--------------------------------------------------------------------*/ From nils.goroll at uplex.de Mon Jul 14 15:54:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 14 Jul 2025 15:54:05 +0000 (UTC) Subject: [master] 486a32583 Update vtest2 Message-ID: <20250714155405.B280D102211@lists.varnish-cache.org> commit 486a325836e41a3468007ac62246c3e8d822fdb7 Author: Nils Goroll Date: Mon Jul 14 17:37:29 2025 +0200 Update vtest2 diff --git a/bin/varnishtest/vtest2 b/bin/varnishtest/vtest2 index dc46782ef..11ee91098 160000 --- a/bin/varnishtest/vtest2 +++ b/bin/varnishtest/vtest2 @@ -1 +1 @@ -Subproject commit dc46782efa2adb08e91968efbf154c73acdee67f +Subproject commit 11ee91098c7a1f0a1f8afab9a7356609bfd3cb2d From nils.goroll at uplex.de Mon Jul 14 15:54:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 14 Jul 2025 15:54:05 +0000 (UTC) Subject: [master] 57a4456b6 vtest.sh: Did I still not do enough about the submodule? Message-ID: <20250714155405.C9A26102214@lists.varnish-cache.org> commit 57a4456b6b4ebb7d0ed1a57bd9db6b7c1d259c49 Author: Nils Goroll Date: Mon Jul 14 17:53:28 2025 +0200 vtest.sh: Did I still not do enough about the submodule? 
diff --git a/tools/vtest.sh b/tools/vtest.sh index 7526f40a1..bb89f7f9b 100755 --- a/tools/vtest.sh +++ b/tools/vtest.sh @@ -234,6 +234,7 @@ do git reset --hard > /dev/null 2>&1 || true git clean -df > /dev/null 2>&1 || true git pull --recurse-submodules=yes > /dev/null 2>&1 || true + git submodule update --init 2>&1 || true ) rev=`cd "${SRCDIR}" && git show -s --pretty=format:%H` if [ "x${rev}" != "x${orev}" ] ; then From nils.goroll at uplex.de Mon Jul 14 16:25:04 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 14 Jul 2025 16:25:04 +0000 (UTC) Subject: [master] 4117949ba coverage: Try to make the tested code actually show up in gcov Message-ID: <20250714162505.03011103586@lists.varnish-cache.org> commit 4117949ba257437c2e1cedb5e7a2dbbc2daf88f9 Author: Nils Goroll Date: Mon Jul 14 18:23:57 2025 +0200 coverage: Try to make the tested code actually show up in gcov I suspect that gcov might not register properly when the process terminates with a panic? Ref #4366 diff --git a/bin/varnishtest/tests/m00059.vtc b/bin/varnishtest/tests/m00059.vtc index 19eb8ce35..07a28b86e 100644 --- a/bin/varnishtest/tests/m00059.vtc +++ b/bin/varnishtest/tests/m00059.vtc @@ -1,6 +1,6 @@ varnishtest "VMOD debug.chk*" -server s1 { +server s1 -repeat 2 { rxreq expect req.url == "/ok" txresp \ @@ -73,7 +73,13 @@ varnish v1 -vsl_catchup logexpect l1 -wait logexpect l2 -wait -client c1 { +varnish v1 -stop + +varnish v1 -start + +client c2 { + txreq -url "/ok" + rxresp txreq -url "/wrong" -hdr "panic: yes" rxresp } -run From nils.goroll at uplex.de Mon Jul 14 17:30:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 14 Jul 2025 17:30:05 +0000 (UTC) Subject: [master] 2139bb6ac debug.chk* filters: Improve coverage Message-ID: <20250714173005.899EB10599C@lists.varnish-cache.org> commit 2139bb6ac3bf79d4f3c1ad4505fbf2708db66579 Author: Nils Goroll Date: Mon Jul 14 19:25:11 2025 +0200 debug.chk* filters: Improve coverage diff --git a/bin/varnishtest/tests/m00059.vtc b/bin/varnishtest/tests/m00059.vtc index 07a28b86e..a08b5ec45 100644 --- a/bin/varnishtest/tests/m00059.vtc +++ b/bin/varnishtest/tests/m00059.vtc @@ -19,7 +19,7 @@ server s1 -repeat 2 { -hdr "sha256: 9cbca99698fee7cefd93bc6db1c53226fdecae730197fd793a54e170a30af045" \ -hdr "crc32: 3177021206" \ -hdr "len: 0" \ - -body "" + -body " " } -start varnish v1 \ @@ -56,8 +56,15 @@ logexpect l2 -v v1 -g vxid -q "vxid == 1003" { fail add * End expect * 1003 Begin expect * = Debug "^sha256 checksum mismatch" - expect 0 = Debug "^got: 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + expect 0 = Debug "^got: 0x36a9e7f1c95b82ffb99743e0c5c4ce95d83c9a430aac59f84ef3cbfab6145068" expect 0 = Debug "^exp: 0x9cbca99698fee7cefd93bc6db1c53226fdecae730197fd793a54e170a30af045" + expect 0 = Debug "^crc32 checksum mismatch" + expect 0 = Debug "^got: e96ccf45" + expect 0 = Debug "^exp: bd5d7f16" + expect 0 = Debug "^length mismatch" + expect 0 = Debug "^got: 1" + expect 0 = Debug "^exp: 0" + fail clear } -start From nils.goroll at uplex.de Mon Jul 14 17:30:05 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 14 Jul 2025 17:30:05 +0000 (UTC) Subject: [master] b10d610bb debug.slow filter: Coverage Message-ID: <20250714173005.A438710599F@lists.varnish-cache.org> commit b10d610bb331a543b43a4b8cfc3242d5ef1f68b8 Author: Nils Goroll Date: Mon Jul 14 19:28:58 2025 +0200 debug.slow filter: Coverage diff --git a/bin/varnishtest/tests/m00048.vtc b/bin/varnishtest/tests/m00048.vtc index bcd94925a..a643c83d6 100644 --- 
a/bin/varnishtest/tests/m00048.vtc +++ b/bin/varnishtest/tests/m00048.vtc @@ -30,7 +30,7 @@ varnish v1 -vcl+backend { import debug; sub vcl_backend_response { - set beresp.filters = "rot13 rot13a"; + set beresp.filters = "rot13 debug.slow rot13a"; } } From nils.goroll at uplex.de Mon Jul 14 17:46:04 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 14 Jul 2025 17:46:04 +0000 (UTC) Subject: [master] 4ba9d42ce vmod_debug_transport_vai: Coverage Message-ID: <20250714174604.A4EFC1065C1@lists.varnish-cache.org> commit 4ba9d42cef28da0bfffe3c17a9595936db7ebebd Author: Nils Goroll Date: Mon Jul 14 19:37:48 2025 +0200 vmod_debug_transport_vai: Coverage Test chunked Reduce number of repeats because of vtc_log limit diff --git a/bin/varnishtest/tests/m00061.vtc b/bin/varnishtest/tests/m00061.vtc index 04f3489b4..3ef261539 100644 --- a/bin/varnishtest/tests/m00061.vtc +++ b/bin/varnishtest/tests/m00061.vtc @@ -35,28 +35,28 @@ varnish v1 -cliok "param.set debug +syncvsl" varnish v1 -cliok "param.set debug +req_state" varnish v1 -cliok "param.set debug +processors" -client c0 -repeat 16 -keepalive { +client c0 -repeat 8 -keepalive { txreq -hdr "Accept-Encoding: gzip" rxresp } -start -client c1 -repeat 16 -keepalive { +client c1 -repeat 8 -keepalive { txreq rxresp expect resp.bodylen == 13113 expect req.body ~ "^hello " } -start -#client c2 -repeat 16 -keepalive { -# txreq -url "/chunked" -# rxresp -# expect resp.http.Content-Length == -# expect resp.bodylen == 13107 -#} -start +client c2 -repeat 8 -keepalive { + txreq -url "/chunked" + rxresp + expect resp.http.Content-Length == + expect resp.bodylen == 13113 +} -start client c0 -wait client c1 -wait -#client c2 -wait +client c2 -wait client c0 { txreq -hdr "Accept-Encoding: gzip" -hdr "Last: Request" From nils.goroll at uplex.de Mon Jul 14 17:46:04 2025 From: nils.goroll at uplex.de (Nils Goroll) Date: Mon, 14 Jul 2025 17:46:04 +0000 (UTC) Subject: [master] 886c59260 vmod_debug_transport_vai: Coverage, exercise fallback to VDP Message-ID: <20250714174604.BD2041065C4@lists.varnish-cache.org> commit 886c592608848a29d6e25a8a94060e4f76409ed7 Author: Nils Goroll Date: Mon Jul 14 19:44:28 2025 +0200 vmod_debug_transport_vai: Coverage, exercise fallback to VDP diff --git a/bin/varnishtest/tests/m00061.vtc b/bin/varnishtest/tests/m00061.vtc index 3ef261539..ec8cf4a37 100644 --- a/bin/varnishtest/tests/m00061.vtc +++ b/bin/varnishtest/tests/m00061.vtc @@ -16,6 +16,9 @@ varnish v1 \ } sub vcl_deliver { + if (req.url == "/rot13") { + set resp.filters += " rot13"; + } if (req.url == "/chunked") { set resp.filters += " debug.chunked"; } @@ -64,3 +67,11 @@ client c0 { } -run logexpect l1 -wait + +# Fallback to DeliverObj because of VDP +client c3 -repeat 8 -keepalive { + txreq -url "/rot13" + rxresp + expect resp.http.Content-Length == 13107 + expect resp.bodylen == 13107 +} -run
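A closing note on the VFLA_FOREACH change in c97c0d663 further up: nulling the loop variable on normal completion is what makes assertions like the AZ(v) added to vdpio_return_vscarab() valid, and it lets callers tell a break from a completed loop without a separate flag. A compact sketch of the idiom with generic names, not the Varnish macros::

    #include <stddef.h>

    /* iterate over the used prefix of an array; var becomes NULL when
     * the loop completes, and stays on the element after a break */
    #define FLA_FOREACH(var, arr, used)					\
	for (var = &(arr)[0];						\
	    (var = (var < &(arr)[(used)] ? var : NULL)) != NULL;	\
	    var++)

    static int *
    find(int *a, size_t used, int needle)
    {
	int *v;

	FLA_FOREACH(v, a, used)
	    if (*v == needle)
		break;		/* v points at the match */

	/* v == NULL here means the loop ran to completion */
	return (v);
    }

One caveat of the pattern: the loop condition writes to var on every iteration, so var must be an lvalue dedicated to the loop, just as with the vqueue foreach macros this change mirrors.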