[PATCH 08/10] Defer syncing the signs inside the segments, and do it without holding the mutex in smp_thread().

Martin Blix Grydeland martin at varnish-software.com
Wed Oct 10 16:27:21 CEST 2012


This avoids doing long I/O operations while holding the mutex: smp_close_seg() now only marks the segment with SMP_SEG_SYNCSIGNS, and smp_save_segs(), called from smp_thread(), drops the mutex around the actual sign syncs.
---
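For reviewers skimming the diff, the shape of the change is roughly the
following. This is a minimal pthreads sketch with made-up names such as
need_sync and sync_signs, not the actual Varnish code: the closing side
only sets a flag while the mutex is held, and the background thread later
drops the mutex around the slow sign writes.

	#include <pthread.h>
	#include <stdbool.h>

	struct seg {
		bool need_sync;		/* plays the role of SMP_SEG_SYNCSIGNS */
	};

	pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;

	/* Stand-in for the smp_sync_sign() calls: slow, msync()-like I/O. */
	void
	sync_signs(struct seg *sg)
	{
		(void)sg;
	}

	/* Closing side: while the mutex is held, only record that the
	 * signs still need to be written out. */
	void
	close_seg(struct seg *sg)
	{
		pthread_mutex_lock(&mtx);
		sg->need_sync = true;
		pthread_mutex_unlock(&mtx);
	}

	/* Background thread: clear the flag while locked, then drop the
	 * mutex around the actual I/O so other threads are not blocked. */
	void
	save_segs(struct seg *sg)
	{
		pthread_mutex_lock(&mtx);
		if (sg->need_sync) {
			sg->need_sync = false;
			pthread_mutex_unlock(&mtx);
			sync_signs(sg);
			pthread_mutex_lock(&mtx);
		}
		pthread_mutex_unlock(&mtx);
	}

Clearing the flag before dropping the mutex means a segment is only picked
up once, even if the loop runs again while the I/O is still in progress.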
 bin/varnishd/storage/storage_persistent.c      |   26 ++++++++++++--
 bin/varnishd/storage/storage_persistent.h      |    6 +++-
 bin/varnishd/storage/storage_persistent_silo.c |   44 +++++++++++-------------
 3 files changed, 50 insertions(+), 26 deletions(-)

diff --git a/bin/varnishd/storage/storage_persistent.c b/bin/varnishd/storage/storage_persistent.c
index 021c5e2..00bd940 100644
--- a/bin/varnishd/storage/storage_persistent.c
+++ b/bin/varnishd/storage/storage_persistent.c
@@ -375,6 +375,26 @@ smp_save_segs(struct smp_sc *sc)
 		FREE_OBJ(sg);
 	}
 
+	/* Sync the signs of any segments marked as needing it */
+	VTAILQ_FOREACH(sg, &sc->segments, list) {
+		if (sg->flags & SMP_SEG_SYNCSIGNS) {
+			AZ(sg->flags & SMP_SEG_NEW);
+			AN(sg->nalloc); /* Empty segments shouldn't be here */
+
+			/* Make sure they have all been set up */
+			AN(sg->ctx_head.ss);
+			AN(sg->ctx_obj.ss);
+			AN(sg->ctx_tail.ss);
+
+			sg->flags &= ~SMP_SEG_SYNCSIGNS;
+			Lck_Unlock(&sc->mtx);
+			smp_sync_sign(&sg->ctx_head);
+			smp_sync_sign(&sg->ctx_obj);
+			smp_sync_sign(&sg->ctx_tail);
+			Lck_Lock(&sc->mtx);
+		}
+	}
+
 	Lck_Unlock(&sc->mtx);
 	AZ(smp_chk_signspace(&sc->seg1)); /* Page in */
 	smp_reset_signspace(&sc->seg1);
@@ -589,7 +609,8 @@ smp_allocx(struct stevedore *st, size_t min_size, size_t max_size,
 	if (left < extra + min_size) {
 		if (sc->cur_seg != NULL)
 			smp_close_seg(sc, sc->cur_seg);
-		smp_new_seg(sc);
+		if (sc->cur_seg == NULL)
+			smp_new_seg(sc);
 		if (sc->cur_seg != NULL)
 			left = smp_spaceleft(sc, sc->cur_seg);
 		else
@@ -800,7 +821,8 @@ debug_persistent(struct cli *cli, const char * const * av, void *priv)
 			VTIM_sleep(0.1);
 			Lck_Lock(&sc->mtx);
 		}
-		smp_new_seg(sc);
+		if (sc->cur_seg == NULL)
+			smp_new_seg(sc);
 	} else if (!strcmp(av[3], "dump")) {
 		debug_report_silo(cli, sc, 1);
 	} else {
diff --git a/bin/varnishd/storage/storage_persistent.h b/bin/varnishd/storage/storage_persistent.h
index cb2fc82..93ec45e 100644
--- a/bin/varnishd/storage/storage_persistent.h
+++ b/bin/varnishd/storage/storage_persistent.h
@@ -88,6 +88,7 @@ struct smp_seg {
 #define SMP_SEG_MUSTLOAD	(1 << 0)
 #define SMP_SEG_LOADED		(1 << 1)
 #define SMP_SEG_NEW		(1 << 2)
+#define SMP_SEG_SYNCSIGNS	(1 << 3)
 
 	uint32_t		nobj;		/* Number of objects */
 	uint32_t		nalloc;		/* Allocations */
@@ -95,7 +96,10 @@ struct smp_seg {
 
 	/* Only for open segment */
 	struct smp_object	*objs;		/* objdesc array */
-	struct smp_signctx	ctx[1];
+
+	struct smp_signctx	ctx_head;
+	struct smp_signctx	ctx_obj;
+	struct smp_signctx	ctx_tail;
 };
 
 VTAILQ_HEAD(smp_seghead, smp_seg);
diff --git a/bin/varnishd/storage/storage_persistent_silo.c b/bin/varnishd/storage/storage_persistent_silo.c
index 981d1c6..addb836 100644
--- a/bin/varnishd/storage/storage_persistent_silo.c
+++ b/bin/varnishd/storage/storage_persistent_silo.c
@@ -81,7 +81,6 @@ smp_load_seg(struct worker *wrk, const struct smp_sc *sc,
 	struct objcore *oc;
 	uint32_t no;
 	double t_now = VTIM_real();
-	struct smp_signctx ctx[1];
 
 	ASSERT_SILO_THREAD(sc);
 	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
@@ -92,8 +91,8 @@ smp_load_seg(struct worker *wrk, const struct smp_sc *sc,
 	AN(sg->p.offset);
 	if (sg->p.objlist == 0)
 		return;
-	smp_def_sign(sc, ctx, sg->p.offset, "SEGHEAD");
-	if (smp_chk_sign(ctx))
+	smp_def_sign(sc, &sg->ctx_head, sg->p.offset, "SEGHEAD");
+	if (smp_chk_sign(&sg->ctx_head))
 		return;
 
 	/* test SEGTAIL */
@@ -186,12 +185,6 @@ smp_new_seg(struct smp_sc *sc)
 
 	VTAILQ_INSERT_TAIL(&sc->segments, sg, list);
 
-	/* Neuter the new segment in case there is an old one there */
-	AN(sg->p.offset);
-	smp_def_sign(sc, sg->ctx, sg->p.offset, "SEGHEAD");
-	smp_reset_sign(sg->ctx);
-	smp_sync_sign(sg->ctx);
-
 	/* Set up our allocation points */
 	sc->cur_seg = sg;
 	sc->next_bot = sg->p.offset + IRNUP(sc, SMP_SIGN_SPACE);
@@ -200,6 +193,11 @@ smp_new_seg(struct smp_sc *sc)
 	IASSERTALIGN(sc, sc->next_bot);
 	IASSERTALIGN(sc, sc->next_top);
 	sg->objs = (void*)(sc->base + sc->next_top);
+
+	/* Neuter the new segment in case there is an old one there */
+	AN(sg->p.offset);
+	smp_def_sign(sc, &sg->ctx_head, sg->p.offset, "SEGHEAD");
+	smp_reset_sign(&sg->ctx_head);
 }
 
 /*--------------------------------------------------------------------
@@ -247,29 +245,29 @@ smp_close_seg(struct smp_sc *sc, struct smp_seg *sg)
 		sg->p.length = (sc->next_top - sg->p.offset)
 		     + len + IRNUP(sc, SMP_SIGN_SPACE);
 		(void)smp_spaceleft(sc, sg);	/* for the asserts */
-
 	}
+	sc->free_offset = smp_segend(sg);
 
 	/* Update the segment header */
 	sg->p.objlist = sc->next_top;
 
-	/* Write the (empty) OBJIDX signature */
-	sc->next_top -= IRNUP(sc, SMP_SIGN_SPACE);
-	assert(sc->next_top >= sc->next_bot);
-	smp_def_sign(sc, sg->ctx, sc->next_top, "OBJIDX");
-	smp_reset_sign(sg->ctx);
-	smp_sync_sign(sg->ctx);
+	dst = sc->next_top - IRNUP(sc, SMP_SIGN_SPACE);
+	assert(dst >= sc->next_bot);
 
+	/* Write the (empty) OBJIDX signature */
+	smp_def_sign(sc, &sg->ctx_obj, dst, "OBJIDX");
+	smp_reset_sign(&sg->ctx_obj);
 	/* Write the (empty) SEGTAIL signature */
-	smp_def_sign(sc, sg->ctx,
-	    sg->p.offset + sg->p.length - IRNUP(sc, SMP_SIGN_SPACE), "SEGTAIL");
-	smp_reset_sign(sg->ctx);
-	smp_sync_sign(sg->ctx);
-
-	/* Request sync of segment list */
+	smp_def_sign(sc, &sg->ctx_tail,
+	    sg->p.offset + sg->p.length - IRNUP(sc, SMP_SIGN_SPACE),
+	    "SEGTAIL");
+	smp_reset_sign(&sg->ctx_tail);
+	/* Ask smp_thread() to sync the signs */
+	sg->flags |= SMP_SEG_SYNCSIGNS;
+
+	/* Remove the new flag and request sync of segment list */
 	sg->flags &= ~SMP_SEG_NEW;
 	smp_sync_segs(sc);
-	sc->free_offset = smp_segend(sg);
 }
 
 /*---------------------------------------------------------------------
-- 
1.7.9.5