varnish-cache/bin/varnishd/storage/storage_lru.c
/*-
 * Copyright (c) 2007-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Dag-Erling Smørgrav <des@des.no>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Least-Recently-Used logic for freeing space in stevedores.
 */

#include "config.h"

#include <stdlib.h>

#include "cache/cache_varnishd.h"
#include "cache/cache_objhead.h"

#include "storage/storage.h"

struct lru {
        unsigned                magic;
#define LRU_MAGIC               0x3fec7bb0
        VTAILQ_HEAD(,objcore)   lru_head;
        struct lock             mtx;
};

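/* Map an objcore to the LRU list of the stevedore it is stored in. */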
static struct lru *
lru_get(const struct objcore *oc)
{
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc->stobj->stevedore, STEVEDORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc->stobj->stevedore->lru, LRU_MAGIC);
        return (oc->stobj->stevedore->lru);
}

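/* Allocate and initialize an empty LRU list and its lock. */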
struct lru *
LRU_Alloc(void)
{
        struct lru *lru;

        ALLOC_OBJ(lru, LRU_MAGIC);
        AN(lru);
        VTAILQ_INIT(&lru->lru_head);
        Lck_New(&lru->mtx, lck_lru);
        return (lru);
}

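/* Delete an LRU list; it must already be empty. */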
void
LRU_Free(struct lru **pp)
{
        struct lru *lru;

        TAKE_OBJ_NOTNULL(lru, pp, LRU_MAGIC);
        Lck_Lock(&lru->mtx);
        AN(VTAILQ_EMPTY(&lru->lru_head));
        Lck_Unlock(&lru->mtx);
        Lck_Delete(&lru->mtx);
        FREE_OBJ(lru);
}

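/* Add a non-private objcore to the tail of its LRU list, stamping last_lru. */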
void
LRU_Add(struct objcore *oc, vtim_real now)
{
        struct lru *lru;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        if (oc->flags & OC_F_PRIVATE)
                return;

        AZ(oc->boc);
        AN(isnan(oc->last_lru));
        AZ(isnan(now));
        lru = lru_get(oc);
        CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
        Lck_Lock(&lru->mtx);
        VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
        oc->last_lru = now;
        AZ(isnan(oc->last_lru));
        Lck_Unlock(&lru->mtx);
}

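/* Remove a non-private objcore from its LRU list and reset last_lru to NAN. */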
void
LRU_Remove(struct objcore *oc)
{
        struct lru *lru;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        if (oc->flags & OC_F_PRIVATE)
                return;

        AZ(oc->boc);
        lru = lru_get(oc);
        CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
        Lck_Lock(&lru->mtx);
        AZ(isnan(oc->last_lru));
        VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
        oc->last_lru = NAN;
        Lck_Unlock(&lru->mtx);
}

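/* Move a touched objcore to the (most recently used) tail of its LRU list. */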
void v_matchproto_(objtouch_f)
LRU_Touch(struct worker *wrk, struct objcore *oc, vtim_real now)
{
        struct lru *lru;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        if (oc->flags & OC_F_PRIVATE || isnan(oc->last_lru))
                return;

        /*
         * To avoid the LRU mtx becoming a hotspot, we only
         * attempt to move objects if they have not been moved
         * recently and if the lock is available.  This optimization
         * obviously leaves the LRU list imperfectly sorted.
         */

        if (now - oc->last_lru < cache_param->lru_interval)
                return;

        lru = lru_get(oc);
        CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);

        if (Lck_Trylock(&lru->mtx))
                return;

        if (!isnan(oc->last_lru)) {
                VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
                VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
                VSC_C_main->n_lru_moved++;
                oc->last_lru = now;
        }
        Lck_Unlock(&lru->mtx);
}

/*--------------------------------------------------------------------
 * Attempt to make space by nuking the oldest object on the LRU list
 * which isn't in use.
 * Returns: 1 if an object was nuked, 0 otherwise.
 */

int
LRU_NukeOne(struct worker *wrk, struct lru *lru)
{
        struct objcore *oc, *oc2;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);

        if (wrk->strangelove-- <= 0) {
                VSLb(wrk->vsl, SLT_ExpKill, "LRU reached nuke_limit");
                VSC_C_main->n_lru_limited++;
                return (0);
        }

        /* Find the first currently unused object on the LRU.  */
        Lck_Lock(&lru->mtx);
        VTAILQ_FOREACH_SAFE(oc, &lru->lru_head, lru_list, oc2) {
                CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
                AZ(isnan(oc->last_lru));

                VSLb(wrk->vsl, SLT_ExpKill, "LRU_Cand p=%p f=0x%x r=%d",
                    oc, oc->flags, oc->refcnt);

                if (HSH_Snipe(wrk, oc)) {
                        VSC_C_main->n_lru_nuked++; // XXX per lru ?
                        VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
                        VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
                        break;
                }
        }
        Lck_Unlock(&lru->mtx);

        if (oc == NULL) {
                VSLb(wrk->vsl, SLT_ExpKill, "LRU_Fail");
                return (0);
        }

        /* XXX: We could grab and return one storage segment to our caller */
        ObjSlim(wrk, oc);

        VSLb(wrk->vsl, SLT_ExpKill, "LRU xid=%ju", VXID(ObjGetXID(wrk, oc)));
        (void)HSH_DerefObjCore(wrk, &oc, 0);    // Ref from HSH_Snipe
        return (1);
}