varnish-cache/bin/varnishd/storage/storage_lru.c
/*-
 * Copyright (c) 2007-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Dag-Erling Smørgrav <des@des.no>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Least-Recently-Used logic for freeing space in stevedores.
 */

#include "config.h"

#include <stdlib.h>

#include "cache/cache_varnishd.h"
#include "cache/cache_objhead.h"

#include "storage/storage.h"

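/* Per-stevedore LRU bookkeeping: objcores ordered by last use, under mtx. */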
struct lru {
	unsigned		magic;
#define LRU_MAGIC		0x3fec7bb0
	VTAILQ_HEAD(,objcore)	lru_head;
	struct lock		mtx;
};

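/* Look up the LRU instance of the stevedore holding this objcore. */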
static struct lru *
lru_get(const struct objcore *oc)
{
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->stobj->stevedore, STEVEDORE_MAGIC);
	CHECK_OBJ_NOTNULL(oc->stobj->stevedore->lru, LRU_MAGIC);
	return (oc->stobj->stevedore->lru);
}

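/* Allocate an empty LRU list and its mutex. */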
struct lru *
LRU_Alloc(void)
{
	struct lru *lru;

	ALLOC_OBJ(lru, LRU_MAGIC);
	AN(lru);
	VTAILQ_INIT(&lru->lru_head);
	Lck_New(&lru->mtx, lck_lru);
	return (lru);
}

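/* Free an LRU list; it must already be empty. */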
void
LRU_Free(struct lru **pp)
{
	struct lru *lru;

	TAKE_OBJ_NOTNULL(lru, pp, LRU_MAGIC);
	Lck_Lock(&lru->mtx);
	AN(VTAILQ_EMPTY(&lru->lru_head));
	Lck_Unlock(&lru->mtx);
	Lck_Delete(&lru->mtx);
	FREE_OBJ(lru);
}

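/*
 * Insert a non-private objcore at the tail of its stevedore's LRU
 * and stamp oc->last_lru; private objects are never on the LRU.
 */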
void
LRU_Add(struct objcore *oc, vtim_real now)
{
	struct lru *lru;

	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	if (oc->flags & OC_F_PRIVATE)
		return;

	AZ(oc->boc);
	AN(isnan(oc->last_lru));
	AZ(isnan(now));
	lru = lru_get(oc);
	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
	Lck_Lock(&lru->mtx);
	VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
	oc->last_lru = now;
	AZ(isnan(oc->last_lru));
	Lck_Unlock(&lru->mtx);
}

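/*
 * Take a non-private objcore off its stevedore's LRU and mark it
 * as no longer listed (last_lru = NAN).
 */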
void
LRU_Remove(struct objcore *oc)
{
	struct lru *lru;

	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	if (oc->flags & OC_F_PRIVATE)
		return;

	AZ(oc->boc);
	lru = lru_get(oc);
	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
	Lck_Lock(&lru->mtx);
	AZ(isnan(oc->last_lru));
	VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
	oc->last_lru = NAN;
	Lck_Unlock(&lru->mtx);
}

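/* Called on object use: move the objcore towards the tail of its LRU. */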
void v_matchproto_(objtouch_f)
LRU_Touch(struct worker *wrk, struct objcore *oc, vtim_real now)
{
	struct lru *lru;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

	if (oc->flags & OC_F_PRIVATE || isnan(oc->last_lru))
		return;

	/*
	 * To avoid the lru->mtx becoming a hotspot, we only
	 * attempt to move objects if they have not been moved
	 * recently and if the lock is available.  This optimization
	 * obviously leaves the LRU list imperfectly sorted.
	 */

	if (now - oc->last_lru < cache_param->lru_interval)
		return;

	lru = lru_get(oc);
	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);

	if (Lck_Trylock(&lru->mtx))
		return;

	if (!isnan(oc->last_lru)) {
		VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
		VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
		VSC_C_main->n_lru_moved++;
		oc->last_lru = now;
	}
	Lck_Unlock(&lru->mtx);
}

/*--------------------------------------------------------------------
 * Attempt to make space by nuking the oldest object on the LRU list
 * which isn't in use.
 * Returns: 1: did, 0: didn't;
 */

int
LRU_NukeOne(struct worker *wrk, struct lru *lru)
{
	struct objcore *oc, *oc2;

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);

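	/*
	 * wrk->strangelove is the caller's remaining nuke budget
	 * (see the nuke_limit parameter); give up once it is spent.
	 */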
	if (wrk->strangelove-- <= 0) {
		VSLb(wrk->vsl, SLT_ExpKill, "LRU reached nuke_limit");
		VSC_C_main->n_lru_limited++;
		return (0);
	}

	/* Find the first currently unused object on the LRU.  */
	Lck_Lock(&lru->mtx);
	VTAILQ_FOREACH_SAFE(oc, &lru->lru_head, lru_list, oc2) {
		CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
		AZ(isnan(oc->last_lru));

		VSLb(wrk->vsl, SLT_ExpKill, "LRU_Cand p=%p f=0x%x r=%d",
		    oc, oc->flags, oc->refcnt);

		if (HSH_Snipe(wrk, oc)) {
			VSC_C_main->n_lru_nuked++; // XXX per lru ?
			VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
			VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
			break;
		}
	}
	Lck_Unlock(&lru->mtx);

	if (oc == NULL) {
		VSLb(wrk->vsl, SLT_ExpKill, "LRU_Fail");
		return (0);
	}

	/* XXX: We could grab and return one storage segment to our caller */
	ObjSlim(wrk, oc);

	VSLb(wrk->vsl, SLT_ExpKill, "LRU xid=%ju", VXID(ObjGetXID(wrk, oc)));
	(void)HSH_DerefObjCore(wrk, &oc, 0);	// Ref from HSH_Snipe
	return (1);
}