varnish-cache/bin/varnishd/storage/storage_lru.c
/*-
 * Copyright (c) 2007-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Dag-Erling Smørgrav <des@des.no>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Least-Recently-Used logic for freeing space in stevedores.
 */

#include "config.h"

#include <stdlib.h>

#include "cache/cache_varnishd.h"
#include "cache/cache_objhead.h"

#include "storage/storage.h"

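/* An LRU list of objcores, protected by its own lock. */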
struct lru {
        unsigned                magic;
#define LRU_MAGIC               0x3fec7bb0
        VTAILQ_HEAD(,objcore)   lru_head;
        struct lock             mtx;
};

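/* Fetch the LRU list of the stevedore an objcore is stored in. */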
static struct lru *
lru_get(const struct objcore *oc)
{
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc->stobj->stevedore, STEVEDORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc->stobj->stevedore->lru, LRU_MAGIC);
        return (oc->stobj->stevedore->lru);
}

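/* Allocate and initialize an empty LRU list and its lock. */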
struct lru *
LRU_Alloc(void)
{
        struct lru *lru;

        ALLOC_OBJ(lru, LRU_MAGIC);
        AN(lru);
        VTAILQ_INIT(&lru->lru_head);
        Lck_New(&lru->mtx, lck_lru);
        return (lru);
}

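/* Tear down an LRU list; it must already be empty. */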
void
LRU_Free(struct lru **pp)
{
        struct lru *lru;

        TAKE_OBJ_NOTNULL(lru, pp, LRU_MAGIC);
        Lck_Lock(&lru->mtx);
        AN(VTAILQ_EMPTY(&lru->lru_head));
        Lck_Unlock(&lru->mtx);
        Lck_Delete(&lru->mtx);
        FREE_OBJ(lru);
}

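/*
 * Put an objcore on the tail of its stevedore's LRU list and timestamp it.
 * Private objects are never listed; the object must no longer have a boc.
 */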
void
LRU_Add(struct objcore *oc, vtim_real now)
{
        struct lru *lru;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        if (oc->flags & OC_F_PRIVATE)
                return;

        AZ(oc->boc);
        AN(isnan(oc->last_lru));
        AZ(isnan(now));
        lru = lru_get(oc);
        CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
        Lck_Lock(&lru->mtx);
        VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
        oc->last_lru = now;
        AZ(isnan(oc->last_lru));
        Lck_Unlock(&lru->mtx);
}

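/*
 * Take an objcore off its LRU list and mark it unlisted by resetting
 * last_lru to NAN.  Private objects were never listed and are ignored.
 */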
void
LRU_Remove(struct objcore *oc)
{
        struct lru *lru;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        if (oc->flags & OC_F_PRIVATE)
                return;

        AZ(oc->boc);
        lru = lru_get(oc);
        CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
        Lck_Lock(&lru->mtx);
        AZ(isnan(oc->last_lru));
        VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
        oc->last_lru = NAN;
        Lck_Unlock(&lru->mtx);
}

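/*
 * objtouch_f method: move a touched objcore to the tail (most recently
 * used end) of its LRU list, subject to the rate limiting explained below.
 */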
void v_matchproto_(objtouch_f)
LRU_Touch(struct worker *wrk, struct objcore *oc, vtim_real now)
{
        struct lru *lru;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        if (oc->flags & OC_F_PRIVATE || isnan(oc->last_lru))
                return;

        /*
         * To avoid the exphdl->mtx becoming a hotspot, we only
         * attempt to move objects if they have not been moved
         * recently and if the lock is available.  This optimization
         * obviously leaves the LRU list imperfectly sorted.
         */

        if (now - oc->last_lru < cache_param->lru_interval)
                return;

        lru = lru_get(oc);
        CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);

        if (Lck_Trylock(&lru->mtx))
                return;

        if (!isnan(oc->last_lru)) {
                VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
                VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
                VSC_C_main->n_lru_moved++;
                oc->last_lru = now;
        }
        Lck_Unlock(&lru->mtx);
}

/*--------------------------------------------------------------------
 * Attempt to make space by nuking the oldest object on the LRU list
 * which isn't in use.
 * Returns: 1: did, 0: didn't;
 */

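/*
 * Note: wrk->strangelove counts down the remaining nuke budget; when it
 * runs out we log that nuke_limit was reached and give up.
 */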
int
LRU_NukeOne(struct worker *wrk, struct lru *lru)
{
        struct objcore *oc, *oc2;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);

        if (wrk->strangelove-- <= 0) {
                VSLb(wrk->vsl, SLT_ExpKill, "LRU reached nuke_limit");
                VSC_C_main->n_lru_limited++;
                return (0);
        }

        /* Find the first currently unused object on the LRU.  */
        Lck_Lock(&lru->mtx);
        VTAILQ_FOREACH_SAFE(oc, &lru->lru_head, lru_list, oc2) {
                CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
                AZ(isnan(oc->last_lru));

                VSLb(wrk->vsl, SLT_ExpKill, "LRU_Cand p=%p f=0x%x r=%d",
                    oc, oc->flags, oc->refcnt);

                if (HSH_Snipe(wrk, oc)) {
                        VSC_C_main->n_lru_nuked++; // XXX per lru ?
                        VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
                        VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
                        break;
                }
        }
        Lck_Unlock(&lru->mtx);

        if (oc == NULL) {
                VSLb(wrk->vsl, SLT_ExpKill, "LRU_Fail");
                return (0);
        }

        /* XXX: We could grab and return one storage segment to our caller */
        ObjSlim(wrk, oc);

        VSLb(wrk->vsl, SLT_ExpKill, "LRU x=%u", ObjGetXID(wrk, oc));
        (void)HSH_DerefObjCore(wrk, &oc, 0);    // Ref from HSH_Snipe
        return (1);
}