varnish-cache/bin/varnishd/storage/storage_lru.c
/*-
 * Copyright (c) 2007-2015 Varnish Software AS
 * All rights reserved.
 *
 * Author: Dag-Erling Smørgrav <des@des.no>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Least-Recently-Used logic for freeing space in stevedores.
 */

#include "config.h"

#include <stdlib.h>

#include "cache/cache_varnishd.h"
#include "cache/cache_objhead.h"

#include "storage/storage.h"
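
/*
 * One LRU list and the lock protecting it.  Objects are kept in order of
 * last use: least recently used at the head, most recently used at the tail.
 */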
struct lru {
        unsigned                magic;
#define LRU_MAGIC               0x3fec7bb0
        VTAILQ_HEAD(,objcore)   lru_head;
        struct lock             mtx;
};
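
/* Return the LRU list of the stevedore holding this object's storage. */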
static struct lru *
lru_get(const struct objcore *oc)
{
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc->stobj->stevedore, STEVEDORE_MAGIC);
        CHECK_OBJ_NOTNULL(oc->stobj->stevedore->lru, LRU_MAGIC);
        return (oc->stobj->stevedore->lru);
}
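
/* Allocate and initialize an empty LRU list and its lock. */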
struct lru *
LRU_Alloc(void)
{
        struct lru *lru;

        ALLOC_OBJ(lru, LRU_MAGIC);
        AN(lru);
        VTAILQ_INIT(&lru->lru_head);
        Lck_New(&lru->mtx, lck_lru);
        return (lru);
}
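
/* Delete an LRU list; it must be empty by the time it is freed. */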
void
LRU_Free(struct lru **pp)
{
        struct lru *lru;

        TAKE_OBJ_NOTNULL(lru, pp, LRU_MAGIC);
        Lck_Lock(&lru->mtx);
        AN(VTAILQ_EMPTY(&lru->lru_head));
        Lck_Unlock(&lru->mtx);
        Lck_Delete(&lru->mtx);
        FREE_OBJ(lru);
}
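
/*
 * Insert an object at the most recently used end of its stevedore's LRU
 * list and stamp its last-use time.  Objects marked OC_F_PRIVATE are
 * never kept on an LRU list.
 */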
void
LRU_Add(struct objcore *oc, double now)
{
        struct lru *lru;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        if (oc->flags & OC_F_PRIVATE)
                return;

        AZ(oc->boc);
        AN(isnan(oc->last_lru));
        AZ(isnan(now));
        lru = lru_get(oc);
        CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
        Lck_Lock(&lru->mtx);
        VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
        oc->last_lru = now;
        AZ(isnan(oc->last_lru));
        Lck_Unlock(&lru->mtx);
}
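
/*
 * Take an object off its LRU list; oc->last_lru is reset to NAN to mark
 * the object as no longer being on a list.
 */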
void
LRU_Remove(struct objcore *oc)
{
        struct lru *lru;

        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        if (oc->flags & OC_F_PRIVATE)
                return;

        AZ(oc->boc);
        lru = lru_get(oc);
        CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
        Lck_Lock(&lru->mtx);
        AZ(isnan(oc->last_lru));
        VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
        oc->last_lru = NAN;
        Lck_Unlock(&lru->mtx);
}
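
/*
 * Record a use of the object by moving it to the most recently used end
 * of its LRU list, subject to the rate limiting described below.
 */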
void v_matchproto_(objtouch_f)
LRU_Touch(struct worker *wrk, struct objcore *oc, double now)
{
        struct lru *lru;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);

        if (oc->flags & OC_F_PRIVATE || isnan(oc->last_lru))
                return;

        /*
         * To avoid the exphdl->mtx becoming a hotspot, we only
         * attempt to move objects if they have not been moved
         * recently and if the lock is available.  This optimization
         * obviously leaves the LRU list imperfectly sorted.
         */

        if (now - oc->last_lru < cache_param->lru_interval)
                return;

        lru = lru_get(oc);
        CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);

        if (Lck_Trylock(&lru->mtx))
                return;

        if (!isnan(oc->last_lru)) {
                VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
                VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
                VSC_C_main->n_lru_moved++;
                oc->last_lru = now;
        }
        Lck_Unlock(&lru->mtx);
}

/*--------------------------------------------------------------------
 * Attempt to make space by nuking the oldest object on the LRU list
 * which isn't in use.
 * Returns: 1: did, 0: didn't;
 */

int
LRU_NukeOne(struct worker *wrk, struct lru *lru)
{
        struct objcore *oc, *oc2;

        CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
        CHECK_OBJ_NOTNULL(lru, LRU_MAGIC);
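
        /*
         * wrk->strangelove is the number of objects this task may still
         * nuke; once that budget is spent we report hitting nuke_limit.
         */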
        if (wrk->strangelove-- <= 0) {
                VSLb(wrk->vsl, SLT_ExpKill, "LRU reached nuke_limit");
                VSC_C_main->n_lru_limited++;
                return (0);
        }

        /* Find the first currently unused object on the LRU.  */
        Lck_Lock(&lru->mtx);
        VTAILQ_FOREACH_SAFE(oc, &lru->lru_head, lru_list, oc2) {
                CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC);
                AZ(isnan(oc->last_lru));

                VSLb(wrk->vsl, SLT_ExpKill, "LRU_Cand p=%p f=0x%x r=%d",
                    oc, oc->flags, oc->refcnt);

                if (HSH_Snipe(wrk, oc)) {
                        VSC_C_main->n_lru_nuked++; // XXX per lru ?
                        VTAILQ_REMOVE(&lru->lru_head, oc, lru_list);
                        VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list);
                        break;
                }
        }
        Lck_Unlock(&lru->mtx);

        if (oc == NULL) {
                VSLb(wrk->vsl, SLT_ExpKill, "LRU_Fail");
                return (0);
        }

        /* XXX: We could grab and return one storage segment to our caller */
        ObjSlim(wrk, oc);

        VSLb(wrk->vsl, SLT_ExpKill, "LRU x=%u", ObjGetXID(wrk, oc));
        (void)HSH_DerefObjCore(wrk, &oc, 0);    // Ref from HSH_Snipe
        return (1);
}
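
/*
 * Illustrative sketch: a stevedore's allocation path can keep retrying a
 * failed allocation as long as LRU_NukeOne() manages to free something.
 * The helper example_try_alloc() below is hypothetical; only LRU_NukeOne()
 * and the stevedore's lru pointer come from the real code.
 *
 *      static struct storage *
 *      example_alloc(struct worker *wrk, const struct stevedore *stv,
 *          size_t size)
 *      {
 *              struct storage *st;
 *
 *              do {
 *                      st = example_try_alloc(stv, size);  // hypothetical
 *                      if (st != NULL)
 *                              return (st);
 *              } while (LRU_NukeOne(wrk, stv->lru));
 *              return (NULL);          // nothing left to nuke
 *      }
 */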