| | varnish-cache/bin/varnishd/storage/storage_lru.c |
0 |
|
/*- |
1 |
|
* Copyright (c) 2007-2015 Varnish Software AS |
2 |
|
* All rights reserved. |
3 |
|
* |
4 |
|
 * Author: Dag-Erling Smørgrav <des@des.no>
5 |
|
* |
6 |
|
* SPDX-License-Identifier: BSD-2-Clause |
7 |
|
* |
8 |
|
* Redistribution and use in source and binary forms, with or without |
9 |
|
* modification, are permitted provided that the following conditions |
10 |
|
* are met: |
11 |
|
* 1. Redistributions of source code must retain the above copyright |
12 |
|
* notice, this list of conditions and the following disclaimer. |
13 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
14 |
|
* notice, this list of conditions and the following disclaimer in the |
15 |
|
* documentation and/or other materials provided with the distribution. |
16 |
|
* |
17 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
18 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
19 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
20 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
21 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
22 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
23 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
24 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
25 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
26 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
27 |
|
* SUCH DAMAGE. |
28 |
|
* |
29 |
|
* Least-Recently-Used logic for freeing space in stevedores. |
30 |
|
*/ |
31 |
|
|
32 |
|
#include "config.h" |
33 |
|
|
34 |
|
#include <stdlib.h> |
35 |
|
|
36 |
|
#include "cache/cache_varnishd.h" |
37 |
|
#include "cache/cache_objhead.h" |
38 |
|
|
39 |
|
#include "storage/storage.h" |
40 |
|
|
41 |
|
struct lru { |
42 |
|
unsigned magic; |
43 |
|
#define LRU_MAGIC 0x3fec7bb0 |
44 |
|
VTAILQ_HEAD(,objcore) lru_head; |
45 |
|
struct lock mtx; |
46 |
|
}; |
47 |
|
|
48 |
|
static struct lru * |
49 |
70215 |
lru_get(const struct objcore *oc) |
50 |
|
{ |
51 |
70215 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
52 |
70215 |
CHECK_OBJ_NOTNULL(oc->stobj->stevedore, STEVEDORE_MAGIC); |
53 |
70215 |
CHECK_OBJ_NOTNULL(oc->stobj->stevedore->lru, LRU_MAGIC); |
54 |
70215 |
return (oc->stobj->stevedore->lru); |
55 |
|
} |
56 |
|
|
57 |
|
struct lru * |
58 |
72041 |
LRU_Alloc(void) |
59 |
|
{ |
60 |
|
struct lru *lru; |
61 |
|
|
62 |
72041 |
ALLOC_OBJ(lru, LRU_MAGIC); |
63 |
72041 |
AN(lru); |
64 |
72041 |
VTAILQ_INIT(&lru->lru_head); |
65 |
72041 |
Lck_New(&lru->mtx, lck_lru); |
66 |
72041 |
return (lru); |
67 |
|
} |
68 |
|
|
69 |
|
void |
70 |
0 |
LRU_Free(struct lru **pp) |
71 |
|
{ |
72 |
|
struct lru *lru; |
73 |
|
|
74 |
0 |
TAKE_OBJ_NOTNULL(lru, pp, LRU_MAGIC); |
75 |
0 |
Lck_Lock(&lru->mtx); |
76 |
0 |
AN(VTAILQ_EMPTY(&lru->lru_head)); |
77 |
0 |
Lck_Unlock(&lru->mtx); |
78 |
0 |
Lck_Delete(&lru->mtx); |
79 |
0 |
FREE_OBJ(lru); |
80 |
0 |
} |
81 |
|
|
82 |
|
void |
83 |
109519 |
LRU_Add(struct objcore *oc, vtim_real now) |
84 |
|
{ |
85 |
|
struct lru *lru; |
86 |
|
|
87 |
109519 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
88 |
|
|
89 |
109519 |
if (oc->flags & OC_F_PRIVATE) |
90 |
55039 |
return; |
91 |
|
|
92 |
54480 |
AZ(oc->boc); |
93 |
54480 |
AN(isnan(oc->last_lru)); |
94 |
54480 |
AZ(isnan(now)); |
95 |
54480 |
lru = lru_get(oc); |
96 |
54480 |
CHECK_OBJ_NOTNULL(lru, LRU_MAGIC); |
97 |
54480 |
Lck_Lock(&lru->mtx); |
98 |
54480 |
VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list); |
99 |
54480 |
oc->last_lru = now; |
100 |
54480 |
AZ(isnan(oc->last_lru)); |
101 |
54480 |
Lck_Unlock(&lru->mtx); |
102 |
109519 |
} |
103 |
|
|
104 |
|
void |
105 |
69421 |
LRU_Remove(struct objcore *oc) |
106 |
|
{ |
107 |
|
struct lru *lru; |
108 |
|
|
109 |
69421 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
110 |
|
|
111 |
69421 |
if (oc->flags & OC_F_PRIVATE) |
112 |
55038 |
return; |
113 |
|
|
114 |
14383 |
AZ(oc->boc); |
115 |
14383 |
lru = lru_get(oc); |
116 |
14383 |
CHECK_OBJ_NOTNULL(lru, LRU_MAGIC); |
117 |
14383 |
Lck_Lock(&lru->mtx); |
118 |
14383 |
AZ(isnan(oc->last_lru)); |
119 |
14383 |
VTAILQ_REMOVE(&lru->lru_head, oc, lru_list); |
120 |
14383 |
oc->last_lru = NAN; |
121 |
14383 |
Lck_Unlock(&lru->mtx); |
122 |
69421 |
} |
123 |
|
|
124 |
|
void v_matchproto_(objtouch_f) |
125 |
124495 |
LRU_Touch(struct worker *wrk, struct objcore *oc, vtim_real now) |
126 |
|
{ |
127 |
|
struct lru *lru; |
128 |
|
|
129 |
124495 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
130 |
124495 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
131 |
|
|
132 |
124495 |
if (oc->flags & OC_F_PRIVATE || isnan(oc->last_lru)) |
133 |
54928 |
return; |
134 |
|
|
135 |
|
/* |
136 |
|
* To avoid the exphdl->mtx becoming a hotspot, we only |
137 |
|
* attempt to move objects if they have not been moved |
138 |
|
* recently and if the lock is available. This optimization |
139 |
|
* obviously leaves the LRU list imperfectly sorted. |
140 |
|
*/ |
141 |
|
|
142 |
69567 |
if (now - oc->last_lru < cache_param->lru_interval) |
143 |
68202 |
return; |
144 |
|
|
145 |
1365 |
lru = lru_get(oc); |
146 |
1365 |
CHECK_OBJ_NOTNULL(lru, LRU_MAGIC); |
147 |
|
|
148 |
1365 |
if (Lck_Trylock(&lru->mtx)) |
149 |
2 |
return; |
150 |
|
|
151 |
1363 |
if (!isnan(oc->last_lru)) { |
152 |
1363 |
VTAILQ_REMOVE(&lru->lru_head, oc, lru_list); |
153 |
1363 |
VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list); |
154 |
1363 |
VSC_C_main->n_lru_moved++; |
155 |
1363 |
oc->last_lru = now; |
156 |
1363 |
} |
157 |
1363 |
Lck_Unlock(&lru->mtx); |
158 |
124495 |
} |
159 |
|
|
160 |
|
/*-------------------------------------------------------------------- |
161 |
|
* Attempt to make space by nuking the oldest object on the LRU list |
162 |
|
* which isn't in use. |
163 |
|
* Returns: 1: did, 0: didn't; |
164 |
|
*/ |
165 |
|
|
166 |
|
int |
167 |
1080 |
LRU_NukeOne(struct worker *wrk, struct lru *lru) |
168 |
|
{ |
169 |
|
struct objcore *oc, *oc2; |
170 |
|
|
171 |
1080 |
CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC); |
172 |
1080 |
CHECK_OBJ_NOTNULL(lru, LRU_MAGIC); |
173 |
|
|
174 |
1080 |
if (wrk->strangelove-- <= 0) { |
175 |
520 |
VSLb(wrk->vsl, SLT_ExpKill, "LRU reached nuke_limit"); |
176 |
520 |
VSC_C_main->n_lru_limited++; |
177 |
520 |
return (0); |
178 |
|
} |
179 |
|
|
180 |
|
/* Find the first currently unused object on the LRU. */ |
181 |
560 |
Lck_Lock(&lru->mtx); |
182 |
560 |
VTAILQ_FOREACH_SAFE(oc, &lru->lru_head, lru_list, oc2) { |
183 |
440 |
CHECK_OBJ_NOTNULL(oc, OBJCORE_MAGIC); |
184 |
440 |
AZ(isnan(oc->last_lru)); |
185 |
|
|
186 |
880 |
VSLb(wrk->vsl, SLT_ExpKill, "LRU_Cand p=%p f=0x%x r=%d", |
187 |
440 |
oc, oc->flags, oc->refcnt); |
188 |
|
|
189 |
440 |
if (HSH_Snipe(wrk, oc)) { |
190 |
440 |
VSC_C_main->n_lru_nuked++; // XXX per lru ? |
191 |
440 |
VTAILQ_REMOVE(&lru->lru_head, oc, lru_list); |
192 |
440 |
VTAILQ_INSERT_TAIL(&lru->lru_head, oc, lru_list); |
193 |
440 |
break; |
194 |
|
} |
195 |
0 |
} |
196 |
560 |
Lck_Unlock(&lru->mtx); |
197 |
|
|
198 |
560 |
if (oc == NULL) { |
199 |
120 |
VSLb(wrk->vsl, SLT_ExpKill, "LRU_Fail"); |
200 |
120 |
return (0); |
201 |
|
} |
202 |
|
|
203 |
|
/* XXX: We could grab and return one storage segment to our caller */ |
204 |
440 |
ObjSlim(wrk, oc); |
205 |
|
|
206 |
440 |
VSLb(wrk->vsl, SLT_ExpKill, "LRU xid=%ju", VXID(ObjGetXID(wrk, oc))); |
207 |
440 |
(void)HSH_DerefObjCore(wrk, &oc, 0); // Ref from HSH_Snipe |
208 |
440 |
return (1); |
209 |
1080 |
} |