[Varnish] #538: [varnish-2.0.4] Potential Memory Leak
Varnish
varnish-bugs at projects.linpro.no
Mon Aug 17 22:44:31 CEST 2009
#538: [varnish-2.0.4] Potential Memory Leak
-------------------------------+--------------------------------------------
Reporter: pprocacci | Owner: phk
Type: defect | Status: new
Priority: high | Milestone:
Component: varnishd | Version: trunk
Severity: major | Resolution:
Keywords: Memory Leak 2.0.4 |
-------------------------------+--------------------------------------------
Comment (by barnaclebob):
I also have this problem. I did not notice it before because we were not
serving proper cache headers anywhere, and now we are.
{{{
top - 15:40:13 up 39 days, 14:44, 2 users, load average: 0.39, 0.21,
0.11
Mem: 8181520k total, 8130296k used, 51224k free, 54624k buffers
Swap: 1052248k total, 41220k used, 1011028k free, 7485492k cached
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
16602 varnish 15 0 50.3g 3.7g 3.5g S 3.7 47.7 3:05.06
/usr/sbin/varnishd -P /var/run/varnish.pid -a :80 -f
/etc/varnish/default.vcl -T :6082 -t 120 -p thread_pools 4 -p lru_interval
120 -h classic,350003 -p obj_workspace 4096 -w 50,2000,120 -u varnish -g
varnish -s file,/var/lib/varnish/varnish_storage.bin,30%
}}}
varnish stat:
{{{
karl at fe01:~$ varnishstat -1
uptime 8145 . Child uptime
client_conn 924273 113.48 Client connections accepted
client_req 924260 113.48 Client requests received
cache_hit 327260 40.18 Cache hits
cache_hitpass 6028 0.74 Cache hits for pass
cache_miss 341935 41.98 Cache misses
backend_conn 597001 73.30 Backend connections success
backend_unhealthy 0 0.00 Backend connections not
attempted
backend_busy 0 0.00 Backend connections too many
backend_fail 0 0.00 Backend connections failures
backend_reuse 482556 59.25 Backend connections reuses
backend_recycle 576729 70.81 Backend connections recycles
backend_unused 0 0.00 Backend connections unused
n_srcaddr 3 . N struct srcaddr
n_srcaddr_act 1 . N active struct srcaddr
n_sess_mem 263 . N struct sess_mem
n_sess 35 . N struct sess
n_object 102049 . N struct object
n_objecthead 102124 . N struct objecthead
n_smf 224940 . N struct smf
n_smf_frag 20472 . N small free smf
n_smf_large 1 . N large free smf
n_vbe_conn 55 . N struct vbe_conn
n_bereq 238 . N struct bereq
n_wrk 200 . N worker threads
n_wrk_create 333 0.04 N worker threads created
n_wrk_failed 0 0.00 N worker threads not created
n_wrk_max 0 0.00 N worker threads limited
n_wrk_queue 0 0.00 N queued work requests
n_wrk_overflow 528 0.06 N overflowed work requests
n_wrk_drop 0 0.00 N dropped work requests
n_backend 1 . N backends
n_expired 239953 . N expired objects
n_lru_nuked 0 . N LRU nuked objects
n_lru_saved 0 . N LRU saved objects
n_lru_moved 58306 . N LRU moved objects
n_deathrow 0 . N objects on deathrow
losthdr 42 0.01 HTTP header overflows
n_objsendfile 0 0.00 Objects sent with sendfile
n_objwrite 880956 108.16 Objects sent with write
n_objoverflow 0 0.00 Objects overflowing workspace
s_sess 924241 113.47 Total Sessions
s_req 924232 113.47 Total Requests
s_pipe 3 0.00 Total pipe
s_pass 255053 31.31 Total pass
s_fetch 596914 73.29 Total fetch
s_hdrbytes 353891229 43448.89 Total header bytes
s_bodybytes 5969771056 732936.90 Total body bytes
sess_closed 924241 113.47 Session Closed
sess_pipeline 0 0.00 Session Pipeline
sess_readahead 0 0.00 Session Read Ahead
sess_linger 0 0.00 Session Linger
sess_herd 0 0.00 Session herd
shm_records 68151008 8367.22 SHM records
shm_writes 5265060 646.42 SHM writes
shm_flushes 23615 2.90 SHM flushes due to overflow
shm_cont 2762 0.34 SHM MTX contention
shm_cycles 28 0.00 SHM cycles through buffer
sm_nreq 1155811 141.90 allocator requests
sm_nobj 204467 . outstanding allocations
sm_balloc 3320414208 . bytes allocated
sm_bfree 48156446720 . bytes free
sma_nreq 0 0.00 SMA allocator requests
sma_nobj 0 . SMA outstanding allocations
sma_nbytes 0 . SMA outstanding bytes
sma_balloc 0 . SMA bytes allocated
sma_bfree 0 . SMA bytes free
sms_nreq 53 0.01 SMS allocator requests
sms_nobj 0 . SMS outstanding allocations
sms_nbytes 0 . SMS outstanding bytes
sms_balloc 172992 . SMS bytes allocated
sms_bfree 172992 . SMS bytes freed
backend_req 596988 73.30 Backend requests made
n_vcl 1 0.00 N vcl total
n_vcl_avail 1 0.00 N vcl available
n_vcl_discard 0 0.00 N vcl discarded
n_purge 2612 . N total active purges
n_purge_add 2631 0.32 N new purges added
n_purge_retire 19 0.00 N old purges deleted
n_purge_obj_test 108700 13.35 N objects tested
n_purge_re_test 22665736 2782.78 N regexps tested against
n_purge_dups 0 0.00 N duplicate purges removed
hcb_nolock 0 0.00 HCB Lookups without lock
hcb_lock 0 0.00 HCB Lookups with lock
hcb_insert 0 0.00 HCB Inserts
esi_parse 0 0.00 Objects ESI parsed (unlock)
esi_errors 0 0.00 ESI parse errors (unlock)
}}}
VCL:
{{{
backend default {
.host = "localhost";
.port = "8080";
}
sub vcl_recv {
if(req.http.Accept-Encoding ~ "gzip"){
set req.http.Accept-Encoding="gzip";
}else{
remove req.http.Accept-Encoding;
}
#unless we have the only 2 cookies we care about just remove the
whole string
if (!req.http.cookie ~ "_cookie_id\s*=" && ! req.http.cookie ~
"auth_token\s*=" && ! req.http.cookie ~ "flash\s*=") {
remove req.http.cookie;
}
#doing purges this way causes a memory leak in varnish. do not
ever use them
#if (req.request == "PURGE") {
# if (!client.ip ~ private) {
# error 405 "Not allowed.";
# }
# lookup;
#}
set req.grace = 60s;
if (req.request != "GET" &&
req.request != "HEAD" &&
req.request != "PUT" &&
req.request != "POST" &&
req.request != "TRACE" &&
req.request != "OPTIONS" &&
req.request != "DELETE") {
/* Non-RFC2616 or CONNECT which is weird. */
pipe;
}
if (req.request != "GET" && req.request != "HEAD") {
/* We only deal with GET and HEAD by default */
pass;
}
if (req.http.Cookie) {
/* Not cacheable by default */
pass;
}
lookup;
}
sub vcl_hit {
#doing purges this way causes a memory leak in varnish. do not
ever use them
#if (req.request == "PURGE") {
# set obj.ttl = 0s;
# error 200 "Purged.";
#}
}
sub vcl_miss {
if (req.request == "PURGE") {
error 404 "Not in cache.";
}
}
sub vcl_hash {
if(req.http.Accept-Encoding ~ "gzip"){
set req.hash+="gzip";
}else{
set req.hash+="nogzip";
}
}
sub vcl_fetch {
set obj.grace = 60s;
if(obj.http.cache-control && obj.http.x-bench-route){
set obj.http.cache-control = regsub(obj.http.cache-control
,"max-age\s*=\s*[0-9]+","max-age = 0");
}
if(obj.http.x-ss-static){
unset obj.http.expires;
unset obj.http.set-cookie;
set obj.http.cache-control = "max-age = 300";
set obj.ttl = 1w;
set obj.prefetch = -30s;
deliver;
}
if (!obj.cacheable) {
pass;
}
if (obj.http.Set-Cookie) {
pass;
}
set obj.prefetch = -30s;
deliver;
}
sub vcl_deliver {
if(resp.http.x-ss-static){
unset resp.http.x-ss-static;
set resp.http.age = "0";
}
}
sub vcl_error {
set obj.http.Content-Type = "text/html; charset=utf-8";
synthetic {"
...snip...
"};
deliver;
}
}}}
--
Ticket URL: <http://varnish.projects.linpro.no/ticket/538#comment:3>
Varnish <http://varnish.projects.linpro.no/>
The Varnish HTTP Accelerator
More information about the varnish-bugs
mailing list