| | varnish-cache/bin/varnishd/cache/cache_backend.c |
0 |
|
/*- |
1 |
|
* Copyright (c) 2006 Verdens Gang AS |
2 |
|
* Copyright (c) 2006-2015 Varnish Software AS |
3 |
|
* All rights reserved. |
4 |
|
* |
5 |
|
* Author: Poul-Henning Kamp <phk@phk.freebsd.dk> |
6 |
|
* |
7 |
|
* SPDX-License-Identifier: BSD-2-Clause |
8 |
|
* |
9 |
|
* Redistribution and use in source and binary forms, with or without |
10 |
|
* modification, are permitted provided that the following conditions |
11 |
|
* are met: |
12 |
|
* 1. Redistributions of source code must retain the above copyright |
13 |
|
* notice, this list of conditions and the following disclaimer. |
14 |
|
* 2. Redistributions in binary form must reproduce the above copyright |
15 |
|
* notice, this list of conditions and the following disclaimer in the |
16 |
|
* documentation and/or other materials provided with the distribution. |
17 |
|
* |
18 |
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
19 |
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
20 |
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
21 |
|
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
22 |
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
23 |
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
24 |
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
25 |
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
26 |
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
27 |
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
28 |
|
* SUCH DAMAGE. |
29 |
|
* |
30 |
|
* The director implementation for VCL backends. |
31 |
|
* |
32 |
|
*/ |
33 |
|
|
34 |
|
#include "config.h" |
35 |
51100 |
|
36 |
51100 |
#include <stdlib.h> |
37 |
51100 |
|
38 |
51100 |
#include "cache_varnishd.h" |
39 |
|
#include "cache_director.h" |
40 |
|
|
41 |
|
#include "vtcp.h" |
42 |
|
#include "vtim.h" |
43 |
|
#include "vsa.h" |
44 |
|
|
45 |
|
#include "cache_backend.h" |
46 |
|
#include "cache_conn_pool.h" |
47 |
|
#include "cache_transport.h" |
48 |
|
#include "cache_vcl.h" |
49 |
|
#include "http1/cache_http1.h" |
50 |
|
#include "proxy/cache_proxy.h" |
51 |
|
|
52 |
|
#include "VSC_vbe.h" |
53 |
|
|
54 |
|
/*--------------------------------------------------------------------*/ |
55 |
|
|
56 |
|
static const char * const vbe_proto_ident = "HTTP Backend"; |
57 |
|
|
58 |
|
static struct lock backends_mtx; |
59 |
|
|
60 |
|
/*--------------------------------------------------------------------*/ |
61 |
|
|
62 |
|
void |
63 |
1079 |
VBE_Connect_Error(struct VSC_vbe *vsc, int err) |
64 |
|
{ |
65 |
|
|
66 |
1079 |
switch(err) { |
67 |
|
case 0: |
68 |
|
/* |
69 |
|
* This is kind of brittle, but zero is the only |
70 |
|
* value of errno we can trust to have no meaning. |
71 |
|
*/ |
72 |
469 |
vsc->helddown++; |
73 |
469 |
break; |
74 |
|
case EACCES: |
75 |
|
case EPERM: |
76 |
0 |
vsc->fail_eacces++; |
77 |
0 |
break; |
78 |
|
case EADDRNOTAVAIL: |
79 |
0 |
vsc->fail_eaddrnotavail++; |
80 |
0 |
break; |
81 |
|
case ECONNREFUSED: |
82 |
610 |
vsc->fail_econnrefused++; |
83 |
610 |
break; |
84 |
|
case ENETUNREACH: |
85 |
0 |
vsc->fail_enetunreach++; |
86 |
0 |
break; |
87 |
|
case ETIMEDOUT: |
88 |
0 |
vsc->fail_etimedout++; |
89 |
0 |
break; |
90 |
|
default: |
91 |
0 |
vsc->fail_other++; |
92 |
0 |
} |
93 |
1079 |
} |
94 |
|
|
95 |
|
/*--------------------------------------------------------------------*/ |
96 |
|
|
97 |
|
/*
 * Resolve the effective timeout <tmx> for a fetch into <dst>:
 * the busyobj's per-transaction value wins, then the backend's
 * configured value, then the global parameter default.
 * A value of 0.0 means "not set" at each level.
 */
#define FIND_TMO(tmx, dst, bo, be)					\
	do {								\
		CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);			\
		dst = bo->tmx;						\
		if (dst == 0.0)						\
			dst = be->tmx;					\
		if (dst == 0.0)						\
			dst = cache_param->tmx;				\
	} while (0)
106 |
|
|
107 |
|
/*-------------------------------------------------------------------- |
108 |
|
* Get a connection to the backend |
109 |
|
* |
110 |
|
* note: wrk is a separate argument because it differs for pipe vs. fetch |
111 |
|
*/ |
112 |
|
|
113 |
|
/*
 * Acquire a (possibly pooled) backend connection for bo's fetch.
 *
 * Returns a pfd with an open fd on success, or NULL after logging a
 * FetchError and bumping the appropriate counters (unhealthy, busy,
 * workspace exhaustion, connect failure, or proxy-preamble write failure).
 * On success, bo->htc is allocated on bo's workspace and initialized,
 * and the backend's connection/request counters have been incremented
 * under the director lock.
 *
 * note: wrk is a separate argument because it differs for pipe vs. fetch
 */
static struct pfd *
vbe_dir_getfd(VRT_CTX, struct worker *wrk, VCL_BACKEND dir, struct backend *bp,
    unsigned force_fresh)
{
	struct busyobj *bo;
	struct pfd *pfd;
	int *fdp, err;
	vtim_dur tmod;
	char abuf1[VTCP_ADDRBUFSIZE], abuf2[VTCP_ADDRBUFSIZE];
	char pbuf1[VTCP_PORTBUFSIZE], pbuf2[VTCP_PORTBUFSIZE];

	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
	bo = ctx->bo;
	CHECK_OBJ_NOTNULL(bp, BACKEND_MAGIC);
	AN(bp->vsc);

	/* Refuse to hand out a connection to a backend marked sick. */
	if (!VRT_Healthy(ctx, dir, NULL)) {
		VSLb(bo->vsl, SLT_FetchError,
		    "backend %s: unhealthy", VRT_BACKEND_string(dir));
		bp->vsc->unhealthy++;
		VSC_C_main->backend_unhealthy++;
		return (NULL);
	}

	/* Enforce the backend's max_connections limit, if configured. */
	if (bp->max_connections > 0 && bp->n_conn >= bp->max_connections) {
		VSLb(bo->vsl, SLT_FetchError,
		    "backend %s: busy", VRT_BACKEND_string(dir));
		bp->vsc->busy++;
		VSC_C_main->backend_busy++;
		return (NULL);
	}

	/* The http_conn lives on the busyobj workspace for this fetch. */
	AZ(bo->htc);
	bo->htc = WS_Alloc(bo->ws, sizeof *bo->htc);
	if (bo->htc == NULL) {
		VSLb(bo->vsl, SLT_FetchError, "out of workspace");
		/* XXX: counter ? */
		return (NULL);
	}
	bo->htc->doclose = SC_NULL;
	CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

	/* Get (or open) a connection from the pool, honoring the
	 * effective connect timeout. */
	FIND_TMO(connect_timeout, tmod, bo, bp);
	pfd = VCP_Get(bp->conn_pool, tmod, wrk, force_fresh, &err);
	if (pfd == NULL) {
		Lck_Lock(bp->director->mtx);
		VBE_Connect_Error(bp->vsc, err);
		Lck_Unlock(bp->director->mtx);
		VSLb(bo->vsl, SLT_FetchError,
		    "backend %s: fail errno %d (%s)",
		    VRT_BACKEND_string(dir), err, VAS_errtxt(err));
		VSC_C_main->backend_fail++;
		bo->htc = NULL;
		return (NULL);
	}

	VSLb_ts_busyobj(bo, "Connected", W_TIM_real(wrk));
	fdp = PFD_Fd(pfd);
	AN(fdp);
	assert(*fdp >= 0);

	/* Account the connection under the director lock. */
	Lck_Lock(bp->director->mtx);
	bp->n_conn++;
	bp->vsc->conn++;
	bp->vsc->req++;
	Lck_Unlock(bp->director->mtx);

	CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

	/* If configured, send a PROXY protocol preamble before the request.
	 * A negative return indicates a write failure. */
	err = 0;
	if (bp->proxy_header != 0)
		err += VPX_Send_Proxy(*fdp, bp->proxy_header, bo->sp);
	if (err < 0) {
		VSLb(bo->vsl, SLT_FetchError,
		    "backend %s: proxy write errno %d (%s)",
		    VRT_BACKEND_string(dir),
		    errno, VAS_errtxt(errno));
		// account as if connect failed - good idea?
		VSC_C_main->backend_fail++;
		bo->htc = NULL;
		/* Roll back the accounting done above. */
		VCP_Close(&pfd);
		AZ(pfd);
		Lck_Lock(bp->director->mtx);
		bp->n_conn--;
		bp->vsc->conn--;
		bp->vsc->req--;
		Lck_Unlock(bp->director->mtx);
		return (NULL);
	}
	/* On success err holds the number of preamble bytes written. */
	bo->acct.bereq_hdrbytes += err;

	/* Log endpoints and whether the connection was pooled or fresh. */
	PFD_LocalName(pfd, abuf1, sizeof abuf1, pbuf1, sizeof pbuf1);
	PFD_RemoteName(pfd, abuf2, sizeof abuf2, pbuf2, sizeof pbuf2);
	VSLb(bo->vsl, SLT_BackendOpen, "%d %s %s %s %s %s %s",
	    *fdp, VRT_BACKEND_string(dir), abuf2, pbuf2, abuf1, pbuf1,
	    PFD_State(pfd) == PFD_STATE_STOLEN ? "reuse" : "connect");

	/* Reinitialize htc for this connection and set the I/O timeouts. */
	INIT_OBJ(bo->htc, HTTP_CONN_MAGIC);
	bo->htc->priv = pfd;
	bo->htc->rfd = fdp;
	bo->htc->doclose = SC_NULL;
	FIND_TMO(first_byte_timeout,
	    bo->htc->first_byte_timeout, bo, bp);
	FIND_TMO(between_bytes_timeout,
	    bo->htc->between_bytes_timeout, bo, bp);
	return (pfd);
}
221 |
|
|
222 |
|
/*
 * Release the backend connection held by ctx->bo.
 *
 * The connection is closed when the fetch decided so (doclose != SC_NULL)
 * or when a PROXY preamble was sent (such connections are never reusable);
 * otherwise it is recycled into the pool.  In both cases the backend's
 * connection count and per-bereq accounting counters are updated under
 * the director lock, and bo->htc is cleared.
 */
static void v_matchproto_(vdi_finish_f)
vbe_dir_finish(VRT_CTX, VCL_BACKEND d)
{
	struct backend *bp;
	struct busyobj *bo;
	struct pfd *pfd;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	bo = ctx->bo;
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	CHECK_OBJ_NOTNULL(bo->htc, HTTP_CONN_MAGIC);
	CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

	pfd = bo->htc->priv;
	bo->htc->priv = NULL;
	/* NB: both branches leave bp->director->mtx held; it is released
	 * after the shared accounting below. */
	if (bo->htc->doclose != SC_NULL || bp->proxy_header != 0) {
		VSLb(bo->vsl, SLT_BackendClose, "%d %s close %s", *PFD_Fd(pfd),
		    VRT_BACKEND_string(d), bo->htc->doclose->desc);
		VCP_Close(&pfd);
		AZ(pfd);
		Lck_Lock(bp->director->mtx);
	} else {
		assert (PFD_State(pfd) == PFD_STATE_USED);
		VSLb(bo->vsl, SLT_BackendClose, "%d %s recycle", *PFD_Fd(pfd),
		    VRT_BACKEND_string(d));
		Lck_Lock(bp->director->mtx);
		VSC_C_main->backend_recycle++;
		VCP_Recycle(bo->wrk, &pfd);
	}
	assert(bp->n_conn > 0);
	bp->n_conn--;
	AN(bp->vsc);
	bp->vsc->conn--;
	/* Fold this fetch's bereq byte counters into the backend's VSC. */
#define ACCT(foo)	bp->vsc->foo += bo->acct.foo;
#include "tbl/acct_fields_bereq.h"
	Lck_Unlock(bp->director->mtx);
	bo->htc = NULL;
}
263 |
|
|
264 |
|
/*
 * Send the bereq to the backend and fetch the response headers.
 *
 * Returns 0 on success (beresp headers parsed), -1 on failure.
 * A reused (stolen) pooled connection may have been closed by the
 * backend before our request arrived; in that case one automatic
 * retry on a fresh connection is attempted ("extrachance").
 */
static int v_matchproto_(vdi_gethdrs_f)
vbe_dir_gethdrs(VRT_CTX, VCL_BACKEND d)
{
	int i, extrachance = 1;
	struct backend *bp;
	struct pfd *pfd;
	struct busyobj *bo;
	struct worker *wrk;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	bo = ctx->bo;
	CHECK_OBJ_NOTNULL(bo, BUSYOBJ_MAGIC);
	CHECK_OBJ_NOTNULL(bo->bereq, HTTP_MAGIC);
	if (bo->htc != NULL)
		CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
	wrk = ctx->bo->wrk;
	CHECK_OBJ_NOTNULL(wrk, WORKER_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	/*
	 * Now that we know our backend, we can set a default Host:
	 * header if one is necessary.  This cannot be done in the VCL
	 * because the backend may be chosen by a director.
	 */
	if (!http_GetHdr(bo->bereq, H_Host, NULL) && bp->hosthdr != NULL)
		http_PrintfHeader(bo->bereq, "Host: %s", bp->hosthdr);

	do {
		if (bo->htc != NULL)
			CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
		/* On the retry (extrachance == 0) force a fresh connection. */
		pfd = vbe_dir_getfd(ctx, wrk, d, bp, extrachance == 0 ? 1 : 0);
		if (pfd == NULL)
			return (-1);
		AN(bo->htc);
		CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);
		/* Only a reused pooled connection warrants a retry. */
		if (PFD_State(pfd) != PFD_STATE_STOLEN)
			extrachance = 0;

		i = V1F_SendReq(wrk, bo, &bo->acct.bereq_hdrbytes,
		    &bo->acct.bereq_bodybytes);

		/* For a stolen connection, wait until it is readable (or
		 * times out) before declaring it usable. */
		if (i == 0 && PFD_State(pfd) != PFD_STATE_USED) {
			if (VCP_Wait(wrk, pfd, VTIM_real() +
			    bo->htc->first_byte_timeout) != 0) {
				bo->htc->doclose = SC_RX_TIMEOUT;
				VSLb(bo->vsl, SLT_FetchError,
				    "first byte timeout (reused connection)");
				extrachance = 0;
			}
		}

		if (bo->htc->doclose == SC_NULL) {
			assert(PFD_State(pfd) == PFD_STATE_USED);
			if (i == 0)
				i = V1F_FetchRespHdr(bo);
			if (i == 0) {
				/* Success: beresp headers are in. */
				AN(bo->htc->priv);
				http_VSL_log(bo->beresp);
				return (0);
			}
		}
		CHECK_OBJ_NOTNULL(bo->htc->doclose, STREAM_CLOSE_MAGIC);

		/*
		 * If we recycled a backend connection, there is a finite chance
		 * that the backend closed it before we got the bereq to it.
		 * In that case do a single automatic retry if req.body allows.
		 */
		vbe_dir_finish(ctx, d);
		AZ(bo->htc);
		if (i < 0 || extrachance == 0)
			break;
		if (bo->no_retry != NULL)
			break;
		VSC_C_main->backend_retry++;
	} while (extrachance--);
	return (-1);
}
343 |
|
|
344 |
|
static VCL_IP v_matchproto_(vdi_getip_f) |
345 |
25 |
vbe_dir_getip(VRT_CTX, VCL_BACKEND d) |
346 |
|
{ |
347 |
|
struct pfd *pfd; |
348 |
|
|
349 |
25 |
CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC); |
350 |
25 |
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC); |
351 |
25 |
CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC); |
352 |
25 |
CHECK_OBJ_NOTNULL(ctx->bo->htc, HTTP_CONN_MAGIC); |
353 |
25 |
pfd = ctx->bo->htc->priv; |
354 |
|
|
355 |
25 |
return (VCP_GetIp(pfd)); |
356 |
|
} |
357 |
|
|
358 |
|
/*--------------------------------------------------------------------*/ |
359 |
|
|
360 |
|
/*
 * Set up and run HTTP/1 pipe mode: forward the client request to the
 * backend and shuffle bytes in both directions until one side closes
 * or the pipe task deadline expires.  Returns the stream-close reason,
 * SC_TX_ERROR if no backend connection could be obtained.
 */
static stream_close_t v_matchproto_(vdi_http1pipe_f)
vbe_dir_http1pipe(VRT_CTX, VCL_BACKEND d)
{
	int i;
	stream_close_t retval;
	struct backend *bp;
	struct v1p_acct v1a;
	struct pfd *pfd;
	vtim_real deadline;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->req, REQ_MAGIC);
	CHECK_OBJ_NOTNULL(ctx->bo, BUSYOBJ_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	memset(&v1a, 0, sizeof v1a);

	/* This is hackish... */
	/* Move the request-header byte count into the pipe accounting so
	 * it is not double-counted on the req side. */
	v1a.req = ctx->req->acct.req_hdrbytes;
	ctx->req->acct.req_hdrbytes = 0;

	ctx->req->res_mode = RES_PIPE;

	retval = SC_TX_ERROR;
	pfd = vbe_dir_getfd(ctx, ctx->req->wrk, d, bp, 0);

	if (pfd != NULL) {
		CHECK_OBJ_NOTNULL(ctx->bo->htc, HTTP_CONN_MAGIC);
		i = V1F_SendReq(ctx->req->wrk, ctx->bo,
		    &v1a.bereq, &v1a.out);
		VSLb_ts_req(ctx->req, "Pipe", W_TIM_real(ctx->req->wrk));
		if (i == 0) {
			/* Per-task deadline wins; otherwise fall back to the
			 * pipe_task_deadline parameter.  A deadline > 0 is
			 * anchored at the session's idle timestamp. */
			deadline = ctx->bo->task_deadline;
			if (isnan(deadline))
				deadline = cache_param->pipe_task_deadline;
			if (deadline > 0.)
				deadline += ctx->req->sp->t_idle;
			retval = V1P_Process(ctx->req, *PFD_Fd(pfd), &v1a,
			    deadline);
		}
		VSLb_ts_req(ctx->req, "PipeSess", W_TIM_real(ctx->req->wrk));
		ctx->bo->htc->doclose = retval;
		vbe_dir_finish(ctx, d);
	}
	V1P_Charge(ctx->req, &v1a, bp->vsc);
	CHECK_OBJ_NOTNULL(retval, STREAM_CLOSE_MAGIC);
	return (retval);
}
409 |
|
|
410 |
|
/*--------------------------------------------------------------------*/ |
411 |
|
|
412 |
|
static void |
413 |
35039 |
vbe_dir_event(const struct director *d, enum vcl_event_e ev) |
414 |
|
{ |
415 |
|
struct backend *bp; |
416 |
|
|
417 |
35039 |
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC); |
418 |
35039 |
CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC); |
419 |
|
|
420 |
35039 |
if (ev == VCL_EVENT_WARM) { |
421 |
31475 |
VRT_VSC_Reveal(bp->vsc_seg); |
422 |
31475 |
if (bp->probe != NULL) |
423 |
850 |
VBP_Control(bp, 1); |
424 |
35039 |
} else if (ev == VCL_EVENT_COLD) { |
425 |
1968 |
if (bp->probe != NULL) |
426 |
175 |
VBP_Control(bp, 0); |
427 |
1968 |
VRT_VSC_Hide(bp->vsc_seg); |
428 |
3564 |
} else if (ev == VCL_EVENT_DISCARD) { |
429 |
1596 |
VRT_DelDirector(&bp->director); |
430 |
1596 |
} |
431 |
35039 |
} |
432 |
|
|
433 |
|
/*---------------------------------------------------------------------*/ |
434 |
|
|
435 |
|
/*
 * Free a backend and everything it owns: probe, VSC segment, connection
 * pool reference, the string fields duplicated at creation time, the
 * cloned endpoint, and finally the struct itself.  Also decrements the
 * global backend gauge under backends_mtx.
 */
static void
vbe_free(struct backend *be)
{

	CHECK_OBJ_NOTNULL(be, BACKEND_MAGIC);

	if (be->probe != NULL)
		VBP_Remove(be);

	VSC_vbe_Destroy(&be->vsc_seg);
	Lck_Lock(&backends_mtx);
	VSC_C_main->n_backend--;
	Lck_Unlock(&backends_mtx);
	VCP_Rel(&be->conn_pool);

	/* DA frees the strdup'ed string fields, DN leaves plain values
	 * alone; VRT_BACKEND_HANDLE() expands over all backend fields. */
#define DA(x)	do { if (be->x != NULL) free(be->x); } while (0)
#define DN(x)	/**/
	VRT_BACKEND_HANDLE();
#undef DA
#undef DN
	free(be->endpoint);

	FREE_OBJ(be);
}
459 |
|
|
460 |
|
static void v_matchproto_(vdi_destroy_f) |
461 |
1946 |
vbe_destroy(const struct director *d) |
462 |
|
{ |
463 |
|
struct backend *be; |
464 |
|
|
465 |
1946 |
CAST_OBJ_NOTNULL(be, d->priv, BACKEND_MAGIC); |
466 |
1946 |
vbe_free(be); |
467 |
1946 |
} |
468 |
|
|
469 |
|
/*--------------------------------------------------------------------*/ |
470 |
|
|
471 |
|
/*
 * Panic-output callback: dump this backend's connection pool state,
 * configured Host header and current connection count into the panic
 * buffer.
 */
static void
vbe_panic(const struct director *d, struct vsb *vsb)
{
	struct backend *bp;

	CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC);
	CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC);

	VCP_Panic(vsb, bp->conn_pool);
	VSB_printf(vsb, "hosthdr = %s,\n", bp->hosthdr);
	VSB_printf(vsb, "n_conn = %u,\n", bp->n_conn);
}
483 |
|
|
484 |
|
/*-------------------------------------------------------------------- |
485 |
|
*/ |
486 |
|
|
487 |
|
static void v_matchproto_(vdi_list_f) |
488 |
28000 |
vbe_list(VRT_CTX, const struct director *d, struct vsb *vsb, int pflag, |
489 |
|
int jflag) |
490 |
|
{ |
491 |
|
struct backend *bp; |
492 |
|
|
493 |
28000 |
(void)ctx; |
494 |
|
|
495 |
28000 |
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC); |
496 |
28000 |
CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC); |
497 |
|
|
498 |
28000 |
if (bp->probe != NULL) |
499 |
1950 |
VBP_Status(vsb, bp, pflag, jflag); |
500 |
26050 |
else if (jflag && pflag) |
501 |
75 |
VSB_cat(vsb, "{},\n"); |
502 |
25975 |
else if (jflag) |
503 |
275 |
VSB_cat(vsb, "[0, 0, \"healthy\"]"); |
504 |
25700 |
else if (pflag) |
505 |
200 |
return; |
506 |
|
else |
507 |
25500 |
VSB_cat(vsb, "0/0\thealthy"); |
508 |
28000 |
} |
509 |
|
|
510 |
|
/*-------------------------------------------------------------------- |
511 |
|
*/ |
512 |
|
|
513 |
|
static VCL_BOOL v_matchproto_(vdi_healthy_f) |
514 |
1425 |
vbe_healthy(VRT_CTX, VCL_BACKEND d, VCL_TIME *t) |
515 |
|
{ |
516 |
|
struct backend *bp; |
517 |
|
|
518 |
1425 |
(void)ctx; |
519 |
1425 |
CHECK_OBJ_NOTNULL(d, DIRECTOR_MAGIC); |
520 |
1425 |
CAST_OBJ_NOTNULL(bp, d->priv, BACKEND_MAGIC); |
521 |
|
|
522 |
1425 |
if (t != NULL) |
523 |
1050 |
*t = bp->changed; |
524 |
|
|
525 |
1425 |
return (!bp->sick); |
526 |
|
} |
527 |
|
|
528 |
|
|
529 |
|
/*-------------------------------------------------------------------- |
530 |
|
*/ |
531 |
|
|
532 |
|
/* Director method table for backends with a health probe. */
static const struct vdi_methods vbe_methods[1] = {{
	.magic =		VDI_METHODS_MAGIC,
	.type =			"backend",
	.http1pipe =		vbe_dir_http1pipe,
	.gethdrs =		vbe_dir_gethdrs,
	.getip =		vbe_dir_getip,
	.finish =		vbe_dir_finish,
	.event =		vbe_dir_event,
	.destroy =		vbe_destroy,
	.panic =		vbe_panic,
	.list =			vbe_list,
	.healthy =		vbe_healthy
}};
545 |
|
|
546 |
|
/* Method table for probe-less backends: identical, except no .healthy
 * callback, so the generic "always healthy" behavior applies. */
static const struct vdi_methods vbe_methods_noprobe[1] = {{
	.magic =		VDI_METHODS_MAGIC,
	.type =			"backend",
	.http1pipe =		vbe_dir_http1pipe,
	.gethdrs =		vbe_dir_gethdrs,
	.getip =		vbe_dir_getip,
	.finish =		vbe_dir_finish,
	.event =		vbe_dir_event,
	.destroy =		vbe_destroy,
	.panic =		vbe_panic,
	.list =			vbe_list
}};
558 |
|
|
559 |
|
/*-------------------------------------------------------------------- |
560 |
|
* Create a new static or dynamic director::backend instance. |
561 |
|
*/ |
562 |
|
|
563 |
|
/*
 * Report how much VSM space a single backend's VSC segment requires,
 * so VCL loading can pre-size the cluster.
 */
size_t
VRT_backend_vsm_need(VRT_CTX)
{
	(void)ctx;
	return (VRT_VSC_Overhead(VSC_vbe_size));
}
569 |
|
|
570 |
|
/* |
571 |
|
* The new_backend via parameter is a VCL_BACKEND, but we need a (struct |
572 |
|
* backend) |
573 |
|
* |
574 |
|
* For now, we resolve it when creating the backend, which imples no redundancy |
575 |
|
* / load balancing across the via director if it is more than a simple backend. |
576 |
|
*/ |
577 |
|
|
578 |
|
/*
 * The new_backend via parameter is a VCL_BACKEND, but we need a (struct
 * backend)
 *
 * For now, we resolve it when creating the backend, which imples no redundancy
 * / load balancing across the via director if it is more than a simple backend.
 *
 * Returns the resolved backend, or NULL after a VRT_fail() (UDS endpoint,
 * resolution failure, or resolution to a non-backend director).
 */
static const struct backend *
via_resolve(VRT_CTX, const struct vrt_endpoint *vep, VCL_BACKEND via)
{
	const struct backend *viabe = NULL;

	CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
	CHECK_OBJ_NOTNULL(via, DIRECTOR_MAGIC);

	if (vep->uds_path) {
		VRT_fail(ctx, "Via is only supported for IP addresses");
		return (NULL);
	}

	/* NB: 'via' is rebound to whatever the director resolves to. */
	via = VRT_DirectorResolve(ctx, via);

	if (via == NULL) {
		VRT_fail(ctx, "Via resolution failed");
		return (NULL);
	}

	CHECK_OBJ(via, DIRECTOR_MAGIC);
	CHECK_OBJ_NOTNULL(via->vdir, VCLDIR_MAGIC);

	/* Only a plain backend (either method table) qualifies. */
	if (via->vdir->methods == vbe_methods ||
	    via->vdir->methods == vbe_methods_noprobe)
		CAST_OBJ_NOTNULL(viabe, via->priv, BACKEND_MAGIC);

	if (viabe == NULL)
		VRT_fail(ctx, "Via does not resolve to a backend");

	return (viabe);
}
610 |
|
|
611 |
|
/* |
612 |
|
* construct a new endpoint identical to vep with sa in a proxy header |
613 |
|
*/ |
614 |
|
/*
 * construct a new endpoint identical to vep with sa in a proxy header
 *
 * The PROXYv2 preamble (addressing sa, with a bogo client address of
 * matching family and optional authority TLV) is formatted into a vsb
 * and attached via a stack-local blob; the second VRT_Endpoint_Clone()
 * deep-copies it, so both the vsb and the temporary clone can be freed
 * before returning.  Caller owns the returned endpoint.
 */
static struct vrt_endpoint *
via_endpoint(const struct vrt_endpoint *vep, const struct suckaddr *sa,
    const char *auth)
{
	struct vsb *preamble;
	struct vrt_blob blob[1];
	struct vrt_endpoint *nvep, *ret;
	const struct suckaddr *client_bogo;

	CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
	AN(sa);

	nvep = VRT_Endpoint_Clone(vep);
	CHECK_OBJ_NOTNULL(nvep, VRT_ENDPOINT_MAGIC);

	/* Placeholder client address of the same family as sa. */
	if (VSA_Get_Proto(sa) == AF_INET6)
		client_bogo = bogo_ip6;
	else
		client_bogo = bogo_ip;

	preamble = VSB_new_auto();
	AN(preamble);
	VPX_Format_Proxy(preamble, 2, client_bogo, sa, auth);
	blob->blob = VSB_data(preamble);
	blob->len = VSB_len(preamble);
	nvep->preamble = blob;
	/* Clone again so the preamble blob is copied out of this frame. */
	ret = VRT_Endpoint_Clone(nvep);
	CHECK_OBJ_NOTNULL(ret, VRT_ENDPOINT_MAGIC);
	VSB_destroy(&preamble);
	FREE_OBJ(nvep);

	return (ret);
}
647 |
|
|
648 |
|
/*
 * Create a new static or dynamic director::backend instance.
 *
 * Validates the endpoint (UDS xor IP), optionally resolves a 'via'
 * backend, copies the vrt_backend description, sets up VSC counters,
 * connection pool and (default) probe, and registers the director.
 * Returns the new director, or NULL after VRT_fail()/allocation failure.
 */
VCL_BACKEND
VRT_new_backend_clustered(VRT_CTX, struct vsmw_cluster *vc,
    const struct vrt_backend *vrt, VCL_BACKEND via)
{
	struct backend *be;
	struct vcl *vcl;
	const struct vrt_backend_probe *vbp;
	const struct vrt_endpoint *vep;
	const struct vdi_methods *m;
	const struct suckaddr *sa = NULL;
	char abuf[VTCP_ADDRBUFSIZE];
	const struct backend *viabe = NULL;

	CHECK_OBJ_NOTNULL(ctx, VRT_CTX_MAGIC);
	CHECK_OBJ_NOTNULL(vrt, VRT_BACKEND_MAGIC);
	vep = vrt->endpoint;
	CHECK_OBJ_NOTNULL(vep, VRT_ENDPOINT_MAGIC);
	/* An endpoint is either a UDS path or at least one IP address. */
	if (vep->uds_path == NULL) {
		if (vep->ipv4 == NULL && vep->ipv6 == NULL) {
			VRT_fail(ctx, "%s: Illegal IP", __func__);
			return (NULL);
		}
	} else {
		assert(vep->ipv4 == NULL && vep->ipv6 == NULL);
	}

	if (via != NULL) {
		viabe = via_resolve(ctx, vep, via);
		if (viabe == NULL)
			return (NULL);
	}

	vcl = ctx->vcl;
	AN(vcl);
	AN(vrt->vcl_name);

	/* Create new backend */
	ALLOC_OBJ(be, BACKEND_MAGIC);
	if (be == NULL)
		return (NULL);

	/* Copy the description: DA strdup's string fields, DN copies
	 * plain values. */
#define DA(x)	do { if (vrt->x != NULL) REPLACE((be->x), (vrt->x)); } while (0)
#define DN(x)	do { be->x = vrt->x; } while (0)
	VRT_BACKEND_HANDLE();
#undef DA
#undef DN

	/* Pick the address used for the via preamble and/or a default
	 * Host: header.  NB: 'sa' is only set inside this block; the
	 * via_endpoint() call below relies on viabe implying it ran. */
	if (viabe || be->hosthdr == NULL) {
		if (vrt->endpoint->uds_path != NULL)
			sa = bogo_ip;
		else if (cache_param->prefer_ipv6 && vep->ipv6 != NULL)
			sa = vep->ipv6;
		else if (vep->ipv4 != NULL)
			sa = vep->ipv4;
		else
			sa = vep->ipv6;
		if (be->hosthdr == NULL) {
			VTCP_name(sa, abuf, sizeof abuf, NULL, 0);
			REPLACE(be->hosthdr, abuf);
		}
	}

	be->vsc = VSC_vbe_New(vc, &be->vsc_seg,
	    "%s.%s", VCL_Name(ctx->vcl), vrt->vcl_name);
	AN(be->vsc);
	if (! vcl->temp->is_warm)
		VRT_VSC_Hide(be->vsc_seg);

	/* With 'via', connect through the via backend's endpoint carrying
	 * a PROXY preamble for our real address; otherwise clone ours. */
	if (viabe)
		vep = be->endpoint = via_endpoint(viabe->endpoint, sa,
		    be->authority);
	else
		vep = be->endpoint = VRT_Endpoint_Clone(vep);

	AN(vep);
	be->conn_pool = VCP_Ref(vep, vbe_proto_ident);
	AN(be->conn_pool);

	/* An explicit probe wins over the VCL's default probe. */
	vbp = vrt->probe;
	if (vbp == NULL)
		vbp = VCL_DefaultProbe(vcl);

	if (vbp != NULL) {
		VBP_Insert(be, vbp, be->conn_pool);
		m = vbe_methods;
	} else {
		be->sick = 0;
		m = vbe_methods_noprobe;
	}

	Lck_Lock(&backends_mtx);
	VSC_C_main->n_backend++;
	Lck_Unlock(&backends_mtx);

	be->director = VRT_AddDirector(ctx, m, be, "%s", vrt->vcl_name);

	if (be->director == NULL) {
		vbe_free(be);
		return (NULL);
	}
	/* for cold VCL, update initial director state */
	if (be->probe != NULL)
		VBP_Update_Backend(be->probe);
	return (be->director);
}
753 |
|
|
754 |
|
/*
 * Convenience wrapper: create a backend outside any VSM cluster
 * (vc == NULL); see VRT_new_backend_clustered() for semantics.
 */
VCL_BACKEND
VRT_new_backend(VRT_CTX, const struct vrt_backend *vrt, VCL_BACKEND via)
{

	CHECK_OBJ_NOTNULL(vrt, VRT_BACKEND_MAGIC);
	CHECK_OBJ_NOTNULL(vrt->endpoint, VRT_ENDPOINT_MAGIC);
	return (VRT_new_backend_clustered(ctx, NULL, vrt, via));
}
762 |
|
|
763 |
|
/*-------------------------------------------------------------------- |
764 |
|
* Delete a dynamic director::backend instance. Undeleted dynamic and |
765 |
|
* static instances are GC'ed when the VCL is discarded (in cache_vcl.c) |
766 |
|
*/ |
767 |
|
|
768 |
|
/*
 * Delete a dynamic director::backend instance.  Undeleted dynamic and
 * static instances are GC'ed when the VCL is discarded (in cache_vcl.c)
 *
 * Disables the director and drops the caller's reference (*dp is set
 * to NULL by VRT_Assign_Backend).
 */
void
VRT_delete_backend(VRT_CTX, VCL_BACKEND *dp)
{

	(void)ctx;
	CHECK_OBJ_NOTNULL(*dp, DIRECTOR_MAGIC);
	VRT_DisableDirector(*dp);
	VRT_Assign_Backend(dp, NULL);
}
777 |
|
|
778 |
|
/*---------------------------------------------------------------------*/ |
779 |
|
|
780 |
|
/*
 * One-time initialization: create the mutex guarding the global
 * backend gauge (VSC_C_main->n_backend).
 */
void
VBE_InitCfg(void)
{

	Lck_New(&backends_mtx, lck_vbe);
}