Nginx的轮询算法

1、概述

Nginx轮询算法应用于http模块和stream模块的upstream块,根据权重选择相应的服务器进行负载均衡。

1.1、相关配置

1
2
3
4
5
6
7
8
9
10
11

http {

upstream backend {
server 127.0.0.1:8080 weight=10;
server 192.168.1.2:8080 weight=10;
}

}

>若upstream块中没有指定负载均衡算法,则默认使用轮询算法。

2、算法实现

Nginx中http_upstream_module跟stream_upstream_module模块都使用了轮询算法,这里以stream_upstream模块为例。

2.1、相关结构体

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
//Per-session round-robin state: which peer list is in use, the currently
//selected peer, and a bitmap of peers already tried for this session.
typedef struct {
ngx_stream_upstream_rr_peers_t *peers;
ngx_stream_upstream_rr_peer_t *current;
uintptr_t *tried; //bitmap of tried peers, used when there are more than 8*sizeof(uintptr_t) servers (32 on 32-bit, 64 on 64-bit platforms)
uintptr_t data; //inline bitmap used when the server count fits into a single uintptr_t; rrp->tried then points here
} ngx_stream_upstream_rr_peer_data_t;


typedef struct ngx_stream_upstream_rr_peer_s ngx_stream_upstream_rr_peer_t;

//One backend server (peer) as tracked by the round-robin balancer.
struct ngx_stream_upstream_rr_peer_s {
struct sockaddr *sockaddr;
socklen_t socklen;
ngx_str_t name; //peer name, usually the textual IP:port
ngx_str_t server;

ngx_int_t current_weight; //dynamic weight, adjusted on every selection round
ngx_int_t effective_weight; //working RR weight, starts equal to the configured weight; lowered on failures
ngx_int_t weight; //weight from the configuration

ngx_uint_t conns; //number of connections currently established to this peer

ngx_uint_t fails; //failed attempts within the current fail_timeout window
time_t accessed;
time_t checked;

ngx_uint_t max_fails; //configured max_fails threshold
time_t fail_timeout;

ngx_uint_t down; /* unsigned down:1; */

#if (NGX_STREAM_SSL)
void *ssl_session;
int ssl_session_len;
#endif

ngx_stream_upstream_rr_peer_t *next;

#if (NGX_STREAM_UPSTREAM_ZONE)
ngx_atomic_t lock;
#endif
};


typedef struct ngx_stream_upstream_rr_peers_s ngx_stream_upstream_rr_peers_t;

//A list of peers for one upstream{} block; primary and backup servers
//each get their own list, chained via the 'next' member.
struct ngx_stream_upstream_rr_peers_s {
ngx_uint_t number; //how many servers were configured in this list

#if (NGX_STREAM_UPSTREAM_ZONE)
ngx_slab_pool_t *shpool;
ngx_atomic_t rwlock;
#endif

ngx_uint_t total_weight; //sum of the weights of all servers

unsigned single:1; //set when the list contains exactly one server
unsigned weighted:1;

ngx_str_t *name;

ngx_stream_upstream_rr_peers_t *next;

ngx_stream_upstream_rr_peer_t *peer; //head of the singly linked peer list (all peer[] entries are chained through peers->peer)
};

上述3个结构体是整个轮询算法的核心,ngx_stream_upstream_rr_peer_data_t结构体保存已经选择过的服务器以及当前选择的服务器,ngx_stream_upstream_rr_peer_t结构体中的几个weight成员(weight、effective_weight、current_weight)用来选择服务器。

2.1 初始化ngx_stream_upstream_rr_peer_t结构体

Nginx在解析配置文件时,当解析到upstream块时,会调用ngx_stream_upstream_init_round_robin函数,将配置文件中的server保存在ngx_stream_upstream_rr_peer_t结构体中。接下来看看ngx_stream_upstream_init_round_robin函数定义

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210

/*
 * Build the round-robin peer lists for an upstream{} block at configure
 * time.  Called from ngx_stream_upstream_init_main_conf() when no other
 * balancing method was chosen.  Stores the primary peer list in
 * us->peer.data; backup servers go into a second list reachable via
 * peers->next.  Returns NGX_OK or NGX_ERROR.
 */
ngx_int_t
ngx_stream_upstream_init_round_robin(ngx_conf_t *cf,
ngx_stream_upstream_srv_conf_t *us)
{
ngx_url_t u;
ngx_uint_t i, j, n, w;
ngx_stream_upstream_server_t *server;
ngx_stream_upstream_rr_peer_t *peer, **peerp;
ngx_stream_upstream_rr_peers_t *peers, *backup;//one list for the primary servers, one for the backup servers

us->peer.init = ngx_stream_upstream_init_round_robin_peer;//per-session peer initialization callback

if (us->servers) {
server = us->servers->elts;

//count the primary addresses (n) and their total weight (w)
n = 0;
w = 0;

for (i = 0; i < us->servers->nelts; i++) {
if (server[i].backup) {
continue;
}

n += server[i].naddrs;
w += server[i].naddrs * server[i].weight;
}

if (n == 0) {
ngx_log_error(NGX_LOG_EMERG, cf->log, 0,
"no servers in upstream \"%V\" in %s:%ui",
&us->host, us->file_name, us->line);
return NGX_ERROR;
}

peers = ngx_pcalloc(cf->pool, sizeof(ngx_stream_upstream_rr_peers_t));
if (peers == NULL) {
return NGX_ERROR;
}

peer = ngx_pcalloc(cf->pool, sizeof(ngx_stream_upstream_rr_peer_t) * n);
if (peer == NULL) {
return NGX_ERROR;
}

peers->single = (n == 1);
peers->number = n;
peers->weighted = (w != n);
peers->total_weight = w;
peers->name = &us->host;

n = 0;
peerp = &peers->peer;
//copy every resolved address of every primary server into a peer
//entry and chain the entries into a singly linked list
for (i = 0; i < us->servers->nelts; i++) {
if (server[i].backup) {
continue;
}

for (j = 0; j < server[i].naddrs; j++) {
peer[n].sockaddr = server[i].addrs[j].sockaddr;
peer[n].socklen = server[i].addrs[j].socklen;
peer[n].name = server[i].addrs[j].name;
peer[n].weight = server[i].weight;
peer[n].effective_weight = server[i].weight;
peer[n].current_weight = 0;
peer[n].max_fails = server[i].max_fails;
peer[n].fail_timeout = server[i].fail_timeout;
peer[n].down = server[i].down;
peer[n].server = server[i].name;

*peerp = &peer[n];
peerp = &peer[n].next;
n++;
}
}

us->peer.data = peers;

/* backup servers */

n = 0;
w = 0;

for (i = 0; i < us->servers->nelts; i++) {
if (!server[i].backup) {
continue;
}

n += server[i].naddrs;
w += server[i].naddrs * server[i].weight;
}

//no backup servers configured: the primary list alone is enough
if (n == 0) {
return NGX_OK;
}

backup = ngx_pcalloc(cf->pool, sizeof(ngx_stream_upstream_rr_peers_t));
if (backup == NULL) {
return NGX_ERROR;
}

peer = ngx_pcalloc(cf->pool, sizeof(ngx_stream_upstream_rr_peer_t) * n);
if (peer == NULL) {
return NGX_ERROR;
}

//with a backup list present the upstream is never "single"
peers->single = 0;
backup->single = 0;
backup->number = n;
backup->weighted = (w != n);
backup->total_weight = w;
backup->name = &us->host;

n = 0;
peerp = &backup->peer;

for (i = 0; i < us->servers->nelts; i++) {
if (!server[i].backup) {
continue;
}

for (j = 0; j < server[i].naddrs; j++) {
peer[n].sockaddr = server[i].addrs[j].sockaddr;
peer[n].socklen = server[i].addrs[j].socklen;
peer[n].name = server[i].addrs[j].name;
peer[n].weight = server[i].weight;
peer[n].effective_weight = server[i].weight;
peer[n].current_weight = 0;
peer[n].max_fails = server[i].max_fails;
peer[n].fail_timeout = server[i].fail_timeout;
peer[n].down = server[i].down;
peer[n].server = server[i].name;

*peerp = &peer[n];
peerp = &peer[n].next;
n++;
}
}

peers->next = backup;

return NGX_OK;
}


/* an upstream implicitly defined by proxy_pass, etc. */

if (us->port == 0) {
ngx_log_error(NGX_LOG_EMERG, cf->log, 0,
"no port in upstream \"%V\" in %s:%ui",
&us->host, us->file_name, us->line);
return NGX_ERROR;
}

ngx_memzero(&u, sizeof(ngx_url_t));

u.host = us->host;
u.port = us->port;

//resolve the host name given to proxy_pass into one or more addresses
if (ngx_inet_resolve_host(cf->pool, &u) != NGX_OK) {
if (u.err) {
ngx_log_error(NGX_LOG_EMERG, cf->log, 0,
"%s in upstream \"%V\" in %s:%ui",
u.err, &us->host, us->file_name, us->line);
}

return NGX_ERROR;
}

n = u.naddrs;

peers = ngx_pcalloc(cf->pool, sizeof(ngx_stream_upstream_rr_peers_t));
if (peers == NULL) {
return NGX_ERROR;
}

peer = ngx_pcalloc(cf->pool, sizeof(ngx_stream_upstream_rr_peer_t) * n);
if (peer == NULL) {
return NGX_ERROR;
}

//implicit upstream: all peers get default weight 1 and default limits
peers->single = (n == 1);
peers->number = n;
peers->weighted = 0;
peers->total_weight = n;
peers->name = &us->host;

peerp = &peers->peer;

for (i = 0; i < u.naddrs; i++) {
peer[i].sockaddr = u.addrs[i].sockaddr;
peer[i].socklen = u.addrs[i].socklen;
peer[i].name = u.addrs[i].name;
peer[i].weight = 1;
peer[i].effective_weight = 1;
peer[i].current_weight = 0;
peer[i].max_fails = 1;
peer[i].fail_timeout = 10;
*peerp = &peer[i];
peerp = &peer[i].next;
}

us->peer.data = peers;

/* implicitly defined upstream has no backup servers */

return NGX_OK;
}

  1. 如果没有指定其他负载均衡算法,则此函数在配置解析阶段执行,由ngx_stream_upstream_init_main_conf函数调用执行。
  2. 将配置中的后端服务器使用peer连接起来,对backup服务器采取同样的操作。

2.2 获取一个选中的server

选择一个上游peer,主要由ngx_http_upstream_get_round_robin_peer实现(http模块与stream模块的实现逻辑相同,下面以http模块代码为例)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
/*
 * Select a backend for the upcoming upstream connection (the peer.get
 * callback of the round-robin balancer).  Fills pc->sockaddr/socklen/name
 * on success.  Returns NGX_OK when a peer was chosen, NGX_BUSY when no
 * peer (including backups) is currently usable.
 */
ngx_int_t
ngx_http_upstream_get_round_robin_peer(ngx_peer_connection_t *pc, void *data)
{
ngx_http_upstream_rr_peer_data_t *rrp = data;

ngx_int_t rc;
ngx_uint_t i, n;
ngx_http_upstream_rr_peer_t *peer;
ngx_http_upstream_rr_peers_t *peers;

ngx_log_debug1(NGX_LOG_DEBUG_HTTP, pc->log, 0,
"get rr peer, try: %ui", pc->tries);

pc->cached = 0;
pc->connection = NULL;

peers = rrp->peers;
//write-lock the peer list (a no-op unless an upstream zone is configured)
ngx_http_upstream_rr_peers_wlock(peers);
//only one server configured: use it directly, no selection needed
if (peers->single) {
peer = peers->peer;
//the only server is marked down: nothing usable
if (peer->down) {
goto failed;
}

if (peer->max_conns && peer->conns >= peer->max_conns) {
goto failed;
}

rrp->current = peer;

} else {

/* there are several peers */
// run the weighted round-robin selection
peer = ngx_http_upstream_get_peer(rrp);

if (peer == NULL) {
goto failed;
}

ngx_log_debug2(NGX_LOG_DEBUG_HTTP, pc->log, 0,
"get rr peer, current: %p %i",
peer, peer->current_weight);
}

// hand the selected address to the peer connection
pc->sockaddr = peer->sockaddr;
pc->socklen = peer->socklen;
pc->name = &peer->name;

//one more active connection on this peer
peer->conns++;

// release the list lock
ngx_http_upstream_rr_peers_unlock(peers);

return NGX_OK;

// failure path: fall back to the backup server list, if any
failed:

if (peers->next) {

ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pc->log, 0, "backup servers");

rrp->peers = peers->next;

//clear the 'tried' bitmap before retrying against the backup list
n = (rrp->peers->number + (8 * sizeof(uintptr_t) - 1))
/ (8 * sizeof(uintptr_t));

for (i = 0; i < n; i++) {
rrp->tried[i] = 0;
}

ngx_http_upstream_rr_peers_unlock(peers);

//recurse once: rrp->peers now points at the backup list
rc = ngx_http_upstream_get_round_robin_peer(pc, rrp);

if (rc != NGX_BUSY) {
return rc;
}

ngx_http_upstream_rr_peers_wlock(peers);
}

ngx_http_upstream_rr_peers_unlock(peers);

pc->name = peers->name;

return NGX_BUSY;
}


/*
 * Core of the smooth weighted round-robin selection.  Walks every peer,
 * skips the unusable ones, and picks the peer with the highest
 * current_weight, which is then lowered by the total effective weight so
 * that other peers win subsequent rounds in proportion to their weights.
 * Returns the chosen peer, or NULL when every peer had to be skipped.
 */
static ngx_http_upstream_rr_peer_t *
ngx_http_upstream_get_peer(ngx_http_upstream_rr_peer_data_t *rrp)
{
time_t now;
uintptr_t m;
ngx_int_t total;
ngx_uint_t i, n, p;
ngx_http_upstream_rr_peer_t *peer, *best;

now = ngx_time();

best = NULL;
total = 0;

#if (NGX_SUPPRESS_WARN)
p = 0;
#endif

for (peer = rrp->peers->peer, i = 0;
peer;
peer = peer->next, i++)
{
//locate this peer's bit in the 'tried' bitmap
n = i / (8 * sizeof(uintptr_t));
m = (uintptr_t) 1 << i % (8 * sizeof(uintptr_t));
//already tried in this round: skip
if (rrp->tried[n] & m) {
continue;
}
//marked down in the configuration: skip
if (peer->down) {
continue;
}
//too many recent failures within fail_timeout: skip
if (peer->max_fails
&& peer->fails >= peer->max_fails
&& now - peer->checked <= peer->fail_timeout)
{
continue;
}
//connection limit reached: skip
if (peer->max_conns && peer->conns >= peer->max_conns) {
continue;
}
//smooth WRR: raise current_weight by effective_weight each round
peer->current_weight += peer->effective_weight;
total += peer->effective_weight;

//let effective_weight recover gradually after earlier failures
if (peer->effective_weight < peer->weight) {
peer->effective_weight++;
}
//remember the peer with the highest current_weight so far
if (best == NULL || peer->current_weight > best->current_weight) {
best = peer;
p = i;
}
}
//every peer was skipped: selection failed
if (best == NULL) {
return NULL;
}

rrp->current = best;

n = p / (8 * sizeof(uintptr_t));
m = (uintptr_t) 1 << p % (8 * sizeof(uintptr_t));
//mark the winner as tried for this round
rrp->tried[n] |= m;
//the winner pays back 'total' so the others catch up in later rounds
best->current_weight -= total;
//restart the failure-accounting window if it has expired
if (now - best->checked > best->fail_timeout) {
best->checked = now;
}

return best;
}



2.3 释放server

释放上游函数主要由ngx_http_upstream_free_round_robin_peer实现

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
/*
 * Release a peer after a request finished (the peer.free callback).
 * Updates the peer's failure statistics and effective weight when the
 * request failed, decrements the connection count, and consumes one retry.
 */
void
ngx_http_upstream_free_round_robin_peer(ngx_peer_connection_t *pc, void *data,
ngx_uint_t state)
{
ngx_http_upstream_rr_peer_data_t *rrp = data;

time_t now;
ngx_http_upstream_rr_peer_t *peer;

ngx_log_debug2(NGX_LOG_DEBUG_HTTP, pc->log, 0,
"free rr peer %ui %ui", pc->tries, state);

/* TODO: NGX_PEER_KEEPALIVE */

peer = rrp->current;

ngx_http_upstream_rr_peers_rlock(rrp->peers);
ngx_http_upstream_rr_peer_lock(rrp->peers, peer);

// single-peer upstream: no statistics to keep, no retries possible
if (rrp->peers->single) {

peer->conns--;

ngx_http_upstream_rr_peer_unlock(rrp->peers, peer);
ngx_http_upstream_rr_peers_unlock(rrp->peers);

pc->tries = 0;
return;
}
//the request failed on this peer: record the failure
if (state & NGX_PEER_FAILED) {
now = ngx_time();

peer->fails++;
peer->accessed = now;
peer->checked = now;
//lower effective_weight so this peer is selected less often
if (peer->max_fails) {
peer->effective_weight -= peer->weight / peer->max_fails;

if (peer->fails >= peer->max_fails) {
ngx_log_error(NGX_LOG_WARN, pc->log, 0,
"upstream server temporarily disabled");
}
}

ngx_log_debug2(NGX_LOG_DEBUG_HTTP, pc->log, 0,
"free rr peer failed: %p %i",
peer, peer->effective_weight);
//never let effective_weight go negative
if (peer->effective_weight < 0) {
peer->effective_weight = 0;
}

} else {

/* mark peer live if check passed */

if (peer->accessed < peer->checked) {
peer->fails = 0;
}
}


//one connection fewer on this peer
peer->conns--;

// release the locks (no-ops unless an upstream zone is configured)
ngx_http_upstream_rr_peer_unlock(rrp->peers, peer);
ngx_http_upstream_rr_peers_unlock(rrp->peers);

// one retry consumed
if (pc->tries) {
pc->tries--;
}
}

3、总结

Nginx链接自定义静态库

1.准备静态库

1.1 编译静态库

使用gcc将源代码编译为目标文件

1
2
3

gcc -c test.c

-c选项只编译不链接,生成目标对象文件,编译成功会生成test.o文件,接着对test.o文件进行打包

1.2 打包静态库

使用ar命令将test.o打包为静态库文件

1
2
3

ar -crv libtest.a test.o

在定义静态库文件名时,需要遵从lib<名称>.a的命名规则。

2.配置静态库

将生成的libtest.a 复制到/usr/lib64目录下,如有头文件,则复制到/usr/include目录下

3.修改Nginx编译脚本

在auto/unix脚本中修改变量CORE_LIBS的值

1
2
3

CORE_LIBS="$CORE_LIBS -ltest"

4. 使用第三方开发库

4.1 安装第三方开发库

1
2
3

yum -y install xxx xxx-devel

安装成功后,在/usr/include/目录下能找到相应的头文件,在/usr/lib64/目录下能找到类似于libxxx.so的文件

4.2 在源码中包含第三方库的头文件,就可以使用相应的函数了

4.3 修改Nginx编译脚本

在auto/unix脚本中修改变量CORE_LIBS的值

1
2
3

CORE_LIBS="$CORE_LIBS -lxxx"

Nginx的rewrite模块详解

1.相关指令

1.1 if指令

1
Context: server, location

依据指定的条件决定是否执行 if 块语句中的内容

1.2 break指令

1
Context: server, location, if

停止执行 ngx_http_rewrite_module 的指令集,但是其他模块指令是不受影响的

1.3 rewrite指令

1
2
3
Context: server, location, if

rewrite regex replacement [flag];

rewrite 指令是使用指定的正则表达式regex来匹配请求的URI,如果匹配成功,则使用replacement更改URI。rewrite指令按照它们在配置文件中出现的顺序执行。可以使用flag标志来终止指令的进一步处理。如果替换字符串replacement以http://,https://或$scheme开头,则停止处理后续内容,并直接重定向返回给客户端。

1.4 return指令

1
2
3
4
5
6
Context: server, location, if

return code [text];
return code URL;
return URL;

停止处理并将指定的code码返回给客户端。 非标准code码 444 关闭连接而不发送响应报头

2.源码解析

2.1 rewrite指令源码解析

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139

/*
 * Handler of the "rewrite regex replacement [flag]" directive.  Compiles
 * the regex, compiles the replacement string into script codes appended to
 * lcf->codes, and records the flag (last/break/redirect/permanent).
 */
static char *
ngx_http_rewrite(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
ngx_http_rewrite_loc_conf_t *lcf = conf;

ngx_str_t *value;
ngx_uint_t last;
ngx_regex_compile_t rc;
ngx_http_script_code_pt *code;
ngx_http_script_compile_t sc;
ngx_http_script_regex_code_t *regex;
ngx_http_script_regex_end_code_t *regex_end;
u_char errstr[NGX_MAX_CONF_ERRSTR];

regex = ngx_http_script_start_code(cf->pool, &lcf->codes,
sizeof(ngx_http_script_regex_code_t));
if (regex == NULL) {
return NGX_CONF_ERROR;
}

ngx_memzero(regex, sizeof(ngx_http_script_regex_code_t));

value = cf->args->elts;

ngx_memzero(&rc, sizeof(ngx_regex_compile_t));

rc.pattern = value[1];
rc.err.len = NGX_MAX_CONF_ERRSTR;
rc.err.data = errstr;

/* TODO: NGX_REGEX_CASELESS */
//compile the regex; the handle and any named captures end up in the
//returned ngx_http_regex_t
regex->regex = ngx_http_regex_compile(cf, &rc);
if (regex->regex == NULL) {
return NGX_CONF_ERROR;
}
//first code of this rewrite: runs the match and computes the size of the
//replacement; a matching ngx_http_script_regex_end_code is appended below
regex->code = ngx_http_script_regex_start_code;
regex->uri = 1;
regex->name = value[1];

if (value[2].data[value[2].len - 1] == '?') {

/* the last "?" drops the original arguments */
value[2].len--;

} else {
regex->add_args = 1;
}

last = 0;

//replacement starting with a scheme means an external redirect
if (ngx_strncmp(value[2].data, "http://", sizeof("http://") - 1) == 0
|| ngx_strncmp(value[2].data, "https://", sizeof("https://") - 1) == 0
|| ngx_strncmp(value[2].data, "$scheme", sizeof("$scheme") - 1) == 0)
{
regex->status = NGX_HTTP_MOVED_TEMPORARILY;
regex->redirect = 1;
last = 1;
}

//optional flag argument: last | break | redirect | permanent
if (cf->args->nelts == 4) {
if (ngx_strcmp(value[3].data, "last") == 0) {
last = 1;

} else if (ngx_strcmp(value[3].data, "break") == 0) {
regex->break_cycle = 1;
last = 1;

} else if (ngx_strcmp(value[3].data, "redirect") == 0) {
regex->status = NGX_HTTP_MOVED_TEMPORARILY;
regex->redirect = 1;
last = 1;

} else if (ngx_strcmp(value[3].data, "permanent") == 0) {
regex->status = NGX_HTTP_MOVED_PERMANENTLY;
regex->redirect = 1;
last = 1;

} else {
ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
"invalid parameter \"%V\"", &value[3]);
return NGX_CONF_ERROR;
}
}

//compile the replacement string into length/value script codes
ngx_memzero(&sc, sizeof(ngx_http_script_compile_t));

sc.cf = cf;
sc.source = &value[2];
sc.lengths = &regex->lengths;
sc.values = &lcf->codes;
sc.variables = ngx_http_script_variables_count(&value[2]);
sc.main = regex;
sc.complete_lengths = 1;
sc.compile_args = !regex->redirect;

if (ngx_http_script_compile(&sc) != NGX_OK) {
return NGX_CONF_ERROR;
}

//the codes array may have been reallocated; refresh the pointer
regex = sc.main;

regex->size = sc.size;
regex->args = sc.args;

if (sc.variables == 0 && !sc.dup_capture) {
regex->lengths = NULL;
}

regex_end = ngx_http_script_add_code(lcf->codes,
sizeof(ngx_http_script_regex_end_code_t),
&regex);
if (regex_end == NULL) {
return NGX_CONF_ERROR;
}

regex_end->code = ngx_http_script_regex_end_code; //end-of-rewrite callback, pairs with the start code above
regex_end->uri = regex->uri;
regex_end->args = regex->args;
regex_end->add_args = regex->add_args;
regex_end->redirect = regex->redirect;

//a NULL code terminates script execution when the flag requires it
if (last) {
code = ngx_http_script_add_code(lcf->codes, sizeof(uintptr_t), &regex);
if (code == NULL) {
return NGX_CONF_ERROR;
}

*code = NULL;
}
//offset of the next code group: on a failed match the engine jumps over
//every code belonging to this rewrite
regex->next = (u_char *) lcf->codes->elts + lcf->codes->nelts
- (u_char *) regex;

return NGX_CONF_OK;
}

2.2 return指令源码解析

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
/*
 * Handler of the "return code [text|URL]" / "return URL" directive.
 * Validates the status code (or recognizes a bare redirect URL) and
 * appends an ngx_http_script_return_code_t to lcf->codes.
 */
static char *
ngx_http_rewrite_return(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
ngx_http_rewrite_loc_conf_t *lcf = conf;

u_char *p;
ngx_str_t *value, *v;
ngx_http_script_return_code_t *ret;
ngx_http_compile_complex_value_t ccv;

ret = ngx_http_script_start_code(cf->pool, &lcf->codes,
sizeof(ngx_http_script_return_code_t));
if (ret == NULL) {
return NGX_CONF_ERROR;
}

value = cf->args->elts;

ngx_memzero(ret, sizeof(ngx_http_script_return_code_t));
//register ngx_http_script_return_code as the runtime handler
ret->code = ngx_http_script_return_code;

p = value[1].data;

ret->status = ngx_atoi(p, value[1].len);

if (ret->status == (uintptr_t) NGX_ERROR) {

//not a number: accept "return URL" with a scheme as a 302 redirect
if (cf->args->nelts == 2
&& (ngx_strncmp(p, "http://", sizeof("http://") - 1) == 0
|| ngx_strncmp(p, "https://", sizeof("https://") - 1) == 0
|| ngx_strncmp(p, "$scheme", sizeof("$scheme") - 1) == 0))
{
ret->status = NGX_HTTP_MOVED_TEMPORARILY;
v = &value[1];

} else {
ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
"invalid return code \"%V\"", &value[1]);
return NGX_CONF_ERROR;
}

} else {

if (ret->status > 999) {
ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
"invalid return code \"%V\"", &value[1]);
return NGX_CONF_ERROR;
}

//"return code" with no body: nothing more to compile
if (cf->args->nelts == 2) {
return NGX_CONF_OK;
}

v = &value[2];
}

//compile the body/URL (it may contain variables) into ret->text
ngx_memzero(&ccv, sizeof(ngx_http_compile_complex_value_t));

ccv.cf = cf;
ccv.value = v;
ccv.complex_value = &ret->text;

if (ngx_http_compile_complex_value(&ccv) != NGX_OK) {
return NGX_CONF_ERROR;
}

return NGX_CONF_OK;
}

2.3 break指令源码解析

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18

/*
 * Handler of the "break" directive: appends a single NULL-terminated
 * code slot holding ngx_http_script_break_code, which makes the script
 * engine stop executing the rewrite-module code chain at runtime.
 */
static char *
ngx_http_rewrite_break(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    ngx_http_rewrite_loc_conf_t  *rlcf = conf;
    ngx_http_script_code_pt      *slot;

    /* reserve one code slot in this location's script code array */
    slot = ngx_http_script_start_code(cf->pool, &rlcf->codes,
                                      sizeof(uintptr_t));

    if (slot != NULL) {
        *slot = ngx_http_script_break_code;
        return NGX_CONF_OK;
    }

    return NGX_CONF_ERROR;
}

配合ngx_http_rewrite_handler读代码,可以看到如果设置一个code节点到codes数组,那么在ngx_http_rewrite_handler的for循环执行到该节点code的时候,就会把e->ip置为NULL,这样就直接退出while (*(uintptr_t *) e->ip){}循环

2.4 if指令源码解析

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120

/*
 * Handler of the "if (condition) { ... }" directive.  An if-block is
 * treated like an unnamed ("noname") nested location: a fresh loc_conf
 * context is created, the condition and the inner directives are compiled
 * into the parent's codes array, and the jump offset past the block is
 * patched in afterwards.
 */
static char *
ngx_http_rewrite_if(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
ngx_http_rewrite_loc_conf_t *lcf = conf;

void *mconf;
char *rv;
u_char *elts;
ngx_uint_t i;
ngx_conf_t save;
ngx_http_module_t *module;
ngx_http_conf_ctx_t *ctx, *pctx;
ngx_http_core_loc_conf_t *clcf, *pclcf;
ngx_http_script_if_code_t *if_code;
ngx_http_rewrite_loc_conf_t *nlcf;

//parsing an if{} works much like parsing a location{}: it gets its own ctx
ctx = ngx_pcalloc(cf->pool, sizeof(ngx_http_conf_ctx_t));
if (ctx == NULL) {
return NGX_CONF_ERROR;
}

pctx = cf->ctx; //configuration context of the enclosing block
ctx->main_conf = pctx->main_conf;
ctx->srv_conf = pctx->srv_conf;

ctx->loc_conf = ngx_pcalloc(cf->pool, sizeof(void *) * ngx_http_max_module);
if (ctx->loc_conf == NULL) {
return NGX_CONF_ERROR;
}

for (i = 0; ngx_modules[i]; i++) {
if (ngx_modules[i]->type != NGX_HTTP_MODULE) {
continue;
}

module = ngx_modules[i]->ctx;

if (module->create_loc_conf) {
/*
 * While parsing "if", nginx treats it as a location of type
 * "noname" and registers it in the parent's locations list via
 * ngx_http_add_location().  Viewing "if" as a location makes
 * sense because its configuration also participates in request
 * matching.
 */
mconf = module->create_loc_conf(cf);
if (mconf == NULL) {
return NGX_CONF_ERROR;
}

ctx->loc_conf[ngx_modules[i]->ctx_index] = mconf;
}
}

pclcf = pctx->loc_conf[ngx_http_core_module.ctx_index];//core loc conf of the location{} enclosing this if{}

clcf = ctx->loc_conf[ngx_http_core_module.ctx_index]; //core loc conf of the if{} itself
clcf->loc_conf = ctx->loc_conf;
clcf->name = pclcf->name;
clcf->noname = 1; //the if{} configuration is registered as a "noname" location

if (ngx_http_add_location(cf, &pclcf->locations, clcf) != NGX_OK) {
return NGX_CONF_ERROR;
}

//compile the condition expression into lcf->codes
if (ngx_http_rewrite_if_condition(cf, lcf) != NGX_CONF_OK) {
return NGX_CONF_ERROR;
}

if_code = ngx_array_push_n(lcf->codes, sizeof(ngx_http_script_if_code_t));
if (if_code == NULL) {
return NGX_CONF_ERROR;
}

if_code->code = ngx_http_script_if_code;

//remember the array base so we can detect a realloc after parsing the body
elts = lcf->codes->elts;


/* the inner directives must be compiled to the same code array */

nlcf = ctx->loc_conf[ngx_http_rewrite_module.ctx_index];
nlcf->codes = lcf->codes;


save = *cf;
cf->ctx = ctx;

//SIF = if inside server{}, LIF = if inside location{}
if (pclcf->name.len == 0) {
if_code->loc_conf = NULL;
cf->cmd_type = NGX_HTTP_SIF_CONF;

} else {
if_code->loc_conf = ctx->loc_conf;
cf->cmd_type = NGX_HTTP_LIF_CONF;
}

//parse the directives inside the if{} block
rv = ngx_conf_parse(cf, NULL);

*cf = save;

if (rv != NGX_CONF_OK) {
return rv;
}


//the codes array may have moved during parsing; relocate if_code
if (elts != lcf->codes->elts) {
if_code = (ngx_http_script_if_code_t *)
((u_char *) if_code + ((u_char *) lcf->codes->elts - elts));
}

//jump offset past the if{} body when the condition is false
if_code->next = (u_char *) lcf->codes->elts + lcf->codes->nelts
- (u_char *) if_code;

/* the code array belong to parent block */

nlcf->codes = NULL;

return NGX_CONF_OK;
}

2.5 set指令源码解析

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129

/* Syntax: set $variable value
 *
 * 1. Registers $variable in the variable system (cmcf->variables_keys->keys
 *    and cmcf->variables).
 *
 * 2. Appends script codes to lcf->codes:
 *    a. plain string value: ngx_http_script_value_code — the string is
 *       simply referenced, no copy is needed;
 *    b. value containing variables: ngx_http_script_complex_value_code
 *       (runs the lengths codes to size and allocate the result buffer),
 *       followed by the per-fragment value codes that evaluate and
 *       concatenate the pieces;
 *    finally ngx_http_script_set_var_code stores the assembled result
 *    into the request's variables[] slot.
 */
static char *
ngx_http_rewrite_set(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
ngx_http_rewrite_loc_conf_t *lcf = conf;

ngx_int_t index;
ngx_str_t *value;
ngx_http_variable_t *v;
ngx_http_script_var_code_t *vcode;
ngx_http_script_var_handler_code_t *vhcode;

value = cf->args->elts;

if (value[1].data[0] != '$') {//the variable name must start with '$'
ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
"invalid variable name \"%V\"", &value[1]);
return NGX_CONF_ERROR;
}

value[1].len--;
value[1].data++;
//register the name in cmcf->variables_keys->keys
v = ngx_http_add_variable(cf, &value[1], NGX_HTTP_VAR_CHANGEABLE);
if (v == NULL) {
return NGX_CONF_ERROR;
}
//add it to cmcf->variables and get back its index
index = ngx_http_get_variable_index(cf, &value[1]);
if (index == NGX_ERROR) {
return NGX_CONF_ERROR;
}

//the get_handler and data members of ngx_http_variable_t are assigned
//either here or later in ngx_http_variables_init_vars()
if (v->get_handler == NULL
&& ngx_strncasecmp(value[1].data, (u_char *) "http_", 5) != 0
&& ngx_strncasecmp(value[1].data, (u_char *) "sent_http_", 10) != 0
&& ngx_strncasecmp(value[1].data, (u_char *) "upstream_http_", 14) != 0
&& ngx_strncasecmp(value[1].data, (u_char *) "cookie_", 7) != 0
&& ngx_strncasecmp(value[1].data, (u_char *) "upstream_cookie_", 16)
!= 0
&& ngx_strncasecmp(value[1].data, (u_char *) "arg_", 4) != 0)
//a name with none of the prefixes above gets ngx_http_rewrite_var as its
//get_handler and the variable index as its data
{
//default handler; the prefixed variables ("http_", "sent_http_", ...)
//get their real get_handler in ngx_http_variables_init_vars()
v->get_handler = ngx_http_rewrite_var;
v->data = index;
}

/*
 * The script engine is a sequence of callbacks plus data (organized as
 * ngx_http_script_xxx_code_t structs, each representing one operation),
 * stored in lcf->codes.  The ngx_http_rewrite_loc_conf_t variable lcf is
 * tied to the current location, so the engine only runs when a request
 * hits this location.  E.g. with the config below, the codes built for
 * "set $file t_a;" run only for requests under /t, never for /:
 *
 *     location /  { root web; }
 *     location /t { set $file t_a; }
 */
//ngx_http_rewrite_handler() walks lcf->codes and invokes each
//ngx_http_script_xxx_code_t->code function in turn

//the value argument of "set $variable value" is compiled below

/*
 * Each "set" appends one value_code_t / var_code_t pair.  Setting the
 * same variable several times therefore produces several pairs, and at
 * request time (ngx_http_rewrite_handler) the last assignment wins:
 *
 *     location / {
 *         root web;
 *         set $file index1.html;
 *         index $file;
 *         set $file index2.html;
 *     }
 *
 * serves index2.html.
 */

/*
 * If value is a plain string, ngx_http_rewrite_value() reserves an
 * ngx_http_script_value_code_t in lcf->codes; if it contains variables,
 * an ngx_http_script_complex_value_code_t plus fragment codes instead.
 * The ngx_http_script_start_code() call further down then reserves the
 * ngx_http_script_var_code_t, so the value code and the variable code sit
 * next to each other in the codes array.
 */
if (ngx_http_rewrite_value(cf, lcf, &value[2]) != NGX_CONF_OK) {
return NGX_CONF_ERROR;
}

//a variable with a set_handler (e.g. $limit_rate) is stored through it
if (v->set_handler) {
vhcode = ngx_http_script_start_code(cf->pool, &lcf->codes,
sizeof(ngx_http_script_var_handler_code_t));
if (vhcode == NULL) {
return NGX_CONF_ERROR;
}

vhcode->code = ngx_http_script_var_set_handler_code;
vhcode->handler = v->set_handler;
vhcode->data = v->data;

return NGX_CONF_OK;
}

vcode = ngx_http_script_start_code(cf->pool, &lcf->codes,
sizeof(ngx_http_script_var_code_t));
if (vcode == NULL) {
return NGX_CONF_ERROR;
}

vcode->code = ngx_http_script_set_var_code;
vcode->index = (uintptr_t) index;

return NGX_CONF_OK;
}

3.总结

Nginx的upstream模块

1.相关配置

upstream模块的典型应用是反向代理,这里就以ngx_http_proxy_module模块为例。假定我们有如下这样的实例环境,客户端对服务器80端口的请求都被Nginx Proxy Server转发到另外两个真实的Nginx Web Server实例上进行处理(下图是实验环境,Web Server和Proxy Server都只是Nginx进程,并且运行在同一台服务器):

那么,Nginx Proxy Server的核心配置多半是这样:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
 
Filename : nginx.conf

http {

upstream load_balance {
server localhost:8001;
server localhost:8002;
}

server {
listen 80;
location / {
proxy_buffering off;
proxy_pass http://load_balance;
}
}
}

上面的*proxy_buffering off;*配置是为了禁用nginx反向代理的缓存功能,保证客户端的每次请求都被转发到后端真实服务器,以便我们每次跟踪分析的nginx执行流程更加简单且完整。而另外两个配置指令upstream和proxy_pass在此处显得更为重要,其中upstream配置指令的回调处理函数为ngx_http_upstream(),该函数除了申请内存、设置初始值等之外,最主要的动作就是切换配置上下文并调用ngx_conf_parse()函数继续进行配置解析:

2.源码解析

1
2
3
4
5
6
7
8
Filename : ngx_http_upstream.c
pcf = *cf;
cf->ctx = ctx;
cf->cmd_type = NGX_HTTP_UPS_CONF;

rv = ngx_conf_parse(cf, NULL);

if (uscf->servers == NULL) {

进入到upstream配置块内,最主要的配置指令也就是server,其对应的处理函数为ngx_http_upstream_server(),对于每一个后端真实服务器,除了其uri地址外,还有诸如down、weight、max_fails、fail_timeout、backup这样的可选参数,所有这些都需要ngx_http_upstream_server()函数来处理。
在ngx_http_upstream.c的第4173行下个断点,我们可以看到这里给出示例的解析结果:

另外一个重要配置指令proxy_pass主要出现在location配置上下文中,而其对应的处理函数为ngx_http_proxy_pass(),抹去该函数内的众多细节,我们重点关注两个赋值语句:

1
2
3
4
5
6
7
8
Filename : ngx_http_proxy_module.c
static char *
ngx_http_proxy_pass(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{

clcf->handler = ngx_http_proxy_handler;

plcf->upstream.upstream = ngx_http_upstream_add(cf, &u, 0);

上面片段代码里的第一个赋值语句给当前location的http处理设置回调函数,而第二个赋值语句则是查找(没有找到则会创建,比如如果配置文件中upstream指令出现在proxy_pass指令的后面)其对应的upstream配置,我们这里就一个名为load_balance的upstream,所以找到的配置就是它了:

前面曾提到,Nginx将对客户端的http请求处理分为多个阶段,而其中有个NGX_HTTP_FIND_CONFIG_PHASE阶段主要就是做配置查找处理,如果当前请求location设置了upstream,即回调函数指针clcf->handler不为空,则表示对该location的请求需要后端真实服务器来处理:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
Filename : ngx_http_core_module.c
ngx_int_t
ngx_http_core_find_config_phase(ngx_http_request_t *r,
ngx_http_phase_handler_t *ph)
{

ngx_http_update_location_config(r);

void
ngx_http_update_location_config(ngx_http_request_t *r)
{

if (clcf->handler) {
r->content_handler = clcf->handler;
}
}

在其它有location更新的情况下,比如redirect重定向location或named命名location或if条件location等,此时也会调用ngx_http_update_location_config()函数进行location配置更新。我们知道upstream模块的主要功能是产生响应数据,只不过这些响应数据来自后端真实服务器,所以在NGX_HTTP_CONTENT_PHASE 阶段的checker函数ngx_http_core_content_phase()内,我们可以看到在r->content_handler不为空的情况下会优先对r->content_handler函数指针进行回调:

1
2
3
4
5
6
7
8
9
10
11
12
Filename : ngx_http_core_module.c
ngx_int_t
ngx_http_core_content_phase(ngx_http_request_t *r,
ngx_http_phase_handler_t *ph)
{

if (r->content_handler) {
r->write_event_handler = ngx_http_request_empty_handler;
ngx_http_finalize_request(r, r->content_handler(r));
return NGX_OK;
}

如果r->content_handler不为空,即存在upstream,那么进入处理,注意第1397行直接返回NGX_OK,也即不再调用挂在该阶段的其它模块回调函数,所以说upstream模块的优先级是最高的。根据前面的回调赋值,调用r->content_handler()指针函数,实质上就是执行函数ngx_http_proxy_handler(),直到这里,我们才真正走进upstream代理模块的处理逻辑里。

3.回调函数

对于任何一个Upstream模块而言,最核心的实现主要是7个回调函数,upstream代理模块自然也不例外,它实现并注册了这7个回调函数:

回调指针 函数功能 upstream代理模块
create_request 根据nginx与后端服务器通信协议(比如HTTP、Memcache),将客户端的HTTP请求信息转换为对应的发送到后端服务器的真实请求 ngx_http_proxy_create_request 由于nginx与后端服务器通信协议也为HTTP,所以直接拷贝客户端的请求头、请求体(如果有)到变量r->upstream->request_bufs内。
process_header 根据nginx与后端服务器通信协议,将后端服务器返回的头部信息转换为对客户端响应的HTTP响应头。 ngx_http_proxy_process_status_line 此时后端服务器返回的头部信息已经保存在变量r->upstream->buffer内,将这串字符串解析为HTTP响应头存储到变量r->upstream->headers_in内。
input_filter_init 根据前面获得的后端服务器返回的头部信息,为进一步处理后端服务器将返回的响应体做初始准备工作。 ngx_http_proxy_input_filter_init 根据已解析的后端服务器返回的头部信息,设置需进一步处理的后端服务器将返回的响应体的长度,该值保存在变量r->upstream->length内。
input_filter 正式处理后端服务器返回的响应体 ngx_http_proxy_non_buffered_copy_filter 本次收到的响应体数据长度为bytes,数据长度存储在r->upstream->buffer内,把它加入到r->upstream->out_bufs响应数据链等待发送给客户端。
finalize_request 正常结束与后端服务器的交互,比如剩余待取数据长度为0或读到EOF等,之后就会调用该函数。由于nginx会自动完成与后端服务器交互的清理工作,所以该函数一般仅做下日志,标识响应正常结束。 ngx_http_proxy_finalize_request 记录一条日志,标识正常结束与后端服务器的交互,然后函数返回。
reinit_request 对交互重新初始化,比如当nginx发现一台后端服务器出错无法正常完成处理,需要尝试请求另一台后端服务器时就会调用该函数。 ngx_http_proxy_reinit_request设置初始值,设置回调指针,处理比较简单。
abort_request 异常结束与后端服务器的交互后就会调用该函数。大部分情况下,该函数仅做下日志,标识响应异常结束。 ngx_http_proxy_abort_request记录一条日志,标识异常结束与后端服务器的交互,然后函数返回。
上表格中前面5个函数执行的先后次序如下图所示,由于在Client/Proxy/Server之间,一次请求/响应数据可以发送多次(下图中只画出一次就发送完毕的情况),所以下图中对应的函数也可能被执行多次,不过一般情况下,这5个函数执行的先后次序就是这样了。

4.总结

这些回调函数如何夹杂到nginx中被调用并不需要完全搞清楚,要写一个upstream模块,我们只要实现上面提到的这7个函数即可,当然,可以看到最主要的也就是create_request、process_header和input_filter这三个回调,它们实现从HTTP协议到Nginx与后端服务器之间交互协议的来回转换,使得在用户看来,他访问的就是一台功能完整的Web服务器,而也许事实上,显示在他面前的数据来自Memcache或别的什么服务器。

参考文献:

nginx核心讲解

Nginx缓存详解

1. 相关配置

1.1 配置指令

Nginx缓存由proxy_cache_path指令开启

1
proxy_cache_path D:\output levels=1:2 keys_zone=my_cache:10m max_size=2g inactive=60m use_temp_path=off;

对于每个参数的具体含义可以参考nginx官方文档;缓存文件名由proxy_cache_key指令指定,而启用缓存则需要proxy_cache指令,如下例:

1
proxy_cache my_cache;

对于该指令的具体用法可以参考nginx官方文档

1
proxy_cache_valid 200 1h;

需要注意的是,如果没有这条指令,nginx将不会缓存上游的数据。对于该指令的具体用法可以参考nginx官方文档

2. 源码解析

Nginx与文件缓存相关的代码在src/http/ngx_http_file_cache.c

2.1 关键结构体

与缓存相关的结构体有ngx_path_t、ngx_http_file_cache_sh_t、ngx_http_file_cache_s、ngx_http_file_cache_node_t、ngx_http_file_cache_header_t,来看看这几个结构体的定义

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158

/* Describes one on-disk cache path as configured by the proxy_cache_path
 * directive: the root directory plus the levels=X:Y sub-directory layout. */
typedef struct {
ngx_str_t name; // configured root directory (first argument of proxy_cache_path)
size_t len; // total length of the level sub-path, including its '/' separators (e.g. 5 for levels=1:2)
size_t level[3]; // per-level name lengths from "levels=X:Y[:Z]"; a 0 entry terminates the list

ngx_path_manager_pt manager; // non-NULL enables the cache manager process for this path
ngx_path_loader_pt loader; // non-NULL enables the cache loader process for this path
void *data;

u_char *conf_file; // path of the nginx configuration file that declared this entry
ngx_uint_t line; // line within conf_file where it was declared (for diagnostics)
} ngx_path_t;

/* Shared-memory part of a cache zone; lives in the shm segment managed by
 * the slab allocator so that every worker process sees the same state. */
typedef struct {
ngx_rbtree_t rbtree; // lookup tree of cache nodes, keyed by the leading bytes of the md5 key
ngx_rbtree_node_t sentinel; // nil node of the rbtree
ngx_queue_t queue; // LRU queue; recently used nodes are re-inserted at the head
ngx_atomic_t cold; // set while the cache loader has not yet populated this zone
ngx_atomic_t loading; // set while the cache loader process is running
off_t size; // total size currently accounted to this cache (in bsize units)
ngx_uint_t count;
ngx_uint_t watermark;
} ngx_http_file_cache_sh_t;

/* Per-zone file cache descriptor: one instance per proxy_cache_path. */
struct ngx_http_file_cache_s {
ngx_http_file_cache_sh_t *sh; // shared-memory state (rbtree + LRU queue) visible to all workers
ngx_slab_pool_t *shpool; /* slab allocator that manages the shared memory; all cache node allocations come from it */

ngx_path_t *path; /* cache directory layout; allocated in ngx_http_file_cache_set_slot */

off_t max_size; // upper bound on the total size of cached files in this zone (max_size= parameter)
size_t bsize; // file-system block size used when accounting fs_size

time_t inactive; /* inactivity threshold that triggers LRU expiration (inactive= parameter) */

time_t fail_time;

ngx_uint_t files;
ngx_uint_t loader_files; /* threshold: after loading this many files, the loader process naps for loader_sleep */
ngx_msec_t last;
ngx_msec_t loader_sleep; /* how long the loader sleeps between batches, default 200 ms */
ngx_msec_t loader_threshold;

ngx_uint_t manager_files;
ngx_msec_t manager_sleep; /* how long the manager sleeps after processing a batch of nodes, default 200 ms */
ngx_msec_t manager_threshold;

ngx_shm_zone_t *shm_zone; /* backing shared memory zone (keys_zone= parameter) */

ngx_uint_t use_temp_path; /* whether responses are first written to a temp directory (use_temp_path=) */
/* unsigned use_temp_path:1 */
};

/* One cached entry as stored in shared memory: simultaneously a node of
 * the lookup rbtree and a link in the LRU expiration queue. */
typedef struct {
ngx_rbtree_node_t node; /* rbtree node; node.key holds the leading bytes of the md5 key */
ngx_queue_t queue; /* link in the LRU queue used for expiration */

u_char key[NGX_HTTP_CACHE_KEY_LEN
- sizeof(ngx_rbtree_key_t)]; // remainder of the md5 key not stored in node.key

unsigned count:20; // number of client requests currently referencing this node
unsigned uses:10; // how many times the entry has been requested (compared against min_uses)
unsigned valid_msec:10;
unsigned error:10;
unsigned exists:1; // the cache file exists on disk
unsigned updating:1; // the cached response is being refreshed from upstream
unsigned deleting:1; // the cache file is being deleted
unsigned purged:1;
/* 10 unused bits */

ngx_file_uniq_t uniq;
time_t expire; // when the "inactive" timer expires for this node
time_t valid_sec;
size_t body_start;
off_t fs_size;
ngx_msec_t lock_time;
} ngx_http_file_cache_node_t;

/* On-disk header written at the very beginning of every cache file; the
 * full file layout is:
 *   [ngx_http_file_cache_header_t]["\nKEY: "][orig_key]["\n"][header][body]
 */
typedef struct {
ngx_uint_t version; // cache file format version, checked against NGX_HTTP_CACHE_VERSION on read
time_t valid_sec;
time_t updating_sec;
time_t error_sec;
time_t last_modified;
time_t date;
uint32_t crc32; // crc32 of the cache key, used to detect md5 collisions on read
u_short valid_msec;
u_short header_start; /* offset of the http header inside the cache file */
u_short body_start;
u_char etag_len;
u_char etag[NGX_HTTP_CACHE_ETAG_LEN];
u_char vary_len;
u_char vary[NGX_HTTP_CACHE_VARY_LEN];
u_char variant[NGX_HTTP_CACHE_KEY_LEN];
} ngx_http_file_cache_header_t;

/* Per-request cache context (r->cache): everything needed to look up,
 * read, and update the cached response for a single request. */
struct ngx_http_cache_s {
ngx_file_t file; /* cache file descriptor structure */
ngx_array_t keys; /* component values of the proxy_cache_key for this request */
uint32_t crc32;
u_char key[NGX_HTTP_CACHE_KEY_LEN]; /* md5 of the concatenated key values */
u_char main[NGX_HTTP_CACHE_KEY_LEN]; /* copy of key, kept unchanged */

ngx_file_uniq_t uniq;
time_t valid_sec;
time_t updating_sec;
time_t error_sec;
time_t last_modified;
time_t date;

ngx_str_t etag;
ngx_str_t vary;
u_char variant[NGX_HTTP_CACHE_KEY_LEN];

size_t header_start; /* offset of the http header inside the cache file */
size_t body_start; /* offset of the response body inside the cache file */
off_t length; /* size of the cache file, see ngx_http_file_cache_open */
off_t fs_size;

ngx_uint_t min_uses; // proxy_cache_min_uses: requests required before the cache is used
ngx_uint_t error;
ngx_uint_t valid_msec;
ngx_uint_t vary_tag;

ngx_buf_t *buf; /* buffer holding the cache file header */

ngx_http_file_cache_t *file_cache;
ngx_http_file_cache_node_t *node; // shared-memory node; created and assigned in ngx_http_file_cache_exists

#if (NGX_THREADS || NGX_COMPAT)
ngx_thread_task_t *thread_task;
#endif

ngx_msec_t lock_timeout;
ngx_msec_t lock_age;
ngx_msec_t lock_time;
ngx_msec_t wait_time;

ngx_event_t wait_event;

unsigned lock:1;
unsigned waiting:1;

unsigned updated:1;
unsigned updating:1;
unsigned exists:1;
unsigned temp_file:1;
unsigned purged:1;
unsigned reading:1;
unsigned secondary:1;
unsigned background:1;

unsigned stale_updating:1;
unsigned stale_error:1;
};

部分字段含义见注释

2.2 生成标记缓存文件的key

生成标记缓存文件的key由ngx_http_file_cache_create_key函数实现,我们来看看具体实现

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37

/*
 * Build the cache key for request r: concatenate the proxy_cache_key
 * component values, hash them (md5 for the file name, crc32 for collision
 * checks) and compute header_start, the offset where the http header will
 * live inside the cache file:
 *   [ngx_http_file_cache_header_t]["\nKEY: "][orig_key]["\n"][header][body]
 */
void
ngx_http_file_cache_create_key(ngx_http_request_t *r)
{
size_t len;
ngx_str_t *key;
ngx_uint_t i;
ngx_md5_t md5;
ngx_http_cache_t *c;

c = r->cache;

len = 0;

ngx_crc32_init(c->crc32);
ngx_md5_init(&md5);

/* feed every component of the configured key into both hashes */
key = c->keys.elts;
for (i = 0; i < c->keys.nelts; i++) {
ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
"http cache key: \"%V\"", &key[i]);

len += key[i].len;

ngx_crc32_update(&c->crc32, key[i].data, key[i].len);
ngx_md5_update(&md5, key[i].data, key[i].len);
}

/* header offset = fixed file header + "\nKEY: " marker + key text + '\n' */
c->header_start = sizeof(ngx_http_file_cache_header_t)
+ sizeof(ngx_http_file_cache_key) + len + 1;

ngx_crc32_final(c->crc32);
ngx_md5_final(c->key, &md5);

/* keep an untouched copy of the md5 key in c->main */
ngx_memcpy(c->main, c->key, NGX_HTTP_CACHE_KEY_LEN);
}

该函数实现较简单,主要是计算proxy_cache_key指令值的md5值,并保存,然后初始化header_start成员的值,这个地方需要注意一下缓存文件头部信息
[ngx_http_file_cache_header_t][“\nKEY: “][orig_key][“\n”][header][body]

2.3 缓存文件名生成

生成缓存文件名主要由ngx_http_file_cache_name实现,现在来看看源码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35

/*
 * Build the full path of the cache file for request r:
 *   <path->name>/<level dirs>/<32 hex chars of the md5 key>
 * The level directories themselves are back-filled afterwards by
 * ngx_create_hashed_filename(); this function only reserves room for them.
 * Returns NGX_OK on success, NGX_ERROR if allocation fails.
 */
static ngx_int_t
ngx_http_file_cache_name(ngx_http_request_t *r, ngx_path_t *path)
{
u_char *p;
ngx_http_cache_t *c;

/* the name is generated once per request; reuse it if already present */
c = r->cache;

if (c->file.name.len) {
return NGX_OK;
}

/* root + '/' + level sub-path + 32 hex digits of the md5 key */
c->file.name.len = path->name.len + 1 + path->len
+ 2 * NGX_HTTP_CACHE_KEY_LEN;

c->file.name.data = ngx_pnalloc(r->pool, c->file.name.len + 1);
if (c->file.name.data == NULL) {
return NGX_ERROR;
}

ngx_memcpy(c->file.name.data, path->name.data, path->name.len);

/* skip over the (not yet written) level directories and dump the key */
p = c->file.name.data + path->name.len + 1 + path->len;
p = ngx_hex_dump(p, c->key, NGX_HTTP_CACHE_KEY_LEN);
*p = '\0';

/* fill in the "/X/YZ" level components reserved above */
ngx_create_hashed_filename(path, c->file.name.data, c->file.name.len);

ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
"cache file: \"%s\"", c->file.name.data);

return NGX_OK;
}

  1. 该函数首先判断当前缓存文件名是否已经生成成功,若已生成,则直接返回,代码如下:
1
2
3
if (c->file.name.len) {
return NGX_OK;
}
  1. 接下来计算缓存文件名长度,其中path->name.len为proxy_cache_path指令第一个参数的长度,path->len为levels长度,比如levels=1:2则path->len为5,包含两个/,代码如下:
1
2
c->file.name.len = path->name.len + 1 + path->len
+ 2 * NGX_HTTP_CACHE_KEY_LEN;
  1. 接着为缓存文件名申请内存,代码如下:
1
c->file.name.data = ngx_pnalloc(r->pool, c->file.name.len + 1);
  1. 接着将proxy_cache_path指令设置的路径复制到c->file.name.data最前端,完成后c->file.name.data的值为:D:\output,代码如下:
1
ngx_memcpy(c->file.name.data, path->name.data, path->name.len);
  1. 接着将p指向c->file.name.data偏移 path->name.len + 1 + path->len的位置处,这样做的目的是准备生成32位长的md5文件名,并预留level设置的目录
1
2
3
p = c->file.name.data + path->name.len + 1 + path->len;
p = ngx_hex_dump(p, c->key, NGX_HTTP_CACHE_KEY_LEN);
*p = '\0';

这一步完成之后c->file.name.data的值为D:\output屯屯屯md5(proxy_cache_key)(其中"屯屯屯"表示为levels目录预留的、尚未初始化的字节)

  1. 调用ngx_create_hashed_filename函数补全第5步预留的level目录
1
ngx_create_hashed_filename(path, c->file.name.data, c->file.name.len);

2.4 生成由levels参数指定的目录层级

生成由levels参数指定的目录层级由ngx_create_hashed_filename实现,现在来看看源码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
/*
 * Back-fill the "levels" directory components of a hashed file name.
 * On entry 'file' is "<root><reserved bytes><md5 hex name>", where the
 * reserved bytes were left for the level sub-path; each level component
 * is copied from the tail of the hex name itself, separated by '/'.
 */
void
ngx_create_hashed_filename(ngx_path_t *path, u_char *file, size_t len)
{
size_t i, level;
ngx_uint_t n;

i = path->name.len + 1; /* write position just past "<root>/" */

file[path->name.len + path->len] = '/'; /* slash before the hex name */

for (n = 0; n < NGX_MAX_PATH_LEVEL; n++) {
level = path->level[n];

if (level == 0) {
break; /* unused levels are zero, terminating the list */
}

/* take 'level' chars from the end of the hex name as this directory */
len -= level;
file[i - 1] = '/';
ngx_memcpy(&file[i], &file[len], level);
i += level + 1;
}
}
  1. 从上一节中我们知道参数file的值为D:\output屯屯屯md5(proxy_cache_key),该函数首先用一个变量i保存path长度加1,这个设计非常巧妙,在后续中会使用到,代码如下:
1
i = path->name.len + 1;
  1. 接着修改file的值,在层级目录后添加一个反斜杠,修改后为D:\output屯屯屯/md5(proxy_cache_key)
1
file[path->name.len + path->len]  = '/';
  1. 接下来进入一个for循环,填充file中的屯屯屯,在第一次循环中首先获取levels=1:2中的1,接着用变量len减去变量level,接着在output后添加一个’/‘,然后将file中从len位置复制1个字符到*output/*后,修改i的值;第二次循环,首先获取levels=1:2中的2,接着用len减去2,接着在第一次循环复制的字符后添加一个’/‘,然后把file从len处复制2个字符到上一步的’/‘后,修改i的值;第三次跳出循环,至此填充完成,代码如下:
1
2
3
4
5
6
7
8
9
10
11
12
for (n = 0; n < NGX_MAX_PATH_LEVEL; n++) {
level = path->level[n];

if (level == 0) {
break;
}

len -= level;
file[i - 1] = '/';
ngx_memcpy(&file[i], &file[len], level);
i += level + 1;
}

2.5 从红黑树中查找缓存节点

缓存key跟缓存文件名生成好之后,紧接着根据生成好的key从红黑树中查找,若不存在则插入,找到则返回对应的缓存节点,这个功能由ngx_http_file_cache_exists函数实现,我们看看具体实现。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113

/*
 * Look up (and if necessary create) the shared-memory node for the key in
 * c->key.  Returns NGX_OK when the entry may be served from the cache,
 * NGX_AGAIN when min_uses has not been reached yet, NGX_DECLINED for a
 * brand-new or expired entry, NGX_ERROR on allocation failure.
 */
static ngx_int_t
ngx_http_file_cache_exists(ngx_http_file_cache_t *cache, ngx_http_cache_t *c)
{
ngx_int_t rc;
ngx_http_file_cache_node_t *fcn;

ngx_shmtx_lock(&cache->shpool->mutex);

fcn = c->node;//if no node is found below, a new one is created

if (fcn == NULL) {
fcn = ngx_http_file_cache_lookup(cache, c->key); //look the node up in the rbtree using c->key
}

if (fcn) { //the key already has a node in the cache
ngx_queue_remove(&fcn->queue);

//c->node is NULL only the first time this request reaches here, so the
//counters are bumped once per request: count reflects how many client
//requests are currently holding this cache entry
if (c->node == NULL) { //first use of this node by this request: take a reference
fcn->uses++;
fcn->count++;
}

if (fcn->error) {

if (fcn->valid_sec < ngx_time()) {
goto renew; //the cached (error) entry has expired
}

rc = NGX_OK;

goto done;
}

if (fcn->exists || fcn->uses >= c->min_uses) { //the cache file exists, or the entry was requested at least min_uses times
//fcn->exists tells whether the cache file is on disk; e.g. with
//proxy_cache_min_uses 3 the 3rd request fetches from upstream and
//ngx_http_file_cache_update sets exists=1, so only the 4th request
//sees it copied into c->exists here
c->exists = fcn->exists;
if (fcn->body_start) {
c->body_start = fcn->body_start;
}

rc = NGX_OK;

goto done;
}

//e.g. with proxy_cache_min_uses 5, the first 4 requests must still go to
//the upstream; only the 5th may be served from the cache
rc = NGX_AGAIN;

goto done;
}

//not found: allocate a new node below and insert it into the
//cache->sh->rbtree lookup tree
fcn = ngx_slab_calloc_locked(cache->shpool,
sizeof(ngx_http_file_cache_node_t));
if (fcn == NULL) {
ngx_shmtx_unlock(&cache->shpool->mutex);

(void) ngx_http_file_cache_forced_expire(cache);

ngx_shmtx_lock(&cache->shpool->mutex);

fcn = ngx_slab_calloc_locked(cache->shpool,
sizeof(ngx_http_file_cache_node_t));
if (fcn == NULL) {
ngx_log_error(NGX_LOG_ALERT, ngx_cycle->log, 0,
"could not allocate node%s", cache->shpool->log_ctx);
rc = NGX_ERROR;
goto failed;
}
}

ngx_memcpy((u_char *) &fcn->node.key, c->key, sizeof(ngx_rbtree_key_t));

ngx_memcpy(fcn->key, &c->key[sizeof(ngx_rbtree_key_t)],
NGX_HTTP_CACHE_KEY_LEN - sizeof(ngx_rbtree_key_t));

ngx_rbtree_insert(&cache->sh->rbtree, &fcn->node); //add the node to the rbtree

fcn->uses = 1;
fcn->count = 1;

renew:

rc = NGX_DECLINED; //first request for this uri (node just created), or an expired entry being reset to defaults

fcn->valid_msec = 0;
fcn->error = 0;
fcn->exists = 0;
fcn->valid_sec = 0;
fcn->uniq = 0;
fcn->body_start = 0;
fcn->fs_size = 0;

done:

fcn->expire = ngx_time() + cache->inactive;

ngx_queue_insert_head(&cache->sh->queue, &fcn->queue); //(re)insert the node at the head of the LRU queue

c->uniq = fcn->uniq;//file uniq; assigned in ngx_http_file_cache_update
c->error = fcn->error;
c->node = fcn; //remember the node on the request's cache context

failed:

ngx_shmtx_unlock(&cache->shpool->mutex);

return rc;
}

2.6 打开缓存文件

上一节中介绍了如何从红黑树中查找缓存节点,找到缓存节点之后,就要需要根据缓存节点中的缓存文件路径去打开缓存文件了,Nginx使用ngx_http_file_cache_open函数实现,接下来我们来看看实现:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
/*
 * Try to serve request r from the file cache: find/create the shared cache
 * node, build the cache file name, open the file, and read its header via
 * ngx_http_file_cache_read().  Returns that function's result on a cache
 * hit path, NGX_DECLINED / NGX_HTTP_CACHE_SCARCE when the upstream must be
 * consulted, NGX_AGAIN while waiting on another request, NGX_ERROR on
 * failure.
 */
ngx_int_t
ngx_http_file_cache_open(ngx_http_request_t *r)
{
ngx_int_t rc, rv;
ngx_uint_t test;
ngx_http_cache_t *c;
ngx_pool_cleanup_t *cln;
ngx_open_file_info_t of;
ngx_http_file_cache_t *cache;
ngx_http_core_loc_conf_t *clcf;

c = r->cache;

/* still waiting for the cache lock held by another request */
if (c->waiting) {
return NGX_AGAIN;
}

/* an (aio) read is already in progress; continue it */
if (c->reading) {
return ngx_http_file_cache_read(r, c);
}

cache = c->file_cache;

/* first pass for this request: register a cleanup handler that releases
 * the node reference taken below */
if (c->node == NULL) {
cln = ngx_pool_cleanup_add(r->pool, 0);
if (cln == NULL) {
return NGX_ERROR;
}

cln->handler = ngx_http_file_cache_cleanup;
cln->data = c;
}

rc = ngx_http_file_cache_exists(cache, c);

ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
"http file cache exists: %i e:%d", rc, c->exists);

if (rc == NGX_ERROR) {
return rc;
}

/* min_uses not reached yet: the request must go to the upstream */
if (rc == NGX_AGAIN) {
return NGX_HTTP_CACHE_SCARCE;
}

if (rc == NGX_OK) {

if (c->error) {
return c->error;
}

c->temp_file = 1;
/* only probe the file on disk if the node says it exists */
test = c->exists ? 1 : 0;
rv = NGX_DECLINED;

} else { /* rc == NGX_DECLINED */

/* new/expired node: on a cold cache the file may still be on disk */
test = cache->sh->cold ? 1 : 0;

if (c->min_uses > 1) {

if (!test) {
return NGX_HTTP_CACHE_SCARCE;
}

rv = NGX_HTTP_CACHE_SCARCE;

} else {
c->temp_file = 1;
rv = NGX_DECLINED;
}
}

if (ngx_http_file_cache_name(r, cache->path) != NGX_OK) {
return NGX_ERROR;
}

if (!test) {
goto done;
}

clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

ngx_memzero(&of, sizeof(ngx_open_file_info_t));

of.uniq = c->uniq;
of.valid = clcf->open_file_cache_valid;
of.min_uses = clcf->open_file_cache_min_uses;
of.events = clcf->open_file_cache_events;
of.directio = NGX_OPEN_FILE_DIRECTIO_OFF;
of.read_ahead = clcf->read_ahead;

if (ngx_open_cached_file(clcf->open_file_cache, &c->file.name, &of, r->pool)
!= NGX_OK)
{
switch (of.err) {

case 0:
return NGX_ERROR;

/* a missing file is not an error: fall through to the upstream */
case NGX_ENOENT:
case NGX_ENOTDIR:
goto done;

default:
ngx_log_error(NGX_LOG_CRIT, r->connection->log, of.err,
ngx_open_file_n " \"%s\" failed", c->file.name.data);
return NGX_ERROR;
}
}

ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
"http file cache fd: %d", of.fd);

c->file.fd = of.fd;
c->file.log = r->connection->log;
c->uniq = of.uniq;
c->length = of.size;
/* round the file size up to whole file-system blocks */
c->fs_size = (of.fs_size + cache->bsize - 1) / cache->bsize;

/* buffer for the cache file header (read up to body_start bytes) */
c->buf = ngx_create_temp_buf(r->pool, c->body_start);
if (c->buf == NULL) {
return NGX_ERROR;
}

return ngx_http_file_cache_read(r, c);

done:

if (rv == NGX_DECLINED) {
return ngx_http_file_cache_lock(r, c);
}

return rv;

}

2.7 读缓存文件

若缓存文件存在,则会读取缓存文件头部,并根据读取出的头部信息进行下一步操作,nginx使用ngx_http_file_cache_read函数实现这个功能,我们来看一下具体实现

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
/*
 * Read and validate the cache file header, then update the request's cache
 * context from it.  Returns NGX_OK for a fresh entry, NGX_DECLINED when the
 * file is unusable (too small, version/crc mismatch, ...), an
 * NGX_HTTP_CACHE_* code for stale entries, or a negative value from the
 * (possibly asynchronous) read.
 */
static ngx_int_t
ngx_http_file_cache_read(ngx_http_request_t *r, ngx_http_cache_t *c)
{
u_char *p;
time_t now;
ssize_t n;
ngx_str_t *key;
ngx_int_t rc;
ngx_uint_t i;
ngx_http_file_cache_t *cache;
ngx_http_file_cache_header_t *h;

n = ngx_http_file_cache_aio_read(r, c);

if (n < 0) {
return n;
}

//for how the file was written see ngx_http_upstream_process_header;
//the cache file starts with:
//[ngx_http_file_cache_header_t]["\nKEY: "][orig_key]["\n"][header]

if ((size_t) n < c->header_start) {
ngx_log_error(NGX_LOG_CRIT, r->connection->log, 0,
"cache file \"%s\" is too small", c->file.name.data);
return NGX_DECLINED;
}

h = (ngx_http_file_cache_header_t *) c->buf->pos;

if (h->version != NGX_HTTP_CACHE_VERSION) {
ngx_log_error(NGX_LOG_INFO, r->connection->log, 0,
"cache file \"%s\" version mismatch", c->file.name.data);
return NGX_DECLINED; //NGX_DECLINED clears r->cached, so the response is fetched from the upstream again
}

/* crc32 and key text are compared to rule out md5 collisions */
if (h->crc32 != c->crc32 || (size_t) h->header_start != c->header_start) {
ngx_log_error(NGX_LOG_CRIT, r->connection->log, 0,
"cache file \"%s\" has md5 collision", c->file.name.data);
return NGX_DECLINED;
}

p = c->buf->pos + sizeof(ngx_http_file_cache_header_t)
+ sizeof(ngx_http_file_cache_key);

key = c->keys.elts;
for (i = 0; i < c->keys.nelts; i++) {
if (ngx_memcmp(p, key[i].data, key[i].len) != 0) {
ngx_log_error(NGX_LOG_CRIT, r->connection->log, 0,
"cache file \"%s\" has md5 collision",
c->file.name.data);
return NGX_DECLINED;
}

p += key[i].len;
}

if ((size_t) h->body_start > c->body_start) {
ngx_log_error(NGX_LOG_CRIT, r->connection->log, 0,
"cache file \"%s\" has too long header",
c->file.name.data);
return NGX_DECLINED;
}

if (h->vary_len > NGX_HTTP_CACHE_VARY_LEN) {
ngx_log_error(NGX_LOG_CRIT, r->connection->log, 0,
"cache file \"%s\" has incorrect vary length",
c->file.name.data);
return NGX_DECLINED;
}

/* Vary handling: recompute the variant hash and, on mismatch, reopen
 * the cache under the secondary (variant) key */
if (h->vary_len) {
ngx_http_file_cache_vary(r, h->vary, h->vary_len, c->variant);

if (ngx_memcmp(c->variant, h->variant, NGX_HTTP_CACHE_KEY_LEN) != 0) {
ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
"http file cache vary mismatch");
return ngx_http_file_cache_reopen(r, c);
}
}

c->buf->last += n;

/* copy the on-disk header fields into the request's cache context */
c->valid_sec = h->valid_sec;
c->updating_sec = h->updating_sec;
c->error_sec = h->error_sec;
c->last_modified = h->last_modified;
c->date = h->date;
c->valid_msec = h->valid_msec;
c->body_start = h->body_start;
c->etag.len = h->etag_len;
c->etag.data = h->etag;

r->cached = 1;

cache = c->file_cache;

/* on a cold cache the shared node may not know about this file yet;
 * account for it now */
if (cache->sh->cold) {

ngx_shmtx_lock(&cache->shpool->mutex);

if (!c->node->exists) {
c->node->uses = 1;
c->node->body_start = c->body_start;
c->node->exists = 1;
c->node->uniq = c->uniq;
c->node->fs_size = c->fs_size;

cache->sh->size += c->fs_size;
}

ngx_shmtx_unlock(&cache->shpool->mutex);
}

now = ngx_time();

/* expired entry: either another request is already refreshing it
 * (UPDATING) or this request takes over the refresh (STALE) */
if (c->valid_sec < now) {
c->stale_updating = c->valid_sec + c->updating_sec >= now;
c->stale_error = c->valid_sec + c->error_sec >= now;

ngx_shmtx_lock(&cache->shpool->mutex);

if (c->node->updating) {
rc = NGX_HTTP_CACHE_UPDATING;

} else {
c->node->updating = 1;
c->updating = 1;
c->lock_time = c->node->lock_time;
rc = NGX_HTTP_CACHE_STALE;
}

ngx_shmtx_unlock(&cache->shpool->mutex);

ngx_log_debug3(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
"http file cache expired: %i %T %T",
rc, c->valid_sec, now);

return rc;
}

return NGX_OK;
}


  1. 调用ngx_http_file_cache_aio_read函数读取缓存文件,读取大小为c->header_start,读取完成后,对返回值进行校验,确定是否读取成功,代码如下:
1
2
3
4
5
6
7
8
9
10
11
12
13

n = ngx_http_file_cache_aio_read(r, c);

if (n < 0) {
return n;
}

if ((size_t) n < c->header_start) {
ngx_log_error(NGX_LOG_CRIT, r->connection->log, 0,
"cache file \"%s\" is too small", c->file.name.data);
return NGX_DECLINED;
}

  1. 取出缓存文件头部,并对其字段进行校验,封包过程参考ngx_http_file_cache_set_header函数
1
2
3

h = (ngx_http_file_cache_header_t *) c->buf->pos;

  1. 使用读出的缓存文件头部,更新内存中缓存节点信息
1
2
3
4
5
6
7
8
9
10
11
12
13

c->buf->last += n; //buf后续会被用来存放http头,所以这个地方移动last指针

c->valid_sec = h->valid_sec;
c->updating_sec = h->updating_sec;
c->error_sec = h->error_sec;
c->last_modified = h->last_modified;
c->date = h->date;
c->valid_msec = h->valid_msec;
c->body_start = h->body_start;
c->etag.len = h->etag_len;
c->etag.data = h->etag;

2.8 发送缓存

若2.7节读取缓存文件成功,而且缓存校验也成功,则开始发送缓存内容,该功能由ngx_http_upstream_cache_send函数实现,接下来我们看看源码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
/*
 * Serve a cached response to the client: present the cached bytes to the
 * upstream machinery as if they had just arrived from the upstream, let
 * u->process_header() parse them, then hand off to ngx_http_cache_send().
 */
static ngx_int_t
ngx_http_upstream_cache_send(ngx_http_request_t *r, ngx_http_upstream_t *u)
{
ngx_int_t rc;
ngx_http_cache_t *c;

r->cached = 1;
c = r->cache;

/* no stored http header at all: send the body HTTP/0.9-style */
if (c->header_start == c->body_start) {
r->http_version = NGX_HTTP_VERSION_9;
return ngx_http_cache_send(r);
}

/* TODO: cache stack */

/* point the upstream buffer at the cached header bytes */
u->buffer = *c->buf;
u->buffer.pos += c->header_start;

ngx_memzero(&u->headers_in, sizeof(ngx_http_upstream_headers_in_t));
u->headers_in.content_length_n = -1;
u->headers_in.last_modified_time = -1;

if (ngx_list_init(&u->headers_in.headers, r->pool, 8,
sizeof(ngx_table_elt_t))
!= NGX_OK)
{
return NGX_ERROR;
}

if (ngx_list_init(&u->headers_in.trailers, r->pool, 2,
sizeof(ngx_table_elt_t))
!= NGX_OK)
{
return NGX_ERROR;
}

/* parse the cached header exactly as a live upstream response */
rc = u->process_header(r);

if (rc == NGX_OK) {

if (ngx_http_upstream_process_headers(r, u) != NGX_OK) {
return NGX_DONE;
}

return ngx_http_cache_send(r);
}

if (rc == NGX_ERROR) {
return NGX_ERROR;
}

/* a truncated header in a cache file can never be completed */
if (rc == NGX_AGAIN) {
rc = NGX_HTTP_UPSTREAM_INVALID_HEADER;
}

/* rc == NGX_HTTP_UPSTREAM_INVALID_HEADER */

ngx_log_error(NGX_LOG_CRIT, r->connection->log, 0,
"cache file \"%s\" contains invalid header",
c->file.name.data);

/* TODO: delete file */

return rc;
}

2.9 向上游回源

若缓存不存在,nginx则向上游回源,那么整个流程就走到ngx_http_upstream_init_request函数的#if (NGX_HTTP_CACHE)块后,我们来看看其源码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269

/*
 * Start processing an upstream request: try the cache first (when enabled),
 * otherwise build the upstream request, resolve the upstream address
 * (configured upstream block, explicit sockaddr, or DNS), and connect.
 */
static void
ngx_http_upstream_init_request(ngx_http_request_t *r)
{
ngx_str_t *host;
ngx_uint_t i;
ngx_resolver_ctx_t *ctx, temp;
ngx_http_cleanup_t *cln;
ngx_http_upstream_t *u;
ngx_http_core_loc_conf_t *clcf;
ngx_http_upstream_srv_conf_t *uscf, **uscfp;
ngx_http_upstream_main_conf_t *umcf;

if (r->aio) {
return;
}

u = r->upstream;

#if (NGX_HTTP_CACHE)

if (u->conf->cache) {
ngx_int_t rc;

rc = ngx_http_upstream_cache(r, u);

/* cache lock busy: retry from the write event handler */
if (rc == NGX_BUSY) {
r->write_event_handler = ngx_http_upstream_init_request;
return;
}

r->write_event_handler = ngx_http_request_empty_handler;

if (rc == NGX_ERROR) {
ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR);
return;
}

if (rc == NGX_OK) {
rc = ngx_http_upstream_cache_send(r, u);

if (rc == NGX_DONE) {
return;
}

/* corrupt cached header: treat as a miss and go to the upstream */
if (rc == NGX_HTTP_UPSTREAM_INVALID_HEADER) {
rc = NGX_DECLINED;
r->cached = 0;
u->buffer.start = NULL;
u->cache_status = NGX_HTTP_CACHE_MISS;
u->request_sent = 1;
}

if (ngx_http_upstream_cache_background_update(r, u) != NGX_OK) {
rc = NGX_ERROR;
}
}

/* anything other than NGX_DECLINED was fully handled by the cache */
if (rc != NGX_DECLINED) {
ngx_http_finalize_request(r, rc);
return;
}
}

#endif

u->store = u->conf->store;
/*
 * Install the handlers that watch the TCP connection between nginx and the
 * downstream client; both go through
 * ngx_http_upstream_check_broken_connection and abort the request at once
 * if the client connection breaks.
 */
if (!u->store && !r->post_action && !u->conf->ignore_client_abort) {
//note: r still refers to the client connection here; the connection
//to the upstream server has not been established yet
r->read_event_handler = ngx_http_upstream_rd_check_broken_connection;
r->write_event_handler = ngx_http_upstream_wr_check_broken_connection;
}
//if a client request body was received, hand its buffers to
//u->request_bufs; create_request below (u->create_request) uses them
if (r->request_body) {
u->request_bufs = r->request_body->bufs;
}

if (u->create_request(r) != NGX_OK) { //e.g. ngx_http_proxy_create_request
ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR);
return;
}

if (ngx_http_upstream_set_local(r, u, u->conf->local) != NGX_OK) {
ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR);
return;
}

if (u->conf->socket_keepalive) {
u->peer.so_keepalive = 1;
}

clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);
/* initialize u->output, which controls how the response is sent downstream */
u->output.alignment = clcf->directio_alignment;
u->output.pool = r->pool;
u->output.bufs.num = 1;
u->output.bufs.size = clcf->client_body_buffer_size;

if (u->output.output_filter == NULL) {
u->output.output_filter = ngx_chain_writer;
u->output.filter_ctx = &u->writer;
}

u->writer.pool = r->pool;
/* add a state entry recording this upstream attempt (status, response length, ...) */
if (r->upstream_states == NULL) {

r->upstream_states = ngx_array_create(r->pool, 1,
sizeof(ngx_http_upstream_state_t));
if (r->upstream_states == NULL) {
ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR);
return;
}

} else {

u->state = ngx_array_push(r->upstream_states);
if (u->state == NULL) {
ngx_http_upstream_finalize_request(r, u,
NGX_HTTP_INTERNAL_SERVER_ERROR);
return;
}

ngx_memzero(u->state, sizeof(ngx_http_upstream_state_t));
}

cln = ngx_http_cleanup_add(r, 0);
if (cln == NULL) {
ngx_http_finalize_request(r, NGX_HTTP_INTERNAL_SERVER_ERROR);
return;
}

cln->handler = ngx_http_upstream_cleanup; //ngx_http_upstream_cleanup always runs when the request ends
cln->data = r;
u->cleanup = &cln->handler;

if (u->resolved == NULL) {

/* the upstream comes straight from the configuration */
uscf = u->conf->upstream;

} else {

#if (NGX_HTTP_SSL)
u->ssl_name = u->resolved->host;
#endif

host = &u->resolved->host;

umcf = ngx_http_get_module_main_conf(r, ngx_http_upstream_module);

uscfp = umcf->upstreams.elts;

/* see whether the resolved host matches a configured upstream block */
for (i = 0; i < umcf->upstreams.nelts; i++) {

uscf = uscfp[i];

if (uscf->host.len == host->len
&& ((uscf->port == 0 && u->resolved->no_port)
|| uscf->port == u->resolved->port)
&& ngx_strncasecmp(uscf->host.data, host->data, host->len) == 0)
{
goto found;
}
}

if (u->resolved->sockaddr) {

if (u->resolved->port == 0
&& u->resolved->sockaddr->sa_family != AF_UNIX)
{
ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
"no port in upstream \"%V\"", host);
ngx_http_upstream_finalize_request(r, u,
NGX_HTTP_INTERNAL_SERVER_ERROR);
return;
}

if (ngx_http_upstream_create_round_robin_peer(r, u->resolved)
!= NGX_OK)
{
ngx_http_upstream_finalize_request(r, u,
NGX_HTTP_INTERNAL_SERVER_ERROR);
return;
}

ngx_http_upstream_connect(r, u);

return;
}

if (u->resolved->port == 0) {
ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
"no port in upstream \"%V\"", host);
ngx_http_upstream_finalize_request(r, u,
NGX_HTTP_INTERNAL_SERVER_ERROR);
return;
}

temp.name = *host;
// initialize the DNS resolver for the upstream host name
ctx = ngx_resolve_start(clcf->resolver, &temp);
if (ctx == NULL) {
ngx_http_upstream_finalize_request(r, u,
NGX_HTTP_INTERNAL_SERVER_ERROR);
return;
}

if (ctx == NGX_NO_RESOLVER) {
ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
"no resolver defined to resolve %V", host);

ngx_http_upstream_finalize_request(r, u, NGX_HTTP_BAD_GATEWAY);
return;
}

ctx->name = *host;
ctx->handler = ngx_http_upstream_resolve_handler; //callback invoked once name resolution completes
ctx->data = r;
ctx->timeout = clcf->resolver_timeout;

u->resolved->ctx = ctx;
//start the resolution; this returns even if it has not completed yet
if (ngx_resolve_name(ctx) != NGX_OK) {
u->resolved->ctx = NULL;
ngx_http_upstream_finalize_request(r, u,
NGX_HTTP_INTERNAL_SERVER_ERROR);
return;
}

return; // resolution is still in progress; the handler will continue

}

found:

if (uscf == NULL) {
ngx_log_error(NGX_LOG_ALERT, r->connection->log, 0,
"no upstream configuration");
ngx_http_upstream_finalize_request(r, u,
NGX_HTTP_INTERNAL_SERVER_ERROR);
return;
}

u->upstream = uscf;

#if (NGX_HTTP_SSL)
u->ssl_name = uscf->host;
#endif

/* per-request load-balancer init, e.g. round-robin peer selection setup */
if (uscf->peer.init(r, uscf) != NGX_OK) {
ngx_http_upstream_finalize_request(r, u,
NGX_HTTP_INTERNAL_SERVER_ERROR);
return;
}

u->peer.start_time = ngx_current_msec;

if (u->conf->next_upstream_tries
&& u->peer.tries > u->conf->next_upstream_tries)
{
u->peer.tries = u->conf->next_upstream_tries;
}

ngx_http_upstream_connect(r, u);
}

2.10 接受上游返回的数据

该功能由ngx_http_upstream_send_response函数实现,我们来看看源码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343

/*
 * Send the upstream response to the client.  Called once the response
 * header has been fully parsed.  First emits the header downstream, then
 * dispatches to one of two body-forwarding modes:
 *   - non-buffered: u->input_filter plus the process_non_buffered_*
 *     event handlers forward data with minimal buffering;
 *   - buffered: an ngx_event_pipe_t is configured and
 *     ngx_http_upstream_process_upstream() drives the transfer
 *     (optionally spilling to a temp file / the cache).
 */
static void
ngx_http_upstream_send_response(ngx_http_request_t *r, ngx_http_upstream_t *u)
{
    ssize_t                    n;
    ngx_int_t                  rc;
    ngx_event_pipe_t          *p;
    ngx_connection_t          *c;
    ngx_http_core_loc_conf_t  *clcf;

    /* push the already-built response header to the client */
    rc = ngx_http_send_header(r);

    if (rc == NGX_ERROR || rc > NGX_OK || r->post_action) {
        ngx_http_upstream_finalize_request(r, u, rc);
        return;
    }

    u->header_sent = 1;

    if (u->upgrade) {

#if (NGX_HTTP_CACHE)

        /* an upgraded (e.g. WebSocket) connection cannot be cached */
        if (r->cache) {
            ngx_http_file_cache_free(r->cache, u->pipe->temp_file);
        }

#endif

        ngx_http_upstream_upgrade(r, u);
        return;
    }

    c = r->connection;

    if (r->header_only) {

        if (!u->buffering) {
            ngx_http_upstream_finalize_request(r, u, rc);
            return;
        }

        if (!u->cacheable && !u->store) {
            ngx_http_upstream_finalize_request(r, u, rc);
            return;
        }

        /* keep reading the body for the cache/store, but mark the
           downstream side as done so nothing more is sent to the client */
        u->pipe->downstream_error = 1;
    }

    /* the client request body temp file is no longer needed once we
       start relaying the response */
    if (r->request_body && r->request_body->temp_file
        && r == r->main && !r->preserve_body
        && !u->conf->preserve_output)
    {
        ngx_pool_run_cleanup_file(r->pool, r->request_body->temp_file->file.fd);
        r->request_body->temp_file->file.fd = NGX_INVALID_FILE;
    }

    clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);

    if (!u->buffering) {

#if (NGX_HTTP_CACHE)

        if (r->cache) {
            ngx_http_file_cache_free(r->cache, u->pipe->temp_file);
        }

#endif

        /* default to the pass-through filter if the protocol module
           did not install one */
        if (u->input_filter == NULL) {
            u->input_filter_init = ngx_http_upstream_non_buffered_filter_init;
            u->input_filter = ngx_http_upstream_non_buffered_filter;
            u->input_filter_ctx = r;
        }

        u->read_event_handler = ngx_http_upstream_process_non_buffered_upstream;
        r->write_event_handler =
                             ngx_http_upstream_process_non_buffered_downstream;

        r->limit_rate = 0;

        if (u->input_filter_init(u->input_filter_ctx) == NGX_ERROR) {
            ngx_http_upstream_finalize_request(r, u, NGX_ERROR);
            return;
        }

        if (clcf->tcp_nodelay && ngx_tcp_nodelay(c) != NGX_OK) {
            ngx_http_upstream_finalize_request(r, u, NGX_ERROR);
            return;
        }

        /* body bytes that were preread together with the header */
        n = u->buffer.last - u->buffer.pos;

        if (n) {
            u->buffer.last = u->buffer.pos;

            u->state->response_length += n;

            if (u->input_filter(u->input_filter_ctx, n) == NGX_ERROR) {
                ngx_http_upstream_finalize_request(r, u, NGX_ERROR);
                return;
            }

            ngx_http_upstream_process_non_buffered_downstream(r);

        } else {
            u->buffer.pos = u->buffer.start;
            u->buffer.last = u->buffer.start;

            if (ngx_http_send_special(r, NGX_HTTP_FLUSH) == NGX_ERROR) {
                ngx_http_upstream_finalize_request(r, u, NGX_ERROR);
                return;
            }

            if (u->peer.connection->read->ready || u->length == 0) {
                ngx_http_upstream_process_non_buffered_upstream(r, u);
            }
        }

        return;
    }

    /* TODO: preallocate event_pipe bufs, look "Content-Length" */

#if (NGX_HTTP_CACHE)

    /* drop any previously opened cache file before deciding anew
       whether this response is cacheable */
    if (r->cache && r->cache->file.fd != NGX_INVALID_FILE) {
        ngx_pool_run_cleanup_file(r->pool, r->cache->file.fd);
        r->cache->file.fd = NGX_INVALID_FILE;
    }

    switch (ngx_http_test_predicates(r, u->conf->no_cache)) {

    case NGX_ERROR:
        ngx_http_upstream_finalize_request(r, u, NGX_ERROR);
        return;

    case NGX_DECLINED:
        u->cacheable = 0;
        break;

    default: /* NGX_OK */

        if (u->cache_status == NGX_HTTP_CACHE_BYPASS) {

            /* create cache if previously bypassed */

            if (ngx_http_file_cache_create(r) != NGX_OK) {
                ngx_http_upstream_finalize_request(r, u, NGX_ERROR);
                return;
            }
        }

        break;
    }

    if (u->cacheable) {
        time_t  now, valid;

        now = ngx_time();

        valid = r->cache->valid_sec;

        if (valid == 0) {
            valid = ngx_http_file_cache_valid(u->conf->cache_valid,
                                              u->headers_in.status_n);
            if (valid) {
                r->cache->valid_sec = now + valid;
            }
        }
        /* valid stays 0 when neither the response headers nor
           proxy_cache_valid supplied a validity time; in that case the
           response is not cached */
        if (valid) {
            r->cache->date = now;
            r->cache->body_start = (u_short) (u->buffer.pos - u->buffer.start);

            if (u->headers_in.status_n == NGX_HTTP_OK
                || u->headers_in.status_n == NGX_HTTP_PARTIAL_CONTENT)
            {
                r->cache->last_modified = u->headers_in.last_modified_time;

                if (u->headers_in.etag) {
                    r->cache->etag = u->headers_in.etag->value;

                } else {
                    ngx_str_null(&r->cache->etag);
                }

            } else {
                r->cache->last_modified = -1;
                ngx_str_null(&r->cache->etag);
            }

            if (ngx_http_file_cache_set_header(r, u->buffer.start) != NGX_OK) {
                ngx_http_upstream_finalize_request(r, u, NGX_ERROR);
                return;
            }

        } else {
            u->cacheable = 0;
        }
    }

    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
                   "http cacheable: %d", u->cacheable);

    if (u->cacheable == 0 && r->cache) {
        ngx_http_file_cache_free(r->cache, u->pipe->temp_file);
    }

    if (r->header_only && !u->cacheable && !u->store) {
        ngx_http_upstream_finalize_request(r, u, 0);
        return;
    }

#endif

    /* set up the event pipe that shuttles the body from the upstream
       connection to the client connection */
    p = u->pipe;

    p->output_filter = ngx_http_upstream_output_filter;
    p->output_ctx = r;
    p->tag = u->output.tag;
    p->bufs = u->conf->bufs;
    p->busy_size = u->conf->busy_buffers_size;
    p->upstream = u->peer.connection;
    p->downstream = c;
    p->pool = r->pool;
    p->log = c->log;
    p->limit_rate = u->conf->limit_rate;
    p->start_sec = ngx_time();

    p->cacheable = u->cacheable || u->store;

    p->temp_file = ngx_pcalloc(r->pool, sizeof(ngx_temp_file_t));
    if (p->temp_file == NULL) {
        ngx_http_upstream_finalize_request(r, u, NGX_ERROR);
        return;
    }

    p->temp_file->file.fd = NGX_INVALID_FILE;
    p->temp_file->file.log = c->log;
    /* u->conf->temp_path is set while parsing the proxy configuration
       (ngx_http_proxy_merge_loc_conf -> ngx_conf_merge_path_value) */
    p->temp_file->path = u->conf->temp_path;
    p->temp_file->pool = r->pool;

    if (p->cacheable) {
        p->temp_file->persistent = 1;

#if (NGX_HTTP_CACHE)
        if (r->cache && !r->cache->file_cache->use_temp_path) {
            p->temp_file->path = r->cache->file_cache->path;
            p->temp_file->file.name = r->cache->file.name;
        }
#endif

    } else {
        p->temp_file->log_level = NGX_LOG_WARN;
        p->temp_file->warn = "an upstream response is buffered "
                             "to a temporary file";
    }

    p->max_temp_file_size = u->conf->max_temp_file_size;
    p->temp_file_write_size = u->conf->temp_file_write_size;

#if (NGX_THREADS)
    if (clcf->aio == NGX_HTTP_AIO_THREADS && clcf->aio_write) {
        p->thread_handler = ngx_http_upstream_thread_handler;
        p->thread_ctx = r;
    }
#endif

    /* hand the preread body bytes (read together with the header)
       to the pipe as its first buffer */
    p->preread_bufs = ngx_alloc_chain_link(r->pool);
    if (p->preread_bufs == NULL) {
        ngx_http_upstream_finalize_request(r, u, NGX_ERROR);
        return;
    }

    p->preread_bufs->buf = &u->buffer;
    p->preread_bufs->next = NULL;
    u->buffer.recycled = 1;

    p->preread_size = u->buffer.last - u->buffer.pos;

    if (u->cacheable) {

        /* the header portion of u->buffer must also reach the cache file */
        p->buf_to_file = ngx_calloc_buf(r->pool);
        if (p->buf_to_file == NULL) {
            ngx_http_upstream_finalize_request(r, u, NGX_ERROR);
            return;
        }

        p->buf_to_file->start = u->buffer.start;
        p->buf_to_file->pos = u->buffer.start;
        p->buf_to_file->last = u->buffer.pos;
        p->buf_to_file->temporary = 1;
    }

    if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
        /* the posted aio operation may corrupt a shadow buffer */
        p->single_buf = 1;
    }

    /* TODO: p->free_bufs = 0 if use ngx_create_chain_of_bufs() */
    p->free_bufs = 1;

    /*
     * event_pipe would do u->buffer.last += p->preread_size
     * as though these bytes were read
     */
    u->buffer.last = u->buffer.pos;

    if (u->conf->cyclic_temp_file) {

        /*
         * we need to disable the use of sendfile() if we use cyclic temp file
         * because the writing a new data may interfere with sendfile()
         * that uses the same kernel file pages (at least on FreeBSD)
         */

        p->cyclic_temp_file = 1;
        c->sendfile = 0;

    } else {
        p->cyclic_temp_file = 0;
    }

    p->read_timeout = u->conf->read_timeout;
    p->send_timeout = clcf->send_timeout;
    p->send_lowat = clcf->send_lowat;

    /* -1: response length unknown until the input filter says otherwise */
    p->length = -1;

    if (u->input_filter_init
        && u->input_filter_init(p->input_ctx) != NGX_OK)
    {
        ngx_http_upstream_finalize_request(r, u, NGX_ERROR);
        return;
    }

    u->read_event_handler = ngx_http_upstream_process_upstream;
    r->write_event_handler = ngx_http_upstream_process_downstream;

    /* kick the pipe once immediately to consume any preread data */
    ngx_http_upstream_process_upstream(r, u);
}

读取数据的具体操作在ngx_http_upstream_process_upstream函数中实现

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46

/*
 * Read-event handler for the upstream connection in buffered mode.
 * Drives ngx_event_pipe() to move response body data from the upstream
 * to the client, then lets ngx_http_upstream_process_request() check the
 * overall request state.  Note that the response header has already been
 * sent to the client by ngx_http_upstream_send_response(); only the body
 * is handled here.
 */
static void
ngx_http_upstream_process_upstream(ngx_http_request_t *r,
    ngx_http_upstream_t *u)
{
    ngx_event_t       *rd;
    ngx_event_pipe_t  *ep;
    ngx_connection_t  *pc;

    pc = u->peer.connection;
    ep = u->pipe;
    rd = pc->read;

    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pc->log, 0,
                   "http upstream process upstream");

    pc->log->action = "reading upstream";

    if (rd->timedout) {

        /* flag the pipe error and fall through so the request state
           machine can finalize things */
        ep->upstream_error = 1;
        ngx_connection_error(pc, NGX_ETIMEDOUT, "upstream timed out");

    } else if (rd->delayed) {

        /* read is rate-limited: re-arm the event and wait */
        ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pc->log, 0,
                       "http upstream delayed");

        if (ngx_handle_read_event(rd, 0) != NGX_OK) {
            ngx_http_upstream_finalize_request(r, u, NGX_ERROR);
        }

        return;

    } else if (ngx_event_pipe(ep, 0) == NGX_ABORT) {
        ngx_http_upstream_finalize_request(r, u, NGX_ERROR);
        return;
    }

    ngx_http_upstream_process_request(r, u);
}

接下来分析一下ngx_event_pipe这个函数。在开启buffering的时候,使用event_pipe进行数据的转发:调用ngx_event_pipe_read_upstream函数读取上游返回的数据,调用ngx_event_pipe_write_to_downstream函数把数据发送给客户端。
ngx_event_pipe将upstream响应发送回客户端。do_write代表本轮是否要先往客户端发送、写数据。如果设置了,那么会先发给客户端,再读upstream数据;当然,如果读取到了新数据,下一轮循环也会进入写逻辑。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
/*
 * Drive the buffered-proxy pipe: alternately write buffered data to the
 * downstream (client) and read new data from the upstream, until a pass
 * neither reads anything nor is blocked on the downstream.  On exit the
 * read/write events and their timers are re-armed as needed.
 *
 * do_write != 0 means "try to send to the client first"; it is forced to 1
 * after any pass that made progress reading.
 */
ngx_int_t
ngx_event_pipe(ngx_event_pipe_t *p, ngx_int_t do_write)
{
    ngx_int_t     rc;
    ngx_uint_t    flags;
    ngx_event_t  *rev, *wev;

    for ( ;; ) {
        if (do_write) {
            p->log->action = "sending to client";

            rc = ngx_event_pipe_write_to_downstream(p);

            if (rc == NGX_ABORT) {
                return NGX_ABORT;
            }

            /* downstream busy: stop, events will restart us */
            if (rc == NGX_BUSY) {
                return NGX_OK;
            }
        }

        p->read = 0;
        p->upstream_blocked = 0;

        p->log->action = "reading upstream";

        if (ngx_event_pipe_read_upstream(p) == NGX_ABORT) {
            return NGX_ABORT;
        }

        /* nothing read and reading is not blocked on a busy
           downstream: the pipe is drained for now */
        if (!p->read && !p->upstream_blocked) {
            break;
        }

        do_write = 1;
    }

    /* re-arm the upstream read event and its timeout */
    if (p->upstream->fd != (ngx_socket_t) -1) {
        rev = p->upstream->read;

        flags = (rev->eof || rev->error) ? NGX_CLOSE_EVENT : 0;

        if (ngx_handle_read_event(rev, flags) != NGX_OK) {
            return NGX_ABORT;
        }

        if (!rev->delayed) {
            if (rev->active && !rev->ready) {
                ngx_add_timer(rev, p->read_timeout);

            } else if (rev->timer_set) {
                ngx_del_timer(rev);
            }
        }
    }

    /* re-arm the downstream write event, but only if the connection
       still belongs to this request (data == output_ctx) */
    if (p->downstream->fd != (ngx_socket_t) -1
        && p->downstream->data == p->output_ctx)
    {
        wev = p->downstream->write;
        if (ngx_handle_write_event(wev, p->send_lowat) != NGX_OK) {
            return NGX_ABORT;
        }

        if (!wev->delayed) {
            if (wev->active && !wev->ready) {
                ngx_add_timer(wev, p->send_timeout);

            } else if (wev->timer_set) {
                ngx_del_timer(wev);
            }
        }
    }

    return NGX_OK;
}

这个函数中最重要的就是ngx_event_pipe_write_to_downstream跟ngx_event_pipe_read_upstream,这两个函数将处理来自上游的数据以及将数据转发到客户端。

Nginx反向代理详解

1. 相关配置

Nginx实现反向代理功能主要由proxy_pass、upstream指令实现,配置指令如下:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17

http {
...
upstream proxy {
server 127.0.0.1:8080;
}

...
server {
...
location / {
proxy_pass http://proxy;
}
...
}
}

具体配置可以参考ngx_http_proxy_module

2. 源码解析

Nginx实现反向代理功能的源码在src/http/modules/ngx_http_proxy_module.c,因为反向代理模块是一种upstream模块,所以还有一些基础代码在src/http/ngx_http_upstream.c中

2.1 入口函数

反向代理模块的入口函数是ngx_http_proxy_handler,我们看一下源码

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
/*
 * Content-phase handler installed by the "proxy_pass" directive.
 * Creates the request's ngx_http_upstream_t, wires up all proxy-module
 * callbacks (request creation, header parsing, filters), then starts
 * reading the client request body; ngx_http_upstream_init is invoked
 * once the body is available.
 */
static ngx_int_t
ngx_http_proxy_handler(ngx_http_request_t *r)
{
    ngx_int_t                    rc;
    ngx_http_upstream_t         *u;
    ngx_http_proxy_ctx_t        *ctx;
    ngx_http_proxy_loc_conf_t   *plcf;
#if (NGX_HTTP_CACHE)
    ngx_http_proxy_main_conf_t  *pmcf;
#endif

    if (ngx_http_upstream_create(r) != NGX_OK) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    ctx = ngx_pcalloc(r->pool, sizeof(ngx_http_proxy_ctx_t));
    if (ctx == NULL) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    ngx_http_set_ctx(r, ctx, ngx_http_proxy_module);

    plcf = ngx_http_get_module_loc_conf(r, ngx_http_proxy_module);

    u = r->upstream;

    if (plcf->proxy_lengths == NULL) {
        /* static proxy_pass URL: use precomputed vars */
        ctx->vars = plcf->vars;
        u->schema = plcf->vars.schema;
#if (NGX_HTTP_SSL)
        u->ssl = (plcf->upstream.ssl != NULL);
#endif

    } else {
        /* proxy_pass contains variables: evaluate them per request */
        if (ngx_http_proxy_eval(r, ctx, plcf) != NGX_OK) {
            return NGX_HTTP_INTERNAL_SERVER_ERROR;
        }
    }

    u->output.tag = (ngx_buf_tag_t) &ngx_http_proxy_module;

    u->conf = &plcf->upstream;

#if (NGX_HTTP_CACHE)
    pmcf = ngx_http_get_module_main_conf(r, ngx_http_proxy_module);

    u->caches = &pmcf->caches;
    u->create_key = ngx_http_proxy_create_key;
#endif

    /* protocol callbacks: build the proxied request, parse the
       upstream status line / headers, and clean up */
    u->create_request = ngx_http_proxy_create_request;
    u->reinit_request = ngx_http_proxy_reinit_request;
    u->process_header = ngx_http_proxy_process_status_line;
    u->abort_request = ngx_http_proxy_abort_request;
    u->finalize_request = ngx_http_proxy_finalize_request;
    r->state = 0;

    if (plcf->redirects) {
        u->rewrite_redirect = ngx_http_proxy_rewrite_redirect;
    }

    if (plcf->cookie_domains || plcf->cookie_paths) {
        u->rewrite_cookie = ngx_http_proxy_rewrite_cookie;
    }

    u->buffering = plcf->upstream.buffering;

    u->pipe = ngx_pcalloc(r->pool, sizeof(ngx_event_pipe_t));
    if (u->pipe == NULL) {
        return NGX_HTTP_INTERNAL_SERVER_ERROR;
    }

    /* body filter for buffered mode (event pipe) ... */
    u->pipe->input_filter = ngx_http_proxy_copy_filter;
    u->pipe->input_ctx = r;

    /* ... and for non-buffered mode */
    u->input_filter_init = ngx_http_proxy_input_filter_init;
    u->input_filter = ngx_http_proxy_non_buffered_copy_filter;
    u->input_filter_ctx = r;

    u->accel = 1;

    if (!plcf->upstream.request_buffering
        && plcf->body_values == NULL && plcf->upstream.pass_request_body
        && (!r->headers_in.chunked
            || plcf->http_version == NGX_HTTP_VERSION_11))
    {
        r->request_body_no_buffering = 1;
    }

    /* ngx_http_upstream_init runs once the client body has been read */
    rc = ngx_http_read_client_request_body(r, ngx_http_upstream_init);

    if (rc >= NGX_HTTP_SPECIAL_RESPONSE) {
        return rc;
    }

    return NGX_DONE;
}
  1. 该函数首先调用ngx_http_upstream_create函数从内存池中创建ngx_http_upstream_s结构体;
1
2
3
if (ngx_http_upstream_create(r) != NGX_OK) {
return NGX_HTTP_INTERNAL_SERVER_ERROR;
}
  1. 接下来初始化ngx_http_upstream_s各成员,应当注意ngx_http_upstream_s结构体的几个回调函数,这个是实现反向代理的重要功能
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
u->create_request = ngx_http_proxy_create_request;
u->reinit_request = ngx_http_proxy_reinit_request;
u->process_header = ngx_http_proxy_process_status_line;
u->abort_request = ngx_http_proxy_abort_request;
u->finalize_request = ngx_http_proxy_finalize_request;
r->state = 0;

if (plcf->redirects) {
u->rewrite_redirect = ngx_http_proxy_rewrite_redirect;
}

if (plcf->cookie_domains || plcf->cookie_paths) {
u->rewrite_cookie = ngx_http_proxy_rewrite_cookie;
}

u->buffering = plcf->upstream.buffering;

u->pipe = ngx_pcalloc(r->pool, sizeof(ngx_event_pipe_t));
if (u->pipe == NULL) {
return NGX_HTTP_INTERNAL_SERVER_ERROR;
}

u->pipe->input_filter = ngx_http_proxy_copy_filter;
u->pipe->input_ctx = r;

u->input_filter_init = ngx_http_proxy_input_filter_init;
u->input_filter = ngx_http_proxy_non_buffered_copy_filter;
u->input_filter_ctx = r;

u->accel = 1;

2.2 绑定入口函数

  1. nginx在解析配置文件时,遇到proxy_pass指令时将上述的入口函数绑定到content阶段的handler上,解析proxy_pass执行后的参数,确定上游服务器,那么当请求到达时,进入到入口函数。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
/*
 * Handler for the "proxy_pass" directive.  Installs
 * ngx_http_proxy_handler as the location's content handler, then either
 * compiles a variable-containing URL into script codes (evaluated per
 * request) or resolves the static URL now and registers the upstream.
 */
static char *
ngx_http_proxy_pass(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
    ngx_http_proxy_loc_conf_t *plcf = conf;

    size_t                      add;
    u_short                     port;
    ngx_str_t                  *value, *url;
    ngx_url_t                   u;
    ngx_uint_t                  n;
    ngx_http_core_loc_conf_t   *clcf;
    ngx_http_script_compile_t   sc;

    if (plcf->upstream.upstream || plcf->proxy_lengths) {
        return "is duplicate";
    }

    clcf = ngx_http_conf_get_module_loc_conf(cf, ngx_http_core_module);

    /* this is what makes the proxy module handle requests
       for this location */
    clcf->handler = ngx_http_proxy_handler;

    if (clcf->name.len && clcf->name.data[clcf->name.len - 1] == '/') {
        clcf->auto_redirect = 1;
    }

    value = cf->args->elts;

    url = &value[1];

    /* count $variables in the proxy_pass argument */
    n = ngx_http_script_variables_count(url);

    if (n) {

        /* the URL contains variables: compile it and defer
           evaluation to request time (ngx_http_proxy_eval) */
        ngx_memzero(&sc, sizeof(ngx_http_script_compile_t));

        sc.cf = cf;
        sc.source = url;
        sc.lengths = &plcf->proxy_lengths;
        sc.values = &plcf->proxy_values;
        sc.variables = n;
        sc.complete_lengths = 1;
        sc.complete_values = 1;

        if (ngx_http_script_compile(&sc) != NGX_OK) {
            return NGX_CONF_ERROR;
        }

#if (NGX_HTTP_SSL)
        plcf->ssl = 1;
#endif

        return NGX_CONF_OK;
    }

    if (ngx_strncasecmp(url->data, (u_char *) "http://", 7) == 0) {
        add = 7;
        port = 80;

    } else if (ngx_strncasecmp(url->data, (u_char *) "https://", 8) == 0) {

#if (NGX_HTTP_SSL)
        plcf->ssl = 1;

        add = 8;
        port = 443;
#else
        ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                           "https protocol requires SSL support");
        return NGX_CONF_ERROR;
#endif

    } else {
        ngx_conf_log_error(NGX_LOG_EMERG, cf, 0, "invalid URL prefix");
        return NGX_CONF_ERROR;
    }

    ngx_memzero(&u, sizeof(ngx_url_t));

    u.url.len = url->len - add;
    u.url.data = url->data + add;
    u.default_port = port;
    u.uri_part = 1;
    u.no_resolve = 1;

    /* register (or look up) the upstream named by the proxy_pass URL;
       a matching upstream{} block will be used if one exists */
    plcf->upstream.upstream = ngx_http_upstream_add(cf, &u, 0);
    if (plcf->upstream.upstream == NULL) {
        return NGX_CONF_ERROR;
    }

    plcf->vars.schema.len = add;
    plcf->vars.schema.data = url->data;
    plcf->vars.key_start = plcf->vars.schema;

    ngx_http_proxy_set_vars(&u, &plcf->vars);

    plcf->location = clcf->name;

    if (clcf->named
#if (NGX_PCRE)
        || clcf->regex
#endif
        || clcf->noname)
    {
        /* a URI part is ambiguous in these location types: there is no
           literal prefix to replace */
        if (plcf->vars.uri.len) {
            ngx_conf_log_error(NGX_LOG_EMERG, cf, 0,
                               "\"proxy_pass\" cannot have URI part in "
                               "location given by regular expression, "
                               "or inside named location, "
                               "or inside \"if\" statement, "
                               "or inside \"limit_except\" block");
            return NGX_CONF_ERROR;
        }

        plcf->location.len = 0;
    }

    plcf->url = *url;

    return NGX_CONF_OK;
}


3. 注意事项

  1. 对session的处理,nginx默认使用轮询,若某个IP的请求被代理到A上,这个IP的下一个请求可能会被代理到B上,这样就会有问题,可考虑使用ip hash

Nginx日志详解

1. 相关配置

Nginx日志系统由两条指令开启error_log、access_log

1
2
3
4
5
6
7
8
error_log  logs/error.log debug;

http {
...
access_log logs/access.log main;
...
}

具体配置可以参考 error_log、access_log

2. 源码解析

首先来看一下关于日志的几个宏定义

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
filename:log.h
#if (NGX_HAVE_C99_VARIADIC_MACROS)

#define NGX_HAVE_VARIADIC_MACROS 1

#define ngx_log_error(level, log, ...) \
if ((log)->log_level >= level) ngx_log_error_core(level, log, __VA_ARGS__)

void ngx_log_error_core(ngx_uint_t level, ngx_log_t *log, ngx_err_t err,
const char *fmt, ...);

#define ngx_log_debug(level, log, ...) \
if ((log)->log_level & level) \
ngx_log_error_core(NGX_LOG_DEBUG, log, __VA_ARGS__)

/*********************************/

#elif (NGX_HAVE_GCC_VARIADIC_MACROS)

#define NGX_HAVE_VARIADIC_MACROS 1

#define ngx_log_error(level, log, args...) \
if ((log)->log_level >= level) ngx_log_error_core(level, log, args)

void ngx_log_error_core(ngx_uint_t level, ngx_log_t *log, ngx_err_t err,
const char *fmt, ...);

#define ngx_log_debug(level, log, args...) \
if ((log)->log_level & level) \
ngx_log_error_core(NGX_LOG_DEBUG, log, args)

/*********************************/

#else /* no variadic macros */

#define NGX_HAVE_VARIADIC_MACROS 0

void ngx_cdecl ngx_log_error(ngx_uint_t level, ngx_log_t *log, ngx_err_t err,
const char *fmt, ...);
void ngx_log_error_core(ngx_uint_t level, ngx_log_t *log, ngx_err_t err,
const char *fmt, va_list args);
void ngx_cdecl ngx_log_debug_core(ngx_log_t *log, ngx_err_t err,
const char *fmt, ...);


#endif /* variadic macros */

从上面的定义可以看出,实现日志功能的核心在于ngx_log_error_core函数,接下来我们就来看看这个函数的实现

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
/*
 * Core log writer: formats one log line into a stack buffer
 * ("time [level] pid#tid: *connection message errno-text\n") and writes
 * it to every log in the chain whose level permits it.  If nothing was
 * written to stderr and the level is severe enough, the message is also
 * echoed to the console prefixed with "nginx: [level] ".
 *
 * Two signatures: with variadic macros the "..." arguments are consumed
 * here; otherwise the ngx_log_error() wrapper passes a ready va_list.
 */
#if (NGX_HAVE_VARIADIC_MACROS)

void
ngx_log_error_core(ngx_uint_t level, ngx_log_t *log, ngx_err_t err,
    const char *fmt, ...)

#else

void
ngx_log_error_core(ngx_uint_t level, ngx_log_t *log, ngx_err_t err,
    const char *fmt, va_list args)

#endif
{
#if (NGX_HAVE_VARIADIC_MACROS)
    va_list      args;
#endif
    u_char      *p, *last, *msg;
    ssize_t      n;
    ngx_uint_t   wrote_stderr, debug_connection;
    u_char       errstr[NGX_MAX_ERROR_STR];

    last = errstr + NGX_MAX_ERROR_STR;

    /* prefix: cached timestamp, then "[level]" */
    p = ngx_cpymem(errstr, ngx_cached_err_log_time.data,
                   ngx_cached_err_log_time.len);

    p = ngx_slprintf(p, last, " [%V] ", &err_levels[level]);

    /* pid#tid */
    p = ngx_slprintf(p, last, "%P#" NGX_TID_T_FMT ": ",
                    ngx_log_pid, ngx_log_tid);

    if (log->connection) {
        p = ngx_slprintf(p, last, "*%uA ", log->connection);
    }

    /* msg marks where the user-supplied text begins; needed for the
       console fallback at the end */
    msg = p;

#if (NGX_HAVE_VARIADIC_MACROS)

    va_start(args, fmt);
    p = ngx_vslprintf(p, last, fmt, args);
    va_end(args);

#else

    p = ngx_vslprintf(p, last, fmt, args);

#endif

    /* append " (errno: strerror)" if an error code was given */
    if (err) {
        p = ngx_log_errno(p, last, err);
    }

    /* let the owner (e.g. an HTTP request) append context info */
    if (level != NGX_LOG_DEBUG && log->handler) {
        p = log->handler(log, p, last - p);
    }

    /* always leave room for the trailing newline */
    if (p > last - NGX_LINEFEED_SIZE) {
        p = last - NGX_LINEFEED_SIZE;
    }

    ngx_linefeed(p);

    wrote_stderr = 0;
    debug_connection = (log->log_level & NGX_LOG_DEBUG_CONNECTION) != 0;

    /* walk the log chain; logs are ordered so a too-low level stops
       the walk (unless per-connection debugging is forced on) */
    while (log) {

        if (log->log_level < level && !debug_connection) {
            break;
        }

        if (log->writer) {
            log->writer(log, level, errstr, p - errstr);
            goto next;
        }

        if (ngx_time() == log->disk_full_time) {

            /*
             * on FreeBSD writing to a full filesystem with enabled softupdates
             * may block process for much longer time than writing to non-full
             * filesystem, so we skip writing to a log for one second
             */

            goto next;
        }

        n = ngx_write_fd(log->file->fd, errstr, p - errstr);

        if (n == -1 && ngx_errno == NGX_ENOSPC) {
            log->disk_full_time = ngx_time();
        }

        if (log->file->fd == ngx_stderr) {
            wrote_stderr = 1;
        }

    next:

        log = log->next;
    }

    if (!ngx_use_stderr
        || level > NGX_LOG_WARN
        || wrote_stderr)
    {
        return;
    }

    /* rewind over the 'YYYY/MM/DD HH:MM:SS [level] ' prefix so the
       console line can start with "nginx: [level] " instead:
       7 = strlen("nginx: "), 3 = strlen("] " plus "[") adjustment */
    msg -= (7 + err_levels[level].len + 3);

    (void) ngx_sprintf(msg, "nginx: [%V] ", &err_levels[level]);

    (void) ngx_write_console(ngx_stderr, msg, p - msg);
}
  1. 从代码中可以看出,nginx的作者对于变参处理还是很细心的,把各种情况都考虑到了,在处理好变参定义之后,就开始初始化字符串格式:
1
2
3
4
5
6
7
8
9
10
11
12
13
14

p = ngx_cpymem(errstr, ngx_cached_err_log_time.data,
ngx_cached_err_log_time.len);

p = ngx_slprintf(p, last, " [%V] ", &err_levels[level]);

/* pid#tid */
p = ngx_slprintf(p, last, "%P#" NGX_TID_T_FMT ": ",
ngx_log_pid, ngx_log_tid);

if (log->connection) {
p = ngx_slprintf(p, last, "*%uA ", log->connection);
}

  1. 处理变参参数,将变参的值复制到日志串中:
1
2
3
4
5
6
7
8
9
10
11
#if (NGX_HAVE_VARIADIC_MACROS)

va_start(args, fmt);
p = ngx_vslprintf(p, last, fmt, args);
va_end(args);

#else

p = ngx_vslprintf(p, last, fmt, args);

#endif
  1. 处理错误号跟换行:
1
2
3
4
5
6
7
8
9
10
11
12
13
if (err) {
p = ngx_log_errno(p, last, err);
}

if (level != NGX_LOG_DEBUG && log->handler) {
p = log->handler(log, p, last - p);
}

if (p > last - NGX_LINEFEED_SIZE) {
p = last - NGX_LINEFEED_SIZE;
}
ngx_linefeed(p);

  1. 最后就是将日志串输出到文件或者标准错误流

3. 总结

Nginx模块详解

1. Nginx模块综述

nginx的模块非常之多,可以认为所有代码都是以模块的形式组织,这包括核心模块和功能模块,针对不同的应用场合,并非所有的功能模块都要被用到,附录A给出的是默认configure(即简单的http服务器应用)下被连接的模块,这里虽说是模块连接,但nginx不会像apache或lighttpd那样在编译时生成so动态库而在程序执行时再进行动态加载,nginx模块源文件会在生成nginx时就直接被编译到其二进制执行文件中,所以如果要选用不同的功能模块,必须对nginx做重新配置和编译。对于功能模块的选择,如果要修改默认值,需要在进行configure时进行指定,比如新增http_flv功能模块(默认是没有这个功能的,各个选项的默认值可以在文件auto/options内看到):

1
[root@localhost nginx-1.2.0]# ./configure --with-http_flv_module

执行后,生成的objs/ngx_modules.c文件内就包含有对ngx_http_flv_module模块的引用了,要再去掉http_flv功能模块,则需要重新configure,即不带--with-http_flv_module配置后再编译生成新的nginx执行程序。通过执行./configure --help,我们可以看到更多的配置选项。
虽然Nginx模块有很多,并且每个模块实现的功能各不相同,但是根据模块的功能性质,可以将它们分为四个类别:
1, handlers:处理客户端请求并产生待响应内容,比如ngx_http_static_module模块,负责客户端的静态页面请求处理并将对应的磁盘文件准备为响应内容输出。
2, filters:对handlers产生的响应内容做各种过滤处理(即是增删改),比如模块ngx_http_not_modified_filter_module,对待响应内容进行过滤检测,如果通过时间戳判断出前后两次请求的响应内容没有发生任何改变,那么可以直接响应 “304 Not Modified”状态标识,让客户端使用缓存即可,而原本待发送的响应内容将被清除掉。
3, upstream:如果存在后端真实服务器,nginx可利用upstream模块充当反向代理(Proxy)的角色,对客户端发起的请求只负责进行转发(当然也包括后端真实服务器响应的回转),比如ngx_http_proxy_module就为标准的代理模块。
4, load-balance:在nginx充当中间代理时,由于后端真实服务器往往多于一个,对于某一次客户端的请求,如何选择对应的后端真实服务器来进行处理,这就有类似于ngx_http_upstream_ip_hash_module这样的模块来实现不同的负载均衡算法(Load Balance)。
对于这几类模块,我们马上会分别进行详细介绍并分析各自典型代表模块,不过在此之前先从nginx模块源码上来进行直观认识。前面讲过nginx的所有代码都是以模块形式进行组织,而封装nginx模块的结构体为ngx_module_s,定义如下:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
Filename : ngx_conf_file.h
/* Descriptor shared by every nginx module (excerpt; the remaining
   lifecycle callbacks are elided in this listing). */
struct ngx_module_s {
    ngx_uint_t ctx_index; /* index of this module among modules of the same type */
    ngx_uint_t index;     /* index of this module among all modules */

    ngx_uint_t version;   /* module version */

    void *ctx;            /* type-specific context data for this module */
    ngx_command_t *commands; /* directive table this module can parse */
    ngx_uint_t type;      /* module type (NGX_CORE_MODULE, NGX_HTTP_MODULE, ...) */
    /* lifecycle callbacks follow; the invocation time of each can be
       inferred from its name */
    ngx_int_t (*init_master)(ngx_log_t *log);

};
Filename : ngx_core.h
typedef struct ngx_module_s ngx_module_t;

结构体ngx_module_s值得关注的几个字段分别为ctx、commands、type,其中commands字段表示当前模块可以解析的配置项目,这在配置文件解析一章做过详细描述;表示模块类型的type值只有5种可能的值,而同一类型模块的ctx指向的数据类型也相同:

序号 type值 ctx指向数据类型
1 NGX_CORE_MODULE ngx_core_module_t
2 NGX_EVENT_MODULE ngx_event_module_t
3 NGX_CONF_MODULE NULL
4 NGX_HTTP_MODULE ngx_http_module_t
5 NGX_MAIL_MODULE ngx_mail_module_t

上表中第三列里的数据类型非常重要,它们的字段基本都是一些回调函数,这些回调函数会在其模块对应的配置文件解析过程前/中/后会适时的被调用,做一些内存准备、初始化、配置值检查、初始值填充与合并、回调函数挂载等初始工作,以ngx_http_core_module模块为例,该模块type类型为NGX_HTTP_MODULE,ctx指向的ngx_http_module_t结构体变量ngx_http_core_module_ctx:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
Filename : ngx_http_core_module.c
/* NGX_HTTP_MODULE context for ngx_http_core_module: the callbacks here
   are invoked around http{} configuration parsing to create, initialize
   and merge the main/server/location configuration structures. */
static ngx_http_module_t ngx_http_core_module_ctx = {
    ngx_http_core_preconfiguration,        /* preconfiguration */
    NULL,                                  /* postconfiguration */

    ngx_http_core_create_main_conf,        /* create main configuration */
    ngx_http_core_init_main_conf,          /* init main configuration */

    ngx_http_core_create_srv_conf,         /* create server configuration */
    ngx_http_core_merge_srv_conf,          /* merge server configuration */

    ngx_http_core_create_loc_conf,         /* create location configuration */
    ngx_http_core_merge_loc_conf           /* merge location configuration */
};

根据上面代码注释,可以很明显地看出各个回调函数的回调时机,比如函数 *ngx_http_core_preconfiguration()* 将在进行http块配置解析前被调用,所以在 *ngx_http_block()* 函数里可以看到这样的代码:

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
Filename : ngx_http.c
static char *
ngx_http_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)

if (module->preconfiguration) {
if (module->preconfiguration(cf) != NGX_OK) {
return NGX_CONF_ERROR;
}
}

rv = ngx_conf_parse(cf, NULL);

if (module->postconfiguration) {
if (module->postconfiguration(cf) != NGX_OK) {
return NGX_CONF_ERROR;
}
}


至于这些回调函数内的具体逻辑,如前所述一般是一些初始或默认值填充工作,但也有回调函数挂载的设置,比如ngx_http_static_module模块的postconfiguration字段回调函数ngx_http_static_init()就是将自己的处理函数ngx_http_static_handler()挂载在http处理状态机上,但总体来看这毕竟都只是一些简单的初始准备工作,值得一提的还有ngx_http_core_create_main_conf、ngx_http_core_create_srv_conf、ngx_http_core_create_loc_conf这三个回调函数用来创建存储位于http块、server块、location块配置项的内存。

参考文献:

nginx核心讲解

Dos常用命令

sc

sc命令可用于卸载windows服务,如下所示:

sc delete serv

其中serv为服务名,可在服务中查看

xcopy

xcopy命令用于Dos下复制文件夹及子文件夹,如下所示:

xcopy d:\abc\aaa\*.* c:\windows\system /s

其中第一个参数是源位置,第二个参数是目标位置
/s:复制目录和子目录,除了空的。

taskkill

taskkill命令用于杀进程,可根据pid,也可根据进程名。

taskkill /im nginx.exe /f
taskkill /pid {pid}

中国菜翻译方法

以主料为主

配料或配汁为辅的翻译原则
菜肴的主料和配料

主料(名称/形状)+ with + 配料

如:松仁香菇 Chinese Mushrooms with Pine Nuts

菜肴的主料和配汁

主料 + with / in + 汤汁(Sauce)

如:冰梅凉瓜 Bitter Melon in Plum Sauce

以烹制方法为主

原料为辅的翻译原则
菜肴的做法和主料

做法(动词过去分词)+ 主料(名称/形状)

如:拌双耳 Tossed Black and White Fungus

菜肴的做法、主料和汤汁

做法(动词过去分词) + 主料(名称/形状)+ with / in + 汤汁

如:京酱肉丝 Sautéed Shredded Pork in Sweet Bean Sauce

以形状、口感为主

原料为辅的翻译原则
菜肴形状或口感以及主配料

形状/口感 + 主料

如:玉兔馒头 Rabbit-Shaped Mantou
菜肴的做法、形状或口感、做法以及主配料

做法(动词过去分词)+ 形状/口感 + 主料 + 配料

如: 小炒黑山羊 Sautéed Sliced Lamb with Pepper and Parsley