nginx upstream模块详解(处理流程篇二 upstream与event_pipe交互)
2018-03-15 12:13
681 查看
ngx_event_pipe 为 upstream 提供了读取上游服务器返回包体数据的能力，同时能够将包体数据发送到请求端
ngx_event_pipe具体的结构在点击打开链接
ngx_event_pipe函数负责在upstream包体数据处理过程中读取上游服务器包体数据 并且在处理上游包体数据的过程中 发送到请求端 这种处理流程经过测试验证 的确如此。
提供给upstream模块服务的函数只有ngx_event_pipe 其内置处理的函数有:
ngx_event_pipe_read_upstream 负责读取上游返回的包体 ngx_event_pipe_write_to_downstream负责将包体发送到请求端
ngx_event_pipe_write_chain_to_temp_file 会将包体的数据逐步写入到一个临时文件 这里的临时文件到后面被用作缓存文件
ngx_event_pipe_remove_shadow_links
ngx_event_pipe_drain_chains
ngx_event_pipe函数处理过程
ngx_int_t
ngx_event_pipe(ngx_event_pipe_t *p, ngx_int_t do_write)
{
...
for ( ;; ) {
if (do_write) {
p->log->action = "sending to client";
rc = ngx_event_pipe_write_to_downstream(p); //写标记 包体数据写入到下游请求端
if (rc == NGX_ABORT) {
return NGX_ABORT;
}
if (rc == NGX_BUSY) { //来不及处理
return NGX_OK;
}
}
p->read = 0;
p->upstream_blocked = 0;
p->log->action = "reading upstream";
if (ngx_event_pipe_read_upstream(p) == NGX_ABORT) { //执行上游包体数据读取处理
return NGX_ABORT;
}
if (!p->read && !p->upstream_blocked) { //在上游包体读取未阻塞状态下 没有读取到数据 跳出
break;
}
do_write = 1; //包体读取一般先执行
}
if (p->upstream->fd != (ngx_socket_t) -1) { //对上游连接的socket是有效的
rev = p->upstream->read;
flags = (rev->eof || rev->error) ? NGX_CLOSE_EVENT : 0; //上游包体读取出错或者没有数据可读 事件会被清理 否则什么也不做
if (ngx_handle_read_event(rev, flags) != NGX_OK) { //满足flag标记对读事件的处理
return NGX_ABORT;
}
... //添加定时器
}
if (p->downstream->fd != (ngx_socket_t) -1
&& p->downstream->data == p->output_ctx) //同上
{
wev = p->downstream->write;
if (ngx_handle_write_event(wev, p->send_lowat) != NGX_OK) { //对低潮值进行发送(如果有) 同时会对请求端的写事件进行处理
return NGX_ABORT;
}
... //定时器
}
return NGX_OK;
}
ngx_event_pipe_read_upstream 处理说明
static ngx_int_t
ngx_event_pipe_read_upstream(ngx_event_pipe_t *p)
{
...
if (p->upstream_eof || p->upstream_error || p->upstream_done) {
return NGX_OK;
}
#if (NGX_THREADS)
...
#endif
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe read upstream: %d", p->upstream->read->ready);
for ( ;; ) {
if (p->upstream_eof || p->upstream_error || p->upstream_done) {
break;
}
if (p->preread_bufs == NULL && !p->upstream->read->ready) {
break;
}
if (p->preread_bufs) { //pre-read 预读取buffer (包含了缓存header头部和key信息 以及http头) 实际由u->buffer传入
/* use the pre-read bufs if they exist */
chain = p->preread_bufs;
p->preread_bufs = NULL;
n = p->preread_size;
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe preread: %z", n);
if (n) { //已经读取了数据read标记
p->read = 1;
}
} else {
#if (NGX_HAVE_KQUEUE)
/*
* kqueue notifies about the end of file or a pending error.
* This test allows not to allocate a buf on these conditions
* and not to call c->recv_chain().
*/
if (p->upstream->read->available == 0
&& p->upstream->read->pending_eof)
{
p->upstream->read->ready = 0;
p->upstream->read->eof = 1;
p->upstream_eof = 1;
p->read = 1;
if (p->upstream->read->kq_errno) {
p->upstream->read->error = 1;
p->upstream_error = 1;
p->upstream_eof = 0;
ngx_log_error(NGX_LOG_ERR, p->log,
p->upstream->read->kq_errno,
"kevent() reported that upstream "
"closed connection");
}
break;
}
#endif
if (p->limit_rate) { //有限速设置
if (p->upstream->read->delayed) {
break;
}
limit = (off_t) p->limit_rate * (ngx_time() - p->start_sec + 1)
- p->read_length;
if (limit <= 0) {
p->upstream->read->delayed = 1;
delay = (ngx_msec_t) (- limit * 1000 / p->limit_rate + 1); //设置上游读取延时并加入到定时器
ngx_add_timer(p->upstream->read, delay);
break;
}
} else {
limit = 0; //
}
if (p->free_raw_bufs) {
/* use the free bufs if they exist */
chain = p->free_raw_bufs;
if (p->single_buf) { //只有一个buffer
p->free_raw_bufs = p->free_raw_bufs->next;
chain->next = NULL;
} else {
p->free_raw_bufs = NULL;
}
} else if (p->allocated < p->bufs.num) { //已经分配的buffer数量没有超过配置的值 配置的数量的缓冲尽可能地申请
/* allocate a new buf if it's still allowed */
b = ngx_create_temp_buf(p->pool, p->bufs.size);
if (b == NULL) {
return NGX_ABORT;
}
p->allocated++;
chain = ngx_alloc_chain_link(p->pool);
if (chain == NULL) {
return NGX_ABORT;
}
chain->buf = b;
chain->next = NULL;
} else if (!p->cacheable
&& p->downstream->data == p->output_ctx
&& p->downstream->write->ready
&& !p->downstream->write->delayed)
{
/*
* if the bufs are not needed to be saved in a cache and
* a downstream is ready then write the bufs to a downstream
*/
p->upstream_blocked = 1;
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe downstream ready");
break;
} else if (p->cacheable
|| p->temp_file->offset < p->max_temp_file_size) //满足可缓冲的条件
{
/*
* if it is allowed, then save some bufs from p->in
* to a temporary file, and add them to a p->out chain
*/
rc = ngx_event_pipe_write_chain_to_temp_file(p); //响应包体写入临时文件
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe temp offset: %O", p->temp_file->offset);
if (rc == NGX_BUSY) {
break;
}
if (rc != NGX_OK) {
return rc;
}
chain = p->free_raw_bufs;
if (p->single_buf) {
p->free_raw_bufs = p->free_raw_bufs->next;
chain->next = NULL;
} else {
p->free_raw_bufs = NULL;
}
} else {
/* there are no bufs to read in */ //没有buffer缓冲去读取数据了
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, p->log, 0,
"no pipe bufs to read in");
break;
}
n = p->upstream->recv_chain(p->upstream, chain, limit); //开始接收上游包体数据
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe recv chain: %z", n);
if (p->free_raw_bufs) {
chain->next = p->free_raw_bufs;
}
p->free_raw_bufs = chain;
if (n == NGX_ERROR) { //读取包体出错
p->upstream_error = 1;
break;
}
if (n == NGX_AGAIN) {
if (p->single_buf) { //只有一个buffer 移除掉buffer的shadow_link
ngx_event_pipe_remove_shadow_links(chain->buf);
}
break;
}
p->read = 1; //读取标记设置为真
if (n == 0) { //没有数据可读 upstream_eof标记 并且跳出
p->upstream_eof = 1;
break;
}
}
delay = p->limit_rate ? (ngx_msec_t) n * 1000 / p->limit_rate : 0; //根据读取的字节数及配置的值设置读取延时
p->read_length += n; //读取包体长度更新
cl = chain;
p->free_raw_bufs = NULL;
while (cl && n > 0) { //有效数据长度 这里要先移除掉buffer的shadow_link
ngx_event_pipe_remove_shadow_links(cl->buf);
size = cl->buf->end - cl->buf->last; //buffer可用字节数
if (n >= size) { //buffer不够用
cl->buf->last = cl->buf->end;
/* STUB */ cl->buf->num = p->num++; //stub信息更新
if (p->input_filter(p, cl->buf) == NGX_ERROR) {
return NGX_ABORT;
}
n -= size;
ln = cl;
cl = cl->next;
ngx_free_chain(p->pool, ln); //缓冲chain释放
} else {
cl->buf->last += n;
n = 0;
}
}
if (cl) {
for (ln = cl; ln->next; ln = ln->next) { /* void */ } //取得cl最尾部buffer chain
ln->next = p->free_raw_bufs;
p->free_raw_bufs = cl;
}
if (delay > 0) { //有设置延时 则加入到定时器中
p->upstream->read->delayed = 1;
ngx_add_timer(p->upstream->read, delay);
break;
}
}
#if (NGX_DEBUG)
... //buffer chain缓冲信息打印
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe length: %O", p->length);
#endif
if (p->free_raw_bufs && p->length != -1) { //p->length 代表的是剩余包体的长度
cl = p->free_raw_bufs;
if (cl->buf->last - cl->buf->pos >= p->length) {
p->free_raw_bufs = cl->next;
/* STUB */ cl->buf->num = p->num++;
if (p->input_filter(p, cl->buf) == NGX_ERROR) { //包体的input_filter处理
return NGX_ABORT;
}
ngx_free_chain(p->pool, cl);
}
}
if (p->length == 0) { //包体数据读取完成
p->upstream_done = 1;
p->read = 1;
}
if ((p->upstream_eof || p->upstream_error) && p->free_raw_bufs) { //在没有数据可读或者读取出错情况下 对数据进行处理
/* STUB */ p->free_raw_bufs->buf->num = p->num++;
if (p->input_filter(p, p->free_raw_bufs->buf) == NGX_ERROR) { //
return NGX_ABORT;
}
p->free_raw_bufs = p->free_raw_bufs->next;
if (p->free_bufs && p->buf_to_file == NULL) { //有空闲的buffer同时写入文件的buffer不存在时
for (cl = p->free_raw_bufs; cl; cl = cl->next) {
if (cl->buf->shadow == NULL) { //清除没有shadow的buffer
ngx_pfree(p->pool, cl->buf->start);
}
}
}
}
if (p->cacheable && (p->in || p->buf_to_file)) {
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe write chain");
rc = ngx_event_pipe_write_chain_to_temp_file(p);
if (rc != NGX_OK) {
return rc;
}
}
return NGX_OK;
}
ngx_event_pipe_write_to_downstream处理说明
static ngx_int_t
ngx_event_pipe_write_to_downstream(ngx_event_pipe_t *p)
{
...
downstream = p->downstream;
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe write downstream: %d", downstream->write->ready);
#if (NGX_THREADS)
if (p->writing) {
rc = ngx_event_pipe_write_chain_to_temp_file(p);
if (rc == NGX_ABORT) {
return NGX_ABORT;
}
}
#endif
flushed = 0;
for ( ;; ) {
if (p->downstream_error) { //往请求端发送出错
return ngx_event_pipe_drain_chains(p); //busy, out, in 三个缓冲chain释放 同时释放shadow缓冲并将空闲的buffer加入到pipe中
}
if (p->upstream_eof || p->upstream_error || p->upstream_done) { //满足上游包体数据读取"完成条件"
/* pass the p->out and p->in chains to the output filter */
for (cl = p->busy; cl; cl = cl->next) {
cl->buf->recycled = 0;
}
if (p->out) {
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe write downstream flush out");
for (cl = p->out; cl; cl = cl->next) {
cl->buf->recycled = 0;
}
rc = p->output_filter(p->output_ctx, p->out); //响应到请求端的包体output_filter过滤处理
if (rc == NGX_ERROR) { //写入到请求端出错
p->downstream_error = 1;
return ngx_event_pipe_drain_chains(p);
}
p->out = NULL;
}
if (p->writing) { //还有往请求端写入的缓冲链
break;
}
if (p->in) {
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe write downstream flush in");
for (cl = p->in; cl; cl = cl->next) {
cl->buf->recycled = 0;
}
rc = p->output_filter(p->output_ctx, p->in); //upstream模块的output_filter会把数据发送给请求端
if (rc == NGX_ERROR) {
p->downstream_error = 1;
return ngx_event_pipe_drain_chains(p); ... } } }
ngx_event_pipe先处理上游包体数据 这里的上游包体数据在event_pipe功能中由ngx_event_pipe_read_upstream负责处理;在处理读取上游包体数据的同时,会得到向下游请求端写入响应包体的处理,实际写入到下游请求端的功能是由ngx_event_pipe_write_to_downstream来完成。
下面是ngx_event_pipe处理流程的图解说明
ngx_event_pipe具体的结构在点击打开链接
ngx_event_pipe函数负责在upstream包体数据处理过程中读取上游服务器包体数据 并且在处理上游包体数据的过程中 发送到请求端 这种处理流程经过测试验证 的确如此。
提供给upstream模块服务的函数只有ngx_event_pipe 其内置处理的函数有:
ngx_event_pipe_read_upstream 负责读取上游返回的包体 ngx_event_pipe_write_to_downstream负责将包体发送到请求端
ngx_event_pipe_write_chain_to_temp_file 会将包体的数据逐步写入到一个临时文件 这里的临时文件到后面被用作缓存文件
ngx_event_pipe_remove_shadow_links
ngx_event_pipe_drain_chains
ngx_event_pipe函数处理过程
ngx_int_t
ngx_event_pipe(ngx_event_pipe_t *p, ngx_int_t do_write)
{
...
for ( ;; ) {
if (do_write) {
p->log->action = "sending to client";
rc = ngx_event_pipe_write_to_downstream(p); //写标记 包体数据写入到下游请求端
if (rc == NGX_ABORT) {
return NGX_ABORT;
}
if (rc == NGX_BUSY) { //来不及处理
return NGX_OK;
}
}
p->read = 0;
p->upstream_blocked = 0;
p->log->action = "reading upstream";
if (ngx_event_pipe_read_upstream(p) == NGX_ABORT) { //执行上游包体数据读取处理
return NGX_ABORT;
}
if (!p->read && !p->upstream_blocked) { //在上游包体读取未阻塞状态下 没有读取到数据 跳出
break;
}
do_write = 1; //包体读取一般先执行
}
if (p->upstream->fd != (ngx_socket_t) -1) { //对上游连接的socket是有效的
rev = p->upstream->read;
flags = (rev->eof || rev->error) ? NGX_CLOSE_EVENT : 0; //上游包体读取出错或者没有数据可读 事件会被清理 否则什么也不做
if (ngx_handle_read_event(rev, flags) != NGX_OK) { //满足flag标记对读事件的处理
return NGX_ABORT;
}
... //添加定时器
}
if (p->downstream->fd != (ngx_socket_t) -1
&& p->downstream->data == p->output_ctx) //同上
{
wev = p->downstream->write;
if (ngx_handle_write_event(wev, p->send_lowat) != NGX_OK) { //对低潮值进行发送(如果有) 同时会对请求端的写事件进行处理
return NGX_ABORT;
}
... //定时器
}
return NGX_OK;
}
ngx_event_pipe_read_upstream 处理说明
static ngx_int_t
ngx_event_pipe_read_upstream(ngx_event_pipe_t *p)
{
...
if (p->upstream_eof || p->upstream_error || p->upstream_done) {
return NGX_OK;
}
#if (NGX_THREADS)
...
#endif
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe read upstream: %d", p->upstream->read->ready);
for ( ;; ) {
if (p->upstream_eof || p->upstream_error || p->upstream_done) {
break;
}
if (p->preread_bufs == NULL && !p->upstream->read->ready) {
break;
}
if (p->preread_bufs) { //pre-read 预读取buffer (包含了缓存header头部和key信息 以及http头) 实际由u->buffer传入
/* use the pre-read bufs if they exist */
chain = p->preread_bufs;
p->preread_bufs = NULL;
n = p->preread_size;
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe preread: %z", n);
if (n) { //已经读取了数据read标记
p->read = 1;
}
} else {
#if (NGX_HAVE_KQUEUE)
/*
* kqueue notifies about the end of file or a pending error.
* This test allows not to allocate a buf on these conditions
* and not to call c->recv_chain().
*/
if (p->upstream->read->available == 0
&& p->upstream->read->pending_eof)
{
p->upstream->read->ready = 0;
p->upstream->read->eof = 1;
p->upstream_eof = 1;
p->read = 1;
if (p->upstream->read->kq_errno) {
p->upstream->read->error = 1;
p->upstream_error = 1;
p->upstream_eof = 0;
ngx_log_error(NGX_LOG_ERR, p->log,
p->upstream->read->kq_errno,
"kevent() reported that upstream "
"closed connection");
}
break;
}
#endif
if (p->limit_rate) { //有限速设置
if (p->upstream->read->delayed) {
break;
}
limit = (off_t) p->limit_rate * (ngx_time() - p->start_sec + 1)
- p->read_length;
if (limit <= 0) {
p->upstream->read->delayed = 1;
delay = (ngx_msec_t) (- limit * 1000 / p->limit_rate + 1); //设置上游读取延时并加入到定时器
ngx_add_timer(p->upstream->read, delay);
break;
}
} else {
limit = 0; //
}
if (p->free_raw_bufs) {
/* use the free bufs if they exist */
chain = p->free_raw_bufs;
if (p->single_buf) { //只有一个buffer
p->free_raw_bufs = p->free_raw_bufs->next;
chain->next = NULL;
} else {
p->free_raw_bufs = NULL;
}
} else if (p->allocated < p->bufs.num) { //已经分配的buffer数量没有超过配置的值 配置的数量的缓冲尽可能地申请
/* allocate a new buf if it's still allowed */
b = ngx_create_temp_buf(p->pool, p->bufs.size);
if (b == NULL) {
return NGX_ABORT;
}
p->allocated++;
chain = ngx_alloc_chain_link(p->pool);
if (chain == NULL) {
return NGX_ABORT;
}
chain->buf = b;
chain->next = NULL;
} else if (!p->cacheable
&& p->downstream->data == p->output_ctx
&& p->downstream->write->ready
&& !p->downstream->write->delayed)
{
/*
* if the bufs are not needed to be saved in a cache and
* a downstream is ready then write the bufs to a downstream
*/
p->upstream_blocked = 1;
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe downstream ready");
break;
} else if (p->cacheable
|| p->temp_file->offset < p->max_temp_file_size) //满足可缓冲的条件
{
/*
* if it is allowed, then save some bufs from p->in
* to a temporary file, and add them to a p->out chain
*/
rc = ngx_event_pipe_write_chain_to_temp_file(p); //响应包体写入临时文件
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe temp offset: %O", p->temp_file->offset);
if (rc == NGX_BUSY) {
break;
}
if (rc != NGX_OK) {
return rc;
}
chain = p->free_raw_bufs;
if (p->single_buf) {
p->free_raw_bufs = p->free_raw_bufs->next;
chain->next = NULL;
} else {
p->free_raw_bufs = NULL;
}
} else {
/* there are no bufs to read in */ //没有buffer缓冲去读取数据了
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, p->log, 0,
"no pipe bufs to read in");
break;
}
n = p->upstream->recv_chain(p->upstream, chain, limit); //开始接收上游包体数据
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe recv chain: %z", n);
if (p->free_raw_bufs) {
chain->next = p->free_raw_bufs;
}
p->free_raw_bufs = chain;
if (n == NGX_ERROR) { //读取包体出错
p->upstream_error = 1;
break;
}
if (n == NGX_AGAIN) {
if (p->single_buf) { //只有一个buffer 移除掉buffer的shadow_link
ngx_event_pipe_remove_shadow_links(chain->buf);
}
break;
}
p->read = 1; //读取标记设置为真
if (n == 0) { //没有数据可读 upstream_eof标记 并且跳出
p->upstream_eof = 1;
break;
}
}
delay = p->limit_rate ? (ngx_msec_t) n * 1000 / p->limit_rate : 0; //根据读取的字节数及配置的值设置读取延时
p->read_length += n; //读取包体长度更新
cl = chain;
p->free_raw_bufs = NULL;
while (cl && n > 0) { //有效数据长度 这里要先移除掉buffer的shadow_link
ngx_event_pipe_remove_shadow_links(cl->buf);
size = cl->buf->end - cl->buf->last; //buffer可用字节数
if (n >= size) { //buffer不够用
cl->buf->last = cl->buf->end;
/* STUB */ cl->buf->num = p->num++; //stub信息更新
if (p->input_filter(p, cl->buf) == NGX_ERROR) {
return NGX_ABORT;
}
n -= size;
ln = cl;
cl = cl->next;
ngx_free_chain(p->pool, ln); //缓冲chain释放
} else {
cl->buf->last += n;
n = 0;
}
}
if (cl) {
for (ln = cl; ln->next; ln = ln->next) { /* void */ } //取得cl最尾部buffer chain
ln->next = p->free_raw_bufs;
p->free_raw_bufs = cl;
}
if (delay > 0) { //有设置延时 则加入到定时器中
p->upstream->read->delayed = 1;
ngx_add_timer(p->upstream->read, delay);
break;
}
}
#if (NGX_DEBUG)
... //buffer chain缓冲信息打印
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe length: %O", p->length);
#endif
if (p->free_raw_bufs && p->length != -1) { //p->length 代表的是剩余包体的长度
cl = p->free_raw_bufs;
if (cl->buf->last - cl->buf->pos >= p->length) {
p->free_raw_bufs = cl->next;
/* STUB */ cl->buf->num = p->num++;
if (p->input_filter(p, cl->buf) == NGX_ERROR) { //包体的input_filter处理
return NGX_ABORT;
}
ngx_free_chain(p->pool, cl);
}
}
if (p->length == 0) { //包体数据读取完成
p->upstream_done = 1;
p->read = 1;
}
if ((p->upstream_eof || p->upstream_error) && p->free_raw_bufs) { //在没有数据可读或者读取出错情况下 对数据进行处理
/* STUB */ p->free_raw_bufs->buf->num = p->num++;
if (p->input_filter(p, p->free_raw_bufs->buf) == NGX_ERROR) { //
return NGX_ABORT;
}
p->free_raw_bufs = p->free_raw_bufs->next;
if (p->free_bufs && p->buf_to_file == NULL) { //有空闲的buffer同时写入文件的buffer不存在时
for (cl = p->free_raw_bufs; cl; cl = cl->next) {
if (cl->buf->shadow == NULL) { //清除没有shadow的buffer
ngx_pfree(p->pool, cl->buf->start);
}
}
}
}
if (p->cacheable && (p->in || p->buf_to_file)) {
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe write chain");
rc = ngx_event_pipe_write_chain_to_temp_file(p);
if (rc != NGX_OK) {
return rc;
}
}
return NGX_OK;
}
ngx_event_pipe_write_to_downstream处理说明
static ngx_int_t
ngx_event_pipe_write_to_downstream(ngx_event_pipe_t *p)
{
...
downstream = p->downstream;
ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe write downstream: %d", downstream->write->ready);
#if (NGX_THREADS)
if (p->writing) {
rc = ngx_event_pipe_write_chain_to_temp_file(p);
if (rc == NGX_ABORT) {
return NGX_ABORT;
}
}
#endif
flushed = 0;
for ( ;; ) {
if (p->downstream_error) { //往请求端发送出错
return ngx_event_pipe_drain_chains(p); //busy, out, in 三个缓冲chain释放 同时释放shadow缓冲并将空闲的buffer加入到pipe中
}
if (p->upstream_eof || p->upstream_error || p->upstream_done) { //满足上游包体数据读取"完成条件"
/* pass the p->out and p->in chains to the output filter */
for (cl = p->busy; cl; cl = cl->next) {
cl->buf->recycled = 0;
}
if (p->out) {
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe write downstream flush out");
for (cl = p->out; cl; cl = cl->next) {
cl->buf->recycled = 0;
}
rc = p->output_filter(p->output_ctx, p->out); //响应到请求端的包体output_filter过滤处理
if (rc == NGX_ERROR) { //写入到请求端出错
p->downstream_error = 1;
return ngx_event_pipe_drain_chains(p);
}
p->out = NULL;
}
if (p->writing) { //还有往请求端写入的缓冲链
break;
}
if (p->in) {
ngx_log_debug0(NGX_LOG_DEBUG_EVENT, p->log, 0,
"pipe write downstream flush in");
for (cl = p->in; cl; cl = cl->next) {
cl->buf->recycled = 0;
}
rc = p->output_filter(p->output_ctx, p->in); //upstream模块的output_filter会把数据发送给请求端
if (rc == NGX_ERROR) {
p->downstream_error = 1;
return ngx_event_pipe_drain_chains(p); ... } } }
static ngx_int_t ngx_event_pipe_write_chain_to_temp_file(ngx_event_pipe_t *p) { ... #if (NGX_THREADS) if (p->writing) { ... //多线程处理 } #endif if (p->buf_to_file) { //写往临时文件的buffer加入到为发送到请求段的out 缓冲链 out = ngx_alloc_chain_link(p->pool); if (out == NULL) { return NGX_ABORT; } out->buf = p->buf_to_file; out->next = p->in; } else { out = p->in; } if (!p->cacheable) { size = 0; cl = out; ll = NULL; prev_last_shadow = 1; ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0, "pipe offset: %O", p->temp_file->offset); do { bsize = cl->buf->last - cl->buf->pos; ngx_log_debug4(NGX_LOG_DEBUG_EVENT, p->log, 0, "pipe buf ls:%d %p, pos %p, size: %z", cl->buf->last_shadow, cl->buf->start, cl->buf->pos, bsize); if (prev_last_shadow && ((size + bsize > p->temp_file_write_size) //缓冲链中的数据大小超过了配置的temp_file_write_size的大小 或者最大临时文件大小 || (p->temp_file->offset + size + bsize > p->max_temp_file_size))) { break; } prev_last_shadow = cl->buf->last_shadow; size += bsize; ll = &cl->next; cl = cl->next; } while (cl); //while遍历 ngx_log_debug1(NGX_LOG_DEBUG_EVENT, p->log, 0, "size: %z", size); if (ll == NULL) { return NGX_BUSY; } if (cl) { //out缓冲链最后一个buffer不为空 p->in = cl; *ll = NULL; } else { //为空 更新 p->in = NULL; p->last_in = &p->in; } } else { p->in = NULL; p->last_in = &p->in; } #if (NGX_THREADS) if (p->thread_handler) { p->temp_file->thread_write = 1; p->temp_file->file.thread_task = p->thread_task; p->temp_file->file.thread_handler = p->thread_handler; p->temp_file->file.thread_ctx = p->thread_ctx; } #endif n = ngx_write_chain_to_temp_file(p->temp_file, out); //out chain更新到临时文件中 if (n == NGX_ERROR) { return NGX_ABORT; } #if (NGX_THREADS) if (n == NGX_AGAIN) { p->writing = out; p->thread_task = p->temp_file->file.thread_task; return NGX_AGAIN; } done: #endif if (p->buf_to_file) { p->temp_file->offset = p->buf_to_file->last - p->buf_to_file->pos; n -= p->buf_to_file->last - p->buf_to_file->pos; p->buf_to_file = NULL; out = out->next; } if (n > 0) { //成功有数据写入到临时文件 /* update previous buffer or add new 
buffer */ if (p->out) { for (cl = p->out; cl->next; cl = cl->next) { /* void */ } b = cl->buf; if (b->file_last == p->temp_file->offset) { p->temp_file->offset += n; b->file_last = p->temp_file->offset; goto free; } last_out = &cl->next; } else { last_out = &p->out; } cl = ngx_chain_get_free_buf(p->pool, &p->free); if (cl == NULL) { return NGX_ABORT; } b = cl->buf; ngx_memzero(b, sizeof(ngx_buf_t)); b->tag = p->tag; b->file = &p->temp_file->file; b->file_pos = p->temp_file->offset; p->temp_file->offset += n; //增加临时文件偏移 b->file_last = p->temp_file->offset; //更新buffer文件偏移信息 b->in_file = 1; b->temp_file = 1; *last_out = cl; } free: for (last_free = &p->free_raw_bufs; *last_free != NULL; last_free = &(*last_free)->next) //找到free_raw_bufs最后的空闲buffer { /* void */ } for (cl = out; cl; cl = next) { next = cl->next; cl->next = p->free; p->free = cl; b = cl->buf; if (b->last_shadow) { tl = ngx_alloc_chain_link(p->pool); if (tl == NULL) { return NGX_ABORT; } tl->buf = b->shadow; tl->next = NULL; *last_free = tl; last_free = &tl->next; b->shadow->pos = b->shadow->start; b->shadow->last = b->shadow->start; ngx_event_pipe_remove_shadow_links(b->shadow); } } return NGX_OK; }函数 ngx_event_pipe会被upstream模块设置好的ngx_http_upstream_process_upstream 上游有包体数据过来,进行触发;同时也会被ngx_http_upstream_process_downstream这个设为下游请求端响应写处理触发。
ngx_event_pipe先处理上游包体数据 这里的上游包体数据在event_pipe功能中由ngx_event_pipe_read_upstream负责处理;在处理读取上游包体数据的同时,会得到向下游请求端写入响应包体的处理,实际写入到下游请求端的功能是由ngx_event_pipe_write_to_downstream来完成。
下面是ngx_event_pipe处理流程的图解说明
相关文章推荐
- nginx upstream模块详解(处理流程篇一 upstream处理)
- nginx模块描述,模块分类,处理流程
- Nginx之模块处理流程
- nginx upstream 模块详解
- nginx之upstream模块缓存系统详解
- Nginx event核心模块之epoll模块详解(三)
- nginx之upstream模块缓存系统详解
- 详解Nginx的核心配置模块中对于请求体的接受流程
- nginx 的模块及处理流程
- 举例详解Python中smtplib模块处理电子邮件的使用
- nginx模块开发(三):upstream
- PBOC/EMV-交易流程详解--POS与卡片的数据交互进行分析
- nginx学习笔记三(nginx启动框架的处理流程)
- XBMC界面交互处理流程
- Nginx RTMP 模块 nginx-rtmp-module 指令详解
- Nginx源码分析 - Event事件篇 - Event模块和配置的初始化
- 第二节struts1的处理流程及配置文件详解
- nginx利用第三方模块nginx_upstream_check_module来检查后端服务器的健康情况
- Nginx学习笔记--Nginx图片缩略图,水印处理模块
- android处理触摸(touchEvent)详细流程