author     Max Romanov <max.romanov@nginx.com>    2020-02-03 11:20:59 +0300
committer  Max Romanov <max.romanov@nginx.com>    2020-02-03 11:20:59 +0300
commit     7ea9ebc55a22fec53bb095975e639d4fbfebb575 (patch)
tree       15925f0f0df88ea50955213ccee3b972e0a5639f
parent     8c0f2cebf5eba555d104c42b556e54635f5d0890 (diff)
Fixed req_app_link reference counting on cancellation.
Re-scheduled req_app_link structures should have a use_count exactly equal
to the number of references to them from the application and the port list.
However, one extra usage decrement occurred right after a req_app_link was
created, because its use_count is initialised to 1.  This patch removes the
excess usage decrements that caused premature req_app_link release and a
router process crash.  To reproduce the issue, request re-scheduling between
two application processes has to be triggered.  The bug was introduced in
61e9f23a566d.
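
For illustration, the intended counting can be modelled by the following
simplified C sketch.  The names (ra_link_t, ra_link_create(), ra_link_use())
are hypothetical stand-ins for Unit's req_app_link and
nxt_request_app_link_use(); this is a sketch of the general pattern, not the
actual router source:

    #include <stdlib.h>

    typedef struct {
        int  use_count;  /* one count per holder of the pointer */
    } ra_link_t;

    static ra_link_t *
    ra_link_create(void)
    {
        ra_link_t  *ra = malloc(sizeof(ra_link_t));

        if (ra != NULL) {
            ra->use_count = 1;  /* the creator holds the first reference */
        }

        return ra;
    }

    static void
    ra_link_use(ra_link_t *ra, int i)
    {
        ra->use_count += i;

        if (ra->use_count == 0) {
            free(ra);  /* released when the last holder lets go */
        }
    }

Every place that stores the pointer (the application's request list, the
port list, a queued work item) must be matched by ra_link_use(ra, 1), and
every holder that drops the pointer by ra_link_use(ra, -1).  The bug fixed
here was an unmatched -1: it drove use_count to zero while the structure
was still linked, so the router later touched freed memory.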
-rw-r--r--  src/nxt_router.c  23
1 file changed, 7 insertions(+), 16 deletions(-)
diff --git a/src/nxt_router.c b/src/nxt_router.c
index 6a1f3792..3ff048c5 100644
--- a/src/nxt_router.c
+++ b/src/nxt_router.c
@@ -3750,8 +3750,6 @@ nxt_router_response_error_handler(nxt_task_t *task, nxt_port_recv_msg_t *msg,
         nxt_router_app_prepare_request(task, req_app_link);
     }
 
-    nxt_request_app_link_use(task, req_app_link, -1);
-
     msg->port_msg.last = 0;
 
     return;
@@ -4220,28 +4218,23 @@ re_ra_cancelled:
         if (nxt_router_port_post_select(task, &state) == NXT_OK) {
             /*
              * There should be a call to nxt_request_app_link_inc_use(re_ra),
-             * but we need to decrement use then. So, let's skip both.
+             * because the queue holds one more link.  The corresponding
+             * decrement is in nxt_router_app_process_request().
              */
+            nxt_request_app_link_inc_use(re_ra);
+
             nxt_work_queue_add(&task->thread->engine->fast_work_queue,
                                nxt_router_app_process_request,
                                &task->thread->engine->task, app, re_ra);
-
-        } else {
-            /*
-             * This call should be unconditional, but we want to spare a
-             * couple of CPU ticks to postpone the heat death of the universe.
-             */
-
-            nxt_request_app_link_use(task, re_ra, -1);
         }
     }
 
     if (req_app_link != NULL) {
         /*
-         * Here we do the same trick as described above,
-         * but without conditions.
-         * Skip required nxt_request_app_link_inc_use(req_app_link).
+         * There should be a call to nxt_request_app_link_inc_use(req_app_link)
+         * because the queue holds one more link.  But one link was recently
+         * removed from the app->requests list.
          */
 
         nxt_work_queue_add(&task->thread->engine->fast_work_queue,
@@ -5205,8 +5198,6 @@ nxt_router_app_timeout(nxt_task_t *task, void *obj, void *data)
         if (nxt_router_port_post_select(task, &state) == NXT_OK) {
             nxt_router_app_prepare_request(task, pending_ra);
         }
-
-        nxt_request_app_link_use(task, pending_ra, -1);
     }
 
     nxt_debug(task, "send quit to app '%V' pid %PI", &app->name, port->pid);
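
The second hunk establishes the pairing that the following hedged sketch
illustrates: take a reference before handing the link to the work queue,
and drop it inside the handler.  The sketch reuses ra_link_t and
ra_link_use() from the sketch above; enqueue() and process_request() are
hypothetical simplifications of nxt_work_queue_add() and
nxt_router_app_process_request():

    typedef void (*work_handler_t)(ra_link_t *ra);

    /* Trivial stand-in for nxt_work_queue_add(): runs the work at once. */
    static void
    enqueue(work_handler_t handler, ra_link_t *ra)
    {
        handler(ra);
    }

    /* Mirrors nxt_router_app_process_request() in spirit. */
    static void
    process_request(ra_link_t *ra)
    {
        /* ... forward the request to the application ... */

        ra_link_use(ra, -1);  /* drop the reference taken at enqueue time */
    }

    static void
    reschedule(ra_link_t *ra)
    {
        /* The queued work item is one more holder of the pointer. */
        ra_link_use(ra, 1);

        enqueue(process_request, ra);
    }

Before the patch, the increment was skipped and compensated by an extra
decrement elsewhere; once request re-scheduling between two application
processes hit the cancelled path, the counts no longer balanced and the
req_app_link was released while still in use.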