Add a 'start_time' variable to nginx 0.8.33 to support an X-REQUEST-START header. This header is used by New Relic RPM to record queue time.
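
For example, once the patch is applied, the new variable can be forwarded to the backend from nginx.conf. This is only a sketch: "backend" is a hypothetical upstream name, and the "t=" prefix follows the convention New Relic expects for this header.

  location / {
    # forward the request start time (in microseconds) to the app servers
    proxy_set_header X-Request-Start "t=$start_time";
    proxy_pass http://backend;
  }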

request_start_variable.patch
--- src/http/ngx_http_variables.c.orig 2010-01-11 03:21:46.000000000 -0800
+++ src/http/ngx_http_variables.c 2010-02-18 10:01:32.000000000 -0800
@@ -93,6 +93,9 @@
 static ngx_int_t ngx_http_variable_pid(ngx_http_request_t *r,
     ngx_http_variable_value_t *v, uintptr_t data);
+static ngx_int_t ngx_http_variable_start_time(ngx_http_request_t *r,
+    ngx_http_variable_value_t *v, uintptr_t data);
+
 /*
  * TODO:
  *     Apache CGI: AUTH_TYPE, PATH_INFO (null), PATH_TRANSLATED
@@ -253,6 +256,9 @@
     { ngx_string("pid"), NULL, ngx_http_variable_pid,
       0, 0, 0 },
+    { ngx_string("start_time"), NULL, ngx_http_variable_start_time,
+      0, 0, 0 },
+
     { ngx_null_string, NULL, NULL, 0, 0, 0 }
 };
@@ -1749,6 +1755,27 @@
     return re;
 }
+static ngx_int_t
+ngx_http_variable_start_time(ngx_http_request_t *r,
+    ngx_http_variable_value_t *v, uintptr_t data)
+{
+    u_char  *p;
+
+    p = ngx_pnalloc(r->pool, NGX_INT64_LEN);
+    if (p == NULL) {
+        return NGX_ERROR;
+    }
+
+    uint64_t usec = (((uint64_t) r->start_sec * 1000 * 1000) + ((uint64_t) r->start_msec * 1000));
+
+    v->len = ngx_sprintf(p, "%L", usec) - p;
+    v->valid = 1;
+    v->no_cacheable = 0;
+    v->not_found = 0;
+    v->data = p;
+
+    return NGX_OK;
+}
 ngx_int_t
 ngx_http_regex_exec(ngx_http_request_t *r, ngx_http_regex_t *re, ngx_str_t *s)

Great code...

Is there any way to capture the request start time and response end time for each request in a JBoss 3.2 server?

Is it possible to record a timestamp for each request in the request header (or in internal server log files) each time the request enters and leaves a server, e.g., Tin(JBoss) --> Tout(JBoss) --> Tin(MySQL) --> Tout(MySQL) --> Tin(JBoss) --> Tout(JBoss)?

Thank you.

New Relic (http://newrelic.com) can do that for Java apps on JBoss (not to mention Ruby, PHP, Python, and .NET). It doesn't do cross-JVM tracing (yet), but definitely instruments request time spent in databases like MySQL.

I checked New Relic, but unfortunately it only supports JVM 1.5 and above. My application requires JVM 1.4 :(

I need to find a way on the old versions, JBoss 3.2.3 & j2sdk 1.4.19. Any pointers?

Ouch, 1.4 is pretty old. I don't know of anything that'll help there, sorry.

I know, but I have no options; I have to use 1.4 :(

Thanks anyway..

For nginx-1.0.6, the provided patch doesn't work. Here's the updated version:

diff -Nurw nginx-1.0.6-orig/src/http/ngx_http_variables.c nginx-1.0.6/src/http/ngx_http_variables.c
--- nginx-1.0.6-orig/src/http/ngx_http_variables.c      2011-05-30 12:36:17.000000000 +0000
+++ nginx-1.0.6/src/http/ngx_http_variables.c   2011-11-14 16:55:11.000000000 +0000
@@ -92,6 +92,8 @@
     ngx_http_variable_value_t *v, uintptr_t data);
 static ngx_int_t ngx_http_variable_pid(ngx_http_request_t *r,
     ngx_http_variable_value_t *v, uintptr_t data);
+static ngx_int_t ngx_http_variable_start_time(ngx_http_request_t *r,
+    ngx_http_variable_value_t *v, uintptr_t data);

 /*
  * TODO:
@@ -254,6 +256,9 @@
     { ngx_string("pid"), NULL, ngx_http_variable_pid,
       0, 0, 0 },

+    { ngx_string("start_time"), NULL, ngx_http_variable_start_time,
+      0, 0, 0 },
+
     { ngx_null_string, NULL, NULL, 0, 0, 0 }
 };

@@ -1654,6 +1659,28 @@
     v->valid = 1;
     v->no_cacheable = 0;
     v->not_found = 0;
+    v->data = p;
+
+    return NGX_OK;
+}
+
+static ngx_int_t
+ngx_http_variable_start_time(ngx_http_request_t *r,
+    ngx_http_variable_value_t *v, uintptr_t data)
+{
+    u_char *p;
+
+    p = ngx_pnalloc(r->pool, NGX_INT64_LEN);
+    if (p == NULL) {
+      return NGX_ERROR;
+    }
+
+    uint64_t usec = (((uint64_t)r->start_sec * 1000 * 1000) + ((uint64_t)r->start_msec * 1000));
+
+    v->len = ngx_sprintf(p, "%L", usec) - p;
+    v->valid = 1;
+    v->no_cacheable = 0;
+    v->not_found = 0;
     v->data = p;

     return NGX_OK;

Which request does this start_time apply to? The incoming request to nginx, or the backposted request to the upstream server (i.e. via the proxy module)?

It applies to the incoming request.

More questions:

  • Does the timestamp indicate when the request was fully received, when the connection was accept()ed, or when the first byte was received?
  • Are you aware of a way to apply a similar timestamp to a backposted request to an upstream (right before the connect())?

The nginx source knows better than I do, sorry.

If you are unable or unwilling to patch nginx, yet have the Lua engine available to you, you can use the much simpler:

set_by_lua $start_time 'ngx.now() * 1000 * 1000';

The Lua API for nginx exposes the cached timestamp via ngx.time() and ngx.now(), where ngx.time() returns an integer number of seconds and ngx.now() returns a float in seconds (I am not sure how accurate the fractional portion is). If you absolutely need the current timestamp, you can force an update of the cached version with ngx.update_time(), but this is potentially expensive. Instead, it is suggested that you set timer_resolution in your config to the maximum latency you can tolerate in your recorded numbers.
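
For example (the value below is purely illustrative, not a recommendation):

  # goes in the main (top-level) context of nginx.conf;
  # choose the largest timestamp staleness you can tolerate
  timer_resolution 100ms;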

If you are using CloudFoundry, which uses nginx and Lua alongside a Sinatra app to dynamically route requests to the currently correct location or load balance across multiple instances, then you should also set the X-Queue-Start header after the subrequest to the router. Despite using a cached timestamp, you will get a fresh one, since the cache is updated after every event in the event loop and the subrequest is performed non-blocking when done via the nginx Lua API (which it will be).

I stand corrected. Using the above, the result appears to always use scientific notation: X-Request-Start:t=1.349622085668e+15. The correct way to get the desired result is the following:

set_by_lua $start_time '
  local seconds = ngx.now()
  local microseconds = seconds * 1000 * 1000
  return string.format("%d", microseconds);
';
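
Putting the corrected snippet together with the proxy header looks roughly like this. It is only a sketch: "backend" is a hypothetical upstream name, and it assumes nginx was built with the Lua module.

  location / {
    set_by_lua $start_time '
      -- microseconds since the epoch, formatted without scientific notation
      return string.format("%d", ngx.now() * 1000 * 1000)
    ';
    proxy_set_header X-Request-Start "t=$start_time";
    proxy_pass http://backend;
  }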

OMG, this question is so important:

"Does the timestamp indicate when the request was fully received, when the connection was accept()ed, or when the first byte was received?"

Please clarify this, because this patch is linked from authoritative sources like the NewRelic docs and not knowing this information looks really unprofessional. At the very least the guys from NewRelic should clarify this issue!

NewRelic writes: "To collect request queuing time, you need to mark the HTTP request with a timestamp when queuing starts." -- Given that they are linking to this patch they seem to think this patch does just that. Seems clear enough to me.

Whether this patch really does that...? I wouldn't trust any clarification from an "authoritative source" ;-) Better to test for yourself: apply this patch, upload a large file, and see which timestamp you receive in your backend.

Perhaps you could look at it this way: it's the timestamp of the moment nginx adds the proxy header. nginx probably won't do that when the connection is accept()ed or when the first byte is received, as there's nothing to add the header to yet. Given that you probably use proxy_pass, the nginx documentation states: "the entire client request will be buffered in nginx before being passed on to the backend proxied servers". My guess is that the timestamp is close to the time the request is passed into the queue for delivery to a backend proxy server.

... but then... ;) if I look at this patch, it appears to me it's sending the recorded start time of the nginx request, i.e. the time when the connection was accept()ed.

The patch below applies to the nginx stable-1.2 branch. It adds two HTTP variables, $usec and $start_usec. In your nginx.conf file you could add:

  proxy_set_header X-Request-Start 't=$start_usec';
  proxy_set_header X-Queue-Start 't=$usec';

Request-Start holds a microsecond-resolution timestamp that indicates when the first bytes were read from the client.

Queue-Start holds a microsecond-resolution timestamp that indicates when the request is sent to the backend server.

I chose the name usec as it seems to be in line with the $msec variable available to the nginx log format.
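
As a side note, the new $start_usec variable can also be logged next to the built-in $msec. A sketch only; the format name and file name here are made up:

  # log_format must appear in the http context
  log_format timing '$remote_addr [$msec] start=$start_usec "$request"';
  access_log logs/timing.log timing;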

Note that the New Relic site indicates they support these two variables; see the paragraph "Implementation Notes" at https://newrelic.com/docs/features/tracking-front-end-time.

Subject: [PATCH] Feature: $usec and $start_usec variables, useful for setting
 in proxy headers before sending request to backend servers
 so queue times can be measured.

---
 src/http/ngx_http_variables.c | 60 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 60 insertions(+)

diff --git a/src/http/ngx_http_variables.c b/src/http/ngx_http_variables.c
index b0949c7..b9a630a 100644
--- a/src/http/ngx_http_variables.c
+++ b/src/http/ngx_http_variables.c
@@ -108,6 +108,10 @@ static ngx_int_t ngx_http_variable_hostname(ngx_http_request_t *r,
     ngx_http_variable_value_t *v, uintptr_t data);
 static ngx_int_t ngx_http_variable_pid(ngx_http_request_t *r,
     ngx_http_variable_value_t *v, uintptr_t data);
+static ngx_int_t ngx_http_variable_usec(ngx_http_request_t *r,
+    ngx_http_variable_value_t *v, uintptr_t data);
+static ngx_int_t ngx_http_variable_start_usec(ngx_http_request_t *r,
+    ngx_http_variable_value_t *v, uintptr_t data);

 /*
  * TODO:
@@ -285,6 +289,12 @@ static ngx_http_variable_t  ngx_http_core_variables[] = {
     { ngx_string("pid"), NULL, ngx_http_variable_pid,
       0, 0, 0 },

+    { ngx_string("usec"), NULL, ngx_http_variable_usec,
+      0, 0, 0 },
+
+    { ngx_string("start_usec"), NULL, ngx_http_variable_start_usec,
+      0, 0, 0 },
+
 #if (NGX_HAVE_TCP_INFO)
     { ngx_string("tcpinfo_rtt"), NULL, ngx_http_variable_tcpinfo,
       0, NGX_HTTP_VAR_NOCACHEABLE, 0 },
@@ -1915,6 +1925,56 @@ ngx_http_variable_pid(ngx_http_request_t *r,
 }


+static ngx_int_t
+ngx_http_variable_usec(ngx_http_request_t *r,
+    ngx_http_variable_value_t *v, uintptr_t data)
+{
+    u_char     *p;
+    ngx_time_t *tp;
+    uint64_t   usec;
+
+    p = ngx_pnalloc(r->pool, NGX_INT64_LEN);
+    if (p == NULL) {
+        return NGX_ERROR;
+    }
+
+    tp = ngx_timeofday();
+    usec = (uint64_t) tp->sec * 1000 * 1000 + (uint64_t) tp->msec * 1000;
+
+    v->len = ngx_sprintf(p, "%L", usec) - p;
+    v->valid = 1;
+    v->no_cacheable = 0;
+    v->not_found = 0;
+    v->data = p;
+
+    return NGX_OK;
+}
+
+
+static ngx_int_t
+ngx_http_variable_start_usec(ngx_http_request_t *r,
+    ngx_http_variable_value_t *v, uintptr_t data)
+{
+    u_char   *p;
+    uint64_t start_usec;
+
+    p = ngx_pnalloc(r->pool, NGX_INT64_LEN);
+    if (p == NULL) {
+        return NGX_ERROR;
+    }
+
+    start_usec = (uint64_t) r->start_sec * 1000 * 1000 + (uint64_t) r->start_msec * 1000;
+
+    v->len = ngx_sprintf(p, "%L", start_usec) - p;
+    v->valid = 1;
+    v->no_cacheable = 0;
+    v->not_found = 0;
+    v->data = p;
+
+    return NGX_OK;
+}
+
+
 void *
 ngx_http_map_find(ngx_http_request_t *r, ngx_http_map_t *map, ngx_str_t *match)
 {
-- 
1.7.11.1
