Created
March 25, 2016 15:29
-
-
Save elukey/c38c2220660cbb6d35ca to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
diff --git a/Makefile b/Makefile | |
old mode 100644 | |
new mode 100755 | |
diff --git a/config.c b/config.c | |
index 3987fe2..f15b227 100644 | |
--- a/config.c | |
+++ b/config.c | |
@@ -191,10 +191,6 @@ static int conf_set (const char *name, const char *val, | |
} | |
} else if (!strcmp(name, "logline.data.copy")) | |
conf.datacopy = conf_tof(val); | |
- else if (!strcmp(name, "logline.hash.size")) | |
- conf.loglines_hsize = atoi(val); | |
- else if (!strcmp(name, "logline.hash.max")) | |
- conf.loglines_hmax = atoi(val); | |
else if (!strcmp(name, "logline.scratch.size")) | |
conf.scratch_size = atoi(val); | |
else if (!strcmp(name, "varnish.arg.q")) { | |
diff --git a/varnishkafka.c b/varnishkafka.c | |
index 02f399f..2bc222e 100644 | |
--- a/varnishkafka.c | |
+++ b/varnishkafka.c | |
@@ -73,22 +73,11 @@ static const char *fmt_conf_names[] = { | |
[FMT_CONF_KEY] = "Key" | |
}; | |
-/** | |
- * Logline cache | |
- */ | |
-static struct { | |
- LIST_HEAD(, logline) lps; /* Current loglines in bucket */ | |
- int cnt; /* Current number of loglines in bucket */ | |
- uint64_t hit; /* Cache hits */ | |
- uint64_t miss; /* Cache misses */ | |
- uint64_t purge; /* Cache entry purges (bucket full) */ | |
-} *loglines; | |
- | |
-static int logline_cnt = 0; /* Current number of loglines in memory */ | |
+/* logline accumulator reused across log transactions */ | |
+struct logline *lp = NULL; | |
static void logrotate(void); | |
- | |
/** | |
* Counters | |
*/ | |
@@ -111,7 +100,6 @@ static void print_stats (void) { | |
"\"trunc\":%"PRIu64", " | |
"\"scratch_toosmall\":%"PRIu64", " | |
"\"scratch_tmpbufs\":%"PRIu64", " | |
- "\"lp_curr\":%i, " | |
"\"seq\":%"PRIu64" " | |
"} }\n", | |
(unsigned long long)time(NULL), | |
@@ -121,7 +109,6 @@ static void print_stats (void) { | |
cnt.trunc, | |
cnt.scratch_toosmall, | |
cnt.scratch_tmpbufs, | |
- logline_cnt, | |
conf.sequence_number); | |
} | |
@@ -1566,21 +1553,6 @@ static void render_match (struct logline *lp, uint64_t seq) { | |
} | |
} | |
- | |
- | |
-/** | |
- * Initialize log line lookup hash | |
- */ | |
-static void loglines_init (void) { | |
- loglines = calloc(sizeof(*loglines), conf.loglines_hsize); | |
-} | |
- | |
-/** | |
- * Returns the hash key (bucket) for a given log id | |
- */ | |
-#define logline_hkey(id) ((id) % conf.loglines_hsize) | |
- | |
- | |
/** | |
* Resets the given logline and makes it ready for accumulating a new request. | |
*/ | |
@@ -1589,10 +1561,8 @@ static void logline_reset (struct logline *lp) { | |
struct tmpbuf *tmpbuf; | |
/* Clear logline, except for scratch pad since it will be overwritten */ | |
- | |
for (i = 0 ; i < conf.fconf_cnt ; i++) | |
- memset(lp->match[i], 0, | |
- conf.fconf[i].fmt_cnt * sizeof(*lp->match[i])); | |
+ memset(lp->match[i], 0, conf.fconf[i].fmt_cnt * sizeof(*lp->match[i])); | |
/* Free temporary buffers */ | |
while ((tmpbuf = lp->tmpbuf)) { | |
@@ -1608,69 +1578,22 @@ static void logline_reset (struct logline *lp) { | |
lp->seq = 0; | |
lp->sof = 0; | |
- lp->tags_seen = 0; | |
lp->t_last = time(NULL); | |
} | |
/** | |
- * Free up all loglines. | |
- */ | |
-static void loglines_term (void) { | |
- unsigned int hkey; | |
- for (hkey = 0 ; hkey < conf.loglines_hsize ; hkey++) { | |
- struct logline *lp; | |
- while ((lp = LIST_FIRST(&loglines[hkey].lps))) { | |
- logline_reset(lp); | |
- LIST_REMOVE(lp, link); | |
- free(lp); | |
- logline_cnt--; | |
- } | |
- } | |
- free(loglines); | |
-} | |
- | |
- | |
-/** | |
- * Returns a logline. | |
+ * Returns a new logline. | |
*/ | |
-static inline struct logline *logline_get (unsigned int id) { | |
- struct logline *lp, *oldest = NULL; | |
- unsigned int hkey = logline_hkey(id); | |
+static inline struct logline *logline_get () { | |
int i; | |
char *ptr; | |
- | |
- LIST_FOREACH(lp, &loglines[hkey].lps, link) { | |
- if (lp->id == id) { | |
- /* Cache hit: return existing logline */ | |
- loglines[hkey].hit++; | |
- return lp; | |
- } else if (loglines[hkey].cnt > conf.loglines_hmax && | |
- lp->tags_seen && | |
- (!oldest || lp->t_last < oldest->t_last)) { | |
- oldest = lp; | |
- } | |
- } | |
- | |
- /* Cache miss */ | |
- loglines[hkey].miss++; | |
- | |
- if (oldest) { | |
- /* Remove oldest entry. | |
- * We will not loose a log record here since this only | |
- * matches when 'tags_seen' is zero. */ | |
- LIST_REMOVE(oldest, link); | |
- loglines[hkey].cnt--; | |
- loglines[hkey].purge++; | |
- free(oldest); | |
- logline_cnt--; | |
- } | |
+ struct logline *lp; | |
/* Allocate and set up new logline */ | |
lp = malloc(sizeof(*lp) + conf.scratch_size + | |
(conf.total_fmt_cnt * sizeof(*lp->match[0]))); | |
memset(lp, 0, sizeof(*lp)); | |
- lp->id = id; | |
ptr = (char *)(lp+1) + conf.scratch_size; | |
for (i = 0 ; i < conf.fconf_cnt ; i++) { | |
size_t msize = conf.fconf[i].fmt_cnt * sizeof(*lp->match[i]); | |
@@ -1679,10 +1602,6 @@ static inline struct logline *logline_get (unsigned int id) { | |
ptr += msize; | |
} | |
- LIST_INSERT_HEAD(&loglines[hkey].lps, lp, link); | |
- loglines[hkey].cnt++; | |
- logline_cnt++; | |
- | |
return lp; | |
} | |
@@ -1771,15 +1690,13 @@ static int tag_match (struct logline *lp, int spec, enum VSL_tag_e tagid, | |
* A trasaction cursor (vsl.h) points to a list of tags associated with transaction id. | |
* This function parses the current tag pointed by the cursor. | |
*/ | |
-static int parse_tag (struct VSL_transaction *t, uint64_t bitmap) | |
+static int parse_tag(struct VSL_transaction *t, struct logline *lp) | |
{ | |
- struct logline *lp; | |
int is_complete = 0; | |
/* Data carried by the transaction's current cursor */ | |
enum VSL_tag_e tag = VSL_TAG(t->c->rec.ptr); | |
const char * tag_data = VSL_CDATA(t->c->rec.ptr); | |
- long vxid = VSL_ID(t->c->rec.ptr); | |
/* Avoiding VSL_LEN to prevent \0 termination char | |
* to be counted causing \u0000 to be displayed in JSON | |
@@ -1798,12 +1715,6 @@ static int parse_tag (struct VSL_transaction *t, uint64_t bitmap) | |
*/ | |
int spec = VSL_CLIENT(t->c->rec.ptr) ? VSL_CLIENTMARKER : VSL_BACKENDMARKER; | |
- if (unlikely(!(lp = logline_get(vxid)))) | |
- return -1; | |
- | |
- /* Update bitfield of seen tags (-m regexp) */ | |
- lp->tags_seen |= bitmap; | |
- | |
/* Truncate data if exceeding configured max */ | |
if (unlikely(len > conf.tag_size_max)) { | |
cnt.trunc++; | |
@@ -1814,7 +1725,8 @@ static int parse_tag (struct VSL_transaction *t, uint64_t bitmap) | |
if (likely(!(is_complete = tag_match(lp, spec, tag, tag_data, len)))) | |
return conf.pret; | |
- /* Log line is complete: render & output (stdout or kafka) */ | |
+ /* Log line is complete: | |
+  * render & output (stdout or kafka) */ | |
render_match(lp, ++conf.sequence_number); | |
/* clean up */ | |
@@ -1852,15 +1764,14 @@ static int __match_proto__(VSLQ_dispatch_f) transaction_scribe (struct VSL_data | |
struct VSL_transaction *t; | |
/* Loop through the transations of the grouping */ | |
while ((t = *pt++)) { | |
+ /* Only client requests are allowed */ | |
+ if (t->type != VSL_t_req) | |
+ continue; | |
+ if (t->reason == VSL_r_esi) | |
+ continue; | |
/* loop through the tags */ | |
- uint64_t bitmap = 0; | |
while (VSL_Next(t->c) == 1) { | |
- /* Only client requests are allowed */ | |
- if (t->type != VSL_t_req) | |
- continue; | |
- if (t->reason == VSL_r_esi) | |
- continue; | |
- parse_tag(t, bitmap); | |
+ parse_tag(t, lp); | |
} | |
} | |
return 0; | |
@@ -2027,8 +1938,6 @@ int main (int argc, char **argv) { | |
conf.daemonize = 1; | |
conf.datacopy = 1; | |
conf.tag_size_max = 2048; | |
- conf.loglines_hsize = 5000; | |
- conf.loglines_hmax = 5; | |
conf.scratch_size = 4096; | |
conf.stats_interval = 60; | |
conf.stats_file = strdup("/tmp/varnishkafka.stats.json"); | |
@@ -2162,9 +2071,6 @@ int main (int argc, char **argv) { | |
if (conf.log_level >= 7) | |
tag_dump(); | |
- /* Prepare logline cache */ | |
- loglines_init(); | |
- | |
/* Daemonize if desired */ | |
if (conf.daemonize) { | |
if (daemon(0, 0) == -1) { | |
@@ -2260,6 +2166,10 @@ int main (int argc, char **argv) { | |
wait_for.tv_nsec = 10000000L; | |
int dispatch_status = 0; | |
+ /* Create a new logline (it will be re-used across log transactions) */ | |
+ if (unlikely(!(lp = logline_get()))) | |
+ return -1; | |
+ | |
while (conf.run) { | |
dispatch_status = VSLQ_Dispatch(conf.vslq, transaction_scribe, NULL); | |
@@ -2297,7 +2207,6 @@ int main (int argc, char **argv) { | |
rd_kafka_destroy(rk); | |
} | |
- loglines_term(); | |
print_stats(); | |
/* if stats_fp is set (i.e. open), close it. */ | |
@@ -2308,6 +2217,8 @@ int main (int argc, char **argv) { | |
free(conf.stats_file); | |
+ free(lp); | |
+ | |
rate_limiters_rollover(time(NULL)); | |
varnish_api_cleaning(); | |
diff --git a/varnishkafka.h b/varnishkafka.h | |
index 0e452d7..ffee6f1 100644 | |
--- a/varnishkafka.h | |
+++ b/varnishkafka.h | |
@@ -85,9 +85,6 @@ struct logline { | |
/* Per fmt_conf logline matches */ | |
struct match *match[FMT_CONF_NUM]; | |
- /* Tags seen (for -m regexp) */ | |
- uint64_t tags_seen; | |
- | |
/* Sequence number */ | |
uint64_t seq; | |
@@ -191,8 +188,6 @@ struct conf { | |
int datacopy; | |
fmt_enc_t fmt_enc; | |
int total_fmt_cnt; | |
- int loglines_hsize; /* Log id hash size */ | |
- int loglines_hmax; /* Max log ids per hash bucket */ | |
int tag_size_max; /* Maximum tag size to accept without | |
* truncating it. */ | |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment