Skip to content

Instantly share code, notes, and snippets.

@kingluo
Last active October 5, 2022 13:43
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save kingluo/ca30b8f3b76d2af95f98dde93b91d049 to your computer and use it in GitHub Desktop.
package main

import (
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"io"
	"math/rand"
	"net/http"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)
// KeyValue is the JSON payload posted to the /run endpoint; the short
// field tags keep the envelope overhead small relative to the value.
type KeyValue struct {
Key uint64 `json:"k"`
Value string `json:"v"`
}

// alphabet is the character set randomString draws from (ASCII letters only).
var alphabet []rune = []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz")
// randomString returns n characters drawn uniformly at random from
// alphabet, using the package-level math/rand source.
func randomString(n int) string {
	var out strings.Builder
	out.Grow(n) // alphabet is ASCII, so n runes == n bytes
	for remaining := n; remaining > 0; remaining-- {
		out.WriteRune(alphabet[rand.Intn(len(alphabet))])
	}
	return out.String()
}
// main runs a fixed-duration HTTP POST load test against the /run
// endpoint, printing the average round-trip time in microseconds, and
// finally calls /clean so the server can drop the generated data.
//
// Flags:
//   -uniq  post a freshly generated 1 MiB string on every request
//          instead of mutating one byte of a shared buffer.
func main() {
	url := "http://localhost:10000/run"
	url2 := "http://localhost:10000/clean"
	parallel := 1
	duration := 30 * time.Second
	uniqFlag := flag.Bool("uniq", false, "post unique string each time")
	flag.Parse()
	fmt.Println("url:", url)
	fmt.Println("unique:", *uniqFlag)
	fmt.Println("run:", duration)
	stopCh := make(chan bool)
	var wg sync.WaitGroup
	rand.Seed(time.Now().UnixNano())
	const MAX_STR_SIZE = 1 * 1024 * 1024
	buf := []byte(randomString(MAX_STR_SIZE))
	var cnt, sum uint64
	for i := 0; i < parallel; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			kv := &KeyValue{}
			for j := 0; ; j++ {
				select {
				case <-stopCh:
					return
				default:
				}
				kv.Key = rand.Uint64()
				if *uniqFlag {
					kv.Value = randomString(MAX_STR_SIZE)
				} else {
					// Mutate a single byte so consecutive payloads
					// differ while reusing the same 1 MiB buffer.
					buf[j%len(buf)] = byte(int('a') + j)
					kv.Value = string(buf)
				}
				data, err := json.Marshal(kv)
				if err != nil {
					panic(err)
				}
				start := time.Now()
				resp, err := http.Post(url, "text/plain", bytes.NewReader(data))
				if err != nil {
					panic(err)
				}
				// BUG FIX: the original never closed resp.Body, leaking
				// file descriptors and preventing keep-alive reuse.
				// Drain before closing so the connection can be reused.
				io.Copy(io.Discard, resp.Body)
				resp.Body.Close()
				if resp.StatusCode != http.StatusOK {
					panic("post failed")
				}
				elapsed := time.Since(start)
				atomic.AddUint64(&cnt, 1)
				atomic.AddUint64(&sum, uint64(elapsed.Microseconds()))
			}
		}()
	}
	time.Sleep(duration)
	close(stopCh)
	wg.Wait()
	// Guard against division by zero when no request completed in time.
	if cnt > 0 {
		fmt.Printf("avg rtt: %d microsec\n", sum/cnt)
	}
	// Best-effort cleanup call; the original silently ignored the error
	// and leaked the response body on success.
	if resp, err := http.Get(url2); err == nil {
		resp.Body.Close()
	}
}
#!/usr/bin/env stap++
# stap++ script: measure the latency of LuaJIT's lj_str_new (string
# interning) inside the target process. $^-prefixed names are stap++
# template variables expanded before SystemTap compiles the script.

# Timestamp (us) taken at lj_str_new entry; 0 means "no entry seen yet".
global start_time = 0
# Aggregate of per-call latencies in microseconds.
global latencies

# Record the entry time, but only for the traced thread, so calls from
# other threads in the process do not pollute the measurement.
probe process("$^libluajit_path").function("lj_str_new") {
if (tid() == target()) {
start_time = gettimeofday_us()
}
}

# On return from lj_str_new, accumulate the elapsed time. The
# start_time guard skips a return observed before any matching entry.
probe process("$^libluajit_path").function("lj_str_new").return {
if (tid() == target()) {
if (start_time) {
latencies <<< gettimeofday_us() - start_time
}
}
}

probe begin {
printf("Start tracing process %d ($^exec_path)...\n", target())
%( "$^arg_time :default()" != "" %?
printf("Please wait for $^arg_time seconds...\n")
%:
printf("Hit Ctrl-C to end.\n")
%)
}

# If --arg time=N was given, stop automatically after N seconds;
# otherwise the script runs until interrupted.
%( "$^arg_time" != "" %?
probe timer.s($^arg_time) {
exit()
}
%)

# Print min/avg/max latency over all sampled calls on exit.
probe end {
count = @count(latencies)
if (count == 0) {
printf("\nNo samples found so far.\n")
} else {
printf("min/avg/max: %d/%d/%d us\n",
@min(latencies), @avg(latencies), @max(latencies))
}
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
/*
 * Fill dst with len random bytes read from /dev/urandom.
 *
 * BUG FIX: the original ignored both the fopen() result (NULL deref /
 * undefined behavior on failure) and the fread() result (dst left
 * partially uninitialized on a short read). Abort loudly instead of
 * benchmarking garbage.
 */
void randstr(char* dst, size_t len) {
    FILE *f = fopen("/dev/urandom", "r");
    if (f == NULL) {
        perror("fopen /dev/urandom");
        exit(EXIT_FAILURE);
    }
    /* One element of size len: fread returns 1 only on a full read. */
    if (fread(dst, len, 1, f) != 1) {
        fprintf(stderr, "short read from /dev/urandom\n");
        fclose(f);
        exit(EXIT_FAILURE);
    }
    fclose(f);
}
/*
 * Micro-benchmark: memcmp over two 1 MiB buffers that differ only in
 * their last byte, forcing a full scan per call. Usage: prog <iters>.
 * Prints "<checksum>, <avg microseconds per memcmp>".
 *
 * Fixes vs. original: argv[1] was dereferenced without an argc check;
 * malloc results were unchecked; the duration math multiplied tv_sec
 * by 1000000 in (32-bit) int, which overflows; atoi(argv[1]) was
 * re-parsed on every loop iteration.
 */
int main(int argc, char** argv) {
    const size_t SZ = 1024 * 1024 * 1;
    if (argc < 2 || atoi(argv[1]) <= 0) {
        fprintf(stderr, "usage: %s <iterations>\n", argv[0]);
        return 1;
    }
    int iters = atoi(argv[1]);
    char* foo = malloc(SZ);
    char* bar = malloc(SZ);
    if (foo == NULL || bar == NULL) {
        fprintf(stderr, "out of memory\n");
        return 1;
    }
    randstr(foo, SZ);
    memcpy(bar, foo, SZ);
    /* Differ only in the final byte so memcmp must scan everything. */
    bar[SZ - 1] = 'A';
    int cnt = 0;
    struct timeval tv1, tv2;
    gettimeofday(&tv1, NULL);
    for (int i = 0; i < iters; i++) {
        cnt += memcmp(foo, bar, SZ);
    }
    gettimeofday(&tv2, NULL);
    /* Subtract seconds first, then widen: avoids int overflow on
     * tv_sec * 1000000 (tv_sec is ~1.7e9 today). */
    long long duration = (long long)(tv2.tv_sec - tv1.tv_sec) * 1000000LL
                         + (tv2.tv_usec - tv1.tv_usec);
    printf("%d, %lld\n", cnt, duration / iters);
    free(foo);
    free(bar);
    return 0;
}
# OpenResty/nginx config for the string-interning benchmark: a single
# worker serving the workload module's /run and /clean endpoints.
error_log /dev/stderr info;
worker_processes 1;
events {}
http {
server {
# Payloads are ~1 MiB JSON bodies; 2M keeps them fully in memory
# (no temp-file buffering, which would skew the latency numbers).
client_max_body_size 2M;
client_body_buffer_size 2M;
listen 10000;
# Store one key/value pair from the POSTed JSON body.
location /run {
content_by_lua_block {
require("workload").run()
}
}
# Drop the whole cache and force a GC cycle.
location /clean {
content_by_lua_block {
require("workload").clean()
}
}
}
}
local cjson = require("cjson")

local _M = {}

-- Eviction tuning: once the cache holds more than MAX_CNT entries,
-- DEL entries (in unspecified pairs() order) are removed at once.
local MAX_CNT = 800
local DEL = 300
-- cnt tracks the number of distinct keys currently held in cache.
local cnt = 0
local cache = {}
-- Handle POST /run: decode {k=..., v=...} from the request body and
-- store it in the in-memory cache. When inserting a new key pushes the
-- entry count past MAX_CNT, evict DEL entries in one batch (whichever
-- keys pairs() yields first).
function _M.run()
    ngx.req.read_body()
    local body = ngx.req.get_body_data()
    assert(body)
    local kv = cjson.decode(body)
    local key, value = kv.k, kv.v
    if cache[key] == nil then
        cnt = cnt + 1
        if cnt > MAX_CNT then
            cnt = cnt - DEL
            local remaining = DEL
            -- Deleting existing keys during pairs() traversal is legal.
            for old_key in pairs(cache) do
                cache[old_key] = nil
                remaining = remaining - 1
                if remaining == 0 then
                    break
                end
            end
        end
    end
    cache[key] = value
end
-- Handle POST /clean: reset the entry counter, drop every cached
-- entry, then run a full GC cycle so the freed strings are reclaimed
-- before the next benchmark run.
function _M.clean()
    cnt = 0
    cache = {}
    collectgarbage("collect")
end
return _M
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment