Last active
September 19, 2018 04:34
-
-
Save kriskowal/2f04c57b92abe81604789f42b869309f to your computer and use it in GitHub Desktop.
A control theory playground. Eliminate dropped messages, minimize latency, maximize throughput.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
package main | |
import ( | |
"fmt" | |
"image" | |
"image/color" | |
"image/draw" | |
"image/png" | |
"os" | |
"sync" | |
"time" | |
) | |
const (
	// Dimensions of the rendered heatmap image, in pixels.
	width  = 1000
	height = 800
	// Total simulated run time. Shorter alternatives are kept below,
	// commented out, for quick experiments:
	// duration = time.Second / 1000
	// duration = time.Second / 10
	// duration = time.Second / 2
	duration = 10 * time.Second
	// warmup divides duration for the short capacity-estimating run
	// performed before the full simulation (see run()).
	warmup = 10
	// proportionOver / proportionUnder is the proportional gain (1/5)
	// applied to the latency error in the producer's control loop.
	proportionOver  = 1
	proportionUnder = 5
)
// latencyEntry records one sampled duration together with the
// wall-clock time at which the sample was taken.
type latencyEntry struct {
	time    time.Time     // when the sample was observed
	latency time.Duration // the sampled value (a measured latency, or a send delay)
}
func main() { | |
if err := run(); err != nil { | |
fmt.Printf("%s\n", err.Error()) | |
os.Exit(1) | |
} | |
} | |
func run() error { | |
latencies, minLatencies, delays, drops, _, _ := sim(0, 0, 0, 0, duration/warmup) | |
latencies, minLatencies, delays, drops, start, stop := sim(len(latencies)*warmup*2, len(minLatencies)*warmup*2, len(delays)*warmup*2, len(drops)*warmup*2, duration) | |
return render(latencies, minLatencies, delays, drops, start, stop) | |
} | |
// sim runs one producer/consumer simulation for roughly `duration`.
// A producer goroutine sends timestamps ("packets") into a bounded
// channel, pacing itself with an inter-send delay adjusted by a
// proportional controller driven by latency feedback from the
// consumer. The capacity arguments pre-size the result slices (zero
// is fine; they grow as needed).
//
// Returns: observed latencies, minimum latencies (NOTE: currently
// never populated — the slice is always returned empty), the
// controller's delay history, timestamps of dropped sends, and the
// wall-clock start/stop times of the run.
func sim(latCap, minLatCap, delayCap, dropCap int, duration time.Duration) ([]latencyEntry, []latencyEntry, []latencyEntry, []time.Time, time.Time, time.Time) {
	// Closed to signal both goroutines to exit.
	stopped := make(chan struct{}, 0)
	// The simulated link: a bounded buffer, so a full channel models
	// packet loss (see the non-blocking send below).
	ch := make(chan time.Time, 1024)
	// Consumer→producer latency feedback. Capacity 1 with non-blocking
	// send and receive on both ends, so neither goroutine ever stalls
	// on feedback; stale samples are simply dropped.
	feedback := make(chan time.Duration, 1)
	latencies := make([]latencyEntry, 0, latCap)
	minLatencies := make([]latencyEntry, 0, minLatCap)
	delays := make([]latencyEntry, 0, delayCap)
	drops := make([]time.Time, 0, dropCap)
	var wg sync.WaitGroup
	wg.Add(2)
	start := time.Now()
	// One reusable timer paces the producer. Stop-and-drain the
	// initial fire so the first Reset starts from a clean timer.
	timer := time.NewTimer(0)
	if !timer.Stop() {
		<-timer.C
	}
	// Producer: paces sends with `delay`, adjusting it from feedback.
	go func() {
		priorLatency := 400 * time.Nanosecond
		var delay time.Duration
		// var integral time.Duration
		// var priorInstantLatency time.Duration
	Loop:
		for {
			// Wait delay between sending packets
			timer.Reset(delay)
			select {
			case <-timer.C:
			case <-stopped:
				break Loop
			}
			now := time.Now()
			// Non-blocking send: if the link buffer is full, the
			// "packet" is dropped and only its timestamp recorded.
			select {
			case ch <- now:
			case <-stopped:
				break Loop
			default:
				drops = append(drops, now)
			}
			// Receive latency feedback (non-blocking). Proportional
			// control on the *change* in latency, with gain
			// proportionOver/proportionUnder (1/5).
			select {
			case instantLatency := <-feedback:
				delay += (instantLatency - priorLatency) * proportionOver / proportionUnder
				priorLatency = instantLatency
				delays = append(delays, latencyEntry{
					latency: delay,
					time:    now,
				})
			default:
			}
		}
		wg.Done()
	}()
	// Consumer: receives timestamps, measures one-way latency, and
	// feeds it back to the producer.
	go func() {
	Loop:
		for {
			select {
			case t := <-ch:
				now := time.Now()
				latency := now.Sub(t)
				latencies = append(latencies, latencyEntry{
					latency: latency,
					time:    now,
				})
				// Send latency feedback (non-blocking: drop the
				// sample if the producer hasn't consumed the
				// previous one yet).
				select {
				case feedback <- latency:
				default:
				}
			case <-stopped:
				break Loop
			}
		}
		wg.Done()
	}()
	time.Sleep(duration)
	close(stopped)
	// Each result slice is appended to by exactly one goroutine;
	// wg.Wait establishes the happens-before edge that makes reading
	// them here (and returning them) race-free.
	wg.Wait()
	stop := time.Now()
	return latencies, minLatencies, delays, drops, start, stop
}
func render(latencies []latencyEntry, minLatencies []latencyEntry, delays []latencyEntry, drops []time.Time, start, stop time.Time) error { | |
var histogram [width][height]int | |
duration := stop.Sub(start) | |
var maxLatency, meanLatency time.Duration | |
minLatency := time.Second | |
for _, entry := range latencies { | |
if entry.latency > maxLatency { | |
maxLatency = entry.latency | |
} | |
if entry.latency < minLatency { | |
minLatency = entry.latency | |
} | |
meanLatency += entry.latency | |
} | |
meanLatency = meanLatency / time.Duration(len(latencies)) | |
spread := meanLatency * 2 | |
var depth int | |
for _, entry := range latencies { | |
x := int(entry.time.Sub(start) * (width - 1) / duration) | |
l := int(entry.latency * (height - 1) / spread) | |
if l >= height { | |
continue | |
} | |
y := height - 1 - l | |
count := histogram[x][y] + 1 | |
if count > depth { | |
depth = count | |
} | |
histogram[x][y] = count | |
} | |
rect := image.Rect(0, 0, width, height) | |
latencyMap := image.NewRGBA(rect) | |
for x := 0; x < width; x++ { | |
for y := 0; y < height; y++ { | |
if histogram[x][y] > 0 { | |
latencyMap.Set(x, y, color.RGBA{0, 0, 0, 127 + uint8(histogram[x][y]*128/depth)}) | |
} | |
} | |
} | |
minLatenciesChart := image.NewRGBA(rect) | |
for _, entry := range minLatencies { | |
x := int(entry.time.Sub(start) * (width - 1) / duration) | |
l := int(entry.latency * (height - 1) / spread) | |
if l >= height { | |
continue | |
} | |
y := height - 1 - l | |
minLatenciesChart.Set(x, y, color.RGBA{0, 0, 255, 255}) | |
} | |
delaysChart := image.NewRGBA(rect) | |
for _, entry := range delays { | |
x := int(entry.time.Sub(start) * (width - 1) / duration) | |
l := int(entry.latency * (height - 1) / spread) | |
if l >= height { | |
continue | |
} | |
y := height - 1 - l | |
delaysChart.Set(x, y, color.RGBA{255, 0, 0, 255}) | |
} | |
img := image.NewRGBA(rect) | |
draw.Draw(img, rect, image.NewUniform(color.RGBA{255, 255, 255, 255}), image.ZP, draw.Over) | |
draw.Draw(img, rect, latencyMap, image.ZP, draw.Over) | |
draw.DrawMask(img, rect, minLatenciesChart, image.ZP, image.NewUniform(color.RGBA{0, 0, 0, 63}), image.ZP, draw.Over) | |
draw.DrawMask(img, rect, delaysChart, image.ZP, image.NewUniform(color.RGBA{0, 0, 0, 63}), image.ZP, draw.Over) | |
file, err := os.Create("heatmap.png") | |
if err != nil { | |
return err | |
} | |
defer file.Close() | |
err = png.Encode(file, img) | |
if err != nil { | |
return err | |
} | |
fmt.Printf("MIN LATENCY %v\n", minLatency) | |
fmt.Printf("MAX LATENCY %v\n", maxLatency) | |
fmt.Printf("RECEIVED %d in %v\n", len(latencies), duration) | |
fmt.Printf("DROPS %d\n", len(drops)) | |
return nil | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment