Create a gist now

Instantly share code, notes, and snippets.

@juliusv /catena_bench.go Secret
Last active Aug 29, 2015

What would you like to do?
package main
import (
"flag"
"fmt"
"time"
"github.com/PreetamJinka/catena"
)
// Benchmark dimensions, settable from the command line; read in main
// after flag.Parse().
var (
// Number of distinct time series (one metric name per series) to write.
numMetrics = flag.Int("num-metrics", 1000, "The number of metrics.")
// Number of ingestion rounds; one sample per metric per round.
samplesPerMetric = flag.Int("samples-per-metric", 1000, "The number of samples per metric.")
)
// main benchmarks catena ingestion: it opens a fresh DB under
// /tmp/catena_bench, then for each of *samplesPerMetric rounds builds a
// batch of *numMetrics rows (one per synthetic metric) and inserts it,
// timing only the InsertRows calls. The cumulative insert time is
// printed at the end.
func main() {
	flag.Parse()

	db, err := catena.NewDB("/tmp/catena_bench")
	if err != nil {
		panic(err)
	}

	totalTime := time.Duration(0)
	// Bug fix: the original loop used t <= *samplesPerMetric, which
	// inserted N+1 samples per metric instead of the N the flag promises.
	for t := 0; t < *samplesPerMetric; t++ {
		if t%100 == 0 {
			fmt.Println("Appending samples for timestamp", t)
		}
		// Pre-size the batch so append never reallocates.
		rows := make(catena.Rows, 0, *numMetrics)
		for i := 0; i < *numMetrics; i++ {
			rows = append(rows, catena.Row{
				Source: "testsource",
				Metric: fmt.Sprintf("testmetric_%d", i),
				// Simulated 15-second scrape interval: round t maps to
				// timestamp t*15.
				Timestamp: int64(t * 15),
				Value:     float64(i),
			})
		}
		// Time only the insert itself, not batch construction.
		start := time.Now()
		// NOTE(review): InsertRows' result is discarded here; if the
		// catena API returns an error it should be checked — confirm
		// against the package's signature before changing.
		db.InsertRows(rows)
		totalTime += time.Since(start)
	}
	fmt.Println("Took", totalTime)
}
package main
import (
"flag"
"fmt"
"time"
"github.com/prometheus/client_golang/model"
"github.com/prometheus/prometheus/storage/local"
)
// Command-line configuration: the first two flags set the benchmark's
// dimensions; the rest tune Prometheus's local storage engine.
var (
// Number of distinct time series (one metric name per series) to write.
numMetrics = flag.Int("num-metrics", 1000, "The number of metrics.")
// Number of ingestion rounds; one sample per metric per round.
samplesPerMetric = flag.Int("samples-per-metric", 1000, "The number of samples per metric.")
// The flags below are copied from Prometheus's main.go.
numMemoryChunks = flag.Int("storage.local.memory-chunks", 1024*1024, "How many chunks to keep in memory. While the size of a chunk is 1kiB, the total memory usage will be significantly higher than this value * 1kiB. Furthermore, for various reasons, more chunks might have to be kept in memory temporarily.")
persistenceStoragePath = flag.String("storage.local.path", "/tmp/prometheus_bench", "Base path for metrics storage.")
persistenceRetentionPeriod = flag.Duration("storage.local.retention", 15*24*time.Hour, "How long to retain samples in the local storage.")
persistenceQueueCapacity = flag.Int("storage.local.persistence-queue-capacity", 32*1024, "How many chunks can be waiting for being persisted before sample ingestion will stop.")
checkpointInterval = flag.Duration("storage.local.checkpoint-interval", 5*time.Minute, "The period at which the in-memory index of time series is checkpointed.")
checkpointDirtySeriesLimit = flag.Int("storage.local.checkpoint-dirty-series-limit", 5000, "If approx. that many time series are in a state that would require a recovery operation after a crash, a checkpoint is triggered, even if the checkpoint interval hasn't passed yet. A recovery operation requires a disk seek. The default limit intends to keep the recovery time below 1min even on spinning disks. With SSD, recovery is much faster, so you might want to increase this value in that case to avoid overly frequent checkpoints.")
chunkType = flag.Int("storage.local.chunk-type", 1, "Which chunk encoding version to use. Currently supported is 0 (delta encoding) and 1 (double-delta encoding).")
)
// main benchmarks Prometheus's local storage engine: it starts a
// MemorySeriesStorage configured from the flags above, appends
// *samplesPerMetric batches of *numMetrics samples, timing only the
// AppendSamples calls, then times the final Stop() (which flushes and
// checkpoints).
func main() {
	flag.Parse()

	o := &local.MemorySeriesStorageOptions{
		MemoryChunks:               *numMemoryChunks,
		PersistenceStoragePath:     *persistenceStoragePath,
		PersistenceRetentionPeriod: *persistenceRetentionPeriod,
		PersistenceQueueCapacity:   *persistenceQueueCapacity,
		CheckpointInterval:         *checkpointInterval,
		CheckpointDirtySeriesLimit: *checkpointDirtySeriesLimit,
		ChunkType:                  byte(*chunkType),
	}
	storage, err := local.NewMemorySeriesStorage(o)
	if err != nil {
		panic(err)
	}
	storage.Start()

	totalTime := time.Duration(0)
	// Bug fix: the original loop used t <= *samplesPerMetric, which
	// appended N+1 samples per metric instead of the N the flag promises.
	for t := 0; t < *samplesPerMetric; t++ {
		if t%100 == 0 {
			fmt.Println("Appending samples for timestamp", t)
		}
		// Pre-size the batch so append never reallocates.
		samples := make(model.Samples, 0, *numMetrics)
		for i := 0; i < *numMetrics; i++ {
			samples = append(samples, &model.Sample{
				Metric: model.Metric{
					"source":              "testsource",
					model.MetricNameLabel: model.LabelValue(fmt.Sprintf("testmetric_%d", i)),
				},
				// NOTE(review): this advances the timestamp by only one
				// model.Timestamp unit per round, offset by a constant
				// 15s, whereas the companion catena benchmark spaces
				// rounds 15s apart (t*15). Confirm whether the intent
				// was a 15s step per round before comparing results.
				Timestamp: model.Timestamp(t).Add(15 * time.Second),
				Value:     model.SampleValue(i),
			})
		}
		// Time only the append itself, not batch construction.
		start := time.Now()
		storage.AppendSamples(samples)
		totalTime += time.Since(start)
	}
	fmt.Println("Appending took", totalTime)

	// Stop flushes in-memory state to disk; time it separately since it
	// is a significant part of total ingestion cost.
	start := time.Now()
	storage.Stop()
	fmt.Println("Stopping storage took", time.Since(start))
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment