commit c2572c57029cac82d4c5707b16e5650161afedfe Author: Daniel Legt Date: Wed Apr 15 19:09:21 2026 +0300 Initial Commit diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..45f9b2e --- /dev/null +++ b/.dockerignore @@ -0,0 +1,5 @@ +.git +.gitignore +data +tmp +README.md.bak diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..666f59a --- /dev/null +++ b/.gitignore @@ -0,0 +1,37 @@ +# Build artifacts +/cpu-benchmark-server +/*.exe +/*.exe~ +/*.dll +/*.so +/*.dylib +/*.test +/*.out +/dist/ +/build/ + +# Go coverage and profiling +/coverage.out +/coverage.html +/cover.out +/cpu.prof +/mem.prof + +# Application data +/data/ +/tmp/ + +# Environment files +.env +.env.* +!.env.example + +# IDE and OS files +.DS_Store +.idea/ +.vscode/ +.codex +*.swp +*.swo + +example_jsons diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..2c3ea33 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,30 @@ +FROM golang:1.23-alpine AS builder + +WORKDIR /src + +COPY go.mod go.sum ./ +RUN go mod download + +COPY . . +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -trimpath -ldflags="-s -w" -o /out/cpu-benchmark-server . + +FROM alpine:3.21 + +WORKDIR /app + +RUN apk add --no-cache ca-certificates + +COPY --from=builder /out/cpu-benchmark-server /app/cpu-benchmark-server +COPY templates /app/templates +COPY example_jsons /app/example_jsons + +ENV APP_ADDR=:8080 +ENV BADGER_DIR=/data/badger +ENV PAGE_SIZE=50 +ENV SHUTDOWN_TIMEOUT=10s + +VOLUME ["/data"] + +EXPOSE 8080 + +ENTRYPOINT ["/app/cpu-benchmark-server"] diff --git a/README.md b/README.md new file mode 100644 index 0000000..46cb945 --- /dev/null +++ b/README.md @@ -0,0 +1,200 @@ +# CPU Benchmark Submission Server + +Production-oriented Go web application for ingesting CPU benchmark results, storing them in BadgerDB, searching them from an in-memory index, and rendering a server-side HTML dashboard. 
+ +## Features + +- `POST /api/submit` accepts either `application/json` or `multipart/form-data`. +- `GET /api/search` performs case-insensitive token matching against submitter/general fields and CPU brand strings. +- `GET /` renders the latest submissions with search and pagination. +- BadgerDB stores each submission under a reverse-timestamp key so native iteration returns newest records first. +- A startup-loaded in-memory search index prevents full DB deserialization for every query. +- Graceful shutdown closes the HTTP server and BadgerDB cleanly to avoid lock issues. + +## Project Layout + +```text +. +├── main.go +├── handlers.go +├── db.go +├── models.go +├── templates/index.html +├── http/ +├── example_jsons/ +├── Dockerfile +└── docker-compose.yml +``` + +## Data Model + +Each stored submission contains: + +- `submissionID`: server-generated UUID +- `submitter`: defaults to `Anonymous` if omitted +- `submittedAt`: server-side storage timestamp +- Benchmark payload fields: + - `config` + - `cpuInfo` + - `startedAt` + - `duration` + - `totalOps` + - `mOpsPerSec` + - `score` + - `coreResults` + +The parser also accepts optional CPU metadata found in your local sample JSON files such as `isHybrid`, `has3DVCache`, `supportedFeatures`, and `cores`. + +## Requirements + +- Go `1.23+` +- Docker and Docker Compose if running the containerized version + +## Local Development + +1. Resolve modules: + + ```bash + go mod tidy + ``` + +2. Start the server: + + ```bash + go run . + ``` + +3. 
Open: + + - UI: `http://localhost:8080/` + - API health check: `http://localhost:8080/healthz` + +### Environment Variables + +| Variable | Default | Description | +| --- | --- | --- | +| `APP_ADDR` | `:8080` | HTTP listen address | +| `BADGER_DIR` | `data/badger` | BadgerDB directory | +| `PAGE_SIZE` | `50` | Default number of cards per UI page | +| `SHUTDOWN_TIMEOUT` | `10s` | Graceful shutdown timeout | + +## API Usage + +### `POST /api/submit` + +Accepted content types: + +- `application/json` +- `multipart/form-data` + +JSON requests support either: + +1. A wrapper envelope with `submitter` and nested `benchmark` +2. A raw benchmark JSON body, with optional submitter provided via: + - query string `?submitter=...` + - header `X-Submitter` + - top-level `submitter` field + +Multipart requests support: + +- `submitter` text field +- benchmark JSON as one of these file fields: `benchmark`, `file`, `benchmarkFile` +- or benchmark JSON as text fields: `benchmark`, `payload`, `result`, `data` + +Example success response: + +```json +{ + "success": true, + "submissionID": "8f19d442-1be0-4989-97cf-3f8ee6b61548", + "submitter": "Workstation-Lab-A", + "submittedAt": "2026-04-15T15:45:41.327225Z" +} +``` + +### `GET /api/search` + +Query parameters: + +- `text`: token-matches submitter and general searchable fields +- `cpu`: token-matches `cpuInfo.brandString` + +Example: + +```bash +curl "http://localhost:8080/api/search?text=intel&cpu=13700" +``` + +### `GET /` + +Query parameters: + +- `page` +- `text` +- `cpu` + +Examples: + +```text +http://localhost:8080/ +http://localhost:8080/?page=2 +http://localhost:8080/?text=anonymous&cpu=ryzen +``` + +## Request Examples + +Ready-to-run HTTP client examples are included in: + +- `http/submit-json.http` +- `http/submit-multipart.http` +- `http/search.http` + +You can also submit one of the provided sample payloads directly: + +```bash +curl -X POST "http://localhost:8080/api/submit?submitter=Example-CLI" \ + -H "Content-Type: 
application/json" \ + --data-binary @example_jsons/5800X/cpu-bench-result.json +``` + +Or as multipart: + +```bash +curl -X POST "http://localhost:8080/api/submit" \ + -F "submitter=Example-Multipart" \ + -F "benchmark=@example_jsons/i9/cpu-bench-result.json;type=application/json" +``` + +## Storage and Search Strategy + +- Primary keys are written as `submission:<reversed-timestamp>:<submissionID>`. +- Reversing the timestamp means lexicographically ascending iteration yields newest submissions first. +- On startup, all submissions are loaded into an in-memory index containing: + - canonical submission payload + - normalized general search text + - normalized CPU brand text +- Searches scan the in-memory ordered slice rather than reopening and deserializing Badger values for every request. + +## Docker + +Build and run with Docker Compose: + +```bash +docker compose up --build +``` + +The container exposes port `8080` and persists BadgerDB data in the named volume `badger-data`. + +To build manually: + +```bash +docker build -t cpu-benchmark-server . +docker run --rm -p 8080:8080 -v cpu-benchmark-data:/data cpu-benchmark-server +``` + +## Notes + +- The UI uses Go templates plus Tailwind CSS via CDN. +- Search is token-based and case-insensitive rather than edit-distance based. +- Unknown JSON fields are ignored, so benchmark clients can evolve without immediately breaking ingestion. +- If you stop the service abruptly and leave a lock behind, restart after the process exits cleanly or remove the old lock file only when you know no other instance is using the DB. 
diff --git a/db.go b/db.go new file mode 100644 index 0000000..fdffa69 --- /dev/null +++ b/db.go @@ -0,0 +1,254 @@ +package main + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "sync" + "time" + + "github.com/dgraph-io/badger/v4" + "github.com/google/uuid" +) + +const submissionPrefix = "submission:" + +type indexedSubmission struct { + submission *Submission + searchText string + cpuText string + submittedAt time.Time +} + +type Store struct { + db *badger.DB + mu sync.RWMutex + orderedIDs []string + records map[string]*indexedSubmission +} + +func OpenStore(path string) (*Store, error) { + opts := badger.DefaultOptions(path).WithLogger(nil) + db, err := badger.Open(opts) + if err != nil { + return nil, err + } + + store := &Store{ + db: db, + records: make(map[string]*indexedSubmission), + } + + if err := store.loadIndex(); err != nil { + _ = db.Close() + return nil, err + } + + return store, nil +} + +func (s *Store) Close() error { + return s.db.Close() +} + +func (s *Store) Count() int { + s.mu.RLock() + defer s.mu.RUnlock() + + return len(s.orderedIDs) +} + +func (s *Store) SaveSubmission(result BenchmarkResult, submitter string) (*Submission, error) { + submission := &Submission{ + SubmissionID: uuid.NewString(), + Submitter: normalizeSubmitter(submitter), + SubmittedAt: time.Now().UTC(), + BenchmarkResult: result, + } + + key := submissionKey(submission.SubmittedAt, submission.SubmissionID) + payload, err := json.Marshal(submission) + if err != nil { + return nil, err + } + + if err := s.db.Update(func(txn *badger.Txn) error { + return txn.Set([]byte(key), payload) + }); err != nil { + return nil, err + } + + indexed := newIndexedSubmission(submission) + + s.mu.Lock() + s.records[submission.SubmissionID] = indexed + s.orderedIDs = append([]string{submission.SubmissionID}, s.orderedIDs...) 
+ s.mu.Unlock() + + return cloneSubmission(submission), nil +} + +func (s *Store) ListSubmissions(page, pageSize int) ([]Submission, int) { + s.mu.RLock() + defer s.mu.RUnlock() + + total := len(s.orderedIDs) + start, end, _ := pageBounds(page, pageSize, total) + results := make([]Submission, 0, max(0, end-start)) + + for _, id := range s.orderedIDs[start:end] { + record := s.records[id] + if record == nil { + continue + } + + results = append(results, *cloneSubmission(record.submission)) + } + + return results, total +} + +func (s *Store) SearchSubmissions(text, cpu string) []Submission { + queryText := normalizeSearchText(text) + cpuText := normalizeSearchText(cpu) + + s.mu.RLock() + defer s.mu.RUnlock() + + results := make([]Submission, 0) + for _, id := range s.orderedIDs { + record := s.records[id] + if record == nil { + continue + } + + if !matchesSearch(record.searchText, queryText) { + continue + } + + if !matchesSearch(record.cpuText, cpuText) { + continue + } + + results = append(results, *cloneSubmission(record.submission)) + } + + return results +} + +func (s *Store) loadIndex() error { + return s.db.View(func(txn *badger.Txn) error { + opts := badger.DefaultIteratorOptions + opts.PrefetchValues = true + opts.Prefix = []byte(submissionPrefix) + + it := txn.NewIterator(opts) + defer it.Close() + + for it.Rewind(); it.Valid(); it.Next() { + item := it.Item() + payload, err := item.ValueCopy(nil) + if err != nil { + return err + } + + var submission Submission + if err := json.Unmarshal(payload, &submission); err != nil { + return fmt.Errorf("decode %q: %w", item.Key(), err) + } + + indexed := newIndexedSubmission(&submission) + s.records[submission.SubmissionID] = indexed + s.orderedIDs = append(s.orderedIDs, submission.SubmissionID) + } + + return nil + }) +} + +func newIndexedSubmission(submission *Submission) *indexedSubmission { + return &indexedSubmission{ + submission: cloneSubmission(submission), + searchText: buildSearchText(submission), + 
cpuText: normalizeSearchText(submission.CPUInfo.BrandString), + submittedAt: submission.SubmittedAt, + } +} + +func buildSearchText(submission *Submission) string { + parts := []string{ + submission.SubmissionID, + submission.Submitter, + submission.CPUInfo.BrandString, + submission.CPUInfo.VendorID, + threadModeLabel(submission.Config.MultiCore), + strconv.Itoa(submission.Config.DurationSecs), + strconv.Itoa(submission.CPUInfo.PhysicalCores), + strconv.Itoa(submission.CPUInfo.LogicalCores), + strconv.FormatInt(submission.Duration, 10), + strconv.FormatInt(submission.TotalOps, 10), + strconv.FormatInt(submission.Score, 10), + fmt.Sprintf("%.4f", submission.MOpsPerSec), + } + + for _, feature := range submission.CPUInfo.SupportedFeatures { + parts = append(parts, feature) + } + + for _, result := range submission.CoreResults { + parts = append(parts, + strconv.Itoa(result.LogicalID), + result.CoreType, + strconv.FormatInt(result.TotalOps, 10), + fmt.Sprintf("%.4f", result.MOpsPerSec), + ) + } + + return normalizeSearchText(strings.Join(parts, " ")) +} + +func submissionKey(timestamp time.Time, submissionID string) string { + reversed := math.MaxInt64 - timestamp.UTC().UnixNano() + return fmt.Sprintf("%s%019d:%s", submissionPrefix, reversed, submissionID) +} + +func normalizeSearchText(value string) string { + return strings.Join(strings.Fields(strings.ToLower(value)), " ") +} + +func matchesSearch(target, query string) bool { + if query == "" { + return true + } + + for _, token := range strings.Fields(query) { + if !strings.Contains(target, token) { + return false + } + } + + return true +} + +func cloneSubmission(submission *Submission) *Submission { + if submission == nil { + return nil + } + + copySubmission := *submission + if len(submission.CoreResults) > 0 { + copySubmission.CoreResults = append([]CoreResult(nil), submission.CoreResults...) 
+ }
 + + if len(submission.CPUInfo.Cores) > 0 { + copySubmission.CPUInfo.Cores = append([]CPUCoreDescriptor(nil), submission.CPUInfo.Cores...) + } + + if len(submission.CPUInfo.SupportedFeatures) > 0 { + copySubmission.CPUInfo.SupportedFeatures = append([]string(nil), submission.CPUInfo.SupportedFeatures...) + } + + return &copySubmission +} diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..db9413b --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,19 @@ +services: + app: + build: + context: . + dockerfile: Dockerfile + container_name: cpu-benchmark-server + ports: + - "8080:8080" + environment: + APP_ADDR: ":8080" + BADGER_DIR: /data/badger + PAGE_SIZE: "50" + SHUTDOWN_TIMEOUT: 10s + volumes: + - badger-data:/data + restart: unless-stopped + +volumes: + badger-data: diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..f06e66d --- /dev/null +++ b/go.mod @@ -0,0 +1,26 @@ +module cpu-benchmark-server + +go 1.23.0 + +require ( + github.com/dgraph-io/badger/v4 v4.9.1 + github.com/go-chi/chi/v5 v5.2.5 + github.com/google/uuid v1.6.0 +) + +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/flatbuffers v25.2.10+incompatible // indirect + github.com/klauspost/compress v1.18.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.35.0 // indirect + google.golang.org/protobuf v1.36.7 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..8fc99dc --- /dev/null +++ b/go.sum @@ -0,0 +1,47 @@ +github.com/cespare/xxhash/v2 v2.3.0 
h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/badger/v4 v4.9.1 h1:DocZXZkg5JJHJPtUErA0ibyHxOVUDVoXLSCV6t8NC8w= +github.com/dgraph-io/badger/v4 v4.9.1/go.mod h1:5/MEx97uzdPUHR4KtkNt8asfI2T4JiEiQlV7kWUo8c0= +github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM= +github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38= +github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/go-chi/chi/v5 v5.2.5 h1:Eg4myHZBjyvJmAFjFvWgrqDTXFyOzjj7YIm3L3mu6Ug= +github.com/go-chi/chi/v5 v5.2.5/go.mod h1:X7Gx4mteadT3eDOMTsXzmI4/rwUpOwBHLpAfupzFJP0= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= +github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp 
v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= +google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/handlers.go b/handlers.go new file mode 100644 index 0000000..663bdb8 --- /dev/null +++ b/handlers.go @@ -0,0 +1,428 @@ +package main + +import ( + "encoding/json" + "fmt" + "html/template" + "io" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/go-chi/chi/v5" + "github.com/go-chi/chi/v5/middleware" +) + +const maxSubmissionBytes = 4 << 20 + +type App struct { + store *Store + templates *template.Template + pageSize int +} + +type indexPageData struct { + Submissions []Submission + QueryText string + QueryCPU string + Page int + TotalPages int + TotalCount int + ShowingFrom int + ShowingTo int + PrevURL string + NextURL string + SearchMode bool +} + +type jsonSubmissionEnvelope struct { + Submitter string `json:"submitter"` + Benchmark *BenchmarkResult `json:"benchmark"` + Result *BenchmarkResult `json:"result"` + Data *BenchmarkResult `json:"data"` +} + +type flatSubmissionEnvelope struct { + Submitter string `json:"submitter"` + BenchmarkResult +} + +type errorResponse struct { + Error string `json:"error"` +} + +func NewApp(store *Store, pageSize int) (*App, error) { + funcs := template.FuncMap{ + "formatInt64": formatInt64, + "formatFloat": formatFloat, + "formatTime": formatTime, + "modeLabel": threadModeLabel, + } + + templates, err := template.New("index.html").Funcs(funcs).ParseFiles("templates/index.html") + if err != nil { + return nil, err + } + + return &App{ + store: store, + templates: templates, + pageSize: pageSize, + }, nil +} + +func (a *App) Routes() http.Handler { + router := chi.NewRouter() + router.Use(middleware.RequestID) + router.Use(middleware.RealIP) + router.Use(middleware.Logger) + router.Use(middleware.Recoverer) + router.Use(middleware.Timeout(30 * time.Second)) + + router.Get("/", a.handleIndex) + router.Get("/healthz", a.handleHealth) + 
router.Get("/api/search", a.handleSearch) + router.Post("/api/submit", a.handleSubmit) + + return router +} + +func (a *App) handleIndex(w http.ResponseWriter, r *http.Request) { + page := parsePositiveInt(r.URL.Query().Get("page"), 1) + text := strings.TrimSpace(r.URL.Query().Get("text")) + cpu := strings.TrimSpace(r.URL.Query().Get("cpu")) + + searchMode := text != "" || cpu != "" + var ( + submissions []Submission + totalCount int + ) + + if searchMode { + matches := a.store.SearchSubmissions(text, cpu) + totalCount = len(matches) + start, end, normalizedPage := pageBounds(page, a.pageSize, totalCount) + page = normalizedPage + submissions = matches[start:end] + } else { + var count int + submissions, count = a.store.ListSubmissions(page, a.pageSize) + totalCount = count + _, _, page = pageBounds(page, a.pageSize, totalCount) + } + + totalPages := totalPages(totalCount, a.pageSize) + showingFrom := 0 + showingTo := 0 + if totalCount > 0 && len(submissions) > 0 { + showingFrom = (page-1)*a.pageSize + 1 + showingTo = showingFrom + len(submissions) - 1 + } + + data := indexPageData{ + Submissions: submissions, + QueryText: text, + QueryCPU: cpu, + Page: page, + TotalPages: totalPages, + TotalCount: totalCount, + ShowingFrom: showingFrom, + ShowingTo: showingTo, + PrevURL: buildIndexURL(max(1, page-1), text, cpu), + NextURL: buildIndexURL(min(totalPages, page+1), text, cpu), + SearchMode: searchMode, + } + + if err := a.templates.ExecuteTemplate(w, "index.html", data); err != nil { + http.Error(w, fmt.Sprintf("render template: %v", err), http.StatusInternalServerError) + } +} + +func (a *App) handleHealth(w http.ResponseWriter, r *http.Request) { + writeJSON(w, http.StatusOK, map[string]any{ + "status": "ok", + "submissions": a.store.Count(), + }) +} + +func (a *App) handleSearch(w http.ResponseWriter, r *http.Request) { + text := r.URL.Query().Get("text") + cpu := r.URL.Query().Get("cpu") + results := a.store.SearchSubmissions(text, cpu) + writeJSON(w, 
http.StatusOK, results) +} + +func (a *App) handleSubmit(w http.ResponseWriter, r *http.Request) { + r.Body = http.MaxBytesReader(w, r.Body, maxSubmissionBytes) + + result, submitter, err := parseSubmissionRequest(r) + if err != nil { + writeJSON(w, http.StatusBadRequest, errorResponse{Error: err.Error()}) + return + } + + if err := result.Validate(); err != nil { + writeJSON(w, http.StatusBadRequest, errorResponse{Error: err.Error()}) + return + } + + submission, err := a.store.SaveSubmission(result, submitter) + if err != nil { + writeJSON(w, http.StatusInternalServerError, errorResponse{Error: fmt.Sprintf("store submission: %v", err)}) + return + } + + writeJSON(w, http.StatusCreated, map[string]any{ + "success": true, + "submissionID": submission.SubmissionID, + "submitter": submission.Submitter, + "submittedAt": submission.SubmittedAt, + }) +} + +func parseSubmissionRequest(r *http.Request) (BenchmarkResult, string, error) { + contentType := r.Header.Get("Content-Type") + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil && contentType != "" { + return BenchmarkResult{}, "", fmt.Errorf("parse content type: %w", err) + } + + switch mediaType { + case "", "application/json": + return parseJSONSubmission(r) + case "multipart/form-data": + return parseMultipartSubmission(r) + default: + return BenchmarkResult{}, "", fmt.Errorf("unsupported content type %q", mediaType) + } +} + +func parseJSONSubmission(r *http.Request) (BenchmarkResult, string, error) { + body, err := io.ReadAll(r.Body) + if err != nil { + return BenchmarkResult{}, "", fmt.Errorf("read request body: %w", err) + } + + submitter := firstNonEmpty( + r.URL.Query().Get("submitter"), + r.Header.Get("X-Submitter"), + ) + + var nested jsonSubmissionEnvelope + if err := json.Unmarshal(body, &nested); err == nil { + submitter = firstNonEmpty(nested.Submitter, submitter) + for _, candidate := range []*BenchmarkResult{nested.Benchmark, nested.Result, nested.Data} { + if candidate != nil { 
+ return *candidate, submitter, nil + } + } + } + + var flat flatSubmissionEnvelope + if err := json.Unmarshal(body, &flat); err != nil { + return BenchmarkResult{}, "", fmt.Errorf("decode benchmark JSON: %w", err) + } + + submitter = firstNonEmpty(flat.Submitter, submitter) + return flat.BenchmarkResult, submitter, nil +} + +func parseMultipartSubmission(r *http.Request) (BenchmarkResult, string, error) { + if err := r.ParseMultipartForm(maxSubmissionBytes); err != nil { + return BenchmarkResult{}, "", fmt.Errorf("parse multipart form: %w", err) + } + + submitter := r.FormValue("submitter") + payload, err := readMultipartPayload(r) + if err != nil { + return BenchmarkResult{}, "", err + } + + var result BenchmarkResult + if err := json.Unmarshal(payload, &result); err != nil { + return BenchmarkResult{}, "", fmt.Errorf("decode benchmark JSON: %w", err) + } + + return result, submitter, nil +} + +func readMultipartPayload(r *http.Request) ([]byte, error) { + fileFields := []string{"benchmark", "file", "benchmarkFile"} + for _, field := range fileFields { + file, _, err := r.FormFile(field) + if err == nil { + defer file.Close() + payload, readErr := io.ReadAll(file) + if readErr != nil { + return nil, fmt.Errorf("read multipart benchmark file: %w", readErr) + } + + return payload, nil + } + + if err != http.ErrMissingFile { + return nil, fmt.Errorf("read multipart benchmark file: %w", err) + } + } + + textFields := []string{"benchmark", "payload", "result", "data"} + for _, field := range textFields { + if value := strings.TrimSpace(r.FormValue(field)); value != "" { + return []byte(value), nil + } + } + + return nil, fmt.Errorf("multipart request must include benchmark JSON in a file field or text field named benchmark") +} + +func parsePositiveInt(raw string, fallback int) int { + if raw == "" { + return fallback + } + + value, err := strconv.Atoi(raw) + if err != nil || value <= 0 { + return fallback + } + + return value +} + +func pageBounds(page, pageSize, 
total int) (int, int, int) { + if pageSize <= 0 { + pageSize = 50 + } + + totalPages := totalPages(total, pageSize) + if totalPages == 0 { + return 0, 0, 1 + } + + if page < 1 { + page = 1 + } + + if page > totalPages { + page = totalPages + } + + start := (page - 1) * pageSize + end := min(total, start+pageSize) + return start, end, page +} + +func totalPages(total, pageSize int) int { + if total == 0 || pageSize <= 0 { + return 0 + } + + pages := total / pageSize + if total%pageSize != 0 { + pages++ + } + + return pages +} + +func buildIndexURL(page int, text, cpu string) string { + if page < 1 { + page = 1 + } + + values := url.Values{} + values.Set("page", strconv.Itoa(page)) + if strings.TrimSpace(text) != "" { + values.Set("text", text) + } + if strings.TrimSpace(cpu) != "" { + values.Set("cpu", cpu) + } + + return "/?" + values.Encode() +} + +func writeJSON(w http.ResponseWriter, status int, payload any) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.WriteHeader(status) + encoder := json.NewEncoder(w) + encoder.SetIndent("", " ") + _ = encoder.Encode(payload) +} + +func firstNonEmpty(values ...string) string { + for _, value := range values { + if trimmed := strings.TrimSpace(value); trimmed != "" { + return trimmed + } + } + + return "" +} + +func formatInt64(value int64) string { + negative := value < 0 + if negative { + value = -value + } + + digits := strconv.FormatInt(value, 10) + if len(digits) <= 3 { + if negative { + return "-" + digits + } + return digits + } + + var builder strings.Builder + if negative { + builder.WriteByte('-') + } + + pre := len(digits) % 3 + if pre > 0 { + builder.WriteString(digits[:pre]) + if len(digits) > pre { + builder.WriteByte(',') + } + } + + for i := pre; i < len(digits); i += 3 { + builder.WriteString(digits[i : i+3]) + if i+3 < len(digits) { + builder.WriteByte(',') + } + } + + return builder.String() +} + +func formatFloat(value float64) string { + return fmt.Sprintf("%.2f", value) +} + 
+func formatTime(value time.Time) string { + if value.IsZero() { + return "-" + } + + return value.Format("2006-01-02 15:04:05 MST") +} + +func min(a, b int) int { + if a < b { + return a + } + + return b +} + +func max(a, b int) int { + if a > b { + return a + } + + return b +} diff --git a/http/search.http b/http/search.http new file mode 100644 index 0000000..10719b5 --- /dev/null +++ b/http/search.http @@ -0,0 +1,9 @@ +GET http://localhost:8080/api/search?text=intel&cpu=13700 + +### + +GET http://localhost:8080/api/search?text=anonymous + +### + +GET http://localhost:8080/?page=1&text=lab&cpu=ryzen diff --git a/http/submit-json.http b/http/submit-json.http new file mode 100644 index 0000000..a168123 --- /dev/null +++ b/http/submit-json.http @@ -0,0 +1,45 @@ +POST http://localhost:8080/api/submit +Content-Type: application/json + +{ + "submitter": "Workstation-Lab-A", + "benchmark": { + "config": { + "durationSecs": 20, + "intensity": 10, + "coreFilter": 0, + "multiCore": true + }, + "cpuInfo": { + "brandString": "AMD Ryzen 7 5800X 8-Core Processor", + "vendorID": "AuthenticAMD", + "physicalCores": 8, + "logicalCores": 16, + "baseClockMHz": 3801, + "boostClockMHz": 0, + "l1DataKB": 32, + "l2KB": 512, + "l3MB": 32, + "supportedFeatures": ["SSE4.2", "AVX", "AVX2", "FMA3"] + }, + "startedAt": "2026-04-15T18:18:08.0218738+03:00", + "duration": 20008134300, + "totalOps": 61379641344, + "mOpsPerSec": 3067.734373614236, + "score": 306773, + "coreResults": [ + { + "logicalID": 0, + "coreType": "Standard", + "mOpsPerSec": 345.52091146249455, + "totalOps": 6913228800 + }, + { + "logicalID": 1, + "coreType": "Standard", + "mOpsPerSec": 491.21699547968353, + "totalOps": 9828335616 + } + ] + } +} diff --git a/http/submit-multipart.http b/http/submit-multipart.http new file mode 100644 index 0000000..d1ecbed --- /dev/null +++ b/http/submit-multipart.http @@ -0,0 +1,50 @@ +POST http://localhost:8080/api/submit +Content-Type: multipart/form-data; boundary=BenchBoundary + 
+--BenchBoundary +Content-Disposition: form-data; name="submitter" + +Intel-Test-Rig +--BenchBoundary +Content-Disposition: form-data; name="benchmark"; filename="cpu-bench-result.json" +Content-Type: application/json + +{ + "config": { + "durationSecs": 10, + "intensity": 10, + "coreFilter": 0, + "multiCore": true + }, + "cpuInfo": { + "brandString": "Intel(R) Core(TM) i9-10850K CPU @ 3.60GHz", + "vendorID": "GenuineIntel", + "physicalCores": 10, + "logicalCores": 20, + "baseClockMHz": 3600, + "boostClockMHz": 0, + "l1DataKB": 32, + "l2KB": 256, + "l3MB": 20 + }, + "startedAt": "2026-04-15T17:59:51.4840579+03:00", + "duration": 10056935100, + "totalOps": 49828626432, + "mOpsPerSec": 4954.6532752309395, + "score": 495465, + "coreResults": [ + { + "logicalID": 0, + "coreType": "Standard", + "mOpsPerSec": 234.3495683888822, + "totalOps": 2356838400 + }, + { + "logicalID": 1, + "coreType": "Standard", + "mOpsPerSec": 250.5658742890764, + "totalOps": 2519924736 + } + ] +} +--BenchBoundary-- diff --git a/main.go b/main.go new file mode 100644 index 0000000..650cc88 --- /dev/null +++ b/main.go @@ -0,0 +1,135 @@ +package main + +import ( + "context" + "errors" + "log" + "net/http" + "os" + "os/signal" + "strconv" + "sync" + "syscall" + "time" +) + +type AppConfig struct { + Addr string + BadgerDir string + PageSize int + ShutdownTimeout time.Duration +} + +func main() { + logger := log.New(os.Stdout, "", log.LstdFlags|log.LUTC) + if err := run(logger); err != nil { + logger.Printf("server error: %v", err) + os.Exit(1) + } +} + +func run(logger *log.Logger) error { + cfg := loadConfig() + + if err := os.MkdirAll(cfg.BadgerDir, 0o755); err != nil { + return err + } + + store, err := OpenStore(cfg.BadgerDir) + if err != nil { + return err + } + + var closeOnce sync.Once + closeStore := func() { + if err := store.Close(); err != nil { + logger.Printf("close store: %v", err) + } + } + defer closeOnce.Do(closeStore) + + app, err := NewApp(store, cfg.PageSize) + if err != nil { 
+ return err + } + + server := &http.Server{ + Addr: cfg.Addr, + Handler: app.Routes(), + ReadHeaderTimeout: 5 * time.Second, + ReadTimeout: 15 * time.Second, + WriteTimeout: 30 * time.Second, + IdleTimeout: 60 * time.Second, + } + + ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer stop() + + go func() { + <-ctx.Done() + logger.Printf("shutdown signal received") + + shutdownCtx, cancel := context.WithTimeout(context.Background(), cfg.ShutdownTimeout) + defer cancel() + + if err := server.Shutdown(shutdownCtx); err != nil { + logger.Printf("http shutdown: %v", err) + } + + closeOnce.Do(closeStore) + }() + + logger.Printf("listening on %s", cfg.Addr) + err = server.ListenAndServe() + closeOnce.Do(closeStore) + if err != nil && !errors.Is(err, http.ErrServerClosed) { + return err + } + + return nil +} + +func loadConfig() AppConfig { + return AppConfig{ + Addr: envOrDefault("APP_ADDR", ":8080"), + BadgerDir: envOrDefault("BADGER_DIR", "data/badger"), + PageSize: envIntOrDefault("PAGE_SIZE", 50), + ShutdownTimeout: envDurationOrDefault("SHUTDOWN_TIMEOUT", 10*time.Second), + } +} + +func envOrDefault(key, fallback string) string { + if value := os.Getenv(key); value != "" { + return value + } + + return fallback +} + +func envIntOrDefault(key string, fallback int) int { + value := os.Getenv(key) + if value == "" { + return fallback + } + + parsed, err := strconv.Atoi(value) + if err != nil || parsed <= 0 { + return fallback + } + + return parsed +} + +func envDurationOrDefault(key string, fallback time.Duration) time.Duration { + value := os.Getenv(key) + if value == "" { + return fallback + } + + parsed, err := time.ParseDuration(value) + if err != nil || parsed <= 0 { + return fallback + } + + return parsed +} diff --git a/models.go b/models.go new file mode 100644 index 0000000..91c747f --- /dev/null +++ b/models.go @@ -0,0 +1,120 @@ +package main + +import ( + "errors" + "fmt" + "strings" + "time" +) + +type 
BenchmarkConfig struct { + DurationSecs int `json:"durationSecs"` + Intensity int `json:"intensity"` + CoreFilter int `json:"coreFilter"` + MultiCore bool `json:"multiCore"` +} + +type CPUInfo struct { + BrandString string `json:"brandString"` + VendorID string `json:"vendorID"` + PhysicalCores int `json:"physicalCores"` + LogicalCores int `json:"logicalCores"` + BaseClockMHz int `json:"baseClockMHz"` + BoostClockMHz int `json:"boostClockMHz"` + L1DataKB int `json:"l1DataKB"` + L2KB int `json:"l2KB"` + L3MB int `json:"l3MB"` + IsHybrid bool `json:"isHybrid,omitempty"` + Has3DVCache bool `json:"has3DVCache,omitempty"` + PCoreCount int `json:"pCoreCount,omitempty"` + ECoreCount int `json:"eCoreCount,omitempty"` + Cores []CPUCoreDescriptor `json:"cores,omitempty"` + SupportedFeatures []string `json:"supportedFeatures,omitempty"` +} + +type CPUCoreDescriptor struct { + LogicalID int `json:"LogicalID"` + PhysicalID int `json:"PhysicalID"` + CoreID int `json:"CoreID"` + Type int `json:"Type"` +} + +type CoreResult struct { + LogicalID int `json:"logicalID"` + CoreType string `json:"coreType"` + MOpsPerSec float64 `json:"mOpsPerSec"` + TotalOps int64 `json:"totalOps"` +} + +type BenchmarkResult struct { + Config BenchmarkConfig `json:"config"` + CPUInfo CPUInfo `json:"cpuInfo"` + StartedAt time.Time `json:"startedAt"` + Duration int64 `json:"duration"` + TotalOps int64 `json:"totalOps"` + MOpsPerSec float64 `json:"mOpsPerSec"` + Score int64 `json:"score"` + CoreResults []CoreResult `json:"coreResults"` +} + +type Submission struct { + SubmissionID string `json:"submissionID"` + Submitter string `json:"submitter"` + SubmittedAt time.Time `json:"submittedAt"` + BenchmarkResult +} + +func (b BenchmarkResult) Validate() error { + if strings.TrimSpace(b.CPUInfo.BrandString) == "" { + return errors.New("cpuInfo.brandString is required") + } + + if b.StartedAt.IsZero() { + return errors.New("startedAt is required and must be RFC3339-compatible") + } + + if b.Config.DurationSecs 
<= 0 { + return errors.New("config.durationSecs must be greater than zero") + } + + if b.Duration <= 0 { + return errors.New("duration must be greater than zero") + } + + if b.TotalOps < 0 || b.Score < 0 || b.MOpsPerSec < 0 { + return errors.New("duration, totalOps, mOpsPerSec, and score must be non-negative") + } + + if b.CPUInfo.LogicalCores < 0 || b.CPUInfo.PhysicalCores < 0 { + return errors.New("cpu core counts must be non-negative") + } + + for _, result := range b.CoreResults { + if result.LogicalID < 0 { + return fmt.Errorf("coreResults.logicalID must be non-negative") + } + + if result.MOpsPerSec < 0 || result.TotalOps < 0 { + return fmt.Errorf("coreResults values must be non-negative") + } + } + + return nil +} + +func normalizeSubmitter(submitter string) string { + submitter = strings.TrimSpace(submitter) + if submitter == "" { + return "Anonymous" + } + + return submitter +} + +func threadModeLabel(multiCore bool) string { + if multiCore { + return "Multi-threaded" + } + + return "Single-threaded" +} diff --git a/templates/index.html b/templates/index.html new file mode 100644 index 0000000..23fae1b --- /dev/null +++ b/templates/index.html @@ -0,0 +1,182 @@ + + + + + + CPU Benchmark Submissions + + + +
<header>
  <div>
    <h1>CPU Benchmark Platform</h1>
    <p>Submission browser and API for local CPU benchmark runs</p>
  </div>
  <p>
    Browse recent benchmark submissions, filter by submitter or CPU brand, and inspect per-core throughput details.
  </p>
</header>

<section>
  <div>Total results: {{ .TotalCount }}</div>
  {{ if gt .ShowingTo 0 }}
  <div>Showing {{ .ShowingFrom }} to {{ .ShowingTo }}</div>
  {{ else }}
  <div>No submissions match the current filters</div>
  {{ end }}
</section>

<!-- NOTE(review): the original search-form markup (input names, button
     labels, preserved query values) was lost in extraction; restore the exact
     field names expected by the search handler before shipping. -->
<form method="get" action="/">
  <input type="search">
  <a href="/">Clear</a>
</form>

<section>
  {{ if not .Submissions }}
  <div>No submissions to display.</div>
  {{ end }}

  {{ range .Submissions }}
  <article>
    <div>
      <div>Submitter</div>
      <div>{{ .Submitter }}</div>
      <div>{{ .SubmissionID }}</div>
    </div>
    <div>
      <div>CPU</div>
      <div>{{ .CPUInfo.BrandString }}</div>
      <div>{{ .CPUInfo.VendorID }} • {{ .CPUInfo.PhysicalCores }}C / {{ .CPUInfo.LogicalCores }}T</div>
    </div>
    <div>
      <div>Score</div>
      <div>{{ formatInt64 .Score }}</div>
    </div>
    <div>
      <div>MOps/sec</div>
      <div>{{ formatFloat .MOpsPerSec }}</div>
    </div>
    <div>
      <div>Mode</div>
      <div>{{ modeLabel .Config.MultiCore }}</div>
    </div>
    <div>
      <div>Started</div>
      <div>{{ formatTime .StartedAt }}</div>
    </div>
    <div>
      <div>Submitted</div>
      <div>{{ formatTime .SubmittedAt }}</div>
    </div>
    <div>
      <div>Total ops</div>
      <div>{{ formatInt64 .TotalOps }}</div>
    </div>
    <div>
      <div>Benchmark config</div>
      <div>{{ .Config.DurationSecs }}s • intensity {{ .Config.Intensity }} • coreFilter {{ .Config.CoreFilter }}</div>
    </div>

    <table>
      <thead>
        <tr>
          <th>Logical ID</th>
          <th>Core Type</th>
          <th>MOps/sec</th>
          <th>Total Ops</th>
        </tr>
      </thead>
      <tbody>
        {{ range .CoreResults }}
        <tr>
          <td>{{ .LogicalID }}</td>
          <td>{{ .CoreType }}</td>
          <td>{{ formatFloat .MOpsPerSec }}</td>
          <td>{{ formatInt64 .TotalOps }}</td>
        </tr>
        {{ else }}
        <tr>
          <td colspan="4">No per-core results available.</td>
        </tr>
        {{ end }}
      </tbody>
    </table>
  </article>
  {{ end }}
</section>

{{ if gt .TotalPages 1 }}
<!-- NOTE(review): the pagination controls were lost in extraction; rebuild
     them from the page data (.TotalPages and related fields) exposed by the
     index handler. -->
{{ end }}
</body>
</html>