pax_global_header00006660000000000000000000000064147642651570014533gustar00rootroot0000000000000052 comment=b7f81a1bc8094dbec15cbfb10ce69330c68cc52c litetlog-0.4.3/000077500000000000000000000000001476426515700133625ustar00rootroot00000000000000litetlog-0.4.3/.github/000077500000000000000000000000001476426515700147225ustar00rootroot00000000000000litetlog-0.4.3/.github/workflows/000077500000000000000000000000001476426515700167575ustar00rootroot00000000000000litetlog-0.4.3/.github/workflows/test.yml000066400000000000000000000013631476426515700204640ustar00rootroot00000000000000name: Go tests on: [push, pull_request] permissions: contents: read jobs: test: runs-on: ubuntu-latest steps: - name: Install hurl run: | curl --location --remote-name https://github.com/Orange-OpenSource/hurl/releases/download/4.1.0/hurl_4.1.0_amd64.deb sudo apt-get update && sudo apt-get install ./hurl_4.1.0_amd64.deb - name: Checkout repository uses: actions/checkout@v2 with: fetch-depth: 0 - name: Install Go (from go.mod) uses: actions/setup-go@v4 with: go-version-file: go.mod check-latest: true - name: Run tests run: go test ./... - name: Run tests (short + race) run: go test -short -race ./... litetlog-0.4.3/LICENSE000066400000000000000000000013361476426515700143720ustar00rootroot00000000000000Copyright 2023 The litetlog Authors Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. litetlog-0.4.3/NEWS.md000066400000000000000000000036161476426515700144660ustar00rootroot00000000000000## v0.4.3 ### litewitness - Fixed SQLite concurrency issue. - Redacted IP addresses from `/logz`. ### witnessctl - Allow verifier keys that don't match the origin, like the Go sumdb's. ### litebastion - Redacted IP addresses from `/logz`. ## v0.4.2 ### litewitness - Fixed vkey encoding in logs and home page. - Improved `/logz` web page. ### litebastion - Improved `/logz` web page. ## v0.4.1 ### litebastion - Fixed formatting of backend key hashes in logs. ## v0.4.0 ### litebastion - Backend connection lifecycle events (including new details about errors) are now logged at the INFO level (the default). Client-side errors and HTTP/2 debug logs are now logged at the DEBUG level. - `Config.Log` is now a `log/slog.Logger` instead of a `log.Logger`. - `/logz` now exposes the debug logs in a simple public web console. At most ten clients can connect to it at a time. - New `-home-redirect` flag redirects the root to the given URL. - Connections to removed backends are now closed on SIGHUP, using the new `Bastion.FlushBackendConnections` method. ### litewitness - `/logz` now exposes the debug logs in a simple public web console. At most ten clients can connect to it at a time. ## v0.3.0 ### litewitness - Reduced Info log level verbosity, increased Debug log level verbosity. - Sending SIGUSR1 (`killall -USR1 litewitness`) will toggle log level between Info and Debug. 
- `-key` is now an SSH fingerprint (with `SHA256:` prefix) as printed by `ssh-add -l`. The old format is still accepted for compatibility. - The verifier key of the witness is logged on startup. - A small homepage listing the verifier key and the known logs is served at /. ### witnessctl - New `add-key` and `del-key` commands. - `add-log -key` was removed. The key is now added with `add-key`. ## v0.2.1 ### litewitness - Fix cosignature endianness. https://github.com/FiloSottile/litetlog/issues/12 litetlog-0.4.3/README.md000066400000000000000000000132301476426515700146400ustar00rootroot00000000000000# litetlog The litetlog repository is a collection of open-source tooling for transparency logs designed to be simple and lightweight. ## litewitness litewitness is a synchronous low-latency cosigning witness. (A witness is a service that accepts a new signed tree head, checks its consistency with the previous latest tree head, and returns a signature over it.) It implements the [c2sp.org/tlog-witness](https://c2sp.org/tlog-witness) protocol. It's backed by a SQLite database for storage, and by an ssh-agent for private key operations. To install it, use `go install`. ``` # from anywhere go install filippo.io/litetlog/cmd/{litewitness,witnessctl}@latest # from within a source tree go install filippo.io/litetlog/cmd/{litewitness,witnessctl} ``` litewitness has no config file. All configuration is done via command line flags or `witnessctl` (see below). -db string path to sqlite database (default "litewitness.db") The SQLite database is where known trees and tree heads are stored. It needs to be on a filesystem that supports locking (not a network file system). It will be created if it does not exist. -name string URL-like (e.g. example.com/foo) name of this witness The name of the witness is a URL-like value that will appear in cosignature lines. It does not need to be where the witness is reachable but should be recognizable. -key string SSH fingerprint (with SHA256: prefix) of the witness key -ssh-agent string path to ssh-agent socket (default "litewitness.sock") The witness Ed25519 private key is provided by a ssh-agent instance. The socket is specified explicitly because it's recommended that a dedicated instance is run for litewitness. The use of the ssh-agent protocol allows the key to be provided by a key file, a PKCS#11 module, or custom hardware agents. Example of starting a dedicated ssh-agent and loading a key: ``` ssh-agent -a litewitness.sock SSH_AUTH_SOCK=litewitness.sock ssh-add litewitness.pem ``` -bastion string address of the bastion(s) to reverse proxy through, comma separated, the first online one is selected -listen string address to listen for HTTP requests (default "localhost:7380") Only one of `-bastion` or `-listen` must be specified. The former will cause litewitness to serve requests through a bastion reverse proxy (see below). The latter will listen for HTTP requests on the specified port. (HTTPS needs to be terminated outside of litewitness.) The bastion flag is an optionally comma-separated list of bastions to try in order until one connects successfully. If the connection drops after establishing, litewitness exits. ### witnessctl witnessctl is a CLI tool to operate on the litewitness database. It can be used while litewitness is running. witnessctl add-log -db -origin The `add-log` command adds a new known log starting at a size of zero. Removing a log is not supported, as it presents the risk of signing a split view if re-added. 
To disable a log, remove all its keys. witnessctl add-key -db -origin -key witnessctl del-key -db -origin -key The `add-key` and `del-key` commands add and remove verifier keys for a known log. The name of the key must match the log origin. witnessctl add-sigsum-log -db -key The `add-sigsum-log` command is a helper that adds a new Sigsum log, computing the origin and key from a 32-byte hex-encoded Ed25519 public key. witnessctl list-logs -db The `list-logs` command lists known logs, in JSON lines like the following. {"origin":"sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562","size":5,"root_hash":"QrtXrQZCCvpIgsSmOsah7HdICzMLLyDfxToMql9WTjY=","keys":["sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562+5202289b+Af/cLU2Y5BJNP+r3iMDC+av9eWCD0fBJVDfzAux5zxAP"]} ## litebastion litebastion is a public-service reverse proxy for witnesses that can't be exposed directly to the internet. In short, a witness connects to a bastion over TLS with a Ed25519 client certificate, "reverses" the direction of the connection, and serves HTTP/2 requests over that connection. The bastion then proxies requests received at `//*` to that witness. -backends string file of accepted key hashes, one per line, reloaded on SIGHUP The only configuration file of litebastion is the backends file, which lists the acceptable client/witness key hashes. -listen string host and port to listen at (default "localhost:8443") -cache string directory to cache ACME certificates at -email string email address to register the ACME account with -host string host to obtain ACME certificate for Since litebastion needs to operate at a lower level than HTTPS on the witness side, it can't be behind a reverse proxy, and needs to configure its own TLS certificate. Use the `-cache`, `-email`, and `-host` flags to configure the ACME client. The ALPN ACME challenge is used, so as long as the `-listen` port receives connections to the `-host` name at port 443, everything should just work. ### bastion as a library It might be desirable to integrate bastion functionality in an existing binary, for example because there is only one IP address and hence only one port 443 to listen on. In that case, you can use the `filippo.io/litetlog/bastion` package. See [pkg.go.dev](https://pkg.go.dev/filippo.io/litetlog/bastion) for the documentation and in particular the [package example](https://pkg.go.dev/filippo.io/litetlog/bastion#example-package). litetlog-0.4.3/bastion/000077500000000000000000000000001476426515700150215ustar00rootroot00000000000000litetlog-0.4.3/bastion/bastion.go000066400000000000000000000165531476426515700170210ustar00rootroot00000000000000// Package bastion runs a reverse proxy service that allows un-addressable // applications (for example those running behind a firewall or a NAT, or where // the operator doesn't wish to take the DoS risk of being reachable from the // Internet) to accept HTTP requests. // // Backends are identified by an Ed25519 public key, they authenticate with a // self-signed TLS 1.3 certificate, and are reachable at a sub-path prefixed by // the key hash. // // Read more at // https://git.glasklar.is/sigsum/project/documentation/-/blob/main/bastion.md. package bastion import ( "context" "crypto/ed25519" "crypto/sha256" "crypto/tls" "encoding/hex" "errors" "fmt" "log/slog" "net/http" "net/http/httputil" "strings" "sync" "time" "golang.org/x/net/http2" ) // Config provides parameters for a new Bastion. 
type Config struct { // GetCertificate returns the certificate for bastion backend connections. GetCertificate func(*tls.ClientHelloInfo) (*tls.Certificate, error) // AllowedBackend returns whether the backend is allowed to // serve requests. It's passed the hash of its Ed25519 public key. // // AllowedBackend may be called concurrently. AllowedBackend func(keyHash [sha256.Size]byte) bool // Log is used to log backend connections states (as INFO) and errors in // forwarding requests (as DEBUG). If nil, [slog.Default] is used. Log *slog.Logger } // A Bastion keeps track of backend connections, and serves HTTP requests by // routing them to the matching backend. type Bastion struct { c *Config proxy *httputil.ReverseProxy pool *backendConnectionsPool } type keyHash [sha256.Size]byte func (kh keyHash) String() string { return hex.EncodeToString(kh[:]) } // New returns a new Bastion. // // The Config must not be modified after the call to New. func New(c *Config) (*Bastion, error) { b := &Bastion{c: c} b.pool = &backendConnectionsPool{ log: slog.Default(), conns: make(map[keyHash]*http2.ClientConn), } if c.Log != nil { b.pool.log = c.Log } b.proxy = &httputil.ReverseProxy{ Rewrite: func(pr *httputil.ProxyRequest) { pr.Out.URL.Scheme = "https" // needed for the required :scheme header pr.Out.Host = pr.In.Context().Value("backend").(string) pr.SetXForwarded() // We don't interpret the query, so pass it on unmodified. pr.Out.URL.RawQuery = pr.In.URL.RawQuery }, Transport: b.pool, ErrorLog: slog.NewLogLogger(b.pool.log.Handler(), slog.LevelDebug), } return b, nil } // ConfigureServer sets up srv to handle backend connections to the bastion. It // wraps TLSConfig.GetConfigForClient to intercept backend connections, and sets // TLSNextProto for the bastion ALPN protocol. The original tls.Config is still // used for non-bastion backend connections. // // Note that since TLSNextProto won't be nil after a call to ConfigureServer, // the caller might want to call [http2.ConfigureServer] as well. func (b *Bastion) ConfigureServer(srv *http.Server) error { if srv.TLSNextProto == nil { srv.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) } srv.TLSNextProto["bastion/0"] = b.pool.handleBackend bastionTLSConfig := &tls.Config{ MinVersion: tls.VersionTLS13, NextProtos: []string{"bastion/0"}, ClientAuth: tls.RequireAnyClientCert, VerifyConnection: func(cs tls.ConnectionState) error { h, err := backendHash(cs) if err != nil { return err } if !b.c.AllowedBackend(h) { return fmt.Errorf("unrecognized backend %x", h) } return nil }, GetCertificate: b.c.GetCertificate, } if srv.TLSConfig == nil { srv.TLSConfig = &tls.Config{} } oldGetConfigForClient := srv.TLSConfig.GetConfigForClient srv.TLSConfig.GetConfigForClient = func(chi *tls.ClientHelloInfo) (*tls.Config, error) { for _, proto := range chi.SupportedProtos { if proto == "bastion/0" { // This is a bastion connection from a backend. return bastionTLSConfig, nil } } if oldGetConfigForClient != nil { return oldGetConfigForClient(chi) } return nil, nil } return nil } func backendHash(cs tls.ConnectionState) (keyHash, error) { pk, ok := cs.PeerCertificates[0].PublicKey.(ed25519.PublicKey) if !ok { return keyHash{}, errors.New("self-signed certificate key type is not Ed25519") } return sha256.Sum256(pk), nil } // ServeHTTP serves requests rooted at "//" by routing them to the // backend that authenticated with that key. Other requests are served a 404 Not // Found status. 
func (b *Bastion) ServeHTTP(w http.ResponseWriter, r *http.Request) { path := r.URL.Path if !strings.HasPrefix(path, "/") { http.Error(w, "request must start with /KEY_HASH/", http.StatusNotFound) return } path = path[1:] kh, path, ok := strings.Cut(path, "/") if !ok { http.Error(w, "request must start with /KEY_HASH/", http.StatusNotFound) return } ctx := context.WithValue(r.Context(), "backend", kh) r = r.Clone(ctx) r.URL.Path = "/" + path b.proxy.ServeHTTP(w, r) } // FlushBackendConnections closes all for backends that don't pass // [Config.AllowedBackend] anymore. // // ctx is passed to [http2.ClientConn.Shutdown], and FlushBackendConnections // waits for all connections to be closed. func (b *Bastion) FlushBackendConnections(ctx context.Context) { wg := sync.WaitGroup{} defer wg.Wait() b.pool.Lock() defer b.pool.Unlock() for kh, cc := range b.pool.conns { if !b.c.AllowedBackend(kh) { wg.Add(1) go func() { if err := cc.Shutdown(ctx); err != nil { cc.Close() } wg.Done() }() delete(b.pool.conns, kh) } } } type backendConnectionsPool struct { log *slog.Logger sync.RWMutex conns map[keyHash]*http2.ClientConn } func (p *backendConnectionsPool) RoundTrip(r *http.Request) (*http.Response, error) { kh, err := hex.DecodeString(r.Host) if err != nil || len(kh) != sha256.Size { // TODO: return this as a response instead. return nil, errors.New("invalid backend key hash") } p.RLock() cc, ok := p.conns[keyHash(kh)] p.RUnlock() if !ok { // TODO: return this as a response instead. return nil, errors.New("backend unavailable") } return cc.RoundTrip(r) } func (p *backendConnectionsPool) handleBackend(hs *http.Server, c *tls.Conn, h http.Handler) { backend, err := backendHash(c.ConnectionState()) if err != nil { p.log.Info("failed to get backend hash", "err", err) return } l := p.log.With("backend", backend, "remote", c.RemoteAddr()) t := &http2.Transport{ // Send a PING every 15s, with the default 15s timeout. ReadIdleTimeout: 15 * time.Second, CountError: func(errType string) { l.Info("HTTP/2 transport error", "type", errType) }, } cc, err := t.NewClientConn(c) if err != nil { l.Info("failed to convert to HTTP/2 client connection", "err", err) return } ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() if err := cc.Ping(ctx); err != nil { l.Info("did not respond to PING", "err", err) return } p.Lock() if oldCC, ok := p.conns[backend]; ok && !oldCC.State().Closed { go func() { ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() if err := oldCC.Shutdown(ctx); err != nil { oldCC.Close() } }() } p.conns[backend] = cc p.Unlock() l.Info("accepted new backend connection") // We need not to return, or http.Server will close this connection. // There is no way to wait for the ClientConn's closing, so we poll. for !cc.State().Closed { time.Sleep(1 * time.Second) } l.Info("backend connection closed") } litetlog-0.4.3/bastion/example_test.go000066400000000000000000000036061476426515700200470ustar00rootroot00000000000000package bastion_test import ( "crypto/sha256" "io" "log" "net/http" "sync" "time" "filippo.io/litetlog/bastion" "golang.org/x/crypto/acme/autocert" "golang.org/x/net/http2" ) func Example() { // This example shows how to serve on the same address both a bastion // endpoint, and an unrelated HTTPS server. 
m := &autocert.Manager{ Cache: autocert.DirCache("/var/lib/example-autocert/"), Prompt: autocert.AcceptTOS, Email: "acme@example.com", HostPolicy: autocert.HostWhitelist("bastion.example.com", "www.example.com"), } var allowedBackendsMu sync.RWMutex var allowedBackends map[[sha256.Size]byte]bool b, err := bastion.New(&bastion.Config{ AllowedBackend: func(keyHash [sha256.Size]byte) bool { allowedBackendsMu.RLock() defer allowedBackendsMu.RUnlock() return allowedBackends[keyHash] }, GetCertificate: m.GetCertificate, }) if err != nil { log.Fatalf("failed to load bastion: %v", err) } mux := http.NewServeMux() // Note the use of a host-specific pattern to route HTTP requests for the // bastion endpoint to the Bastion implementation. mux.Handle("bastion.example.com/", b) mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { io.WriteString(w, "

Hello, world") }) hs := &http.Server{ Addr: "127.0.0.1:1337", Handler: http.MaxBytesHandler(mux, 10*1024), ReadTimeout: 5 * time.Second, WriteTimeout: 5 * time.Second, TLSConfig: m.TLSConfig(), } // ConfigureServer sets up TLSNextProto and a tls.Config.GetConfigForClient // for backend connections. if err := b.ConfigureServer(hs); err != nil { log.Fatalln("failed to configure bastion:", err) } // HTTP/2 needs to be explicitly re-enabled if desired because it's only // configured automatically by net/http if TLSNextProto is nil. if err := http2.ConfigureServer(hs, nil); err != nil { log.Fatalln("failed to configure HTTP/2:", err) } } litetlog-0.4.3/cmd/000077500000000000000000000000001476426515700141255ustar00rootroot00000000000000litetlog-0.4.3/cmd/apt-transport-tlog/000077500000000000000000000000001476426515700177065ustar00rootroot00000000000000litetlog-0.4.3/cmd/apt-transport-tlog/Dockerfile000066400000000000000000000006201476426515700216760ustar00rootroot00000000000000FROM golang:1.22.1-alpine3.19 as build WORKDIR /src RUN apk add build-base COPY go.mod go.sum ./ RUN go mod download COPY ./ ./ RUN go install -trimpath ./cmd/spicy FROM alpine:3.19.1 RUN apk add bash rclone rsync COPY --from=build /go/bin/spicy /usr/local/bin/spicy COPY cmd/apt-transport-tlog/update-bucket.sh /usr/local/bin/update-bucket.sh CMD ["bash", "/usr/local/bin/update-bucket.sh"] litetlog-0.4.3/cmd/apt-transport-tlog/README.md000066400000000000000000000035631476426515700211740ustar00rootroot00000000000000This is an **extremely early** prototype of a transparency log for APT repositories, and specifically for the Debian archive. The design is simple: offline-verifiable proofs of tlog inclusion ("spicy signatures") are generated for each InRelease file (which is the file signed with OpenPGP, and which contains the hashes of everything else in the repository) and hosted at a public URL; an apt transport plugin downloads and verifies the proof each time an InRelease file is being downloaded from the mirror. The proofs are generated with [`spicy`](https://github.com/FiloSottile/litetlog/blob/main/cmd/spicy/spicy.go) (also a prototype) by the `update-bucket.sh` script. It fetches the latest InRelease files every minute, and if any changes are detected it generates and uploads new proofs. The entries of the log are the whole InRelease files. An auditor would ensure they are all available on snapshot.debian.org, and that the repositories are consistent (e.g. that contents of a package version did not change from one iteration to another). In the future, the [checkpoint](https://c2sp.org/tlog-checkpoint) in the spicy signature would be [cosigned](https://c2sp.org/tlog-cosignature) by witnesses to prevent split-view attacks. This is designed to be easy to integrate upstream by any apt repository: `spicy` would be even easier to run at repository update time (same as `gpg -s`), proofs can be stored and distributed along with the InRelease files (as if they were regular detached signatures), and proof verification can be integrated in APT clients regardless of transport (it requires just simple parsing of a textual format, a few SHA-256 hashes, and Ed25519 signature verification). Even if the upstream keys were compromised, this system would ensure that any malfeasance could be detected, and that individual APT users could not be targeted with modified versions of the repository. 
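
For illustration, here is a minimal Go sketch (not part of this repository) of the checkpoint-verification half of that work, assuming the signed checkpoint note has already been extracted from the `.spicy` bundle, and using the verifier key hardcoded in `tlog.py`:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"strings"

	"golang.org/x/mod/sumdb/note"
)

func main() {
	// Hypothetical input file: the signed checkpoint note extracted from an
	// InRelease.spicy bundle.
	msg, err := os.ReadFile("checkpoint.note")
	if err != nil {
		log.Fatal(err)
	}

	// Verifier key of the log, as hardcoded in tlog.py.
	v, err := note.NewVerifier("filippo.io/debian-archive+6c61b70b+Aaw9ASjgICSzfKJDcCqz7l3FtSpKvQYCvaRfdfOiIRun")
	if err != nil {
		log.Fatal(err)
	}

	// note.Open verifies the Ed25519 signature against the known verifier.
	n, err := note.Open(msg, note.VerifierList(v))
	if err != nil {
		log.Fatal("checkpoint signature verification failed: ", err)
	}

	// A c2sp.org/tlog-checkpoint body is three lines: origin, tree size, and
	// base64-encoded root hash.
	lines := strings.SplitN(n.Text, "\n", 4)
	if len(lines) < 3 {
		log.Fatal("malformed checkpoint")
	}
	fmt.Printf("origin: %s\nsize: %s\nroot hash: %s\n", lines[0], lines[1], lines[2])
}
```

This only checks the signature: a real client would also verify the Merkle inclusion proof in the bundle against the checkpoint's root hash (for example with `golang.org/x/mod/sumdb/tlog`).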
litetlog-0.4.3/cmd/apt-transport-tlog/fly.toml000066400000000000000000000011031476426515700213700ustar00rootroot00000000000000app = "debian-spicy-signatures" primary_region = "iad" [build] dockerfile = "Dockerfile" [[vm]] memory = "256mb" cpu_kind = "shared" cpus = 1 [env] RCLONE_CONFIG_TIGRIS_TYPE = "s3" RCLONE_CONFIG_TIGRIS_PROVIDER = "Other" RCLONE_CONFIG_TIGRIS_ENDPOINT = "https://fly.storage.tigris.dev" # RCLONE_CONFIG_TIGRIS_ACCESS_KEY_ID secret # RCLONE_CONFIG_TIGRIS_SECRET_ACCESS_KEY secret BUCKET = "tigris:debian-spicy-signatures" TLOG_KEY_PATH = "/etc/spicy/filippo-io-debian-archive.key" [[files]] guest_path = "/etc/spicy/filippo-io-debian-archive.key" secret_name = "TLOG_KEY_BODY" litetlog-0.4.3/cmd/apt-transport-tlog/tlog.py000077500000000000000000000413561476426515700212410ustar00rootroot00000000000000#!/usr/bin/python3 """ Derived from intoto.py by Lukas Puehringer . https://github.com/in-toto/apt-transport-in-toto/blob/81fd97/intoto.py Copyright 2018 New York University Copyright 2024 Filippo Valsorda Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Install this script as /usr/lib/apt/methods/tlog and make it executable. Change the apt sources.list to use tlog:// instead of https://. Requires the python3-requests package, and spicy in $PATH. """ import os import sys import signal import select import threading import logging import logging.handlers import requests import queue as Queue import subprocess # Configure base logger with lowest log level (i.e. log all messages) and # finetune the actual log levels on handlers logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) # A file handler for debugging purposes LOG_FILE = "/var/log/apt/tlog.log" LOG_HANDLER_FILE = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=100000) LOG_HANDLER_FILE.setLevel(logging.DEBUG) logger.addHandler(LOG_HANDLER_FILE) # A stream handler (stderr) LOG_HANDLER_STDERR = logging.StreamHandler() LOG_HANDLER_STDERR.setLevel(logging.INFO) logger.addHandler(LOG_HANDLER_STDERR) APT_METHOD_HTTPS = os.path.join(os.path.dirname(sys.argv[0]), "https") # Global interrupted boolean. Apt may send SIGINT if it is done with its work. # Upon reception we set INTERRUPTED to true, which may be used to gracefully # terminate. INTERRUPTED = False # TODO: Maybe we can replace the signal handler with a KeyboardInterrupt # try/except block in the main loop, for better readability. def signal_handler(*junk): # Set global INTERRUPTED flag telling worker threads to terminate logger.debug("Received SIGINT, setting global INTERRUPTED true") global INTERRUPTED INTERRUPTED = True # Global BROKENPIPE flag should be set to true, if a `write` or `flush` on a # stream raises a BrokenPipeError, to gracefully terminate reader threads. BROKENPIPE = False # APT Method Interface Message definition # The first line of each message is called the message header. The first 3 # digits (called the Status Code) have the usual meaning found in the http # protocol. 1xx is informational, 2xx is successful and 4xx is failure. 
The 6xx # series is used to specify things sent to the method. After the status code is # an informational string provided for visual debugging # Only the 6xx series of status codes is sent TO the method. Furthermore the # method may not emit status codes in the 6xx range. The Codes 402 and 403 # require that the method continue reading all other 6xx codes until the proper # 602/603 code is received. This means the method must be capable of handling # an unlimited number of 600 messages. # Message types by their status code. CAPABILITES = 100 LOG = 101 STATUS = 102 URI_START = 200 URI_DONE = 201 URI_FAILURE = 400 GENERAL_FAILURE = 401 AUTH_REQUIRED = 402 MEDIA_FAILURE = 403 URI_ACQUIRE = 600 CONFIGURATION = 601 AUTH_CREDENTIALS = 602 MEDIA_CHANGED = 603 MESSAGE_TYPE = { # Method capabilities CAPABILITES: "Capabilities", # General Logging LOG: "Log", # Inter-URI status reporting (logging progress) STATUS: "Status", # URI is starting acquire URI_START: "URI Start", # URI is finished acquire URI_DONE: "URI Done", # URI has failed to acquire URI_FAILURE: "URI Failure", # Method did not like something sent to it GENERAL_FAILURE: "General Failure", # Method requires authorization to access the URI. Authorization is User/Pass AUTH_REQUIRED: "Authorization Required", # Method requires a media change MEDIA_FAILURE: "Media Failure", # Request a URI be acquired URI_ACQUIRE: "URI Acquire", # Sends the configuration space CONFIGURATION: "Configuration", # Response to the 402 message AUTH_CREDENTIALS: "Authorization Credentials", # Response to the 403 message MEDIA_CHANGED: "Media Changed", } def deserialize_one(message_str): """Parse raw message string as it may be read from stdin and return a dictionary that contains message header status code and info and an optional fields dictionary of additional headers and their values. Raise Exception if the message is malformed. { "code": , "info": "", "fields": [ ("

", ""), ] } NOTE: Message field values are NOT deserialized here, e.g. the Last-Modified time stamp remains a string and Config-Item remains a string of item=value pairs. """ lines = message_str.splitlines() if not lines: raise Exception("Invalid empty message:\n{}".format(message_str)) # Deserialize message header message_header = lines.pop(0) message_header_parts = message_header.split() # TODO: Are we too strict about the format (should we not care about info?) if len(message_header_parts) < 2: raise Exception( "Invalid message header: {}, message was:\n{}".format( message_header, message_str ) ) code = None try: code = int(message_header_parts.pop(0)) except ValueError: pass if not code or code not in list(MESSAGE_TYPE.keys()): raise Exception( "Invalid message header status code: {}, message was:\n{}".format( code, message_str ) ) # TODO: Are we too strict about the format (should we not care about info?) info = " ".join(message_header_parts).strip() if info != MESSAGE_TYPE[code]: raise Exception( "Invalid message header info for status code {}:\n{}," " message was: {}".format(code, info, message_str) ) # TODO: Should we assert that the last line is a blank line? if lines and not lines[-1]: lines.pop() # Deserialize header fields header_fields = [] for line in lines: header_field_parts = line.split(":") if len(header_field_parts) < 2: raise Exception( "Invalid header field: {}, message was:\n{}".format(line, message_str) ) field_name = header_field_parts.pop(0).strip() field_value = ":".join(header_field_parts).strip() header_fields.append((field_name, field_value)) # Construct message data message_data = {"code": code, "info": info} if header_fields: message_data["fields"] = header_fields return message_data def serialize_one(message_data): """Create a message string that may be written to stdout. Message data is expected to have the following format: { "code": , "info": "", "fields": [ ("
", ""), ] } """ message_str = "" # Code must be present code = message_data["code"] # Convenience (if info not present, info for code is used ) info = message_data.get("info") or MESSAGE_TYPE[code] # Add message header message_str += "{} {}\n".format(code, info) # Add message header fields and values (must be list of tuples) for field_name, field_value in message_data.get("fields", []): message_str += "{}: {}\n".format(field_name, field_value) # Blank line to mark end of message message_str += "\n" return message_str def read_one(stream): """Read one apt related message from the passed stream, e.g. sys.stdin for messages from apt, or subprocess.stdout for messages from a transport that we open in a subprocess. The end of a message (EOM) is denoted by a blank line ("\n") and end of file (EOF) is denoted by an empty line. Returns either a message including a trailing blank line or None on EOF. """ message_str = "" # Read from stream until we get a SIGINT/BROKENPIPE, or reach EOF (see below) # TODO: Do we need exception handling for the case where we select/read from # a stream that was closed? If so, we should do it in the main loop for # better readability. while not (INTERRUPTED or BROKENPIPE): # pragma: no branch # Only read if there is data on the stream (non-blocking) if not select.select([stream], [], [], 0)[0]: continue # Read one byte from the stream one = os.read(stream.fileno(), 1).decode() # Break on EOF if not one: break # If we read something append it to the message string message_str += one # Break on EOM (and return message below) if len(message_str) >= 2 and message_str[-2:] == "\n\n": break # Return a message if there is one, otherwise return None if message_str: return message_str return None def write_one(message_str, stream): """Write the passed message to the passed stream.""" try: stream.write(message_str) stream.flush() except BrokenPipeError: # TODO: Move exception handling to main loop for better readability global BROKENPIPE BROKENPIPE = True logger.debug( "BrokenPipeError while writing '{}' to '{}'.".format(message_str, stream) ) # Python flushes standard streams on exit; redirect remaining output # to devnull to avoid another BrokenPipeError at shutdown # See https://docs.python.org/3/library/signal.html#note-on-sigpipe devnull = os.open(os.devnull, os.O_WRONLY) os.dup2(devnull, sys.stdout.fileno()) def notify_apt(code, message_text, uri): # Escape LF and CR characters in message bodies to not break the protocol message_text = message_text.replace("\n", "\\n").replace("\r", "\\r") # NOTE: The apt method interface spec references RFC822, which doesn't allow # LF or CR in the message body, except if followed by a LWSP-char (i.e. SPACE # or HTAB, for "folding" of long lines). But apt does not seem to support # folding, and splits lines only at LF. To be safe we escape LF and CR. # See 2.1 Overview in www.fifi.org/doc/libapt-pkg-doc/method.html/ch2.html # See "3.1.1. LONG HEADER FIELDS" and "3.1.2. STRUCTURE OF HEADER FIELDS" in # www.ietf.org/rfc/rfc822.txt write_one( serialize_one( { "code": code, "info": MESSAGE_TYPE[code], "fields": [("Message", message_text), ("URI", uri)], } ), sys.stdout, ) def read_to_queue(stream, queue): """Loop to read messages one at a time from the passed stream until EOF, i.e. the returned message is None, and write to the passed queue. 
""" while True: msg = read_one(stream) if not msg: return None queue.put(msg) def handle(message_data): logger.debug("Handling message: {}".format(message_data["code"])) if message_data["code"] == CAPABILITES: # TODO(filippo): intercept Capabilities messages to avoid future # features bypassing verification. return True elif message_data["code"] == URI_ACQUIRE: # TODO(filippo): redirect InRelease file fetches to the tlog server. return True elif message_data["code"] == URI_DONE: # TODO(filippo): catch exceptions, print stack trace to stderr, and # notify URI_FAILURE to apt. filename = dict(message_data["fields"]).get("Filename", "") uri = dict(message_data["fields"]).get("URI", "") hit = dict(message_data["fields"]).get("IMS-Hit", "") # TODO(filippo): use Target-Type or Index-File from the URI_ACQUIRE. if not uri.endswith("/InRelease"): return True if hit == "true": return True notify_apt(STATUS, "Fetching InRelease file spicy signature", uri) spicy_uri = ( "https://debian-spicy-signatures.fly.storage.tigris.dev/debian/" + uri.split("/dists/")[-1] + ".spicy" ) logger.debug("Spicy sig URL: {}".format(spicy_uri)) r = requests.get(spicy_uri) r.raise_for_status() with open(filename + ".spicy", "wb") as f: f.write(r.content) notify_apt(STATUS, "Verifying InRelease file spicy signature", uri) subprocess.check_output( [ "spicy", "-verify", "filippo.io/debian-archive+6c61b70b+Aaw9ASjgICSzfKJDcCqz7l3FtSpKvQYCvaRfdfOiIRun", filename, ], stderr=subprocess.STDOUT, ) logger.debug("Verified {} 🌶️".format(uri.split("/dists/")[-1])) # TODO(filippo): use fields from the URI_ACQUIRE. base = uri.split("/dists/")[0] dist = uri.split("/dists/")[1].split("/")[0] print("\r 🌶️ {} {} InRelease.spicy".format(base, dist), file=sys.stderr) return True else: return True def loop(): """Main tlog https transport method loop to relay messages between apt and the apt https transport method and inject spicy verification upon reception of a particular message. """ # Start https transport in a subprocess # Messages from the parent process received on sys.stdin are relayed to the # subprocess' stdin and vice versa, messages written to the subprocess' # stdout are relayed to the parent via sys.stdout. https_proc = subprocess.Popen( [APT_METHOD_HTTPS], stdin=subprocess.PIPE, # nosec stdout=subprocess.PIPE, universal_newlines=True, ) # HTTPS transport message reader thread to add messages from the https # transport (subprocess) to a corresponding queue. https_queue = Queue.Queue() https_thread = threading.Thread( target=read_to_queue, args=(https_proc.stdout, https_queue) ) # APT message reader thread to add messages from apt (parent process) # to a corresponding queue. apt_queue = Queue.Queue() apt_thread = threading.Thread(target=read_to_queue, args=(sys.stdin, apt_queue)) # Start reader threads. # They will run until they see an EOF on their stream, or the global # INTERRUPTED or BROKENPIPE flags are set to true. https_thread.start() apt_thread.start() # Main loop to get messages from queues, i.e. apt queue and https transport # queue, and relay them to the corresponding streams, injecting verification. while True: for name, queue, out in [ ("apt", apt_queue, https_proc.stdin), ("https", https_queue, sys.stdout), ]: should_relay = True try: message = queue.get_nowait() logger.debug("{} sent message:\n{}".format(name, message)) message_data = deserialize_one(message) except Queue.Empty: continue # De-serialization error: Skip message handling, but do relay. except Exception as e: # TODO(filippo): this is insecure, fail closed. 
logger.debug("Cannot handle message, reason is {}".format(e)) else: logger.debug("Handle message") should_relay = handle(message_data) if should_relay: logger.debug("Relay message") write_one(message, out) # Exit when both threads have terminated (EOF, INTERRUPTED or BROKENPIPE) # NOTE: We do not check if there are still messages on the streams or # in the queue, assuming that there aren't or we can ignore them if both # threads have terminated. if not apt_thread.is_alive() and not https_thread.is_alive(): logger.debug( "The worker threads are dead. Long live the worker threads!" "Terminating." ) # If INTERRUPTED or BROKENPIPE are true it (likely?) means that apt # sent a SIGINT or closed the pipe we were writing to. This means we # should exit and tell the http child process to exit too. # TODO: Could it be that the http child closed a pipe or sent a SITERM? # TODO: Should we behave differently for the two signals? if INTERRUPTED or BROKENPIPE: # pragma: no branch logger.debug("Relay SIGINT to http subprocess") https_proc.send_signal(signal.SIGINT) return if __name__ == "__main__": signal.signal(signal.SIGINT, signal_handler) loop() litetlog-0.4.3/cmd/apt-transport-tlog/update-bucket.sh000066400000000000000000000010571476426515700230020ustar00rootroot00000000000000#!/bin/bash set -xeuo pipefail cd "$(mktemp -d)" rclone -v sync "$BUCKET" . cd debian while true; do sleep 60; date updated=$(rsync debian.csail.mit.edu::debian/dists/ ./ \ --include '*/' --include InRelease --exclude '*' \ --prune-empty-dirs --copy-links \ --out-format='%n' --recursive --times | grep 'InRelease$') || \ continue while IFS= read -r f; do rm "$f.spicy" done <<< "$updated" xargs spicy -assets ../log -key "$TLOG_KEY_PATH" <<< "$updated" rclone -v sync .. "$BUCKET" done litetlog-0.4.3/cmd/litebastion/000077500000000000000000000000001476426515700164425ustar00rootroot00000000000000litetlog-0.4.3/cmd/litebastion/litebastion.go000066400000000000000000000125501476426515700213110ustar00rootroot00000000000000// Command litebastion runs a reverse proxy service that allows un-addressable // applications (for example those running behind a firewall or a NAT, or where // the operator doesn't wish to take the DoS risk of being reachable from the // Internet) to accept HTTP requests. // // Backends are identified by an Ed25519 public key, they authenticate with a // self-signed TLS 1.3 certificate, and are reachable at a sub-path prefixed by // the key hash. // // Read more at https://c2sp.org/https-bastion. 
package main import ( "context" "crypto/sha256" "crypto/tls" "encoding/hex" "flag" "fmt" "log/slog" "net/http" "os" "os/signal" "strings" "sync" "syscall" "time" "filippo.io/litetlog/bastion" "filippo.io/litetlog/internal/slogconsole" "golang.org/x/crypto/acme" "golang.org/x/crypto/acme/autocert" "golang.org/x/net/http2" ) var listenAddr = flag.String("listen", "localhost:8443", "host and port to listen at") var testCertificates = flag.Bool("testcert", false, "use localhost.pem and localhost-key.pem instead of ACME") var autocertCache = flag.String("cache", "", "directory to cache ACME certificates at") var autocertHost = flag.String("host", "", "host to obtain ACME certificate for") var autocertEmail = flag.String("email", "", "") var allowedBackendsFile = flag.String("backends", "", "file of accepted key hashes, one per line, reloaded on SIGHUP") var homeRedirect = flag.String("home-redirect", "", "redirect / to this URL") type keyHash [sha256.Size]byte func main() { flag.Parse() console := slogconsole.New(nil) console.SetFilter(slogconsole.IPAddressFilter) h := slog.NewTextHandler(os.Stderr, nil) slog.SetDefault(slog.New(slogconsole.MultiHandler(h, console))) http2.VerboseLogs = true // will go to DEBUG due to SetLogLoggerLevel slog.SetLogLoggerLevel(slog.LevelDebug) var getCertificate func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) if *testCertificates { cert, err := tls.LoadX509KeyPair("localhost.pem", "localhost-key.pem") if err != nil { logFatal("can't load test certificates", "err", err) } getCertificate = func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { return &cert, nil } } else { if *autocertCache == "" || *autocertHost == "" || *autocertEmail == "" { logFatal("-cache, -host, and -email or -testcert are required") } m := &autocert.Manager{ Cache: autocert.DirCache(*autocertCache), Prompt: autocert.AcceptTOS, Email: *autocertEmail, HostPolicy: autocert.HostWhitelist(*autocertHost), } getCertificate = m.GetCertificate } if *allowedBackendsFile == "" { logFatal("-backends is missing") } var allowedBackendsMu sync.RWMutex var allowedBackends map[keyHash]bool reloadBackends := func() error { newBackends := make(map[keyHash]bool) backendsList, err := os.ReadFile(*allowedBackendsFile) if err != nil { return err } bs := strings.TrimSpace(string(backendsList)) for _, line := range strings.Split(bs, "\n") { l, err := hex.DecodeString(line) if err != nil { return fmt.Errorf("invalid backend: %q", line) } if len(l) != sha256.Size { return fmt.Errorf("invalid backend: %q", line) } h := keyHash(l) newBackends[h] = true } allowedBackendsMu.Lock() defer allowedBackendsMu.Unlock() allowedBackends = newBackends return nil } if err := reloadBackends(); err != nil { logFatal("failed to load backends", "err", err) } slog.Info("loaded backends", "count", len(allowedBackends)) b, err := bastion.New(&bastion.Config{ AllowedBackend: func(keyHash [sha256.Size]byte) bool { allowedBackendsMu.RLock() defer allowedBackendsMu.RUnlock() return allowedBackends[keyHash] }, GetCertificate: getCertificate, }) if err != nil { logFatal("failed to create bastion", "err", err) } c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGHUP) go func() { for range c { if err := reloadBackends(); err != nil { slog.Error("failed to reload backends", "err", err) } else { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) b.FlushBackendConnections(ctx) cancel() slog.Info("reloaded backends") } } }() mux := http.NewServeMux() mux.Handle("/", b) mux.Handle("/logz", console) if 
*homeRedirect != "" { mux.HandleFunc("/{$}", func(w http.ResponseWriter, r *http.Request) { http.Redirect(w, r, *homeRedirect, http.StatusFound) }) } hs := &http.Server{ Addr: *listenAddr, Handler: http.MaxBytesHandler(mux, 10*1024), ReadTimeout: 5 * time.Second, WriteTimeout: 5 * time.Second, TLSConfig: &tls.Config{ NextProtos: []string{acme.ALPNProto}, GetCertificate: getCertificate, }, } if err := b.ConfigureServer(hs); err != nil { logFatal("failed to configure bastion", "err", err) } if err := http2.ConfigureServer(hs, nil); err != nil { logFatal("failed to configure HTTP/2", "err", err) } slog.Info("listening", "addr", *listenAddr) ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) defer stop() e := make(chan error, 1) go func() { e <- hs.ListenAndServeTLS("", "") }() select { case <-ctx.Done(): slog.Info("shutting down on interrupt") ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() hs.Shutdown(ctx) case err := <-e: slog.Error("server error", "err", err) } } func logFatal(msg string, args ...interface{}) { slog.Error(msg, args...) os.Exit(1) } litetlog-0.4.3/cmd/litewitness/000077500000000000000000000000001476426515700164775ustar00rootroot00000000000000litetlog-0.4.3/cmd/litewitness/litewitness.go000066400000000000000000000206061476426515700214040ustar00rootroot00000000000000package main import ( "context" "crypto" "crypto/ed25519" "crypto/rand" "crypto/sha256" "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/hex" "errors" "flag" "fmt" "html" "io" "log/slog" "math/big" "net" "net/http" "os" "os/signal" "strings" "syscall" "time" "crawshaw.io/sqlite" "crawshaw.io/sqlite/sqlitex" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" "golang.org/x/net/http2" "filippo.io/litetlog/internal/slogconsole" "filippo.io/litetlog/internal/witness" ) var nameFlag = flag.String("name", "", "URL-like (e.g. 
example.com/foo) name of this witness") var dbFlag = flag.String("db", "litewitness.db", "path to sqlite database") var sshAgentFlag = flag.String("ssh-agent", "litewitness.sock", "path to ssh-agent socket") var listenFlag = flag.String("listen", "localhost:7380", "address to listen for HTTP requests") var keyFlag = flag.String("key", "", "SSH fingerprint (with SHA256: prefix) of the witness key") var bastionFlag = flag.String("bastion", "", "address of the bastion(s) to reverse proxy through, comma separated, the first online one is selected") var testCertFlag = flag.Bool("testcert", false, "use rootCA.pem for connections to the bastion") func main() { flag.Parse() var level = new(slog.LevelVar) h := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: level}) console := slogconsole.New(nil) console.SetFilter(slogconsole.IPAddressFilter) slog.SetDefault(slog.New(slogconsole.MultiHandler(h, console))) c := make(chan os.Signal, 1) signal.Notify(c, syscall.SIGUSR1) go func() { for range c { slog.Info("received USR1 signal, toggling log level") if level.Level() == slog.LevelDebug { level.Set(slog.LevelInfo) } else { level.Set(slog.LevelDebug) } } }() signer := connectToSSHAgent() w, err := witness.NewWitness(*dbFlag, *nameFlag, signer, slog.Default()) if err != nil { fatal("creating witness", "err", err) } slog.Info("verifier key", "vkey", w.VerifierKey()) ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) defer stop() mux := http.NewServeMux() mux.Handle("/", w) mux.Handle("/logz", console) mux.Handle("/{$}", indexHandler(w)) srv := &http.Server{ Addr: *listenFlag, Handler: http.MaxBytesHandler(mux, 10*1024), ReadTimeout: 5 * time.Second, WriteTimeout: 5 * time.Second, BaseContext: func(net.Listener) context.Context { return ctx }, } e := make(chan error, 1) if *bastionFlag != "" { go func() { for _, bastion := range strings.Split(*bastionFlag, ",") { err := connectToBastion(ctx, bastion, signer, srv) if err == errBastionDisconnected { // Connection succeeded and then was interrupted. Restart to // let the scheduler apply any backoff, and then retry all bastions. e <- err return } } e <- errors.New("couldn't connect to any bastion") }() } else { go func() { slog.Info("listening", "addr", *listenFlag) e <- srv.ListenAndServe() }() } select { case <-ctx.Done(): slog.Info("shutting down") ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() srv.Shutdown(ctx) case err := <-e: fatal("server error", "err", err) } } func connectToSSHAgent() *signer { conn, err := net.Dial("unix", *sshAgentFlag) if err != nil { fatal("dialing ssh-agent", "err", err) } a := agent.NewClient(conn) signers, err := a.Signers() if err != nil { fatal("getting keys from ssh-agent", "err", err) } slog.Info("connected to ssh-agent", "addr", *sshAgentFlag) var signer *signer var keys []string for _, s := range signers { if s.PublicKey().Type() != ssh.KeyAlgoED25519 { continue } ss, err := newSigner(s) if err != nil { fatal("new signer", "err", err) } if ssh.FingerprintSHA256(s.PublicKey()) == *keyFlag { signer = ss break } // For backwards compatibility, also accept a hex-encoded SHA-256 hash // of the public key, which is what -key used to be. 
hh := sha256.Sum256(ss.Public().(ed25519.PublicKey)) h := hex.EncodeToString(hh[:]) if h == *keyFlag { signer = ss break } keys = append(keys, h) } if signer == nil { fatal("ssh-agent does not contain Ed25519 key", "expected", *keyFlag, "found", keys) } slog.Info("found key", "fingerprint", *keyFlag) return signer } type signer struct { s ssh.Signer p ed25519.PublicKey } func newSigner(s ssh.Signer) (*signer, error) { // agent.Key doesn't implement ssh.CryptoPublicKey. k, err := ssh.ParsePublicKey(s.PublicKey().Marshal()) if err != nil { return nil, errors.New("internal error: ssh public key can't be parsed") } ck, ok := k.(ssh.CryptoPublicKey) if !ok { return nil, errors.New("internal error: ssh public key can't be retrieved") } pk, ok := ck.CryptoPublicKey().(ed25519.PublicKey) if !ok { return nil, errors.New("internal error: ssh public key type is not Ed25519") } return &signer{s: s, p: pk}, nil } func (s *signer) Public() crypto.PublicKey { return s.p } func (s *signer) Sign(rand io.Reader, data []byte, opts crypto.SignerOpts) (signature []byte, err error) { if opts.HashFunc() != crypto.Hash(0) { return nil, errors.New("expected crypto.Hash(0)") } sig, err := s.s.Sign(rand, data) if err != nil { return nil, err } return sig.Blob, nil } const indexHeader = ` litewitness
<pre>
`

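// indexHandler serves a small home page listing the witness name, its
// verifier key, and the known logs read from the database.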
func indexHandler(w *witness.Witness) http.HandlerFunc {
	return func(rw http.ResponseWriter, r *http.Request) {
		db, err := witness.OpenDB(*dbFlag)
		if err != nil {
			http.Error(rw, "internal error", http.StatusInternalServerError)
			return
		}
		defer db.Close()

		rw.Header().Set("Content-Type", "text/html; charset=utf-8")
		io.WriteString(rw, indexHeader)
		fmt.Fprintf(rw, "# litewitness %s\n\n", html.EscapeString(*nameFlag))
		fmt.Fprintf(rw, "%s\n\n", html.EscapeString(w.VerifierKey()))
		fmt.Fprintf(rw, "## Logs\n\n")
		sqlitex.Exec(db, "SELECT origin, tree_size, tree_hash FROM log",
			func(stmt *sqlite.Stmt) error {
				fmt.Fprintf(rw, "- %s\n  (size %d, root %s)\n\n",
					html.EscapeString(stmt.ColumnText(0)),
					stmt.ColumnInt64(1), stmt.ColumnText(2))
				return nil
			},
		)
	}
}

var errBastionDisconnected = errors.New("connection to bastion interrupted")

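// connectToBastion dials the bastion with a self-signed client certificate
// for the witness key and serves srv's handler over the reversed HTTP/2
// connection. It returns errBastionDisconnected once an established
// connection is interrupted.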
func connectToBastion(ctx context.Context, bastion string, signer *signer, srv *http.Server) error {
	slog.Info("connecting to bastion", "bastion", bastion)
	cert, err := selfSignedCertificate(signer)
	if err != nil {
		fatal("generating self-signed certificate", "err", err)
	}
	dialCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	var roots *x509.CertPool
	if *testCertFlag {
		roots = x509.NewCertPool()
		root, err := os.ReadFile("rootCA.pem")
		if err != nil {
			fatal("reading test root", "err", err)
		}
		roots.AppendCertsFromPEM(root)
	}
	conn, err := (&tls.Dialer{
		Config: &tls.Config{
			Certificates: []tls.Certificate{{
				Certificate: [][]byte{cert},
				PrivateKey:  signer,
			}},
			MinVersion: tls.VersionTLS13,
			MaxVersion: tls.VersionTLS13,
			NextProtos: []string{"bastion/0"},
			RootCAs:    roots,
		},
	}).DialContext(dialCtx, "tcp", bastion)
	if err != nil {
		slog.Info("connecting to bastion failed", "bastion", bastion, "err", err)
		return fmt.Errorf("connecting to bastion: %v", err)
	}
	slog.Info("connected to bastion", "bastion", bastion)
	// TODO: find a way to surface the fatal error, especially since with
	// TLS 1.3 it might be that the bastion rejected the client certificate.
	(&http2.Server{
		CountError: func(errType string) {
			slog.Debug("HTTP/2 server error", "type", errType)
		},
	}).ServeConn(conn, &http2.ServeConnOpts{
		Context:    ctx,
		BaseConfig: srv,
		Handler:    srv.Handler,
	})
	return errBastionDisconnected
}

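// selfSignedCertificate returns a short-lived, DER-encoded self-signed
// certificate for key, used to authenticate to the bastion as a TLS client.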
func selfSignedCertificate(key crypto.Signer) ([]byte, error) {
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "litewitness"},
		NotBefore:    time.Now().Add(-1 * time.Hour),
		NotAfter:     time.Now().Add(24 * time.Hour),
		KeyUsage:     x509.KeyUsageDigitalSignature,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	}
	return x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key)
}

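// fatal logs msg and args at the error level and exits the process.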
func fatal(msg string, args ...any) {
	slog.Error(msg, args...)
	os.Exit(1)
}
litetlog-0.4.3/cmd/litewitness/litewitness_test.go000066400000000000000000000051451476426515700224440ustar00rootroot00000000000000package main

import (
	"crypto/tls"
	"net"
	"net/http"
	"os"
	"os/exec"
	"path/filepath"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/rogpeppe/go-internal/testscript"
)

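// TestMain registers litewitness as a testscript command so the scripts in
// testdata can run it directly.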
func TestMain(m *testing.M) {
	os.Exit(testscript.RunMain(m, map[string]func() int{
		"litewitness": func() (exitCode int) {
			main()
			return 0
		},
	}))
}

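// TestScript builds witnessctl and litebastion into the test PATH and runs
// the txtar scripts in testdata, adding the waitfor, killall, and linecount
// commands used by those scripts.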
func TestScript(t *testing.T) {
	p := testscript.Params{
		Dir: "testdata",
		Setup: func(e *testscript.Env) error {
			bindir := filepath.SplitList(os.Getenv("PATH"))[0]
			// Coverage is not collected because of https://go.dev/issue/60182.
			cmd := exec.Command("go", "build", "-o", bindir)
			if testing.CoverMode() != "" {
				cmd.Args = append(cmd.Args, "-cover")
			}
			cmd.Args = append(cmd.Args, "filippo.io/litetlog/cmd/witnessctl")
			cmd.Args = append(cmd.Args, "filippo.io/litetlog/cmd/litebastion")
			cmd.Stdout = os.Stdout
			cmd.Stderr = os.Stderr
			return cmd.Run()
		},
		Cmds: map[string]func(ts *testscript.TestScript, neg bool, args []string){
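			// waitfor polls an HTTP(S) URL or a TCP/unix address until it
			// responds, failing the script after about five seconds.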
			"waitfor": func(ts *testscript.TestScript, neg bool, args []string) {
				if len(args) != 1 {
					ts.Fatalf("usage: waitfor <address>")
				}
				if strings.HasPrefix(args[0], "http") {
					var lastErr error
					for i := 0; i < 50; i++ {
						t := http.DefaultTransport.(*http.Transport).Clone()
						t.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
						r, err := (&http.Client{Transport: t}).Get(args[0])
						if err == nil && r.StatusCode != http.StatusBadGateway {
							return
						}
						time.Sleep(100 * time.Millisecond)
						lastErr = err
					}
					ts.Fatalf("timeout waiting for %s: %v", args[0], lastErr)
				}
				protocol := "unix"
				if strings.Contains(args[0], ":") {
					protocol = "tcp"
				}
				var lastErr error
				for i := 0; i < 50; i++ {
					conn, err := net.Dial(protocol, args[0])
					if err == nil {
						conn.Close()
						return
					}
					time.Sleep(100 * time.Millisecond)
					lastErr = err
				}
				ts.Fatalf("timeout waiting for %s: %v", args[0], lastErr)
			},
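			// killall sends an interrupt to every background process started
			// by the script.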
			"killall": func(ts *testscript.TestScript, neg bool, args []string) {
				for _, cmd := range ts.BackgroundCmds() {
					cmd.Process.Signal(os.Interrupt)
				}
			},
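			// linecount asserts that the named file contains exactly N lines.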
			"linecount": func(ts *testscript.TestScript, neg bool, args []string) {
				if len(args) != 2 {
					ts.Fatalf("usage: linecount <file> N")
				}
				count, err := strconv.Atoi(args[1])
				if err != nil {
					ts.Fatalf("invalid count: %v", args[1])
				}
				if got := strings.Count(ts.ReadFile(args[0]), "\n"); got != count {
					ts.Fatalf("%v has %d lines, not %d", args[0], got, count)
				}
			},
		},
	}
	testscript.Run(t, p)
}
litetlog-0.4.3/cmd/litewitness/systemd/000077500000000000000000000000001476426515700201675ustar00rootroot00000000000000litetlog-0.4.3/cmd/litewitness/systemd/litewitness-ssh-agent.service000066400000000000000000000004471476426515700260170ustar00rootroot00000000000000[Unit]
Description=Litewitness SSH Agent
StartLimitIntervalSec=0

[Service]
Environment=SSH_AUTH_SOCK=/var/run/litewitness.sock
ExecStart=/usr/bin/ssh-agent -D -a $SSH_AUTH_SOCK
ExecStartPost=/usr/bin/ssh-add /etc/litewitness/litewitness.pem
Restart=always
RestartSteps=10
RestartMaxDelaySec=1m
litetlog-0.4.3/cmd/litewitness/systemd/litewitness.service000066400000000000000000000006631476426515700241300ustar00rootroot00000000000000[Unit]
Description=Litewitness Transparency Log Witness
Wants=network-online.target litewitness-ssh-agent.service
StartLimitIntervalSec=0

[Service]
EnvironmentFile=/etc/litewitness/litewitness.conf
ExecStart=/usr/local/bin/litewitness -name "$ORIGIN" -key "$KEY" \
	-db /var/lib/litewitness/litewitness.db -ssh-agent /var/run/litewitness.sock
Restart=always
RestartSteps=10
RestartMaxDelaySec=1m

[Install]
WantedBy=multi-user.target
litetlog-0.4.3/cmd/litewitness/testdata/000077500000000000000000000000001476426515700203105ustar00rootroot00000000000000litetlog-0.4.3/cmd/litewitness/testdata/bastion.txt000066400000000000000000000175071476426515700225220ustar00rootroot00000000000000# gentest seed b4e385f4358f7373cfa9184b176f3cccf808e795baf04092ddfde9461014f0c4

# set up log
exec witnessctl add-sigsum-log -key=ffdc2d4d98e4124d3feaf788c0c2f9abfd796083d1f0495437f302ec79cf100f

# start bastion
exec litebastion -testcert -backends=backends.txt &litebastion&
waitfor localhost:8443

# start ssh-agent
env SSH_AUTH_SOCK=$WORK/s # barely below the max path length
! exec ssh-agent -a $SSH_AUTH_SOCK -D & # ssh-agent always exits 2
waitfor $SSH_AUTH_SOCK
chmod 600 witness_key.pem
exec ssh-add witness_key.pem

# fail to start litewitness
! exec litewitness -ssh-agent=$SSH_AUTH_SOCK -name=example.com/witness -bastion=0.0.0.0:443,localhost:8443 -testcert -key=e933707e0e36c30f01d94b5d81e742da373679d88eb0f85f959ccd80b83b992a

# reload backends
mv correct_backends.txt backends.txt
exec killall -SIGHUP litebastion

# start litewitness
exec litewitness -ssh-agent=$SSH_AUTH_SOCK -name=example.com/witness -bastion=0.0.0.0:443,localhost:8443 -testcert -key=e933707e0e36c30f01d94b5d81e742da373679d88eb0f85f959ccd80b83b992a &litewitness&
waitfor https://localhost:8443/e933707e0e36c30f01d94b5d81e742da373679d88eb0f85f959ccd80b83b992a/

# add-checkpoint
exec hurl --cacert rootCA.pem --test add-checkpoint.hurl

# check that litewitness shut down cleanly
killall
wait litewitness
stderr 'shutting down'

# check the litebastion output
killall
wait litebastion
stderr 'reloaded backends'
stderr 'msg="accepted new backend connection" backend=e933707e0e36c30f01d94b5d81e742da373679d88eb0f85f959ccd80b83b992a'

# witnessctl list-logs
exec witnessctl list-logs
stdout sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562
stdout "size":1


-- backends.txt --
f97f12534ff2478cfc36b00d09a85d4faeb6589ac19a0895c348a499627c531c


-- correct_backends.txt --
f97f12534ff2478cfc36b00d09a85d4faeb6589ac19a0895c348a499627c531c
e933707e0e36c30f01d94b5d81e742da373679d88eb0f85f959ccd80b83b992a


-- witness_key.pem --
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtz
c2gtZWQyNTUxOQAAACBkhIrYq+1uhZgbOzh1slK4dn67SwL3A6yjsecbvWqOUAAA
AIgN5+09DeftPQAAAAtzc2gtZWQyNTUxOQAAACBkhIrYq+1uhZgbOzh1slK4dn67
SwL3A6yjsecbvWqOUAAAAEAx/8IRbsvgA6yqgAq3B1e9fVMgbj/r72ptB5bZVTCz
T2SEitir7W6FmBs7OHWyUrh2frtLAvcDrKOx5xu9ao5QAAAAAAECAwQF
-----END OPENSSH PRIVATE KEY-----


-- localhost.pem --
-----BEGIN CERTIFICATE-----
MIIEZDCCAsygAwIBAgIQDT4i6cBMluQ8BpOZSCHjfzANBgkqhkiG9w0BAQsFADCB
mTEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMTcwNQYDVQQLDC5maWxp
cHBvQEJpc3Ryb21hdGgtTTIubG9jYWwgKEZpbGlwcG8gVmFsc29yZGEpMT4wPAYD
VQQDDDVta2NlcnQgZmlsaXBwb0BCaXN0cm9tYXRoLU0yLmxvY2FsIChGaWxpcHBv
IFZhbHNvcmRhKTAeFw0yMzA1MTYxNjIxNDBaFw0yNTA4MTYxNjIxNDBaMGIxJzAl
BgNVBAoTHm1rY2VydCBkZXZlbG9wbWVudCBjZXJ0aWZpY2F0ZTE3MDUGA1UECwwu
ZmlsaXBwb0BCaXN0cm9tYXRoLU0yLmxvY2FsIChGaWxpcHBvIFZhbHNvcmRhKTCC
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK0ET3Kvs5ZgYVMUZRXtXoCd
SXT3+8AVRVLcHDP3Xak7CmM0K60WXwNcgunxKkRfFmalmQ80rtwv5JqEO9cBo/SB
h9iDaG13VzHAlWlOablDGWd8qpuVGBsYFMhpuugRvUR0BfV+C6sSOJN97DsVuRp/
WTuYl1Uoo1RxhXb/1iDH5CdZbHj9QjaBPcBP7a2pJHTGo1lOcjQjI0ViTZVCdvhQ
vSn39wLNUQSw/5aPdkmGYp2XmcDMT+rAhc9GUZRlEdAvEVRwuWHuGwc7S9rcKvPp
5eDG4QRFAegtb0yn269AOgO7bzITg+DsLZvKfux5lSJ+OOk6CSCLEYVBXpE600EC
AwEAAaNeMFwwDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMB8G
A1UdIwQYMBaAFLzVWGYf7kr87KO4lBicbMWLIQXeMBQGA1UdEQQNMAuCCWxvY2Fs
aG9zdDANBgkqhkiG9w0BAQsFAAOCAYEAWUCV0tUZGtrk6no3CEkJCDtCFD5gcaos
JU3ZyUIij4NGzCufZFhf43YU4KHJwE3T3gsAGfECJl1yuDDq2Gm+kDHTPOX2/Fsa
vF5GiiPfqN+lV/zHXFWMlwFouQupe42klZdVed2vH+M6i/iJt+g3GPf7gaU0cw7p
YT3etZjkGmorroHQY9xKATpyIIZGog6dnpgGEKKHpATCyTEsUAI8prrYYVDK2GYW
bDSFL5o8/XABoQOo0aUljobt02VvTA/lb4bSihhphUzdIvCWa//hNTJ52V2lT1kY
b3qwGySv+OsEQI7SUaYB2VnePDUH4GLSELnNII/4fT4qGjt23HlXTOExPBtuJT9Q
iQFIetf9W3r+WiAZLZ81KQ5gcBsJTFvZkmMPSJ5AZc/iPMi7KE+40cwVfJIt9XhO
O1mpOc50R0XT9DfAZd76NbySq/DbvOaaXoKLbOJOHLM+6RQSq96dVYJnu2pQoS+8
sNNtZChxQBsO83R+ZisAn7tJg7gql5uk
-----END CERTIFICATE-----


-- localhost-key.pem --
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCtBE9yr7OWYGFT
FGUV7V6AnUl09/vAFUVS3Bwz912pOwpjNCutFl8DXILp8SpEXxZmpZkPNK7cL+Sa
hDvXAaP0gYfYg2htd1cxwJVpTmm5QxlnfKqblRgbGBTIabroEb1EdAX1fgurEjiT
few7Fbkaf1k7mJdVKKNUcYV2/9Ygx+QnWWx4/UI2gT3AT+2tqSR0xqNZTnI0IyNF
Yk2VQnb4UL0p9/cCzVEEsP+Wj3ZJhmKdl5nAzE/qwIXPRlGUZRHQLxFUcLlh7hsH
O0va3Crz6eXgxuEERQHoLW9Mp9uvQDoDu28yE4Pg7C2byn7seZUifjjpOgkgixGF
QV6ROtNBAgMBAAECggEAdABD1D0BR31vQ2iVLdJsXgcWzRnG7M5WIGv7bMwZO/Gd
0U2VSwOIyJQqIffTwuuN9Vxp9sv3wSFLQF4QUOOErdZ64iuYxGwTRgQ9KNGuCMx4
J+5p5M5dzyyiIozhB9986nxv0lykLhqVXC49SJAT/zQK++4e6LXKbD+AoHx/cJH6
B72M9wERF7O0FYF+M7KwGYXoWbWkzaf4K1t/eUrvmVRqW35DnJAr3tZS6lgY891n
dJriA79T9fFACQuW2rQRr5tV+p3CoRk/hKdRRstl5AfAWheBLWu+g4Fx6qc0y0yZ
x4poYR/h+YFGA/g3ib/hLUZq8WlawP11QzCbTArr0QKBgQDEuFiseDN9So3u595d
g7fQukamRO3N1/06SV0M6v58sKRObQDOQZQOtna9omXR4mOgKK2arew45v6h/uiP
5PcNwN/2B1eIWqOJd/pYO9yAOs6O43Mgx9jNbue4+FtS7Ei/4OSuaun0+PNfeSfj
oS/lzQzc7gsn3Whxy2uoKCi9KwKBgQDhJ2n3O0VIVNDpas9dkBTGPuGTsugMxKoP
Lzjy5LL+wL2KNvX/ytOOPMhzlqW+J5IW1KKlYfaIqdF4Fk4kpWpC9eaxdsqr34jV
quZPJh1CdkiAcAJsJeHA11RcyvV+v9X1fGuZcVSPbx59zCOfjB/WFRn3VHv4Y9SK
a/yjz0VzQwKBgQCs3m6WCP7+UCDP25Rtyw0h8D+bZyEubWqK6ONLMco22qdYf7Ng
jQJFt5Q66tXdDbuMh5KfRc0658rFopLNuGBeBqsIscT6/uONLxCgruIDESppKHkI
SnY7eQVKZ30hLqBzdXafyzYS3x5HPNmP0BqeL4XpM4o8GvXoYeEWIUxDhwKBgGdU
jFgvdmtsqXgGUeaxoicdYq9DMyXw988CMBOhC9jIofQVpy5ealrj4GbT43rydCRQ
L9jQrbEopn6DVheRZsQWUGpvhzH747OTvCDk8Ba5bCqtvR7e/81dxVTUvHgQGGSS
VltUXKd7KrY3Ar5NM6svgfw1ZLOpNTjVuFuBW3E7AoGAaBBTfCO7mGDmL+NWQio2
WnzX0AfGa65X4qW6yMTny92Q969Um0jRNopN4pipMrrTYbxt2s2Mbs/DifnJ8Tj9
dbRb8Q/pe61Sfvo6KiARy0zmONWrzSg9OVxytjGCdnjLqOunEevE2LHbTmgcn4I7
8ivPpVS7Ljayz4tqGBJpWVA=
-----END PRIVATE KEY-----


-- rootCA.pem --
-----BEGIN CERTIFICATE-----
MIIFBDCCA2ygAwIBAgIRALYUMO26UPyXbH30HKC/2MAwDQYJKoZIhvcNAQELBQAw
gZkxHjAcBgNVBAoTFW1rY2VydCBkZXZlbG9wbWVudCBDQTE3MDUGA1UECwwuZmls
aXBwb0BCaXN0cm9tYXRoLU0yLmxvY2FsIChGaWxpcHBvIFZhbHNvcmRhKTE+MDwG
A1UEAww1bWtjZXJ0IGZpbGlwcG9AQmlzdHJvbWF0aC1NMi5sb2NhbCAoRmlsaXBw
byBWYWxzb3JkYSkwHhcNMjMwNTE2MTYyMTM5WhcNMzMwNTE2MTYyMTM5WjCBmTEe
MBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMTcwNQYDVQQLDC5maWxpcHBv
QEJpc3Ryb21hdGgtTTIubG9jYWwgKEZpbGlwcG8gVmFsc29yZGEpMT4wPAYDVQQD
DDVta2NlcnQgZmlsaXBwb0BCaXN0cm9tYXRoLU0yLmxvY2FsIChGaWxpcHBvIFZh
bHNvcmRhKTCCAaIwDQYJKoZIhvcNAQEBBQADggGPADCCAYoCggGBAM1lATi0gr+F
Iea1OGlbkDsueH6JrQf3p9JFCrFeUlXno53wd/2/DXlcbRvOPhLLI61Ngg47UjHW
XJJg2BHQXmsIdwrmp8nn6a2x+uEswyY/93Mg1GN2IEi5VP8dUhZFjXxNKmzCqaXK
R6klgSC1inmj/SMqTITVcvVtSg0l9/fGkM4drHi4Kr8TZPWMgkeHM2MXsT8S6etl
h2FZ+dHSOo0CWfB1irEqA5xDfopGgjouqj5Hs+q9Dfo9rR4zaHPrxdu2Su+CX7AW
kXd0vBR0ZiR3tc564PbEV3QBI7e1q5WcGm+Va4gyOgOwRTvSmh8NADc3IfOcHsYY
Z2/dWCHSEJ83MVCIIeF4o7G4bdzk7ZZWcnRIq2Z3AsbEksRV1h09O3vk+k2XrlqS
XmVsxYTY1GMj9TCwGveeCAWB2kKmKMzZV5TtI5qh99VtfHW8+WJod10OL68O9NJf
GRqePUFVwWVHiNCAClIngUX/Xy3OPdsTgtefJUDeD4h4txPlZFlHcwIDAQABo0Uw
QzAOBgNVHQ8BAf8EBAMCAgQwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4EFgQU
vNVYZh/uSvzso7iUGJxsxYshBd4wDQYJKoZIhvcNAQELBQADggGBAFYRPtaTWHXI
UYpg6+tFIZtwfXgajrlUI4O/DfZyBIcLcgASza03XBPgEWKvdhxpACvT6SA+W2hM
Y4gjsch3z+fTimaWUkXEudZZkVRB5fq+095A+PQid0UlmY95Dyl9yZrYWK/AQd5C
WoFa6jakl/MdbtGPQubiWh39X7YEoRVbBwI6Sxg4yYGh3hkPPrD2Sv4sbxVshfmX
y/qd25eN1bGbpubUjvZntSWOq1px8xLfssf36BJ0NGBZ7bvB+p2NG9HV7xk6aB+d
x1eZSFMz943kedzNNF5NqoUS+Ro5Wf+FaHSG2m6PD4pC2TEf47lnqpMTfAgoZTWO
mBE4xp95XL2hJIJHfoKgEEuz0pvetwEQue6ySx5c4RoIOwcgDQakh+uKGgaxRTOB
NUxMseeK+rqonXWk36PvkVDLLT+AFsyZPBHS1xFGBql5qiJqsmkWgzAbqgPEyOQ1
HfM8tSXxZwhSosXPY/wYE8MIm+R90JAh6ltJAKGUQDax2W2ETzzbpg==
-----END CERTIFICATE-----


-- add-checkpoint.hurl --
POST https://localhost:8443/e933707e0e36c30f01d94b5d81e742da373679d88eb0f85f959ccd80b83b992a/add-checkpoint
```
old 0

sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562
1
KgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=

— sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562 UgIom7fPZTqpxWWhyjWduBvTvGVqsokMbqTArsQilegKoFBJQjUFAmQ0+YeSPM3wfUQMFSzVnnNuWRTYrajXpNUbIQY=
```
HTTP 200
[Asserts]
body contains "— example.com/witness"
litetlog-0.4.3/cmd/litewitness/testdata/gentest/000077500000000000000000000000001476426515700217615ustar00rootroot00000000000000litetlog-0.4.3/cmd/litewitness/testdata/gentest/sigsum.go000066400000000000000000000066501476426515700236260ustar00rootroot00000000000000// Run with "go run -mod=mod ./cmd/litewitness/testdata/gentest"
// and re-run "go mod tidy" after use to clean up its dependencies.

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"encoding/binary"
	"encoding/hex"
	"encoding/pem"
	"flag"
	"fmt"
	"log"
	"net/url"

	"github.com/caarlos0/sshmarshal"
	"golang.org/x/crypto/hkdf"
	"golang.org/x/crypto/ssh"
	"golang.org/x/mod/sumdb/note"
	"golang.org/x/mod/sumdb/tlog"
	sigsum "sigsum.org/sigsum-go/pkg/crypto"
	"sigsum.org/sigsum-go/pkg/merkle"
)

var seedFlag = flag.String("seed", "", "hex-encoded seed")

func main() {
	flag.Parse()
	var seed []byte
	if *seedFlag == "" {
		seed = make([]byte, 32)
		if _, err := rand.Read(seed); err != nil {
			log.Fatal(err)
		}
	} else {
		seed = make([]byte, hex.DecodedLen(len(*seedFlag)))
		if _, err := hex.Decode(seed, []byte(*seedFlag)); err != nil {
			log.Fatal(err)
		}
	}
	fmt.Printf("- seed: %x\n", seed)
	h := hkdf.New(sha256.New, seed, []byte("litewitness gentest"), nil)

	publicKey, privateKey, _ := ed25519.GenerateKey(h)
	fmt.Printf("- log private key: %x\n", privateKey.Seed())
	fmt.Printf("- log public key: %x\n", publicKey)

	keyHash := sigsum.HashBytes(publicKey[:])
	fmt.Printf("- log key hash: %x\n", keyHash)
	origin := fmt.Sprintf("sigsum.org/v1/tree/%x", keyHash)
	fmt.Printf("- origin: %s\n", origin)
	fmt.Printf("- origin URL-encoded: %s\n", url.QueryEscape(origin))

	const algEd25519 = 1
	skey := fmt.Sprintf("PRIVATE+KEY+%s+%08x+%s", origin, noteKeyHash(origin, append([]byte{algEd25519}, publicKey...)), base64.StdEncoding.EncodeToString(append([]byte{algEd25519}, privateKey.Seed()...)))
	s, _ := note.NewSigner(skey)
	fmt.Printf("- log note key: %s\n", skey)

	witSeed := make([]byte, ed25519.SeedSize)
	h.Read(witSeed)
	witKey := ed25519.NewKeyFromSeed(witSeed)
	ss, err := ssh.NewSignerFromSigner(witKey)
	if err != nil {
		log.Fatal(err)
	}
	pkHash := sigsum.HashBytes(ss.PublicKey().(ssh.CryptoPublicKey).CryptoPublicKey().(ed25519.PublicKey))
	fmt.Printf("- witness key hash: %s\n", hex.EncodeToString(pkHash[:]))
	fmt.Printf("- witness key: %x\n", witKey)
	pemKey, err := sshmarshal.MarshalPrivateKey(witKey, "")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("- witness key:\n%s", pem.EncodeToMemory(pemKey))

	tree := merkle.NewTree()
	addLeaf := func(leaf sigsum.Hash) {
		if !tree.AddLeafHash(&leaf) {
			panic("duplicate")
		}
		fmt.Printf("- leaf[%d] hash: %x\n", tree.Size(), leaf)
	}
	signTreeHead := func() {
		checkpoint := fmt.Sprintf("%s\n%d\n%s\n", origin, tree.Size(), tlog.Hash(tree.GetRootHash()))
		n, _ := note.Sign(¬e.Note{Text: checkpoint}, s)
		fmt.Printf("- checkpoint (size %d):\n%s\n", tree.Size(), n)
	}
	consistencyProof := func(oldSize uint64) {
		proof, err := tree.ProveConsistency(oldSize, tree.Size())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("- consistency proof from size %d:\n", oldSize)
		fmt.Printf("old %d\n", oldSize)
		for _, p := range proof {
			fmt.Printf("%s\n", base64.StdEncoding.EncodeToString(p[:]))
		}
	}

	addLeaf(sigsum.Hash{42, 0})
	signTreeHead()

	addLeaf(sigsum.Hash{42, 1})
	addLeaf(sigsum.Hash{42, 2})
	signTreeHead()
	consistencyProof(1)

	addLeaf(sigsum.Hash{42, 3})
	addLeaf(sigsum.Hash{42, 4})
	signTreeHead()
	consistencyProof(1)
	consistencyProof(3)
}

func noteKeyHash(name string, key []byte) uint32 {
	h := sha256.New()
	h.Write([]byte(name))
	h.Write([]byte("\n"))
	h.Write(key)
	sum := h.Sum(nil)
	return binary.BigEndian.Uint32(sum)
}
litetlog-0.4.3/cmd/litewitness/testdata/litewitness.txt000066400000000000000000000142461476426515700234320ustar00rootroot00000000000000# gentest seed b4e385f4358f7373cfa9184b176f3cccf808e795baf04092ddfde9461014f0c4

# set up log
exec witnessctl add-sigsum-log -key=ffdc2d4d98e4124d3feaf788c0c2f9abfd796083d1f0495437f302ec79cf100f

# start ssh-agent
env SSH_AUTH_SOCK=$WORK/s # barely below the max path length
! exec ssh-agent -a $SSH_AUTH_SOCK -D & # ssh-agent always exits 2
waitfor $SSH_AUTH_SOCK
chmod 600 other_key.pem
exec ssh-add other_key.pem
chmod 600 witness_key.pem
exec ssh-add witness_key.pem

# start litewitness
exec litewitness -ssh-agent=$SSH_AUTH_SOCK -name=example.com/witness -key=e933707e0e36c30f01d94b5d81e742da373679d88eb0f85f959ccd80b83b992a &litewitness&
waitfor localhost:7380

# add-checkpoint
exec hurl --test --error-format long add-checkpoint.hurl

# check that litewitness shut down cleanly
killall
wait litewitness
stderr 'shutting down'

# witnessctl list-logs
exec witnessctl list-logs
stdout sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562
stdout "size":5


-- witness_key.pem --
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtz
c2gtZWQyNTUxOQAAACBkhIrYq+1uhZgbOzh1slK4dn67SwL3A6yjsecbvWqOUAAA
AIgN5+09DeftPQAAAAtzc2gtZWQyNTUxOQAAACBkhIrYq+1uhZgbOzh1slK4dn67
SwL3A6yjsecbvWqOUAAAAEAx/8IRbsvgA6yqgAq3B1e9fVMgbj/r72ptB5bZVTCz
T2SEitir7W6FmBs7OHWyUrh2frtLAvcDrKOx5xu9ao5QAAAAAAECAwQF
-----END OPENSSH PRIVATE KEY-----


-- other_key.pem --
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtz
c2gtZWQyNTUxOQAAACDkZam8RBV490MX6kvcJKCMJy57Z3Qcxbn0K3J2mwXX9QAA
AIgezao7Hs2qOwAAAAtzc2gtZWQyNTUxOQAAACDkZam8RBV490MX6kvcJKCMJy57
Z3Qcxbn0K3J2mwXX9QAAAEA+37qVtCUzwBX6u6EmU8B+8qbO8xU4FdvJqU4utc7R
cuRlqbxEFXj3QxfqS9wkoIwnLntndBzFufQrcnabBdf1AAAAAAECAwQF
-----END OPENSSH PRIVATE KEY-----


-- add-checkpoint.hurl --
POST http://localhost:7380/add-checkpoint
```
old 0

sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562
1
KgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=

— sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562 UgIom7fPZTqpxWWhyjWduBvTvGVqsokMbqTArsQilegKoFBJQjUFAmQ0+YeSPM3wfUQMFSzVnnNuWRTYrajXpNUbIQY=
```
HTTP 200
[Asserts]
body contains "— example.com/witness"


POST http://localhost:7380/add-checkpoint
```
old 1
KgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
KgIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=

sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562
3
RcCI1Nk56ZcSmIEfIn0SleqtV7uvrlXNccFx595Iwl0=

— sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562 UgIom2VbtIcdFbwFAy1n7s6IkAxIY6J/GQOTuZF2ORV39d75cbAj2aQYwyJre36kezNobZs4SUUdrcawfAB8WVrx6gx=
```
HTTP 403
[Asserts]
body contains "invalid signature"


POST http://localhost:7380/add-checkpoint
```
old 1
KgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
KgIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=

sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e563
3
RcCI1Nk56ZcSmIEfIn0SleqtV7uvrlXNccFx595Iwl0=

— sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e563 UgIom2VbtIcdFbwFAy1n7s6IkAxIY6J/GQOTuZF2ORV39d75cbAj2aQYwyJre36kezNobZs4SUUdrcawfAB8WVrx6go=
```
HTTP 403
[Asserts]
body contains "unknown log"


POST http://localhost:7380/add-checkpoint
```
old 1

sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562
3
RcCI1Nk56ZcSmIEfIn0SleqtV7uvrlXNccFx595Iwl0=

— sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562 UgIom2VbtIcdFbwFAy1n7s6IkAxIY6J/GQOTuZF2ORV39d75cbAj2aQYwyJre36kezNobZs4SUUdrcawfAB8WVrx6go=
```
HTTP 422
[Asserts]
body contains "consistency proof"


POST http://localhost:7380/add-checkpoint
```
old 1
KgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
KgIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABA=

sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562
3
RcCI1Nk56ZcSmIEfIn0SleqtV7uvrlXNccFx595Iwl0=

— sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562 UgIom2VbtIcdFbwFAy1n7s6IkAxIY6J/GQOTuZF2ORV39d75cbAj2aQYwyJre36kezNobZs4SUUdrcawfAB8WVrx6go=
```
HTTP 422
[Asserts]
body contains "consistency proof"


POST http://localhost:7380/add-checkpoint
```
old 1
KgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
KgIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=

sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562
3
RcCI1Nk56ZcSmIEfIn0SleqtV7uvrlXNccFx595Iwl0=

— sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562 UgIom2VbtIcdFbwFAy1n7s6IkAxIY6J/GQOTuZF2ORV39d75cbAj2aQYwyJre36kezNobZs4SUUdrcawfAB8WVrx6go=
```
HTTP 200
[Asserts]
body contains "— example.com/witness"


POST http://localhost:7380/add-checkpoint
```
old 1
KgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
+fUDV+k970B4I3uKrqJM4aP1lloPZP8mvr2Z4wRw2LI=
KgQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=

sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562
5
QrtXrQZCCvpIgsSmOsah7HdICzMLLyDfxToMql9WTjY=

— sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562 UgIomw/EOJmWi0i1FQsOj+etB7F8IccFam/jgd6wzRns4QPVmyEZtdvl1U2KEmLOZ/ASRcWJi0tW90dJWAShei7sDww=
```
HTTP 409
[Asserts]
body == "3\n"


POST http://localhost:7380/add-checkpoint
```
old 3
KgIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
KgMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
wgiIFdZfYNv6WU1OllBKsWnLYIS/DBMqt8Uh/S4OukE=
KgQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=

sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562
5
QrtXrQZCCvpIgsSmOsah7HdICzMLLyDfxToMql9WTjY=

— sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562 UgIomw/EOJmWi0i1FQsOj+etB7F8IccFam/jgd6wzRns4QPVmyEZtdvl1U2KEmLOZ/ASRcWJi0tW90dJWAShei7sDww=
```
HTTP 200
[Asserts]
body contains "— example.com/witness"


POST http://localhost:7380/add-checkpoint
```
old 0

sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562
5
QrtXrQZCCvpIgsSmOsah7HdICzMLLyDfxToMql9WTjY=

— sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562 UgIomw/EOJmWi0i1FQsOj+etB7F8IccFam/jgd6wzRns4QPVmyEZtdvl1U2KEmLOZ/ASRcWJi0tW90dJWAShei7sDww=
```
HTTP 409
[Asserts]
body == "5\n"
litetlog-0.4.3/cmd/litewitness/testdata/sumdb.txt000066400000000000000000000031671476426515700221720ustar00rootroot00000000000000# set up log
exec witnessctl add-log -origin 'go.sum database tree'
exec witnessctl add-key -origin 'go.sum database tree' -key sum.golang.org+033de0ae+Ac4zctda0e5eza+HJyk9SxEdh+s3Ux18htTTAD8OuAn8

# start ssh-agent
env SSH_AUTH_SOCK=$WORK/s # barely below the max path length
! exec ssh-agent -a $SSH_AUTH_SOCK -D & # ssh-agent always exits 2
waitfor $SSH_AUTH_SOCK
chmod 600 witness_key.pem
exec ssh-add witness_key.pem

# start litewitness
exec litewitness -listen=localhost:7381 -ssh-agent=$SSH_AUTH_SOCK -name=example.com/witness -key=e933707e0e36c30f01d94b5d81e742da373679d88eb0f85f959ccd80b83b992a &litewitness&
waitfor localhost:7381

# add-checkpoint
exec hurl --test --error-format long add-checkpoint.hurl

# check that litewitness shut down cleanly
killall
wait litewitness
stderr 'shutting down'

# witnessctl list-logs
exec witnessctl list-logs
stdout 'go.sum database tree'
stdout "size":35225469


-- witness_key.pem --
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtz
c2gtZWQyNTUxOQAAACBkhIrYq+1uhZgbOzh1slK4dn67SwL3A6yjsecbvWqOUAAA
AIgN5+09DeftPQAAAAtzc2gtZWQyNTUxOQAAACBkhIrYq+1uhZgbOzh1slK4dn67
SwL3A6yjsecbvWqOUAAAAEAx/8IRbsvgA6yqgAq3B1e9fVMgbj/r72ptB5bZVTCz
T2SEitir7W6FmBs7OHWyUrh2frtLAvcDrKOx5xu9ao5QAAAAAAECAwQF
-----END OPENSSH PRIVATE KEY-----


-- add-checkpoint.hurl --
POST http://localhost:7381/add-checkpoint
```
old 0

go.sum database tree
35225469
vt5T6GaLCXvyHFl9VUvvItR43XZxfLgftEcTyO3eJCQ=

— sum.golang.org Az3grpukl5AXaVfYkLiDGORx/DN2nlcS5kZHR5uYOBV2KA2HgXpD+gu9HHONebHLAyaKbbTM75QTtPydhKCExixSfwQ=
```
HTTP 200
[Asserts]
body contains "— example.com/witness"
litetlog-0.4.3/cmd/spicy/000077500000000000000000000000001476426515700152545ustar00rootroot00000000000000litetlog-0.4.3/cmd/spicy/spicy.go000066400000000000000000000206761476426515700167450ustar00rootroot00000000000000package main

import (
	"crypto/rand"
	"flag"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"filippo.io/litetlog/internal/tlogx"
	"golang.org/x/mod/sumdb/note"
	"golang.org/x/mod/sumdb/tlog"
)

func main() {
	verifyFlag := flag.String("verify", "",
		"verify the file's spicy signature with the given public key")
	keyFlag := flag.String("key", "",
		"the log's private key path (written by -init)")
	initFlag := flag.String("init", "",
		"initialize a new log with the given name (e.g. example.com/spicy)")
	assetsFlag := flag.String("assets", "",
		"directory where log entries and metadata are stored")
	flag.Parse()

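	// -verify: check each file's .spicy inclusion proof against the
	// checkpoint signed by the given public key.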
	if *verifyFlag != "" {
		if len(flag.Args()) == 0 {
			log.Fatalf("no files to verify")
		}
		vkey, err := note.NewVerifier(*verifyFlag)
		if err != nil {
			log.Fatalf("could not parse public key: %v", err)
		}
		for _, path := range flag.Args() {
			f, err := os.ReadFile(path)
			if err != nil {
				log.Fatalf("could not read %q: %v", path, err)
			}
			sig, err := os.ReadFile(path + ".spicy")
			if err != nil {
				log.Fatalf("could not read %q: %v", path+".spicy", err)
			}
			s := string(sig)
			s, ok := strings.CutPrefix(s, "index ")
			if !ok {
				log.Fatalf("malformed spicy signature for %q", path)
			}
			i, s, ok := strings.Cut(s, "\n")
			if !ok {
				log.Fatalf("malformed spicy signature for %q", path)
			}
			index, err := strconv.ParseInt(i, 10, 64)
			if err != nil {
				log.Fatalf("malformed spicy signature for %q: %v", path, err)
			}
			var proof tlog.RecordProof
			for {
				var h string
				h, s, ok = strings.Cut(s, "\n")
				if !ok {
					log.Fatalf("malformed spicy signature for %q", path)
				}
				if h == "" {
					break
				}
				hh, err := tlog.ParseHash(h)
				if err != nil {
					log.Fatalf("malformed spicy signature for %q: %v", path, err)
				}
				proof = append(proof, hh)
			}
			m, err := note.Open([]byte(s), note.VerifierList(vkey))
			if err != nil {
				log.Fatalf("could not verify checkpoint for %q: %v", path, err)
			}
			c, err := tlogx.ParseCheckpoint(m.Text)
			if err != nil {
				log.Fatalf("could not parse checkpoint for %q: %v", path, err)
			}
			if c.Origin != vkey.Name() {
				log.Fatalf("spicy signature for %q is for a different log: got %q, want %q", path, c.Origin, vkey.Name())
			}
			if err := tlog.CheckRecord(proof, c.N, c.Hash, index, tlog.RecordHash(f)); err != nil {
				log.Fatalf("could not verify inclusion for %q: %v", path, err)
			}
		}
		fmt.Fprintf(os.Stderr, "Spicy signature(s) verified! 🌶️\n")
		return
	}

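	// -init: generate the log's signing key and write an empty checkpoint
	// and edge file to the assets directory.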
	if *initFlag != "" {
		latestPath := filepath.Join(*assetsFlag, "latest")
		if _, err := os.Stat(latestPath); err == nil {
			log.Fatalf("log already initialized, %q exists", latestPath)
		}
		edgePath := filepath.Join(*assetsFlag, "edge")
		if _, err := os.Stat(edgePath); err == nil {
			log.Fatalf("log already initialized, %q exists", edgePath)
		}
		if _, err := os.Stat(*keyFlag); err == nil {
			log.Fatalf("log already initialized, %q exists", *keyFlag)
		}

		skey, vkey, err := note.GenerateKey(rand.Reader, *initFlag)
		if err != nil {
			log.Fatalf("could not generate key: %v", err)
		}
		signer, err := note.NewSigner(skey)
		if err != nil {
			log.Fatalf("could not create signer: %v", err)
		}
		checkpoint, err := note.Sign(&note.Note{
			Text: tlogx.FormatCheckpoint(tlogx.Checkpoint{
				Origin: *initFlag,
			}),
		}, signer)
		if err != nil {
			log.Fatalf("could not sign checkpoint: %v", err)
		}

		if err := os.WriteFile(*keyFlag, []byte(skey), 0600); err != nil {
			log.Fatalf("could not write key: %v", err)
		}
		if err := os.WriteFile(latestPath, checkpoint, 0644); err != nil {
			log.Fatalf("could not write latest checkpoint: %v", err)
		}
		if err := os.WriteFile(edgePath, []byte("size 0\n"), 0644); err != nil {
			log.Fatalf("could not write edge: %v", err)
		}

		fmt.Fprintf(os.Stderr, "Log initialized! 🌶️\n")
		fmt.Fprintf(os.Stderr, "  - Name: %s\n", *initFlag)
		fmt.Fprintf(os.Stderr, "  - Public key: %s\n", vkey)
		fmt.Fprintf(os.Stderr, "  - Private key path: %s\n", *keyFlag)
		fmt.Fprintf(os.Stderr, "  - Assets directory: %s\n", *assetsFlag)
		return
	}

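	// Append mode: add the named files as new log entries, update the
	// checkpoint and edge files, and write a .spicy proof next to each input.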
	if len(flag.Args()) == 0 {
		log.Fatalf("no files to append")
	}

	skey, err := os.ReadFile(*keyFlag)
	if err != nil {
		log.Fatalf("could not read key: %v", err)
	}
	signer, err := note.NewSigner(strings.TrimSpace(string(skey)))
	if err != nil {
		log.Fatalf("could not parse key: %v", err)
	}
	verifier, err := tlogx.NewVerifierFromSigner(strings.TrimSpace(string(skey)))
	if err != nil {
		log.Fatalf("could not create verifier: %v", err)
	}

	checkpoint, err := os.ReadFile(filepath.Join(*assetsFlag, "latest"))
	if err != nil {
		log.Fatalf("could not read latest checkpoint: %v", err)
	}
	n, err := note.Open(checkpoint, note.VerifierList(verifier))
	if err != nil {
		log.Fatalf("could not verify latest checkpoint: %v", err)
	}
	c, err := tlogx.ParseCheckpoint(n.Text)
	if err != nil {
		log.Fatalf("could not parse latest checkpoint: %v", err)
	}

	hashes := make(map[int64]tlog.Hash)
	hashReader := tlog.HashReaderFunc(func(indexes []int64) ([]tlog.Hash, error) {
		list := make([]tlog.Hash, 0, len(indexes))
		for _, id := range indexes {
			h, ok := hashes[id]
			if !ok {
				return nil, fmt.Errorf("index %d not in hashes", id)
			}
			list = append(list, h)
		}
		return list, nil
	})

	edge, err := os.ReadFile(filepath.Join(*assetsFlag, "edge"))
	if err != nil {
		log.Fatalf("could not open edge file: %v", err)
	}
	lines := strings.Split(strings.TrimSpace(string(edge)), "\n")
	if len(lines) < 1 {
		log.Fatalf("malformed edge file")
	}
	if size, ok := strings.CutPrefix(lines[0], "size "); !ok {
		log.Fatalf("malformed edge file: %q", lines[0])
	} else {
		n, err := strconv.ParseInt(size, 10, 64)
		if err != nil {
			log.Fatalf("malformed edge file: %v", err)
		}
		if n != c.N {
			log.Fatalf("edge file size mismatch: got %d, latest checkpoint is %d", n, c.N)
		}
	}
	idx := tlogx.RightEdge(c.N)
	if len(idx) != len(lines[1:]) {
		log.Fatalf("edge hash count mismatch: got %d, want %d", len(lines[1:]), len(idx))
	}
	for i, line := range lines[1:] {
		hash, err := tlog.ParseHash(line)
		if err != nil {
			log.Fatalf("malformed edge file: %v", err)
		}
		hashes[idx[i]] = hash
	}

	fmt.Fprintf(os.Stderr, "Log loaded.\n")
	fmt.Fprintf(os.Stderr, "  - Name: %s\n", c.Origin)
	fmt.Fprintf(os.Stderr, "  - Current size: %d\n", c.N)
	fmt.Fprintf(os.Stderr, "  - Assets directory: %s\n", *assetsFlag)

	for i, path := range flag.Args() {
		if _, err := os.Stat(path + ".spicy"); err == nil {
			log.Fatalf("spicy signature already exists for %q", path)
		}
		f, err := os.ReadFile(path)
		if err != nil {
			log.Fatalf("could not read %q: %v", path, err)
		}
		n := c.N + int64(i)
		hh, err := tlog.StoredHashes(n, f, hashReader)
		if err != nil {
			log.Fatalf("could not append %q: %v", path, err)
		}
		for k, h := range hh {
			hashes[tlog.StoredHashIndex(0, n)+int64(k)] = h
		}
		entryPath := filepath.Join(*assetsFlag, strconv.FormatInt(n, 10))
		if err := os.WriteFile(entryPath, f, 0644); err != nil {
			log.Fatalf("could not copy %q to assets: %v", path, err)
		}
		fmt.Fprintf(os.Stderr, "  + %q is now entry %d\n", path, n)
	}

	N := c.N + int64(len(flag.Args()))
	th, err := tlog.TreeHash(N, hashReader)
	if err != nil {
		log.Fatalf("could not compute tree hash: %v", err)
	}
	newCheckpoint, err := note.Sign(&note.Note{
		Text: tlogx.FormatCheckpoint(tlogx.Checkpoint{
			Origin: c.Origin,
			Tree:   tlog.Tree{N: N, Hash: th},
		})}, signer)
	if err != nil {
		log.Fatalf("could not sign new checkpoint: %v", err)
	}
	newEdge := fmt.Sprintf("size %d\n", N)
	for _, idx := range tlogx.RightEdge(N) {
		newEdge += fmt.Sprintf("%s\n", hashes[idx])
	}

	if err := os.WriteFile(filepath.Join(*assetsFlag, "latest"), newCheckpoint, 0644); err != nil {
		log.Fatalf("could not write new checkpoint: %v", err)
	}
	if err := os.WriteFile(filepath.Join(*assetsFlag, "edge"), []byte(newEdge), 0644); err != nil {
		log.Fatalf("could not write new edge: %v", err)
	}
	fmt.Fprintf(os.Stderr, "  - New size: %d\n", N)

	for i, path := range flag.Args() {
		s := fmt.Sprintf("index %d\n", c.N+int64(i))
		proof, err := tlog.ProveRecord(N, c.N+int64(i), hashReader)
		if err != nil {
			log.Fatalf("could not prove record %d: %v", c.N+int64(i), err)
		}
		for _, p := range proof {
			s += fmt.Sprintf("%s\n", p)
		}
		s += "\n"
		s += string(newCheckpoint)
		if err := os.WriteFile(path+".spicy", []byte(s), 0644); err != nil {
			log.Fatalf("could not write spicy signature: %v", err)
		}
	}
	fmt.Fprintf(os.Stderr, "Spicy signatures written! 🌶️\n")
}
litetlog-0.4.3/cmd/tlogclient-warmup/000077500000000000000000000000001476426515700176025ustar00rootroot00000000000000litetlog-0.4.3/cmd/tlogclient-warmup/main.go000066400000000000000000000014051476426515700210550ustar00rootroot00000000000000package main

import (
	"io"
	"os"
	"path/filepath"

	"filippo.io/litetlog/internal/tlogclient"
	"github.com/cheggaaa/pb/v3"
	"golang.org/x/mod/sumdb/tlog"
)

func main() {
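	// Read the latest go.sum database tree head from stdin, then fetch and
	// verify every entry of sum.golang.org, caching full tiles on disk under
	// the user cache directory to warm the cache.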
	latest, err := io.ReadAll(os.Stdin)
	if err != nil {
		panic(err)
	}
	tree, err := tlog.ParseTree(latest)
	if err != nil {
		panic(err)
	}

	cacheDir, err := os.UserCacheDir()
	if err != nil {
		panic(err)
	}
	cacheDir = filepath.Join(cacheDir, "tlogclient-warmup")

	fetcher := tlogclient.NewSumDBFetcher("https://sum.golang.org/")
	dirCache := tlogclient.NewPermanentCache(fetcher, cacheDir)
	client := tlogclient.NewClient(dirCache)

	bar := pb.Start64(tree.N)
	for range client.EntriesSumDB(tree, 0) {
		bar.Increment()
	}
	bar.Finish()
	if err := client.Error(); err != nil {
		panic(err)
	}
}
litetlog-0.4.3/cmd/witnessctl/000077500000000000000000000000001476426515700163245ustar00rootroot00000000000000litetlog-0.4.3/cmd/witnessctl/witnessctl.go000066400000000000000000000077051476426515700210630ustar00rootroot00000000000000package main

import (
	"encoding/base64"
	"encoding/hex"
	"flag"
	"fmt"
	"log"
	"os"

	"crawshaw.io/sqlite"
	"crawshaw.io/sqlite/sqlitex"
	"filippo.io/litetlog/internal/witness"
	"golang.org/x/mod/sumdb/note"
	sigsum "sigsum.org/sigsum-go/pkg/crypto"
	"sigsum.org/sigsum-go/pkg/merkle"
)

func usage() {
	fmt.Printf("Usage: %s  [options]\n", os.Args[0])
	fmt.Println("Commands:")
	fmt.Println("    add-log -db  -origin ")
	fmt.Println("    add-key -db  -origin  -key ")
	fmt.Println("    del-key -db  -origin  -key ")
	fmt.Println("    add-sigsum-log -db  -key ")
	fmt.Println("    list-logs -db ")
	os.Exit(1)
}

func main() {
	if len(os.Args) < 2 {
		usage()
	}
	fs := flag.NewFlagSet(os.Args[0], flag.ExitOnError)
	dbFlag := fs.String("db", "litewitness.db", "path to sqlite database")
	switch os.Args[1] {
	case "add-log":
		originFlag := fs.String("origin", "", "log name")
		fs.Parse(os.Args[2:])
		db := openDB(*dbFlag)
		addLog(db, *originFlag)

	case "add-key":
		originFlag := fs.String("origin", "", "log name")
		keyFlag := fs.String("key", "", "verifier key")
		fs.Parse(os.Args[2:])
		db := openDB(*dbFlag)
		addKey(db, *originFlag, *keyFlag)

	case "del-key":
		originFlag := fs.String("origin", "", "log name")
		keyFlag := fs.String("key", "", "verifier key")
		fs.Parse(os.Args[2:])
		db := openDB(*dbFlag)
		delKey(db, *originFlag, *keyFlag)

	case "add-sigsum-log":
		keyFlag := fs.String("key", "", "hex-encoded key")
		fs.Parse(os.Args[2:])
		db := openDB(*dbFlag)
		addSigsumLog(db, *keyFlag)

	case "list-logs":
		fs.Parse(os.Args[2:])
		db := openDB(*dbFlag)
		listLogs(db)

	default:
		usage()
	}
}

func openDB(dbPath string) *sqlite.Conn {
	db, err := witness.OpenDB(dbPath)
	if err != nil {
		log.Fatalf("Error opening database: %v", err)
	}
	return db
}

func addLog(db *sqlite.Conn, origin string) {
	treeHash := merkle.HashEmptyTree()
	if err := sqlitex.Exec(db, "INSERT INTO log (origin, tree_size, tree_hash) VALUES (?, 0, ?)",
		nil, origin, base64.StdEncoding.EncodeToString(treeHash[:])); err != nil {
		log.Fatalf("Error adding log: %v", err)
	}
	log.Printf("Added log %q.", origin)
}

func addKey(db *sqlite.Conn, origin string, vk string) {
	v, err := note.NewVerifier(vk)
	if err != nil {
		log.Fatalf("Error parsing verifier key: %v", err)
	}
	if v.Name() != origin {
		log.Printf("Warning: verifier key name %q does not match origin %q.", v.Name(), origin)
	}
	err = sqlitex.Exec(db, "INSERT INTO key (origin, key) VALUES (?, ?)", nil, origin, vk)
	if err != nil {
		log.Fatalf("Error adding key: %v", err)
	}
	log.Printf("Added key %q.", vk)
}

func delKey(db *sqlite.Conn, origin string, vk string) {
	err := sqlitex.Exec(db, "DELETE FROM key WHERE origin = ? AND key = ?", nil, origin, vk)
	if err != nil {
		log.Fatalf("Error deleting key: %v", err)
	}
	if db.Changes() == 0 {
		log.Fatalf("Key %q not found.", vk)
	}
	log.Printf("Deleted key %q.", vk)
}

func addSigsumLog(db *sqlite.Conn, keyFlag string) {
	if len(keyFlag) != sigsum.PublicKeySize*2 {
		log.Fatal("Key must be 32 hex-encoded bytes.")
	}
	var key sigsum.PublicKey
	if _, err := hex.Decode(key[:], []byte(keyFlag)); err != nil {
		log.Fatalf("Error decoding key: %v", err)
	}
	keyHash := sigsum.HashBytes(key[:])
	origin := fmt.Sprintf("sigsum.org/v1/tree/%x", keyHash)
	vk, err := note.NewEd25519VerifierKey(origin, key[:])
	if err != nil {
		log.Fatalf("Error computing verifier key: %v", err)
	}
	addLog(db, origin)
	addKey(db, origin, vk)
}

func listLogs(db *sqlite.Conn) {
	if err := sqlitex.Exec(db, `
	SELECT json_object(
		'origin', log.origin,
		'size', log.tree_size,
		'root_hash', log.tree_hash,
		'keys', json_group_array(key.key))
	FROM
		log
		LEFT JOIN key on log.origin = key.origin
	GROUP BY
		log.origin
	ORDER BY
		log.origin
	`, func(stmt *sqlite.Stmt) error {
		_, err := fmt.Printf("%s\n", stmt.ColumnText(0))
		return err
	}); err != nil {
		log.Fatalf("Error listing logs: %v", err)
	}
}
litetlog-0.4.3/go.mod000066400000000000000000000013361476426515700144730ustar00rootroot00000000000000module filippo.io/litetlog

go 1.23.0

require (
	crawshaw.io/sqlite v0.3.3-0.20220618202545-d1964889ea3c
	github.com/cheggaaa/pb/v3 v3.1.5
	github.com/rogpeppe/go-internal v1.11.0
	golang.org/x/crypto v0.15.0
	golang.org/x/mod v0.14.0
	golang.org/x/net v0.18.0
	golang.org/x/sync v0.5.0
	sigsum.org/sigsum-go v0.6.1
)

require (
	github.com/VividCortex/ewma v1.2.0 // indirect
	github.com/fatih/color v1.15.0 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.19 // indirect
	github.com/mattn/go-runewidth v0.0.15 // indirect
	github.com/rivo/uniseg v0.2.0 // indirect
	golang.org/x/sys v0.14.0 // indirect
	golang.org/x/text v0.14.0 // indirect
	golang.org/x/tools v0.15.0 // indirect
)
litetlog-0.4.3/go.sum000066400000000000000000000066251476426515700145260ustar00rootroot00000000000000crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797 h1:yDf7ARQc637HoxDho7xjqdvO5ZA2Yb+xzv/fOnnvZzw=
crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797/go.mod h1:sXBiorCo8c46JlQV3oXPKINnZ8mcqnye1EkVkqsectk=
crawshaw.io/sqlite v0.3.3-0.20220618202545-d1964889ea3c h1:wvzox0eLO6CKQAMcOqz7oH3UFqMpMmK7kwmwV+22HIs=
crawshaw.io/sqlite v0.3.3-0.20220618202545-d1964889ea3c/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
github.com/cheggaaa/pb/v3 v3.1.5 h1:QuuUzeM2WsAqG2gMqtzaWithDJv0i+i6UlnwSCI4QLk=
github.com/cheggaaa/pb/v3 v3.1.5/go.mod h1:CrxkeghYTXi1lQBEI7jSn+3svI3cuc19haAj6jM60XI=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8=
golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk=
sigsum.org/sigsum-go v0.6.1 h1:yumQb99ySNrLgcwxzmVSJQX+kPkppFVwWdn6/tfnbdI=
sigsum.org/sigsum-go v0.6.1/go.mod h1:VuYGNZBDKuff6QNd9mgN9Nfi5ZWnGq4JZz6FUso42BY=
litetlog-0.4.3/internal/000077500000000000000000000000001476426515700151765ustar00rootroot00000000000000litetlog-0.4.3/internal/slogconsole/000077500000000000000000000000001476426515700175255ustar00rootroot00000000000000litetlog-0.4.3/internal/slogconsole/slogconsole.go000066400000000000000000000157071476426515700224150ustar00rootroot00000000000000package slogconsole

import (
	"context"
	"errors"
	"fmt"
	"log/slog"
	"net/http"
	"regexp"
	"slices"
	"strings"
	"sync"
	"time"
)

// Handler is an [slog.Handler] that exposes records over a web console.
//
// It implements [slog.Handler] and [http.Handler]. The HTTP handler accepts
// [server-sent events] requests (with Accept: text/event-stream) and streams
// all records as text to the client. It also serves a simple HTML page that
// connects to the SSE endpoint and prints the logs (with Accept: text/html).
//
// The slog Handler will accept all records (Enabled returns true) if there are
// any web clients connected, and none otherwise. If a client is too slow to
// consume records, they will be dropped.
//
// [server-sent events]: https://html.spec.whatwg.org/multipage/server-sent-events.html
type Handler struct {
	ch *commonHandler
	sh slog.Handler
}

// commonHandler is where all the actual state is.
//
// We need to wrap it to support swapping the slog.Handler for WithAttrs and
// WithGroup. This feels like a significant shortcoming of the slog.Handler
// interface, adding a lot of complexity to otherwise simple Handler
// implementations. (Note how [slog.TextHandler] has to do the same thing.)
type commonHandler struct {
	mu      sync.RWMutex
	clients []chan []byte
	limit   int
	filter  *regexp.Regexp
}

var _ http.Handler = &Handler{}
var _ slog.Handler = &Handler{}

// New returns a new Handler.
//
// opts can be nil, and is passed to [slog.NewTextHandler].
// If Level is not set, it defaults to slog.LevelDebug.
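//
// A minimal wiring sketch from an importing package (the route name and the
// second handler are illustrative, not part of this package):
//
//	console := slogconsole.New(nil)
//	logger := slog.New(slogconsole.MultiHandler(
//		slog.NewTextHandler(os.Stderr, nil), console))
//	http.Handle("/logz", console)
//	logger.Info("hello")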
func New(opts *slog.HandlerOptions) *Handler {
	if opts == nil {
		opts = &slog.HandlerOptions{}
	}
	if opts.Level == nil {
		opts.Level = slog.LevelDebug
	}
	h := &commonHandler{limit: 10}
	sh := slog.NewTextHandler(h, opts)
	return &Handler{ch: h, sh: sh}
}

// Handle implements [slog.Handler].
func (h *Handler) Handle(ctx context.Context, r slog.Record) error {
	return h.sh.Handle(ctx, r)
}

// WithAttrs implements [slog.Handler].
func (h *Handler) WithAttrs(attrs []slog.Attr) slog.Handler {
	return &Handler{ch: h.ch, sh: h.sh.WithAttrs(attrs)}
}

// WithGroup implements [slog.Handler].
func (h *Handler) WithGroup(name string) slog.Handler {
	return &Handler{ch: h.ch, sh: h.sh.WithGroup(name)}
}

// Enabled implements [slog.Handler].
func (h *Handler) Enabled(_ context.Context, _ slog.Level) bool {
	h.ch.mu.RLock()
	defer h.ch.mu.RUnlock()
	return len(h.ch.clients) > 0
}

func (h *commonHandler) Write(b []byte) (int, error) {
	h.mu.RLock()
	clients := h.clients
	h.mu.RUnlock()

	for _, c := range clients {
		select {
		case c <- b:
		default:
		}
	}

	return len(b), nil
}

// SetLimit sets the maximum number of clients that can connect to the handler.
// If the limit is reached, new clients will receive a 503 Service Unavailable
// response.
//
// The default limit is 10.
func (h *Handler) SetLimit(limit int) {
	h.ch.mu.Lock()
	defer h.ch.mu.Unlock()
	h.ch.limit = limit
}

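// IPAddressFilter matches IPv4 and IPv6 address literals, for use with
// [Handler.SetFilter].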
var IPAddressFilter = regexp.MustCompile(`\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b|\b(?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}\b|\b(?:[0-9a-fA-F]{1,4}:){0,6}[0-9a-fA-F]{1,4}::(?:[0-9a-fA-F]{1,4})?(?::[0-9a-fA-F]{1,4}){0,6}\b`)

// SetFilter sets a regular expression that will be redacted in the logs. The
// new filter only applies to new clients.
//
// The default is nil, which means no filtering.
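//
// For example, SetFilter(IPAddressFilter) redacts IP addresses from the
// streamed logs.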
func (h *Handler) SetFilter(filter *regexp.Regexp) {
	h.ch.mu.Lock()
	defer h.ch.mu.Unlock()
	h.ch.filter = filter
}

// ServeHTTP implements [http.Handler].
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	accept := strings.Split(r.Header.Get("Accept"), ",")
	for _, a := range accept {
		a, _, _ := strings.Cut(a, ";")
		switch a {
		case "text/event-stream":
			h.ch.serveSSE(w, r)
			return
		case "text/html":
			h.ch.serveHTML(w, r)
			return
		}
	}
	http.Error(w, "unsupported Accept", http.StatusNotAcceptable)
}

func (h *commonHandler) serveSSE(w http.ResponseWriter, r *http.Request) {
	rc := http.NewResponseController(w)

	// Register the client before writing the response header, so that a
	// client over the limit still receives a real 503 response.
	ch := make(chan []byte, 10)
	h.mu.Lock()
	if len(h.clients) >= h.limit {
		h.mu.Unlock()
		http.Error(w, "too many clients", http.StatusServiceUnavailable)
		return
	}
	h.clients = append(h.clients, ch)
	filter := h.filter
	h.mu.Unlock()

	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.WriteHeader(http.StatusOK)
	rc.Flush()
	defer func() {
		h.mu.Lock()
		defer h.mu.Unlock()
		h.clients = slices.DeleteFunc(h.clients, func(c chan []byte) bool { return c == ch })
	}()

	// Override the default strict deadline, but force the client to reconnect
	// occasionally (which is handled by the browser).
	rc.SetWriteDeadline(time.Now().Add(30 * time.Minute))

	for {
		select {
		case b := <-ch:
			if filter != nil {
				b = filter.ReplaceAll(b, []byte("***"))
			}
			// Note that TextHandler promises "a single line" "in a single
			// serialized call to io.Writer.Write" for each Record.
			if _, err := fmt.Fprintf(w, "data: %s\n", b); err != nil {
				return
			}
			rc.Flush()
		case <-r.Context().Done():
			return
		}
	}
}

func (h *commonHandler) serveHTML(w http.ResponseWriter, _ *http.Request) {
	w.Header().Set("Content-Type", "text/html")
	// Minimal console page: it opens an EventSource to this same endpoint
	// (browsers send Accept: text/event-stream for it) and appends each
	// streamed record as a line. The markup here is a minimal sketch.
	fmt.Fprintf(w, `<!DOCTYPE html>
		<title>litewitness</title>
		<body>
		<pre id="logs"></pre>
		<script>
			const logs = document.getElementById("logs");
			const events = new EventSource(window.location.href);
			events.onmessage = (e) => { logs.textContent += e.data + "\n"; };
		</script>
		`)
}

type multiHandler []slog.Handler

// MultiHandler returns a Handler that handles each record with all the given
// handlers.
func MultiHandler(handlers ...slog.Handler) slog.Handler {
	return multiHandler(handlers)
}

func (h multiHandler) Enabled(ctx context.Context, l slog.Level) bool {
	for i := range h {
		if h[i].Enabled(ctx, l) {
			return true
		}
	}
	return false
}

func (h multiHandler) Handle(ctx context.Context, r slog.Record) error {
	var errs []error
	for i := range h {
		if h[i].Enabled(ctx, r.Level) {
			if err := h[i].Handle(ctx, r.Clone()); err != nil {
				errs = append(errs, err)
			}
		}
	}
	return errors.Join(errs...)
}

func (h multiHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	handlers := make([]slog.Handler, 0, len(h))
	for i := range h {
		handlers = append(handlers, h[i].WithAttrs(attrs))
	}
	return multiHandler(handlers)
}

func (h multiHandler) WithGroup(name string) slog.Handler {
	handlers := make([]slog.Handler, 0, len(h))
	for i := range h {
		handlers = append(handlers, h[i].WithGroup(name))
	}
	return multiHandler(handlers)
}
litetlog-0.4.3/internal/tlogclient/000077500000000000000000000000001476426515700173425ustar00rootroot00000000000000litetlog-0.4.3/internal/tlogclient/tlogclient.go000066400000000000000000000213541476426515700220420ustar00rootroot00000000000000package tlogclient

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"iter"
	"log/slog"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"

	"golang.org/x/mod/sumdb/tlog"
	"golang.org/x/sync/errgroup"
)

const tileHeight = 8
const tileWidth = 1 << tileHeight

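// Client reads entries from a tiled transparency log, verifying every entry
// against a trusted tree head before yielding it.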
type Client struct {
	tr  tlog.TileReader
	err error
}

func NewClient(tr tlog.TileReader) *Client {
	// edgeMemoryCache keeps track of two edges: the rightmost one that's used
	// to compute the tree hash, and the one that moves through the tree as we
	// progress through entries.
	tr = &edgeMemoryCache{tr: tr, t: make(map[int][2]tileWithData)}
	return &Client{tr: tr}
}

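// Error returns the error that stopped the last iteration, if any.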
func (c *Client) Error() error {
	return c.err
}

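// EntriesSumDB returns an iterator over the entries of a sum.golang.org-style
// log, starting at index start. Every entry is verified against tree before
// being yielded. It may stop short of tree.N to avoid fetching a trailing
// partial tile (see the comment below). If iteration stops early, call Error
// to check for a failure.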
func (c *Client) EntriesSumDB(tree tlog.Tree, start int64) iter.Seq2[int64, []byte] {
	return func(yield func(int64, []byte) bool) {
		if c.err != nil {
			return
		}
		for {
			base := start / tileWidth * tileWidth
			// In regular operations, don't actually fetch the trailing partial
			// tile, to avoid duplicating that traffic in steady state. The
			// assumption is that a future call to Entries will pass a bigger
			// tree where that tile is full. However, if the tree grows too
			// slowly, we'll get another call where start is at the beginning of
			// the partial tile; in that case, fetch it.
			top := tree.N / tileWidth * tileWidth
			if top-base == 0 {
				top = tree.N
			}
			tiles := make([]tlog.Tile, 0, 50)
			for i := 0; i < 50; i++ {
				tileStart := base + int64(i)*tileWidth
				if tileStart >= top {
					break
				}
				tileEnd := tileStart + tileWidth
				if tileEnd > top {
					tileEnd = top
				}
				tiles = append(tiles, tlog.Tile{H: tileHeight, L: -1,
					N: tileStart / tileWidth, W: int(tileEnd - tileStart)})
			}
			if len(tiles) == 0 {
				return
			}
			tdata, err := c.tr.ReadTiles(tiles)
			if err != nil {
				c.err = err
				return
			}

			// TODO: hash data tile directly against level 8 hash.
			indexes := make([]int64, 0, tileWidth*len(tiles))
			for _, t := range tiles {
				for i := range t.W {
					indexes = append(indexes, tlog.StoredHashIndex(0, t.N*tileWidth+int64(i)))
				}
			}
			hashes, err := tlog.TileHashReader(tree, c.tr).ReadHashes(indexes)
			if err != nil {
				c.err = err
				return
			}

			for ti, t := range tiles {
				tileStart := t.N * tileWidth
				tileEnd := tileStart + int64(t.W)
				data := tdata[ti]
				for i := tileStart; i < tileEnd; i++ {
					if len(data) == 0 {
						c.err = fmt.Errorf("unexpected end of tile data")
						return
					}

					var entry []byte
					if idx := bytes.Index(data, []byte("\n\n")); idx >= 0 {
						// Add back one of the newlines.
						entry, data = data[:idx+1], data[idx+2:]
					} else {
						entry, data = data, nil
					}

					if tlog.RecordHash(entry) != hashes[i-base] {
						c.err = fmt.Errorf("hash mismatch for entry %d", i)
						return
					}

					if i < start {
						continue
					}
					if !yield(i, entry) {
						return
					}
				}
				if len(data) != 0 {
					c.err = fmt.Errorf("unexpected leftover data in tile")
					return
				}
				start = tileEnd
			}

			c.tr.SaveTiles(tiles, tdata)

			if start == top {
				return
			}
		}
	}
}

type tileWithData struct {
	tlog.Tile
	data []byte
}

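// edgeMemoryCache is a [tlog.TileReader] that keeps, per level, the two most
// recently saved tiles in memory: the rightmost edge used to verify the tree
// hash, and the edge that advances as entries are read.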
type edgeMemoryCache struct {
	tr tlog.TileReader
	t  map[int][2]tileWithData
}

func (c *edgeMemoryCache) Height() int {
	return c.tr.Height()
}

func (c *edgeMemoryCache) ReadTiles(tiles []tlog.Tile) (data [][]byte, err error) {
	data = make([][]byte, len(tiles))
	missing := make([]tlog.Tile, 0, len(tiles))
	for i, t := range tiles {
		if td := c.t[t.L]; td[0].Tile == t {
			data[i] = td[0].data
		} else if td[1].Tile == t {
			data[i] = td[1].data
		} else {
			missing = append(missing, t)
		}
	}
	if len(missing) == 0 {
		return data, nil
	}
	missingData, err := c.tr.ReadTiles(missing)
	if err != nil {
		return nil, err
	}
	for i := range data {
		if data[i] == nil {
			data[i] = missingData[0]
			missingData = missingData[1:]
		}
	}
	return data, nil
}

func (c *edgeMemoryCache) SaveTiles(tiles []tlog.Tile, data [][]byte) {
	ts, ds := make([]tlog.Tile, 0, len(tiles)), make([][]byte, 0, len(tiles))
	for i, t := range tiles {
		// If it's already in the memory cache, it was already saved by the
		// lower layer, as well.
		if td := c.t[t.L]; td[0].Tile == t || td[1].Tile == t {
			continue
		}
		ts = append(ts, t)
		ds = append(ds, data[i])
	}
	c.tr.SaveTiles(ts, ds)

	for i, t := range tiles {
		td, ok := c.t[t.L]
		switch {
		case !ok:
			c.t[t.L] = [2]tileWithData{{Tile: t, data: data[i]}}
		case td[0].Tile == t || td[1].Tile == t:
			// Already saved.
		case tileLess(td[0].Tile, t) && tileLess(td[0].Tile, td[1].Tile):
			c.t[t.L] = [2]tileWithData{{Tile: t, data: data[i]}, td[1]}
		case tileLess(td[1].Tile, t) && tileLess(td[1].Tile, td[0].Tile):
			c.t[t.L] = [2]tileWithData{td[0], {Tile: t, data: data[i]}}
		}
	}
}

func tileLess(a, b tlog.Tile) bool {
	// A zero tile is always less than any other tile.
	if a == (tlog.Tile{}) {
		return true
	}
	if b == (tlog.Tile{}) {
		return false
	}
	if a.L != b.L {
		panic("different levels")
	}
	return a.N < b.N || (a.N == b.N && a.W < b.W)
}

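// TileFetcher is a [tlog.TileReader] that fetches tiles over HTTP from a base
// URL.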
type TileFetcher struct {
	base  string
	hc    *http.Client
	log   *slog.Logger
	limit int
}

func NewSumDBFetcher(base string) *TileFetcher {
	if !strings.HasSuffix(base, "/") {
		base += "/"
	}
	transport := http.DefaultTransport.(*http.Transport).Clone()
	transport.MaxIdleConnsPerHost = transport.MaxIdleConns
	return &TileFetcher{base: base, hc: &http.Client{
		Transport: transport,
		Timeout:   10 * time.Second,
	}, log: slog.New(slogDiscardHandler{})}
}

func (f *TileFetcher) SetLogger(log *slog.Logger) {
	f.log = log
}

func (f *TileFetcher) SetHTTPClient(hc *http.Client) {
	f.hc = hc
}

func (f *TileFetcher) SetLimit(limit int) {
	f.limit = limit
}

func (f *TileFetcher) Height() int {
	return tileHeight
}

func (f *TileFetcher) ReadTiles(tiles []tlog.Tile) (data [][]byte, err error) {
	data = make([][]byte, len(tiles))
	errGroup, ctx := errgroup.WithContext(context.Background())
	if f.limit > 0 {
		errGroup.SetLimit(f.limit)
	}
	for i, t := range tiles {
		errGroup.Go(func() error {
			resp, err := f.hc.Get(f.base + t.Path())
			if err != nil {
				return fmt.Errorf("%s: %w", t.Path(), err)
			}
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				return fmt.Errorf("%s: unexpected status code %d", t.Path(), resp.StatusCode)
			}
			data[i], err = io.ReadAll(resp.Body)
			if err != nil {
				return fmt.Errorf("%s: %w", t.Path(), err)
			}
			f.log.InfoContext(ctx, "fetched tile", "path", t.Path(), "size", len(data[i]))
			return nil
		})
	}
	return data, errGroup.Wait()
}

func (f *TileFetcher) SaveTiles(tiles []tlog.Tile, data [][]byte) {}

type slogDiscardHandler struct{}

func (slogDiscardHandler) Enabled(context.Context, slog.Level) bool  { return false }
func (slogDiscardHandler) Handle(context.Context, slog.Record) error { return nil }
func (slogDiscardHandler) WithAttrs(attrs []slog.Attr) slog.Handler  { return slogDiscardHandler{} }
func (slogDiscardHandler) WithGroup(name string) slog.Handler        { return slogDiscardHandler{} }

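// PermanentCache is a [tlog.TileReader] that stores full tiles in a directory
// on disk and falls back to the wrapped reader for misses.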
type PermanentCache struct {
	tr  tlog.TileReader
	dir string
	log *slog.Logger
}

func NewPermanentCache(tr tlog.TileReader, dir string) *PermanentCache {
	return &PermanentCache{tr: tr, dir: dir, log: slog.New(slogDiscardHandler{})}
}

func (c *PermanentCache) SetLogger(log *slog.Logger) {
	c.log = log
}

func (c *PermanentCache) Height() int {
	return c.tr.Height()
}

func (c *PermanentCache) ReadTiles(tiles []tlog.Tile) (data [][]byte, err error) {
	data = make([][]byte, len(tiles))
	missing := make([]tlog.Tile, 0, len(tiles))
	for i, t := range tiles {
		path := filepath.Join(c.dir, t.Path())
		if d, err := os.ReadFile(path); errors.Is(err, os.ErrNotExist) {
			missing = append(missing, t)
		} else if err != nil {
			return nil, err
		} else {
			c.log.Info("loaded tile from cache", "path", t.Path(), "size", len(d))
			data[i] = d
		}
	}
	if len(missing) == 0 {
		return data, nil
	}
	missingData, err := c.tr.ReadTiles(missing)
	if err != nil {
		return nil, err
	}
	for i := range data {
		if data[i] == nil {
			data[i] = missingData[0]
			missingData = missingData[1:]
		}
	}
	return data, nil
}

func (c *PermanentCache) SaveTiles(tiles []tlog.Tile, data [][]byte) {
	for i, t := range tiles {
		if t.W != tileWidth {
			continue // skip partial tiles
		}
		path := filepath.Join(c.dir, t.Path())
		if _, err := os.Stat(path); err == nil {
			continue
		}
		if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
			c.log.Error("failed to create directory", "path", path, "error", err)
			return
		}
		if err := os.WriteFile(path, data[i], 0600); err != nil {
			c.log.Error("failed to write file", "path", path, "error", err)
		} else {
			c.log.Info("saved tile to cache", "path", t.Path(), "size", len(data[i]))
		}
	}
	c.tr.SaveTiles(tiles, data)
}
litetlog-0.4.3/internal/tlogclient/tlogclient_test.go000066400000000000000000000055071476426515700231030ustar00rootroot00000000000000package tlogclient_test

import (
	"fmt"
	"log/slog"
	"path/filepath"
	"testing"

	"filippo.io/litetlog/internal/tlogclient"
	"golang.org/x/mod/sumdb/tlog"
)

func TestSumDB(t *testing.T) {
	latest := []byte(`go.sum database tree
31048497
InZSsRXdXKTMF3W5wEcd9T6ro5zyOiRMGQsEPSTco6U=
`)
	tree, err := tlog.ParseTree(latest)
	if err != nil {
		t.Fatal(err)
	}

	handler, _ := testLogHandler(t)

	tests := []struct {
		start  int64
		expect int
	}{
		{0, 1000},
		{100000, 1000},
		{31048497 - 1000, 1000 - 31048497%256},    // Stop before the partial.
		{31048497 - 31048497%256, 31048497 % 256}, // Consume the partial.
		{31048497, 0},
	}

	for _, tt := range tests {
		t.Run(fmt.Sprintf("Start%d", tt.start), func(t *testing.T) {
			t.Run("NoCache", func(t *testing.T) {
				fetcher := tlogclient.NewSumDBFetcher("https://sum.golang.org/")
				fetcher.SetLogger(slog.New(handler))
				client := tlogclient.NewClient(fetcher)

				count := 0
				for range client.EntriesSumDB(tree, tt.start) {
					count++
					if count >= 1000 {
						break
					}
				}
				if err := client.Error(); err != nil {
					t.Fatal(err)
				}
				if count != tt.expect {
					t.Errorf("got %d entries, want %d", count, tt.expect)
				}
			})

			t.Run("DirCache", func(t *testing.T) {
				fetcher := tlogclient.NewSumDBFetcher("https://sum.golang.org/")
				fetcher.SetLogger(slog.New(handler))
				dirCache := tlogclient.NewPermanentCache(fetcher, t.TempDir())
				dirCache.SetLogger(slog.New(handler))
				client := tlogclient.NewClient(dirCache)

				count := 0
				for range client.EntriesSumDB(tree, tt.start) {
					count++
					if count >= 1000 {
						break
					}
				}
				if err := client.Error(); err != nil {
					t.Fatal(err)
				}
				if count != tt.expect {
					t.Errorf("got %d entries, want %d", count, tt.expect)
				}

				// Again, from cache.
				client = tlogclient.NewClient(dirCache)
				count = 0
				for range client.EntriesSumDB(tree, tt.start) {
					count++
					if count >= 1000 {
						break
					}
				}
				if err := client.Error(); err != nil {
					t.Fatal(err)
				}
				if count != tt.expect {
					t.Errorf("got %d entries, want %d", count, tt.expect)
				}
			})
		})
	}
}

func testLogHandler(t testing.TB) (slog.Handler, *slog.LevelVar) {
	level := &slog.LevelVar{}
	level.Set(slog.LevelDebug)
	h := slog.NewTextHandler(writerFunc(func(p []byte) (n int, err error) {
		t.Logf("%s", p)
		return len(p), nil
	}), &slog.HandlerOptions{
		AddSource: true,
		Level:     level,
		ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
			if a.Key == slog.SourceKey {
				src := a.Value.Any().(*slog.Source)
				a.Value = slog.StringValue(fmt.Sprintf("%s:%d", filepath.Base(src.File), src.Line))
			}
			return a
		},
	})
	return h, level
}

type writerFunc func(p []byte) (n int, err error)

func (f writerFunc) Write(p []byte) (n int, err error) {
	return f(p)
}
litetlog-0.4.3/internal/tlogx/000077500000000000000000000000001476426515700163335ustar00rootroot00000000000000litetlog-0.4.3/internal/tlogx/checkpoint.go000066400000000000000000000036521476426515700210170ustar00rootroot00000000000000// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found at
// https://go.googlesource.com/go/+/refs/heads/master/LICENSE.

package tlogx

import (
	"encoding/base64"
	"errors"
	"fmt"
	"strconv"
	"strings"

	"golang.org/x/mod/sumdb/tlog"
)

const maxCheckpointSize = 1e6

// A Checkpoint is a tree head to be formatted according to c2sp.org/checkpoint.
//
// A checkpoint looks like this:
//
//	example.com/origin
//	923748
//	nND/nri/U0xuHUrYSy0HtMeal2vzD9V4k/BO79C+QeI=
//
// It can be followed by extra extension lines.
type Checkpoint struct {
	Origin string
	tlog.Tree

	// Extension is empty or a sequence of non-empty lines,
	// each terminated by a newline character.
	Extension string
}

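// ParseCheckpoint parses the origin, tree size, tree hash, and any extension
// lines of a checkpoint, returning an error if the text is malformed.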
func ParseCheckpoint(text string) (Checkpoint, error) {
	// This is an extended version of tlog.ParseTree.

	if strings.Count(text, "\n") < 3 || len(text) > maxCheckpointSize {
		return Checkpoint{}, errors.New("malformed checkpoint")
	}
	if !strings.HasSuffix(text, "\n") {
		return Checkpoint{}, errors.New("malformed checkpoint")
	}

	lines := strings.SplitN(text, "\n", 4)

	n, err := strconv.ParseInt(lines[1], 10, 64)
	if err != nil || n < 0 || lines[1] != strconv.FormatInt(n, 10) {
		return Checkpoint{}, errors.New("malformed checkpoint")
	}

	h, err := base64.StdEncoding.DecodeString(lines[2])
	if err != nil || len(h) != tlog.HashSize {
		return Checkpoint{}, errors.New("malformed checkpoint")
	}

	rest := lines[3]
	for rest != "" {
		before, after, found := strings.Cut(rest, "\n")
		if before == "" || !found {
			return Checkpoint{}, errors.New("malformed checkpoint")
		}
		rest = after
	}

	var hash tlog.Hash
	copy(hash[:], h)
	return Checkpoint{lines[0], tlog.Tree{N: n, Hash: hash}, lines[3]}, nil
}

func FormatCheckpoint(c Checkpoint) string {
	return fmt.Sprintf("%s\n%d\n%s\n%s",
		c.Origin, c.N, base64.StdEncoding.EncodeToString(c.Hash[:]), c.Extension)
}
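// Illustrative usage sketch (the origin and hash are the example values from
// the Checkpoint doc comment above; error handling elided):
//
//	text := "example.com/origin\n923748\nnND/nri/U0xuHUrYSy0HtMeal2vzD9V4k/BO79C+QeI=\n"
//	c, err := ParseCheckpoint(text)
//	if err != nil {
//		// malformed checkpoint
//	}
//	_ = c.Origin            // "example.com/origin"
//	_ = c.N                 // 923748
//	_ = FormatCheckpoint(c) // reproduces text, including any extension lines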
litetlog-0.4.3/internal/tlogx/cosignature.go000066400000000000000000000072441476426515700212140ustar00rootroot00000000000000// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found at
// https://go.googlesource.com/go/+/refs/heads/master/LICENSE.

package tlogx

import (
	"crypto"
	"crypto/ed25519"
	"crypto/sha256"
	"encoding/base64"
	"encoding/binary"
	"errors"
	"fmt"
	"strings"
	"time"
	"unicode"
	"unicode/utf8"

	"golang.org/x/mod/sumdb/note"
)

const algCosignatureV1 = 4

// NewCosignatureV1Signer constructs a new CosignatureV1Signer that produces
// timestamped cosignature/v1 signatures from an Ed25519 private key.
func NewCosignatureV1Signer(name string, key crypto.Signer) (*CosignatureV1Signer, error) {
	if !isValidName(name) {
		return nil, errors.New("invalid name")
	}
	k, ok := key.Public().(ed25519.PublicKey)
	if !ok {
		return nil, errors.New("key type is not Ed25519")
	}

	s := &CosignatureV1Signer{}
	s.name = name
	s.hash = keyHash(name, append([]byte{algCosignatureV1}, k...))
	s.key = k
	s.sign = func(msg []byte) ([]byte, error) {
		t := uint64(time.Now().Unix())
		m, err := formatCosignatureV1(t, msg)
		if err != nil {
			return nil, err
		}
		s, err := key.Sign(nil, m, crypto.Hash(0))
		if err != nil {
			return nil, err
		}

		// The signature itself is encoded as timestamp || signature.
		sig := make([]byte, 0, 8+ed25519.SignatureSize)
		sig = binary.BigEndian.AppendUint64(sig, t)
		sig = append(sig, s...)
		return sig, nil
	}
	s.verify = func(msg, sig []byte) bool {
		if len(sig) != 8+ed25519.SignatureSize {
			return false
		}
		t := binary.BigEndian.Uint64(sig)
		sig = sig[8:]
		m, err := formatCosignatureV1(t, msg)
		if err != nil {
			return false
		}
		return ed25519.Verify(k, m, sig)
	}

	return s, nil
}

func formatCosignatureV1(t uint64, msg []byte) ([]byte, error) {
	// The signed message is in the following format
	//
	//      cosignature/v1
	//      time TTTTTTTTTT
	//      origin line
	//      NNNNNNNNN
	//      tree hash
	//
	// where TTTTTTTTTT is the current UNIX timestamp, and the following
	// three lines are the first three lines of the note. All other
	// lines are not processed by the witness, so are not signed.

	c, err := ParseCheckpoint(string(msg))
	if err != nil {
		return nil, fmt.Errorf("message being signed is not a valid checkpoint: %w", err)
	}
	return []byte(fmt.Sprintf(
		"cosignature/v1\ntime %d\n%s\n%d\n%s\n",
		t, c.Origin, c.N, base64.StdEncoding.EncodeToString(c.Hash[:]))), nil
}
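// Illustrative sketch of the resulting message (timestamp, origin, size, and
// hash are made-up example values): for a checkpoint
//
//	example.com/origin
//	923748
//	nND/nri/U0xuHUrYSy0HtMeal2vzD9V4k/BO79C+QeI=
//
// signed at UNIX time 1700000000, formatCosignatureV1 returns
//
//	cosignature/v1
//	time 1700000000
//	example.com/origin
//	923748
//	nND/nri/U0xuHUrYSy0HtMeal2vzD9V4k/BO79C+QeI=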

type CosignatureV1Signer struct {
	verifier
	sign func([]byte) ([]byte, error)
}

type verifier struct {
	name   string
	hash   uint32
	verify func(msg, sig []byte) bool
	key    ed25519.PublicKey
}

var _ note.Signer = &CosignatureV1Signer{}

func (v *verifier) Name() string                               { return v.name }
func (v *verifier) KeyHash() uint32                            { return v.hash }
func (v *verifier) Verify(msg, sig []byte) bool                { return v.verify(msg, sig) }
func (s *CosignatureV1Signer) Sign(msg []byte) ([]byte, error) { return s.sign(msg) }
func (s *CosignatureV1Signer) Verifier() note.Verifier         { return &s.verifier }

func (v *verifier) VerifierKey() string {
	return fmt.Sprintf("%s+%08x+%s", v.name, v.hash, base64.StdEncoding.EncodeToString(
		append([]byte{algCosignatureV1}, v.key...)))
}

// isValidName reports whether name is valid.
// It must be non-empty and not have any Unicode spaces or pluses.
func isValidName(name string) bool {
	return name != "" && utf8.ValidString(name) && strings.IndexFunc(name, unicode.IsSpace) < 0 && !strings.Contains(name, "+")
}

// keyHash computes the note key hash for a name and an encoded key: the first
// four bytes, big-endian, of SHA-256(name || "\n" || key).
func keyHash(name string, key []byte) uint32 {
	h := sha256.New()
	h.Write([]byte(name))
	h.Write([]byte("\n"))
	h.Write(key)
	sum := h.Sum(nil)
	return binary.BigEndian.Uint32(sum)
}
litetlog-0.4.3/internal/tlogx/cosignature_test.go000066400000000000000000000011411476426515700222410ustar00rootroot00000000000000package tlogx_test

import (
	"crypto/ed25519"
	"crypto/rand"
	"testing"

	"filippo.io/litetlog/internal/tlogx"
	"golang.org/x/mod/sumdb/note"
)

func TestSignerRoundtrip(t *testing.T) {
	_, k, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		t.Fatal(err)
	}

	s, err := tlogx.NewCosignatureV1Signer("example.com", k)
	if err != nil {
		t.Fatal(err)
	}

	msg := "test\n123\nf+7CoKgXKE/tNys9TTXcr/ad6U/K3xvznmzew9y6SP0=\n"
	n, err := note.Sign(&note.Note{Text: msg}, s)
	if err != nil {
		t.Fatal(err)
	}

	if _, err := note.Open(n, note.VerifierList(s.Verifier())); err != nil {
		t.Fatal(err)
	}
}
litetlog-0.4.3/internal/tlogx/note.go000066400000000000000000000026751476426515700176410ustar00rootroot00000000000000package tlogx

import (
	"crypto/ed25519"
	"encoding/base64"
	"errors"
	"strconv"
	"strings"

	"golang.org/x/mod/sumdb/note"
)

const algEd25519 = 1

// NewVerifierFromSigner returns the note.Verifier for the Ed25519 public key
// derived from a signer (private) key of the form
// "PRIVATE+KEY+<name>+<hash>+<keydata>", checking that the embedded hash
// matches the derived public key.
func NewVerifierFromSigner(skey string) (note.Verifier, error) {
	priv1, skey := chop(skey, "+")
	priv2, skey := chop(skey, "+")
	name, skey := chop(skey, "+")
	hash16, key64 := chop(skey, "+")
	hash, err1 := strconv.ParseUint(hash16, 16, 32)
	key, err2 := base64.StdEncoding.DecodeString(key64)
	if priv1 != "PRIVATE" || priv2 != "KEY" || len(hash16) != 8 || err1 != nil || err2 != nil || !isValidName(name) || len(key) == 0 {
		return nil, errors.New("malformed verifier id")
	}

	alg, key := key[0], key[1:]
	if alg != algEd25519 {
		return nil, errors.New("unknown verifier algorithm")
	}
	if len(key) != 32 {
		return nil, errors.New("malformed verifier id")
	}
	pub := ed25519.NewKeyFromSeed(key).Public().(ed25519.PublicKey)
	if uint32(hash) != keyHash(name, append([]byte{algEd25519}, pub...)) {
		return nil, errors.New("invalid verifier hash")
	}

	return &verifier{
		name: name,
		hash: uint32(hash),
		verify: func(msg, sig []byte) bool {
			return ed25519.Verify(pub, msg, sig)
		},
	}, nil
}

// chop chops s at the first instance of sep, if any,
// and returns the text before and after sep.
// If sep is not present, chop returns before = s and after = "".
func chop(s, sep string) (before, after string) {
	i := strings.Index(s, sep)
	if i < 0 {
		return s, ""
	}
	return s[:i], s[i+len(sep):]
}
litetlog-0.4.3/internal/tlogx/tlogx.go000066400000000000000000000017001476426515700200150ustar00rootroot00000000000000// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found at
// https://go.googlesource.com/go/+/refs/heads/master/LICENSE.

package tlogx

import "golang.org/x/mod/sumdb/tlog"

// RightEdge returns the stored hash indexes of the right edge of a tree of
// size n. These are the same hashes that are combined into a [tlog.TreeHash]
// and allow producing record and tree proofs for any size bigger than n. See
// [tlog.StoredHashIndex] for the definition of stored hash indexes.
func RightEdge(n int64) []int64 {
	var lo int64
	var idx []int64
	for lo < n {
		k, level := maxpow2(n - lo + 1)
		idx = append(idx, tlog.StoredHashIndex(level, lo>>level))
		lo += k
	}
	return idx
}
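// For illustration (size chosen arbitrarily): a tree of size 13 = 8 + 4 + 1
// decomposes into perfect subtrees of 8, 4, and 1 leaves, so RightEdge(13)
// returns one stored hash index per subtree:
//
//	[]int64{tlog.StoredHashIndex(3, 0), tlog.StoredHashIndex(2, 2), tlog.StoredHashIndex(0, 12)}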

// maxpow2 returns k, the maximum power of 2 smaller than n,
// as well as l = log₂ k (so k = 1<<l).
func maxpow2(n int64) (k int64, l int) {
	l = 0
	for 1<<uint(l+1) < n {
		l++
	}
	return 1 << uint(l), l
}
litetlog-0.4.3/internal/witness/witness.go
	if oldSize > newSize {
		return errBadRequest
	}
	knownSize, oldHash, err := w.getLog(origin)
	if err != nil {
		return err
	}
	if knownSize != oldSize {
		return &conflictError{knownSize}
	}
	if oldSize == 0 {
		// This is the first tree head for this log.
		return nil
	}
	if err := tlog.CheckTree(proof, newSize, newHash, oldSize, oldHash); err != nil {
		return errProof
	}
	return nil
}

func (w *Witness) persistTreeHead(origin string, oldSize, newSize int64, newHash tlog.Hash) error {
	// Check oldSize against the database to prevent rolling back on a race.
	// Alternatively, we could use a database transaction which would be cleaner
	// but would encode a critical security semantic in the implicit use of the
	// correct Conn across functions, which is uncomfortable.
	changes, err := w.dbExecWithChanges(`
			UPDATE log SET tree_size = ?, tree_hash = ?
			WHERE origin = ? AND tree_size = ?`,
		nil, newSize, newHash, origin, oldSize)
	if err == nil && changes != 1 {
		knownSize, _, err := w.getLog(origin)
		if err != nil {
			return err
		}
		return &conflictError{knownSize}
	}
	return err
}
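// To illustrate the race the WHERE clause above guards against (sizes are made
// up): if request A checks consistency against size 1, then request B also
// checks against size 1 and commits size 5 first, A's UPDATE with
// "tree_size = 1" matches no rows, so A returns a conflict instead of rolling
// the log back.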

func (w *Witness) getLog(origin string) (treeSize int64, treeHash tlog.Hash, err error) {
	found := false
	err = w.dbExec("SELECT tree_size, tree_hash FROM log WHERE origin = ?",
		func(stmt *sqlite.Stmt) error {
			found = true
			treeSize = stmt.GetInt64("tree_size")
			treeHash, err = tlog.ParseHash(stmt.GetText("tree_hash"))
			return nil
		}, origin)
	if err == nil && !found {
		err = errUnknownLog
	}
	return
}

func (w *Witness) getKeys(origin string) (note.Verifiers, error) {
	var keys []string
	err := w.dbExec("SELECT key FROM key WHERE origin = ?",
		func(stmt *sqlite.Stmt) error {
			keys = append(keys, stmt.GetText("key"))
			return nil
		}, origin)
	if err == nil && keys == nil {
		err = errUnknownLog
	}
	if err != nil {
		return nil, err
	}
	var verifiers []note.Verifier
	for _, k := range keys {
		v, err := note.NewVerifier(k)
		if err != nil {
			w.log.Warn("invalid key in database", "key", k, "error", err)
			return nil, fmt.Errorf("invalid key %q: %v", k, err)
		}
		verifiers = append(verifiers, v)
	}
	return note.VerifierList(verifiers...), nil
}

// dbExec runs query against the witness database, invoking resultFn once per
// result row. Access to the SQLite connection is serialized with dmMu.
func (w *Witness) dbExec(query string, resultFn func(stmt *sqlite.Stmt) error, args ...interface{}) error {
	w.dmMu.Lock()
	defer w.dmMu.Unlock()
	err := sqlitex.Exec(w.db, query, resultFn, args...)
	if err != nil {
		w.log.Error("database error", "error", err)
	}
	return err
}

// dbExecWithChanges is like dbExec but also reports the number of rows
// modified by the statement, as counted by the connection's Changes method.
func (w *Witness) dbExecWithChanges(query string, resultFn func(stmt *sqlite.Stmt) error, args ...interface{}) (int, error) {
	w.dmMu.Lock()
	defer w.dmMu.Unlock()
	err := sqlitex.Exec(w.db, query, resultFn, args...)
	if err != nil {
		w.log.Error("database error", "error", err)
		return 0, err
	}
	return w.db.Changes(), nil
}
litetlog-0.4.3/internal/witness/witness_test.go000066400000000000000000000112471476426515700217610ustar00rootroot00000000000000package witness

import (
	"crypto/ed25519"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"log/slog"
	"path/filepath"
	"sync"
	"testing"

	"crawshaw.io/sqlite/sqlitex"
	"golang.org/x/mod/sumdb/note"
	"golang.org/x/mod/sumdb/tlog"
	"sigsum.org/sigsum-go/pkg/merkle"
)

func TestRace(t *testing.T) {
	// gentest seed b4e385f4358f7373cfa9184b176f3cccf808e795baf04092ddfde9461014f0c4
	ss := ed25519.PrivateKey(mustDecodeHex(t,
		"31ffc2116ecbe003acaa800ab70757bd7d53206e3febef6a6d0796d95530b34f"+
			"64848ad8abed6e85981b3b3875b252b8767ebb4b02f703aca3b1e71bbd6a8e50"))
	w, err := NewWitness(":memory:", "example.com", ss, slog.New(testLogHandler(t)))
	fatalIfErr(t, err)
	t.Cleanup(func() { w.Close() })
	pk := mustDecodeHex(t, "ffdc2d4d98e4124d3feaf788c0c2f9abfd796083d1f0495437f302ec79cf100f")
	origin := "sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562"

	treeHash := merkle.HashEmptyTree()
	fatalIfErr(t, sqlitex.Exec(w.db, "INSERT INTO log (origin, tree_size, tree_hash) VALUES (?, 0, ?)",
		nil, origin, base64.StdEncoding.EncodeToString(treeHash[:])))
	k, err := note.NewEd25519VerifierKey(origin, pk[:])
	fatalIfErr(t, err)
	fatalIfErr(t, sqlitex.Exec(w.db, "INSERT INTO key (origin, key) VALUES (?, ?)", nil, origin, k))

	_, err = w.processAddCheckpointRequest([]byte(`old 0

sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562
1
KgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=

— sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562 UgIom7fPZTqpxWWhyjWduBvTvGVqsokMbqTArsQilegKoFBJQjUFAmQ0+YeSPM3wfUQMFSzVnnNuWRTYrajXpNUbIQY=
`))
	fatalIfErr(t, err)

	// Stall the first request updating to the shorter size between getting
	// consistency checked and being committed to the database.
	var firstHalf, secondHalf, final sync.Mutex
	firstHalf.Lock()
	secondHalf.Lock()
	final.Lock()
	w.testingOnlyStallRequest = func() {
		firstHalf.Unlock()
		secondHalf.Lock()
	}
	go func() {
		cosig, err := w.processAddCheckpointRequest([]byte(`old 1
KgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
KgIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=

sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562
3
RcCI1Nk56ZcSmIEfIn0SleqtV7uvrlXNccFx595Iwl0=

— sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562 UgIom2VbtIcdFbwFAy1n7s6IkAxIY6J/GQOTuZF2ORV39d75cbAj2aQYwyJre36kezNobZs4SUUdrcawfAB8WVrx6go=
`))
		if _, ok := err.(*conflictError); !ok {
			t.Errorf("expected conflict, got %v", err)
		}
		if cosig != nil {
			t.Error("returned a cosignature on conflict")
		}
		final.Unlock()
	}()

	// Wait for testingOnlyStallRequest to fire.
	firstHalf.Lock()

	w.testingOnlyStallRequest = nil
	_, err = w.processAddCheckpointRequest([]byte(`old 1
KgEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
+fUDV+k970B4I3uKrqJM4aP1lloPZP8mvr2Z4wRw2LI=
KgQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=

sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562
5
QrtXrQZCCvpIgsSmOsah7HdICzMLLyDfxToMql9WTjY=

— sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562 UgIomw/EOJmWi0i1FQsOj+etB7F8IccFam/jgd6wzRns4QPVmyEZtdvl1U2KEmLOZ/ASRcWJi0tW90dJWAShei7sDww=
`))
	if err != nil {
		t.Errorf("racing request failed: %v", err)
	}

	// Unblock testingOnlyStallRequest and wait for that request to finish.
	secondHalf.Unlock()
	final.Lock()

	size, hash, err := w.getLog("sigsum.org/v1/tree/4d6d8825a6bb689d459628312889dfbb0bcd41b5211d9e1ce768b0ff0309e562")
	if err != nil {
		t.Fatal(err)
	}
	if size != 5 {
		t.Error("log got rollbacked")
	}
	if hash != mustDecodeHash(t, "42bb57ad06420afa4882c4a63ac6a1ec77480b330b2f20dfc53a0caa5f564e36") {
		t.Error("unexpected tree hash")
	}
}

func testLogHandler(t testing.TB) slog.Handler {
	h := slog.NewTextHandler(writerFunc(func(p []byte) (n int, err error) {
		t.Logf("%s", p)
		return len(p), nil
	}), &slog.HandlerOptions{
		AddSource: true,
		Level:     slog.LevelDebug,
		ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
			if a.Key == slog.SourceKey {
				src := a.Value.Any().(*slog.Source)
				a.Value = slog.StringValue(fmt.Sprintf("%s:%d", filepath.Base(src.File), src.Line))
			}
			return a
		},
	})
	return h
}

type writerFunc func(p []byte) (n int, err error)

func (f writerFunc) Write(p []byte) (n int, err error) {
	return f(p)
}

func mustDecodeHex(t *testing.T, s string) []byte {
	t.Helper()
	b, err := hex.DecodeString(s)
	if err != nil {
		t.Fatal(err)
	}
	return b
}

func mustDecodeHash(t *testing.T, s string) tlog.Hash {
	t.Helper()
	b, err := hex.DecodeString(s)
	if err != nil {
		t.Fatal(err)
	}
	return *(*tlog.Hash)(b)
}

func fatalIfErr(t *testing.T, err error) {
	t.Helper()
	if err != nil {
		t.Fatal(err)
	}
}