Skip to content

Instantly share code, notes, and snippets.

@burke
Last active May 1, 2023 14:14
Show Gist options
  • Star 3 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
  • Save burke/694d504be69998dbe4477f80ffa90951 to your computer and use it in GitHub Desktop.
Save burke/694d504be69998dbe4477f80ffa90951 to your computer and use it in GitHub Desktop.
Code Release for NixCon 2019

The rest of these files are the code referenced in my NixCon 2019 talk which live in Shopify's non-public codebases. Everything in this gist is extracted with minimal or no change from our codebase, and thus some of it references libraries or other code not included here. cli-ui and cli-kit probably comprise a majority of these cases.

All code Copyright Shopify, 2019, released here under MIT License.

#!/bin/sh
# Nix post-build hook (see upload_to_cache.rb): for each output path of the
# just-finished build, drop an empty marker file named after the store path's
# basename into the spool directory. A background daemon later signs and
# uploads the marked paths to the private binary cache.
set -eu
set -f # disable globbing
export IFS=' '
mkdir -p /opt/dev/var/spool/nix-copy
# OUT_PATHS is a single space-separated list of store paths. The previous
# `echo "${OUT_PATHS}" | while read -r path` put the ENTIRE line into `path`
# (read with one variable does not split), so builds with more than one output
# spooled a single mangled entry. Iterating the unquoted expansion splits on
# IFS=' '; globbing is already disabled above, so this is safe.
for path in ${OUT_PATHS}; do
  echo "Spooling $(basename "${path}")"
  touch "/opt/dev/var/spool/nix-copy/$(basename "${path}")"
done
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<!--
  launchd job definition for the dev.up.minio service: keeps a
  `minio gateway gcs` process (run through `shadowenv exec` so it picks up
  the project's shadowenv) alive, exposing a GCS bucket over the S3 protocol.
  The @UPPERCASE@ tokens (@LOG_FILE@, @MINIO_ACCESS_KEY@, @MINIO_SECRET_KEY@,
  @ADDRESS@, @GCP_PROJECT@, @WORKDIR@) are placeholders substituted when the
  plist is installed.
-->
<plist version="1.0">
<dict>
<key>Label</key>
<string>dev.up.minio</string>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<true/>
<key>StandardOutPath</key>
<string>@LOG_FILE@</string>
<key>StandardErrorPath</key>
<string>@LOG_FILE@</string>
<key>EnvironmentVariables</key>
<dict>
<key>MINIO_ACCESS_KEY</key>
<string>@MINIO_ACCESS_KEY@</string>
<key>MINIO_SECRET_KEY</key>
<string>@MINIO_SECRET_KEY@</string>
</dict>
<key>ProgramArguments</key>
<array>
<string>/usr/local/bin/shadowenv</string>
<string>exec</string>
<string>--</string>
<string>minio</string>
<string>gateway</string>
<string>gcs</string>
<string>--address</string>
<string>@ADDRESS@</string>
<string>@GCP_PROJECT@</string>
</array>
<key>WorkingDirectory</key>
<string>@WORKDIR@</string>
</dict>
</plist>
-- gcloud:inject_auth_or_error
-- no arguments
-- no returns
-- sets the Authorization header according to the return of
-- :get_access_token.
-- gcloud:get_access_token
-- no arguments
-- returns:
-- - Google Cloud access token, as a string
-- - error (typically nil or string) indicating failure if non-nil
-- The access_token and its expiry are stashed in access_token_cache; an API
-- request (trading a `refresh_token` or signed JWT for a new access_token)
-- is only performed once the cached token has expired.
local cjson = require("cjson")
local http = require("resty.http")
local rsa = require("resty.rsa")
local user = require("user")
local ngx = require("ngx")
local b64 = require("ngx.base64")
local _M = {} -- module exports
local _U = {} -- local utility functions
-- Cache slot for the most recently fetched token: [1] = the access token
-- string (nil until first fetch), [2] = its unix expiry timestamp.
-- NOTE(review): this is per-Lua-VM state, so presumably per nginx worker
-- rather than shared across workers — confirm.
-- token, expiry timestamp
local access_token_cache = {nil, 0}
-- Resolve a Google Cloud access token and attach it to the current request
-- as a Bearer Authorization header. On failure, respond with HTTP 500 and
-- the error message instead.
function _M.inject_auth_or_error()
  local token, err = _M.get_access_token()
  if err ~= nil then
    ngx.status = 500
    ngx.say("error generating access token: "..err)
    return
  end
  ngx.req.set_header("Authorization", "Bearer "..token)
end
-- Returns: access token (string or nil), err (string or nil).
-- Serves the token from access_token_cache while it is unexpired; otherwise
-- fetches a fresh one via _U.get_access_token_uncached and caches it.
function _M.get_access_token()
  local now = os.time()
  local cached = access_token_cache
  if cached[1] and cached[2] > now then
    ngx.log(ngx.DEBUG, "using cached access_token")
    return cached[1], nil
  end
  ngx.log(ngx.NOTICE, "fetching new access_token")
  local token, expires_in, err = _U.get_access_token_uncached()
  if err then
    -- Previously the cache was overwritten even on failure; skip the update
    -- so a subsequent call simply retries the fetch.
    return nil, err
  end
  access_token_cache = { token, now + (expires_in or 0) }
  return token, nil
end
-- Fetch a brand-new access token, dispatching on the credential type found
-- in application_default_credentials.json: a refresh_token means an
-- authorized_user credential; otherwise we treat it as a service_account.
-- Returns: token, expires_in, err.
function _U.get_access_token_uncached()
  local creds, err = _U.get_application_default_credentials()
  if err ~= nil then
    return nil, 0, err
  end
  if not creds.refresh_token then
    return _U.access_token_from_service_account(creds)
  end
  return _U.access_token_from_authorized_user(creds)
end
-- Read and JSON-decode the gcloud application default credentials file for
-- the current user. Returns: decoded table (or nil), err (string or nil).
-- NOTE(review): `user` is whatever require("user") returned; assumed to
-- concatenate as the login name string — confirm.
function _U.get_application_default_credentials()
  local file = "/Users/"..user.."/.config/gcloud/application_default_credentials.json"
  local f, err = io.open(file, "rb")
  if err then
    return nil, "failed to locate gcloud credentials: "..err
  end
  local content = f:read("*all")
  -- The handle was previously leaked; close it as soon as we've read it.
  f:close()
  local success, obj = pcall(cjson.decode, content)
  if not success then
    return nil, "failed to decode gcloud application default credentials: "..obj
  end
  return obj, nil
end
-- Trade an authorized_user credential (client id/secret + refresh token)
-- for an access token. Returns: token, expires_in, err.
function _U.access_token_from_authorized_user(adc)
  if not (adc.client_id and adc.client_secret) then
    return nil, 0, "decoded application_default_credentials.json (which we interpreted as an authorized_user) didn't have the fields we expected"
  end
  return _U.convert_refresh_token_to_access_token(
    adc.client_id, adc.client_secret, adc.refresh_token
  )
end
-- Build and RS256-sign a short-lived (1h) JWT assertion from a
-- service_account credential, then exchange it for an access token.
-- Returns: token, expires_in, err.
function _U.access_token_from_service_account(adc)
  if not (adc.client_email and adc.private_key) then
    return nil, 0, "decoded application_default_credentials.json (which we interpreted as a service_account) didn't have the fields we expected"
  end
  local issued_at = os.time()
  -- Standard Google OAuth2 JWT claims: who we are, what we want access to,
  -- where we'll redeem the assertion, and the validity window.
  local claims_json = cjson.encode({
    iss = adc.client_email,
    scope = 'https://www.googleapis.com/auth/devstorage.read_only',
    aud = 'https://oauth2.googleapis.com/token',
    exp = issued_at + 3600,
    iat = issued_at,
  })
  local header_json = '{"alg":"RS256","typ":"JWT"}'
  local unsigned = b64.encode_base64url(header_json).."."..b64.encode_base64url(claims_json)
  local signature, err = _U.sign(adc.private_key, unsigned)
  if err then
    return nil, 0, err
  end
  local jws = unsigned.."."..b64.encode_base64url(signature)
  return _U.convert_jws_to_access_token(jws)
end
-- RS256-sign `text` with the PEM-encoded private `key`.
-- Returns: signature bytes (or nil), err (or nil).
function _U.sign(key, text)
  local signer, err = rsa:new({ private_key = key, algorithm = "SHA256" })
  if err then
    return nil, err
  end
  -- priv:sign already returns (sig, err); forward both.
  return signer:sign(text)
end
-- Redeem a signed JWT assertion at Google's OAuth2 token endpoint.
-- Returns: token, expires_in, err (forwarded from post_for_token).
function _U.convert_jws_to_access_token(jws)
  local form_body =
    "grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Ajwt-bearer&assertion="..jws
  return _U.post_for_token("https://oauth2.googleapis.com/token", form_body)
end
-- Trade a refresh_token (plus client id/secret) for a fresh access token at
-- Google's OAuth2 endpoint. Returns: token, expires_in, err.
-- The parameters are URL-encoded before being interpolated into the form
-- body; previously they were concatenated raw, which would corrupt the
-- request for any value containing '&', '=', '+', etc. (escape_uri is the
-- identity for already-safe values, so behavior is unchanged for them).
function _U.convert_refresh_token_to_access_token(client_id, client_secret, refresh_token)
  return _U.post_for_token(
    "https://accounts.google.com/o/oauth2/token",
    "grant_type=refresh_token"
      .."&refresh_token="..ngx.escape_uri(refresh_token)
      .."&client_id="..ngx.escape_uri(client_id)
      .."&client_secret="..ngx.escape_uri(client_secret)
  )
end
-- POST a form-encoded `body` to `url` and extract (access_token, expires_in)
-- from the JSON response. Returns: token, expires_in, err.
function _U.post_for_token(url, body)
  local httpc = http.new()
  local res, err = httpc:request_uri(url, {
    method = "POST",
    body = body,
    headers = {
      ["Content-Type"] = "application/x-www-form-urlencoded",
    },
    ssl_verify = true,
  })
  -- request_uri can return nil without a usable response; treat that as an
  -- error too rather than indexing a nil `res` below.
  if err or not res then
    return nil, 0, err or "no response from token endpoint"
  end
  -- cjson.decode raises on malformed JSON (it does not return nil), so the
  -- old `if not decoded` guard could never fire; wrap in pcall, matching
  -- get_application_default_credentials.
  local ok, decoded = pcall(cjson.decode, res.body)
  if not ok or type(decoded) ~= "table" then
    return nil, 0, "failed to decode JSON after asking google for a refreshed access_token"
  end
  local access_token = decoded.access_token
  if not access_token then
    return nil, 0, "response from google didn't contain an access token"
  end
  local expires_in = decoded.expires_in
  if not expires_in then
    return nil, 0, "response from google didn't contain an expiry"
  end
  return access_token, expires_in, nil
end
return _M
# This is grosser than you'd think because the binary cache thing requires
# URLs of the format: https://host/nar/somehash.nar.xz
#
# In proxying to GCS, that means we want an object path nar/somehash.nar.xz in
# our bucket, which has to be encoded in the URL as nar%2Fsomehash.nar.xz.
#
# It is surprisingly hard to convince nginx to escape a slash in a URL.
# The relatively obvious strategy of rewriting /nar/foo to /nar%2Ffoo just
# results in the request being sent as /nar%262Ffoo.
#
# The way we resolve this is to just have two highly-redundant location blocks:
# one for /* and one for /nar/*.
# HTTPS front-end for the private nix binary cache: authenticates to GCS on
# behalf of the client and proxies object reads out of the nix-cache bucket.
server {
listen 127.0.0.42:443 ssl;
server_name nix-cache.up.dev;
error_log /opt/nginx/log/error.log notice;
rewrite_log on;
include ssl.inc;
# /nar/* requests: the object lives at key "nar/<file>" in the bucket, which
# must appear percent-encoded (nar%2F<file>) in the GCS object URL — hence
# the regex capture + redundant location block explained in the header above.
location /nar {
rewrite_by_lua_block {
-- Attach a GCS Bearer token before proxying. Note: the colon call passes
-- the module table as an implicit first argument; get_access_token takes
-- no parameters, so it is ignored.
local gcloud = require("gcloud")
local access_token, err = gcloud:get_access_token()
if err then
ngx.status = 500
ngx.say("error generating access token: "..err)
return
end
ngx.req.set_header("Authorization", "Bearer "..access_token)
-- alt=media asks GCS for the object contents rather than its metadata.
ngx.req.set_uri_args("alt=media")
}
# Capture the last path component (everything after the final '/') into
# $last_path_component for use in proxy_pass below.
# NOTE(review): $request_uri includes any query string, which would end up
# in the capture — assumed nix never sends one here; confirm.
if ($request_uri ~* "([^/]*$)" ) {
set $last_path_component $1;
}
proxy_set_header Host www.googleapis.com;
proxy_ssl_name www.googleapis.com;
proxy_pass https://www.googleapis.com/storage/v1/b/nix-cache/o/nar%2F$last_path_component?alt=media;
}
# Everything else maps 1:1 onto bucket object names.
location / {
rewrite_by_lua_block {
local gcloud = require("gcloud")
local access_token, err = gcloud:get_access_token()
if err then
ngx.status = 500
ngx.say("error generating access token: "..err)
return
end
ngx.req.set_header("Authorization", "Bearer "..access_token)
ngx.req.set_uri_args("alt=media")
}
proxy_set_header Host www.googleapis.com;
proxy_ssl_name www.googleapis.com;
proxy_pass https://www.googleapis.com/storage/v1/b/nix-cache/o/;
}
}
# This server is *mostly* a dumb proxy from http://minio.up.dev to http://127.0.0.42:9000.
# However, in some cases, we actually proxy through to cache.nixos.org.
#
# The way we install packages is that users install stuff using both cache.nixos.org and our private
# cache as substituters when downloading binary cache entries, then when they're done installing
# their profile, we have them upload any new entries to the (private) cache. However, without some
# extra work, this means they'll spend a lot of time downloading entries from the public cache and
# uploading them to the private cache. This is not ideal.
#
# Most of the time, this host proxies directly to our private cache (via a local minio install), but
# when that upstream 404's, we re-proxy the request to https://cache.nixos.org. If *that* request
# 404's as well, we unfortunately have to do some especially gross work to return a 404 that the
# nix process understands, since it insists on parsing XML.
# Plain-HTTP front-end for minio with a cache.nixos.org fallback (see the
# explanation in the comment block above this server).
server {
  listen 127.0.0.42:80;
  server_name minio.up.dev;
  ignore_invalid_headers off;
  client_max_body_size 5000m;
  proxy_buffering off;
  # Fallback upstream: re-proxy a request that 404'd on local minio to the
  # public cache.nixos.org, rewriting any 404 from *there* into the S3-style
  # XML error document that nix insists on parsing.
  location @nixosDotOrgCache {
    rewrite /nix-cache/(.*) /$1 break;
    # It would be ideal to do the same strategy as below here, but nginx doesn't seem to be willing
    # to do two jumps in a row for `error_page 404`:
    # error_page 404 = @lookLikeAWS404;
    # proxy_intercept_errors on;
    # instead, we have to do some real gross stuff with header and body filters.
    # The end result here is really just to rewrite 404s to an XML response that satisfies nix.
    set $done_first_chunk '';
    header_filter_by_lua_block {
      if ngx.var.status == "404" then
        -- We are about to replace the body, so the upstream length is wrong.
        ngx.header.content_length = nil
      end
    }
    body_filter_by_lua_block {
      if ngx.var.status ~= "404" then
        return
      end
      -- nginx variable values are strings, never booleans: the original code
      -- compared and assigned `true` here, so the guard never fired and the
      -- XML was (re)assigned on every body chunk. Track the flag as "1" and
      -- emit the error document exactly once, swallowing subsequent chunks.
      if ngx.var.done_first_chunk == "1" then
        ngx.arg[1] = nil
        return
      end
      ngx.var.done_first_chunk = "1"
      ngx.arg[1] = "<?xml version=\"1.0\" encoding=\"UTF-8\"?><Error><Code>NoSuchKey</Code><Message>The specified key does not exist.</Message></Error>";
      ngx.arg[2] = true
    }
    proxy_http_version 1.1;
    proxy_read_timeout 15m;
    proxy_send_timeout 15m;
    proxy_request_buffering off;
    proxy_set_header X-Forwarded-For $remote_addr;
    proxy_set_header Host cache.nixos.org;
    proxy_set_header X-Real-IP $remote_addr;
    # Strip minio/S3 auth headers before talking to the public cache.
    proxy_set_header Authorization "";
    proxy_set_header x-amz-api-version "";
    proxy_set_header x-amz-content-sha256 "";
    proxy_set_header x-amz-date "";
    proxy_pass https://cache.nixos.org;
  }
  # Primary path: proxy straight through to the local minio, falling back to
  # the public cache on 404.
  location / {
    error_page 404 = @nixosDotOrgCache;
    proxy_intercept_errors on;
    proxy_http_version 1.1;
    proxy_read_timeout 15m;
    proxy_send_timeout 15m;
    proxy_request_buffering off;
    proxy_set_header X-Forwarded-For $remote_addr;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_pass http://127.0.0.42:9000; # minio
  }
}
require('dev')
require('set')
require('English')
module Dev
module Nix
module Build
# This class reads build output line-by-line from nix-build (or nix-shell, or whatever),
# updating an internal state indicating progress, and emits a status object to a registered
# callback (which must not mutate it) each time that status changes.
#
# Note that in order to provide better fidelity, we depend on running nix with `-vvvQ`, as this
# is required to get messages when a derivation output *finished* downloading.
#
# Simple usage example:
#
# bop = Dev::Nix::BuildOutputParser.new { |status| puts status.to_s }
# run_nix_and_yield_each_line('-vvvQ') { |line| bop.process(line) }
#
# The basic form of (this hyper-verbose) nix-build output that we parse is:
#
# (...snip...)
# these derivations will be built:
# /nix/store/${A}
# these paths will be fetched (143.56 MiB download, 737.31 MiB unpacked):
# /nix/store/${B}
# /nix/store/${C}
# (...snip...)
# copying path '/nix/store/${B}' from 'https://cache.nixos.org'...
# (...snip...)
# building '/nix/store/${A}'...
# (...snip...)
# copying path '/nix/store/${C}' from 'https://cache.nixos.org'...
# (...snip...)
# substitution of path '/nix/store/${C}' succeeded
# (...snip...)
# substitution of path '/nix/store/${B}' succeeded
# (...snip...)
# builder process for '/nix/store/${A}' finished
# (...snip...)
#
# With this input, our callback would fire at every "(...snip...)" except the first.
#
# -vv logs almost work, but there's no event that lets us detect when a build (as opposed to a
# fetch) has completed. -Q is not strictly necessary but eliminates output generated by
# derivations, more carefully guaranteeing that we won't get confounding inputs.
class OutputParser
extend(Dev::Util::PrivateConstants)
# Raised when a matched line yields no store path capture (see
# #check_store_path); #process wraps it into a Dev::Bug with the line number.
Error = Class.new(StandardError)
private_constants do
# Parser states:
#   STATE_INIT                   - reading preamble; waiting for a listing header.
#   STATE_LISTING_BUILT          - reading the indented list of paths to be built.
#   STATE_LISTING_FETCHED        - reading the indented list of paths to be fetched.
#   STATE_WORKING                - reading interleaved build/fetch progress lines.
#   STATE_AWAITING_BUILD_FAILURE - a build just "finished"; the next line
#                                  reveals whether it actually succeeded.
STATE_INIT = :init
STATE_LISTING_BUILT = :listing_built
STATE_LISTING_FETCHED = :listing_fetched
STATE_WORKING = :working
STATE_AWAITING_BUILD_FAILURE = :awaiting_build_failure
# Patterns for the -vvvQ nix-build output lines we care about (see the class
# comment above for a sample transcript).
PAT_WILL_BE_BUILT = %r{^these derivations will be built}
PAT_WILL_BE_FETCHED = %r{^these (derivations|paths) will be fetched \((.*) download, (.*) unpacked\)}
PAT_INDENTED_STORE_PATH = %r{\s+(/nix/store/.*)}
PAT_COPYING = %r{^copying path '(.*?)'}
PAT_BUILDING = %r{^building '(.*?)'\.\.\.}
PAT_FETCH_SUCCESS = %r{^substitution of path '(.*?)' succeeded}
PAT_BUILD_DONE = %r{^builder process for '(.*?)' finished}
PAT_BUILD_FAILED = %r{^builder for '(.*?)' failed}
PAT_FETCH_FAILED = %r{^path '(.*?)' is required, but there is no substituter that can build it}
# user-environment is a sort of special derivation name used by nix-build that doesn't show up
# in the initial preamble listing of entries that will be built or fetched. It only really
# shows up at higher levels of verbosity, so we're just going to ignore it completely.
PAT_USER_ENVIRONMENT = %r{^/nix/store/[a-z0-9]{32}-user-environment(\.drv)?$}
# Readability aliases: whether #transition should re-run the current line in
# the new state, and whether a record_* helper made a change worth emitting
# (the `record_x and emit` idiom in #_process relies on these truthy/falsy
# return values).
REPROCESS = true
NO_REPROCESS = false
EMIT = true
NO_EMIT = false
end
# The block is the status callback: it receives the internal Status object
# (shared and mutable — callers must not mutate it) on every change.
def initialize(&block)
@state = STATE_INIT
@lineno = 0
@status = Dev::Nix::Build::Status.new
@callback = block
end
# Feed one line of nix stderr into the state machine. May invoke the
# callback zero or more times. Parse/transition failures are re-raised as
# Dev::Bug annotated with the current line number.
def process(line)
@lineno += 1
# On transition, we set @reprocess to trigger the line to be reevaluated in the new state.
@reprocess = true
while @reprocess
@reprocess = false
_process(line)
end
rescue Error, Build::Status::UnallowableTransition => e
error = Dev::Bug.new("error on line #{@lineno}: #{e.message}")
error.set_backtrace([caller[0]] + e.backtrace)
raise(error)
end
private
# rubocop:disable Layout/SpaceBeforeSemicolon,Style/WhenThen,Style/PerlBackrefs,Style/Semicolon
# rubocop:disable Style/IdenticalConditionalBranches,Style/AndOr,Metrics/LineLength
# One step of the state machine: dispatch the line on @state, updating
# @status and possibly transitioning (with or without reprocessing the line).
def _process(line)
case @state
when STATE_INIT
# Read a whoooooole bunch of input, then a listing of what will be built and fetched.
case line
when PAT_WILL_BE_BUILT ; transition(STATE_LISTING_BUILT, NO_REPROCESS)
when PAT_WILL_BE_FETCHED ; transition(STATE_LISTING_FETCHED, NO_REPROCESS)
else ; noop
end
when STATE_LISTING_BUILT
# Don't actually emit while we read these in: they all come immediately as a batch, so
# instead, emit a record when we exit this state.
case line
when PAT_WILL_BE_FETCHED ; transition(STATE_LISTING_FETCHED, NO_REPROCESS)
when PAT_INDENTED_STORE_PATH ; record_waiting_build($1) # (defer emit)
else ; transition(STATE_WORKING, REPROCESS) ; emit
end
when STATE_LISTING_FETCHED
# Don't actually emit while we read these in: they all come immediately as a batch, so
# instead, emit a record when we exit this state.
case line
when PAT_WILL_BE_BUILT ; transition(STATE_LISTING_BUILT, NO_REPROCESS)
when PAT_INDENTED_STORE_PATH ; record_waiting_fetch($1) # (defer emit)
else ; transition(STATE_WORKING, REPROCESS) ; emit
end
when STATE_WORKING
# We're reading a very verbose stream of build/fetch output. The odd message will
# indicate the beginning or successful/unsuccessful termination of a build or fetch.
case line
when PAT_COPYING ; record_running_fetch($1) and emit
when PAT_BUILDING ; record_running_build($1) and emit
when PAT_FETCH_SUCCESS ; record_successful_fetch($1) and emit
when PAT_FETCH_FAILED ; record_failed_fetch($1) and emit
when PAT_BUILD_DONE ; await_build_failure($1)
else ; noop
end
when STATE_AWAITING_BUILD_FAILURE
# If the line immediately following a "completed" build indicates a failure, the build
# failed. Otherwise, it succeeded.
case line
when PAT_BUILD_FAILED ; record_failed_build(@await) and emit ; transition(STATE_WORKING, REPROCESS)
else ; record_successful_build(@await) and emit ; transition(STATE_WORKING, REPROCESS)
end
end
end
# rubocop:enable Layout/SpaceBeforeSemicolon,Style/WhenThen,Style/PerlBackrefs,Style/Semicolon
# rubocop:enable Style/IdenticalConditionalBranches,Style/AndOr,Metrics/LineLength
# Fire the registered callback with the current (mutable, shared) status.
def emit
@callback.call(@status)
end
# Switch states; when reprocess is REPROCESS, #process re-runs the same
# line under the new state.
def transition(next_state, reprocess)
@state = next_state
@reprocess = reprocess
end
# A build "finished"; stash its store path and wait one line to learn
# whether it actually failed (see STATE_AWAITING_BUILD_FAILURE).
def await_build_failure(store_path)
@await = store_path
transition(STATE_AWAITING_BUILD_FAILURE, NO_REPROCESS)
end
def noop
end
# Each record_* helper below returns EMIT/NO_EMIT (or nil when the path is
# ignored) so callers can write `record_x(...) and emit`.
def record_waiting_build(store_path)
return unless check_store_path(store_path)
@status.move_build(store_path, to: Status::WAITING)
EMIT
end
def record_waiting_fetch(store_path)
return unless check_store_path(store_path)
@status.move_fetch(store_path, to: Status::WAITING)
EMIT
end
def record_running_fetch(store_path)
return unless check_store_path(store_path)
@status.move_fetch(store_path, to: Status::RUNNING)
EMIT
end
def record_running_build(store_path)
return unless check_store_path(store_path)
@status.move_build(store_path, to: Status::RUNNING)
EMIT
end
def record_successful_fetch(store_path)
return unless check_store_path(store_path)
# The line we use to recognize successful fetches occurs in failed
# ones too, after the failure message.
return(NO_EMIT) if @status.fetch_state(store_path) == Status::FAILED
@status.move_fetch(store_path, to: Status::SUCCEEDED)
EMIT
end
def record_successful_build(store_path)
return unless check_store_path(store_path)
@status.move_build(store_path, to: Status::SUCCEEDED)
EMIT
end
def record_failed_fetch(store_path)
return unless check_store_path(store_path)
# Nix sometimes (optimistically?) tries to fetch things (apparently
# relating to builds) that aren't listed in our list of fetches.
state = @status.fetch_state(store_path)
unless state == Status::WAITING || state == Status::RUNNING
return(NO_EMIT)
end
@status.move_fetch(store_path, to: Status::FAILED)
EMIT
end
def record_failed_build(store_path)
return unless check_store_path(store_path)
@status.move_build(store_path, to: Status::FAILED)
EMIT
end
# Raises when the regex capture came back nil (we matched a line but got no
# path out of it); returns false for paths we deliberately ignore
# (user-environment), true otherwise.
def check_store_path(store_path)
raise(Error, 'failed to parse store path from line') if store_path.nil?
return(false) if store_path =~ PAT_USER_ENVIRONMENT
true
end
end
end
end
end
require('dev')
module Dev
  module Nix
    module Build
      # Runs a nix invocation under a CLI::UI spinner, feeding its stderr both
      # to a log file and to an OutputParser whose status updates drive the
      # spinner title.
      module OutputUI
        extend Dev::Util::PrivateConstants
        private_constants do
          LOG_FILE = '/tmp/dev-nix-env-failure'
          SPINNER_INIT = 'Nix: calculating downloads and builds...'
          STATUS = "Nix downloads:{{@widget/status:%d:%d:%d:%d}} builds:{{@widget/status:%d:%d:%d:%d}}"
        end
        class << self
          # Run `argv` (e.g. a nix-build command line) to completion.
          # Raises Dev::Abort, pointing at LOG_FILE, when the command fails.
          def call(argv)
            @title_set = false
            with_spinner do |spinner|
              output_parser = new_output_parser(spinner)
              # File.open with a block guarantees the log is closed even when
              # output_parser.process raises (previously the handle leaked on
              # that path); the block's value is the process status.
              stat = File.open(LOG_FILE, 'w') do |log|
                run_nix(argv) do |stderr_line|
                  log.write(stderr_line)
                  output_parser.process(stderr_line)
                end
              end
              # If nix produced no parseable progress at all, show an all-zero
              # status rather than leaving the initial title up.
              unless @title_set
                spinner.update_title(format(STATUS, 0, 0, 0, 0, 0, 0, 0, 0))
              end
              raise(Dev::Abort, "#{argv.first} failed! #{LOG_FILE} has (much) more detail.") unless stat.success?
            end
          end

          private

          # Yields the spinner for the duration of the run.
          def with_spinner
            CLI::UI::Spinner.spin(SPINNER_INIT) do |spinner|
              yield(spinner)
            end
          end

          # Builds an OutputParser whose status callback rewrites the spinner
          # title with current fetch/build counters.
          def new_output_parser(spinner)
            Dev::Nix::Build::OutputParser.new do |s|
              @title_set = true
              spinner.update_title(format(
                STATUS,
                s.fetch_succeeded, s.fetch_failed, s.fetch_running, s.fetch_waiting,
                s.build_succeeded, s.build_failed, s.build_running, s.build_waiting,
              ))
            end
          end

          # Spawns argv, yields each line of its stderr, and returns the
          # Process::Status. stdout is discarded.
          # NOTE(review): popen3 sets up its own stdout pipe regardless of the
          # `out:` option; we close the unused pipes once stderr hits EOF so
          # the descriptors don't leak (previously the stdout pipe was never
          # closed).
          def run_nix(argv)
            stdin, stdout, stderr, wait_thr = Open3.popen3(*argv, out: '/dev/null')
            stdin.close
            begin
              while (stderr_line = stderr.readline)
                yield(stderr_line)
              end
            rescue EOFError
              # normal termination: child closed its stderr
            ensure
              stdout.close unless stdout.closed?
              stderr.close unless stderr.closed?
            end
            wait_thr.value
          end
        end
      end
    end
  end
end
#!/bin/bash
#
# usage:
# setup-hook-to-shadowenv /nix/store/$something
#
# This file partially emulates <nixpkgs>/pkgs/stdenv/generic/setup.sh.
#
# setup.sh is used to manage a large part of the building process for most
# derivations. One small part of what it is responsible for is evaluating the
# shellHook attribute in derivations. The shellHook is written to
# nix-support/setup-hook in the nix store path for a given package.
#
# A setup-hook for a dependency will often do a bit of environment manipulation
# that we'd like to be able to capture and reproduce in the shadowenv for a
# project.
#
# This file emulates just enough of nixpkgs's setup.sh to capture the
# environment variable modifications and emit them in shadowlisp format without
# implementing any other parts of setup.sh
# Most functions from setup.sh are left unimplemented. Generated by:
# grep "() {" setup.sh
# setup.sh defines many helper functions we don't need for env-var capture.
# Define each one as a stub that only warns on stderr, so setup-hooks that
# call them don't abort the script.
__stub_funcs=(
runHook runOneHook _callImplicitHook _eval stopNest header closeNest echoCmd
exitHandler addToSearchPathWithCustomDelimiter _addRpathPrefix isELF isScript
printLines printWords findInputs activatePackage _activatePkgs _addToEnv
substituteStream consumeEntire substitute substituteInPlace _allFlags
substituteAllStream substituteAll substituteAllInPlace dumpVars stripHash
_defaultUnpack unpackFile unpackPhase patchPhase fixLibtool configurePhase
buildPhase checkPhase installPhase fixupPhase installCheckPhase distPhase
showPhaseHeader genericBuild
)
for fn in "${__stub_funcs[@]}"; do
  eval "${fn}() { echo \"warning: unhandled function ${fn}\" >&2; }"
done
# setup.sh API: addEnvHooks <hostOffset> <hook>. We ignore the offset ($1) and
# run the hook ($2) immediately against the store path being processed, so any
# env mutations it makes go through our overridden builtins below.
addEnvHooks() {
eval "$2" "${__NIX_STORE_PATH}"
}
# Emit shadowlisp that prepends value ($2) to the ':'-separated pathlist
# variable named by $1.
addToSearchPath() {
  printf '(env/prepend-to-pathlist "%s" "%s")\n' "$1" "$2"
}
# setup.sh API: addToSearchPathWithCustomDelimiter <delim> <var> <value>.
# ':' delegates to the pathlist form above; any other delimiter emits a
# shadowlisp expression that prepends "<value><delim>" to <var>, setting the
# variable outright when it is currently unset.
# (This overrides the stub of the same name defined in the loop above.)
addToSearchPathWithCustomDelimiter() {
if [[ "$1" == ":" ]]; then
addToSearchPath "$2" "$3"
else
echo "(let ((val \"$3\"))"
echo " (if (eq (env/get \"$2\") ())"
echo " (env/set \"$2\" val)"
echo " (env/set \"$2\" (concat val \"$1\" (env/get \"$2\")))))"
fi
}
# Override the `export` builtin: rather than mutating this shell's
# environment, emit the equivalent shadowlisp (env/set ...) form.
# Supports both `NAME=value` and bash's `NAME+=value` append syntax.
export() {
  local IFS name val regex
  regex='(.*)\+=(.*)'
  if [[ "$1" =~ $regex ]]; then
    name=${BASH_REMATCH[1]}
    val=${BASH_REMATCH[2]}
    # Fixed: the emitted form was missing the closing paren of (env/set ...),
    # producing unbalanced shadowlisp for every += export.
    echo "(env/set \"${name}\" (concat (env/get \"${name}\") \"${val}\"))"
  else
    IFS="="
    read -r name val <<< "$1"
    echo "(env/set \"${name}\" \"${val}\")"
  fi
}
# The store path whose setup-hook we are translating (first CLI argument).
__NIX_STORE_PATH=$1
# Various things that are set in setup.sh. The values shouldn't be
# super-important, but they may need to exist in some cases.
targetOffset=0
hostOffset=0
depHostOffset=0
depTargetOffset=0
# Mirror setup.sh, which stashes the original PATH in _PATH.
_PATH=$PATH
# Evaluate the package's setup-hook with the stubs/overrides above in place;
# its environment manipulations are emitted on stdout as shadowlisp.
source "${__NIX_STORE_PATH}/nix-support/setup-hook"
module Dev
  module Nix
    module Build
      # Mutable progress ledger for a nix invocation. Every store path is
      # tracked as either a fetch (substitution) or a build, and moves through
      # nil -> WAITING -> RUNNING -> SUCCEEDED|FAILED (WAITING may be skipped:
      # nil -> RUNNING is allowed). Per-state counters back the *_succeeded /
      # *_failed / *_running / *_waiting accessors.
      class Status
        WAITING = :waiting
        RUNNING = :running
        SUCCEEDED = :succeeded
        FAILED = :failed
        FETCH = :fetch
        BUILD = :build

        # Raised for a state transition not permitted by
        # ALLOWABLE_TRANSITIONS (e.g. SUCCEEDED -> RUNNING).
        class UnallowableTransition < StandardError
          attr_reader(:store_path, :type, :from, :to)

          def initialize(store_path, type, from, to)
            @store_path = store_path
            @type = type
            @from = from
            @to = to
            super("unallowable state transition #{from}->#{to} for #{type}: #{store_path}")
          end
        end

        # true indicates a transition is allowed; a symbol indicates a
        # transition is allowed and the method named after the symbol should be
        # invoked as a hook on transition.
        #
        # rubocop:disable Layout/AlignHash
        ALLOWABLE_TRANSITIONS = {
          nil => { WAITING => :noop, RUNNING => :on_start },
          WAITING => { RUNNING => :on_start },
          RUNNING => { SUCCEEDED => :on_succeed, FAILED => :on_fail },
          SUCCEEDED => {},
          FAILED => {},
        }.freeze
        # rubocop:enable Layout/AlignHash

        def initialize
          @fetches = {} # store_path => state symbol
          @builds = {} # store_path => state symbol
          @start_times = {} # store_path => UTC Time of entering RUNNING
          @fsize = { WAITING => 0, RUNNING => 0, SUCCEEDED => 0, FAILED => 0 }
          @bsize = { WAITING => 0, RUNNING => 0, SUCCEEDED => 0, FAILED => 0 }
        end

        # Move a fetch to state `to`; raises UnallowableTransition when the
        # transition is not permitted.
        def move_fetch(store_path, to:)
          move(@fetches, @fsize, FETCH, store_path, to)
        end

        # Move a build to state `to`; raises UnallowableTransition when the
        # transition is not permitted.
        def move_build(store_path, to:)
          move(@builds, @bsize, BUILD, store_path, to)
        end

        # Current state symbol for the fetch (nil if never registered).
        def fetch_state(store_path)
          @fetches[store_path]
        end

        # Current state symbol for the build (nil if never registered).
        def build_state(store_path)
          @builds[store_path]
        end

        def to_s
          "Build:[#{build_succeeded},#{build_failed}<#{build_running}<#{build_waiting}];" \
          "Fetch:[#{fetch_succeeded},#{fetch_failed}<#{fetch_running}<#{fetch_waiting}]"
        end

        # rubocop:disable Style/SingleLineMethods,Layout/SpaceBeforeSemicolon,Layout/EmptyLineBetweenDefs
        def fetch_succeeded ; @fsize.fetch(SUCCEEDED) end
        def fetch_failed ; @fsize.fetch(FAILED) end
        def fetch_running ; @fsize.fetch(RUNNING) end
        def fetch_waiting ; @fsize.fetch(WAITING) end
        def build_succeeded ; @bsize.fetch(SUCCEEDED) end
        def build_failed ; @bsize.fetch(FAILED) end
        def build_running ; @bsize.fetch(RUNNING) end
        def build_waiting ; @bsize.fetch(WAITING) end
        # rubocop:enable Style/SingleLineMethods,Layout/SpaceBeforeSemicolon,Layout/EmptyLineBetweenDefs

        private

        # Shared implementation behind move_fetch/move_build (previously
        # duplicated): validates the transition, updates the state map and the
        # per-state counters, then fires the transition hook.
        def move(states, sizes, type, store_path, to)
          from = states[store_path] # nil when first registering
          unless (hook = ALLOWABLE_TRANSITIONS[from][to])
            raise(UnallowableTransition.new(store_path, type, from, to))
          end
          states[store_path] = to
          sizes[from] -= 1 unless from.nil? # nothing to decrement on first registration
          sizes[to] += 1
          send(hook, type, store_path)
        end

        def noop(_, _)
        end

        def on_start(_, store_path)
          @start_times[store_path] = time_now_utc
        end

        def on_succeed(type, store_path)
          finalize(type, store_path, true)
        end

        def on_fail(type, store_path)
          finalize(type, store_path, false)
        end

        # Report the completed fetch/build (with timing) to monorail.
        def finalize(type, store_path, is_success)
          Dev::Monorail.log.nix_install(
            store_path: store_path,
            is_build: type == BUILD,
            is_success: is_success,
            start_time: @start_times.fetch(store_path),
            end_time: time_now_utc,
          )
        end

        def time_now_utc
          Time.now.utc
        end
      end
    end
  end
end
# frozen_string_literal: true
require('dev')
module Dev
  module Nix
    module Periodic
      # Periodic task that drains the nix-copy spool: each spooled store-path
      # basename is signed with our private cache key and copied up to the
      # minio-backed binary cache.
      module UploadToCache
        Error = Class.new(StandardError)
        NIX = 'nix'
        NIX_COPY_TARGET = 's3://nix-cache?profile=minio-dev&endpoint=minio.up.dev&scheme=http'
        KEY_FILE = Dev::Tasks::Prereq::NixConfig::SIGNING_KEY_LOCAL_PATH

        # Entry point for the periodic runner. Skips silently when nix is not
        # installed or we appear to be offline; otherwise uploads every
        # spooled entry until the spool is empty.
        def self.call(_message, ctx: Dev::Context.new)
          return unless Dev::Daemon::RequireBinary.has?(NIX)
          return unless Dev::Helpers::Internet.connected?
          loop do
            entry = Dev::Nix::CopySpooler.next
            break unless entry
            puts("[UploadToCache] unspooled #{entry}")
            upload(ctx, "/nix/store/#{entry}")
            puts("[UploadToCache] uploaded #{entry}")
          end
        end

        # Sign `path` and copy it to the cache, reporting (but not raising on)
        # failures of either step.
        def self.upload(ctx, path)
          output, status = ctx.capture2e(NIX, 'sign-paths', '--key-file', KEY_FILE, path)
          unless status.success?
            puts(output.gsub(/^/m, "\t"))
            Dev::Util.report(Error, "nix sign-paths failed; log written to daemon log")
          end
          output, status = ctx.capture2e(NIX, 'copy', '--to', NIX_COPY_TARGET, path)
          unless status.success?
            puts(output.gsub(/^/m, "\t"))
            Dev::Util.report(Error, "nix copy failed; log written to daemon log")
          end
        end
      end
    end
  end
end
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment