# -*- python -*-
# ex: set syntax=python:
# This is rust-buildbot's buildmaster config file. It must be installed as
# 'master.cfg' in your buildmaster's base directory.
# You will probably need to read the buildbot manual at some length to
# understand what's going on in here. A quick summary follows:
#
# - ChangeSources (GitPoller) run server-side, polling git for changes
# - Schedulers (AnyBranchScheduler) trigger builds
# - Builders represent work-queues that Schedulers dump BuildRequests into
# - BuildRequests cause a BuildFactory to make Builds
# - Builds get dispatched to Slaves
# - The slave runs the sequence of Steps in the Build
#
# To customize the behavior of a _Step_, we pass _Properties_.
#
# A property is a k=v pair attached to a BuildRequest passing through
# the system. It can be overridden at each stage of processing, but
# since we want to give forced-builds (from users) a fair amount of
# flexibility, we try to set our default properties early (in
# Schedulers).
#
# Properties can be set by users, by schedulers, by builders, by
# slaves, and by buildsteps themselves in response to their
# environment.
#
# We often want a mixture of such configuration and control, so we use
# properties for everything. Any steps in the BuildFactory that we
# expect to vary (aside from the branch being served), we parameterize
# through properties and adjust the commands issued in the steps
# themselves through IRenderables, doStepIf, and similar
# property-driven customization. Note that this means most variability
# can change _request by request_; if you wire in variability when
# setting up the builder, you'll be stuck always doing the same thing
# in that builder.
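#
# As a minimal illustration of that pattern (a hypothetical step, not one
# of the steps defined below): a property-based doStepIf guard plus a
# property-rendered command might look like
#
#   def should_build_docs(step):
#       props = step.build.getProperties()
#       return str(props.getProperty("docs", "yes")).lower() not in ["no", "false", "0"]
#
#   f.addStep(ShellCommand(command=WithProperties("make docs PLATFORM=%(platform)s"),
#                          doStepIf=should_build_docs))
#
# so the same builder can produce docs or skip them request by request.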
import re
from buildbot.process.buildstep import BuildStep, SUCCESS, FAILURE
from buildbot.status.logfile import STDOUT
# This is the dictionary that the buildmaster pays attention to. We also use
# a shorter alias to save typing.
c = BuildmasterConfig = {}
# from buildbot import manhole
# c['manhole'] = manhole.AuthorizedKeysManhole("tcp:1234:interface=127.0.0.1", "~/.ssh/authorized_keys")
c['changeHorizon'] = 200
c['buildHorizon'] = 500
c['eventHorizon'] = 50
c['logHorizon'] = 500
c['caches'] = {
'Changes' : 1000,
'Builds' : 500,
'chdicts' : 1000,
'BuildRequests' : 100,
'SourceStamps' : 200,
'ssdicts' : 200,
'objectids' : 100,
'usdicts' : 1000,
}
c['logCompressionLimit'] = 16384
c['logCompressionMethod'] = 'gz'
c['logMaxSize'] = 1024*1024*10 # 10M
c['logMaxTailSize'] = 32768
WORKDIR = "build"
BUILD_WORKDIR = "build/obj"
####### Site-specific configuration
keypair_name = 'buildbot-west-slave-key'
security_name = None
# 1 hour should be smaller than the diff between fastest and slowest build,
# to prevent slaves from needing to be restarted while bors's queue is full
build_wait_timeout = (60*60)
region = 'us-west-1'
master_config = { }
for line in open("master.cfg.txt"):
fields = line.split()
if len(fields) >= 2:
k = fields.pop(0)
v = fields.pop(0)
master_config[k] = v
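# For illustration, master.cfg.txt is expected to hold whitespace-separated
# key/value pairs, one per line; the keys consumed below might look like
# this (the values here are placeholders, not the real deployment settings):
#
#   env prod
#   master_addy 10.0.0.1
#   git_source https://github.com/rust-lang/rust
#   cargo_source https://github.com/rust-lang/cargo
#   s3_addy s3://example-rust-bucket
#   s3_cargo_addy s3://example-cargo-bucket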
env = master_config['env']
master_addy = master_config['master_addy']
git_source = master_config['git_source']
cargo_source = master_config['cargo_source']
s3_addy = master_config['s3_addy']
s3_cargo_addy = master_config['s3_cargo_addy']
all_branches = ["auto", "master", "try",
"snap-stage3", "dist-snap"]
# Production configuration
auto_platforms_prod = ["mac-32-opt",
#"mac-32-nopt-c", FIXME #7221 can't fit metedata sections
#"mac-32-nopt-t",
"mac-64-opt",
"mac-64-nopt-c",
"mac-64-nopt-t",
# "mac-64-opt-vg",
# "mac-all-opt",
"linux-32-opt",
"linux-32-nopt-c",
"linux-32-nopt-t",
"linux-64-opt",
"linux-64-nopt-c",
"linux-64-nopt-t",
# shard linux valgrind tests 5 ways
# "linux-64-opt-vg-0.5",
# "linux-64-opt-vg-1.5",
# "linux-64-opt-vg-2.5",
# "linux-64-opt-vg-3.5",
# "linux-64-opt-vg-4.5",
# "linux-64-opt-vg",
# "linux-all-opt",
"linux-64-x-android-t",
"win-32-opt",
"win-32-nopt-c",
"win-32-nopt-t",
"win-64-opt",
"win-64-nopt-c",
"win-64-nopt-t",
"bsd-64-opt",
]
try_platforms_prod = ["linux", "win-32", "win-64", "bsd", "mac"]
snap_platforms_prod = ["linux", "win-32", "win-64", "bsd", "mac"]
dist_platforms_prod = ["linux", "win-32", "win-64", "mac"]
cargo_platforms_prod = ["linux-32", "linux-64", "mac-32", "mac-64", "win-32", "win-64"]
# Development configuration
auto_platforms_dev = [#"mac-32-opt",
#"mac-32-nopt-c", FIXME #7221 can't fit metedata sections
#"mac-32-nopt-t",
#"mac-64-opt",
#"mac-64-nopt-c",
#"mac-64-nopt-t",
# "mac-64-opt-vg",
# "mac-all-opt",
"linux-32-opt",
"linux-32-nopt-c",
"linux-32-nopt-t",
"linux-64-opt",
"linux-64-nopt-c",
"linux-64-nopt-t",
# shard linux valgrind tests 5 ways
# "linux-64-opt-vg-0.5",
# "linux-64-opt-vg-1.5",
# "linux-64-opt-vg-2.5",
# "linux-64-opt-vg-3.5",
# "linux-64-opt-vg-4.5",
# "linux-64-opt-vg",
# "linux-all-opt",
"linux-64-x-android-t",
"win-32-opt",
"win-32-nopt-c",
"win-32-nopt-t",
"win-64-opt",
"win-64-nopt-c",
"win-64-nopt-t",
"bsd-64-opt",
]
try_platforms_dev = ["linux", "win-32", "win-64", "bsd"]#, "mac"]
snap_platforms_dev = ["linux", "win-32", "win-64", "bsd"]#, "mac"]
dist_platforms_dev = ["linux", "win-32", "win-64"]#, "mac"]
cargo_platforms_dev = ["linux-32", "linux-64", "win-32", "win-64"]
if env == "prod":
auto_platforms = auto_platforms_prod
try_platforms = try_platforms_prod
snap_platforms = snap_platforms_prod
dist_platforms = dist_platforms_prod
cargo_platforms = cargo_platforms_prod
else:
auto_platforms = auto_platforms_dev
try_platforms = try_platforms_dev
snap_platforms = snap_platforms_dev
dist_platforms = dist_platforms_dev
cargo_platforms = cargo_platforms_dev
# auto-platforms that won't cause others to fail (these don't gate bors)
nogate_builders = ["auto-bsd-64-opt"]
####### PLATFORM TRIPLES
# Configuration of --host and --target triples based on the above platform names
def all_platform_hosts(platform):
if "mac" in platform:
return ["i686-apple-darwin", "x86_64-apple-darwin"]
elif "linux-64-x-android" in platform:
return ["i686-unknown-linux-gnu", "x86_64-unknown-linux-gnu"]
elif "linux" in platform:
return ["i686-unknown-linux-gnu", "x86_64-unknown-linux-gnu"]
elif "bsd" in platform:
return ["x86_64-unknown-freebsd"]
elif "win-32" in platform:
return ["i686-pc-windows-gnu"]
elif "win-64" in platform:
return ["x86_64-pc-windows-gnu"]
else:
return None
def auto_platform_host(p):
if "-all" in p:
return "all"
else:
return [auto_platform_triple(p)]
def all_platform_targets(platform):
return all_platform_hosts(platform)
def auto_platform_target(p):
if "-all" in p:
return "all"
else:
return [auto_platform_triple(p)]
def auto_platform_triple(p):
if "mac" in p:
if "-32" in p:
return "i686-apple-darwin"
else:
return "x86_64-apple-darwin"
if "linux" in p:
if "-32" in p:
return "i686-unknown-linux-gnu"
else:
return "x86_64-unknown-linux-gnu"
if "win" in p:
if "-32" in p:
return "i686-pc-windows-gnu"
else:
return "x86_64-pc-windows-gnu"
if "bsd" in p:
return "x86_64-unknown-freebsd"
####### BUILDSLAVES
# The 'slaves' list defines the set of recognized buildslaves. Each element is
# a BuildSlave object, specifying a unique slave name and password. The same
# slave name and password must be configured on the slave.
from buildbot.buildslave import BuildSlave
from buildbot.buildslave.ec2 import EC2LatentBuildSlave
snap_slaves = []
dist_slaves = []
auto_slaves = []
c['slaves'] = []
for line in open("slave-list.txt"):
if line.startswith("#"):
continue
fields = line.split()
if len(fields) >= 2:
name = fields.pop(0)
pw = fields.pop(0)
ext = {'max_builds':1,
'instance_type':'m3.xlarge',
}
for kv in fields:
(k,v) = kv.split('=')
ext[k] = v
if 'ami' in ext:
slave = EC2LatentBuildSlave(name, pw, ext['instance_type'],
ami=ext['ami'],
#elastic_ip=ext['elastic_ip'],
user_data="%s %s %s" % (name, pw, master_addy),
region=region,
#subnet_id=subnet_id,
keypair_name=keypair_name,
#security_name=security_name,
security_name="rust-non-vpc-slave",
build_wait_timeout=build_wait_timeout,
# notify_on_missing=['admin@rust-lang.org'],
max_builds=int(ext['max_builds']),
tags = { 'Name': env + "-slave-" + name })
else:
slave = BuildSlave(name, pw, max_builds=int(ext['max_builds']))
if 'snap' in ext:
snap_slaves.append(slave)
if 'dist' in ext:
dist_slaves.append(slave)
# "special" slaves are those we are _not_ putting in the auto pool.
if 'special' not in ext:
auto_slaves.append(slave)
c['slaves'].append(slave)
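# For illustration, slave-list.txt holds one slave per line: a name, a
# password, then optional key=value fields ('#' lines are ignored). The
# recognized keys above are ami, instance_type, max_builds, snap, dist and
# special. A hypothetical example (placeholder names and values):
#
#   linux-64-1  sekrit  ami=ami-0123abcd max_builds=1 dist=1
#   mac-64-1    sekrit  snap=1 special=1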
# We listen for slaves only on localhost; there should be an stunnel
# loopback forwarding to here.
c['slavePortnum'] = "tcp:9989:interface=127.0.0.1"
####### CHANGESOURCES
# the 'change_source' setting tells the buildmaster how it should find out
# about source code changes.
from buildbot.changes.gitpoller import GitPoller
main_sources = [GitPoller(git_source,
workdir='gitpoller-workdir',
branches=all_branches,
pollinterval=60),
GitPoller(cargo_source,
workdir='gitpoller-workdir',
branches=["master", "auto-cargo"],
pollinterval=60)]
c['change_source'] = main_sources
####### SCHEDULERS
# Configure the Schedulers, which decide how to react to incoming changes.
from buildbot.schedulers.basic import SingleBranchScheduler
from buildbot.schedulers.forcesched import *
from buildbot.schedulers.timed import Nightly
from buildbot.schedulers.triggerable import Triggerable
from buildbot.changes import filter
try_sched = SingleBranchScheduler(
name="try-sched",
change_filter=filter.ChangeFilter(branch='try'),
treeStableTimer=5,
builderNames=["try-" + p for p in try_platforms])
auto_sched = SingleBranchScheduler(
name="auto-sched",
change_filter=filter.ChangeFilter(#filter_fn=(lambda c: "bors" in c.who),
branch='auto'),
treeStableTimer=60,
builderNames=["auto-" + p for p in auto_platforms])
snap_sched = SingleBranchScheduler(
name="snap3-sched",
change_filter=filter.ChangeFilter(branch='snap-stage3'),
treeStableTimer=5,
builderNames=["snap3-" + p for p in snap_platforms])
dist_sched = SingleBranchScheduler(
name="dist-sched",
change_filter=filter.ChangeFilter(branch='dist-snap'),
treeStableTimer=5,
builderNames=["dist2-" + p for p in dist_platforms])
nightly_trigger_sched = Nightly(
name="nightly-trigger",
branch="master",
builderNames=["nightly-trigger"],
hour=3,
minute=0
)
nightly_sched = Triggerable(
name="nightly",
builderNames=["nightly-" + p for p in dist_platforms],
)
nightly_cargo_trigger_sched = Nightly(
name="nightly-cargo-trigger",
branch="master",
builderNames=["nightly-cargo-trigger"],
hour=2,
minute=0
)
nightly_cargo_builders = []
for p in dist_platforms:
for host in all_platform_hosts(p):
bits = "32" if "i686" in host else "64"
nightly_cargo_builders.append("nightly-cargo-" + p + "-" + bits)
nightly_cargo_sched = Triggerable(
name="nightly-cargo",
builderNames=nightly_cargo_builders,
)
cargo_sched = SingleBranchScheduler(
name="cargo-sched",
change_filter=filter.ChangeFilter(#filter_fn=(lambda c: "bors" in c.who),
branch='auto-cargo'),
treeStableTimer=60,
builderNames=["cargo-" + p for p in cargo_platforms])
force_sched = ForceScheduler(
name="force-sched",
builderNames=["try-" + p for p in try_platforms]
+ ["auto-" + p for p in auto_platforms]
+ ["snap3-" + p for p in snap_platforms]
+ ["dist2-" + p for p in dist_platforms]
+ ["nightly-" + p for p in dist_platforms]
+ nightly_cargo_builders
+ ["cargo-" + p for p in cargo_platforms]
+ ["nightly-trigger"]
+ ["dist2-trigger"]
+ ["nightly-cargo-trigger"],
reason=StringParameter(name="reason", label="reason:", default="force build",
required=False, size=10),
branch=StringParameter(name="branch", label="branch:",
required=True, size=10),
revision=StringParameter(name="revision", label="revision:",
required=False, size=10),
# will generate nothing in the form, but revision, repository,
# and project are needed by the buildbot scheduling system so we
# need to pass a value ("")
#revision=FixedParameter(name="revision", default=""),
repository=FixedParameter(name="repository", default=""),
project=FixedParameter(name="project", default="")
)
c['schedulers'] = [
try_sched,
auto_sched,
snap_sched,
dist_sched,
nightly_sched,
nightly_trigger_sched,
nightly_cargo_sched,
nightly_cargo_trigger_sched,
cargo_sched,
force_sched
]
####### BUILDERS
# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
# what steps, and which slaves can execute them. Note that any particular build will
# only take place on one slave.
from buildbot.process.factory import BuildFactory
from buildbot.process.properties import WithProperties, Property
from buildbot.steps.source.git import Git
from buildbot.status.results import SUCCESS, WARNINGS, FAILURE, SKIPPED, \
EXCEPTION, RETRY, worst_status
from buildbot.steps.shell import ShellCommand, Configure, Compile, Test, SetPropertyFromCommand
from buildbot.steps.transfer import FileUpload, DirectoryUpload
from buildbot.steps.master import MasterShellCommand
from buildbot.steps.slave import RemoveDirectory
from buildbot.steps.trigger import Trigger
from buildbot.config import BuilderConfig
from buildbot.interfaces import IRenderable
from zope.interface import implements
import re, os
def props_has_negative_key(props, keyname):
if keyname in props:
if str(props[keyname]).lower() in ["no", "false", "0"]:
return True
return False
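# e.g. props_has_negative_key({"check": "no"}, "check") is True, while a
# missing key or any other value (such as "check-lite") leaves it False.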
# property-based doStepIf helper
def should_wipe(step):
# Wipe isn't working right now
return False
props = step.build.getProperties()
if props.has_key("wipe"):
return True
if props.has_key("freshconfig"):
return props["freshconfig"] == "0"
return False
# property-based doStepIf helper
def should_clean_llvm(step):
props = step.build.getProperties()
if props.has_key("clean-llvm"):
return True
# property-based doStepIf helper
def should_check(step):
props = step.build.getProperties()
return not props_has_negative_key(props, "check")
# property-based IRenderable helper
class MakeCommand(object):
implements(IRenderable)
def getRenderingFor(self, props):
if "buildername" in props:
if "bsd" in props["buildername"]:
return "gmake"
return "make"
# property-based IRenderable helper
class CheckCommand(object):
implements(IRenderable)
def getRenderingFor(self, props):
mk = MakeCommand().getRenderingFor(props)
chk = "check"
if "check" in props:
chk = str(props["check"])
if "buildername" in props and \
"bsd" in props["buildername"] and \
chk in ["lite", "check-lite", "check",
"yes", "True"]:
chk = "check-lite"
if re.search("^check(-[a-zA-Z0-9-_\.]+)?$", chk):
return [mk, chk]
return [mk, "check"]
# property-based IRenderable helper
class TestCommand(object):
implements(IRenderable)
def getRenderingFor(self, props):
mk = MakeCommand().getRenderingFor(props)
return [mk, "test"]
# property-based IRenderable helper
class CommandEnv(object):
implements(IRenderable)
def getRenderingFor(self, props):
env = {"RUST_LOG": "rustc=1"}
if "buildername" in props:
# FIXME: Want to upgrade to 4.9.1 but cargo build fails
#win32toolchain = r"c:\program files (x86)\mingw-w64\i686-4.9.1-win32-dwarf-rt_v3-rev1\mingw32\bin"
win32toolchain = r"c:\program files (x86)\mingw-w64\i686-4.8.1-win32-dwarf-rt_v3-rev2\mingw32\bin"
win64toolchain = r"c:\program files\mingw-w64\x86_64-4.9.1-win32-seh-rt_v3-rev1\mingw64\bin"
# This is required to trigger certain workarounds done
# slave-side by buildbot; in particular, omitting the PWD
# variable with an unmangled pathname.
if "win-32" in props["buildername"]:
env["MACHTYPE"] = "i686-pc-msys"
env["MSYSTEM"] = "MINGW32"
env["PATH"] = win32toolchain + ";c:\\msys64\\usr\\bin;${PATH}"
if "win-64" in props["buildername"]:
env["MACHTYPE"] = "x86_64-pc-msys"
env["MSYSTEM"] = "MINGW64"
env["PATH"] = win64toolchain + ";c:\\msys64\\usr\\bin;${PATH}"
if "valgrind" in props and props["valgrind"] == True:
env["RUST_THREADS"]="1"
env["RUST_RT_TEST_THREADS"]="1"
if "verbose" in props and props["verbose"] == True:
env["VERBOSE"]="1"
if "shard" in props and props["shard"] != None:
env["TEST_SHARD"] = props["shard"]
#if "android" in props and props["android"] == True:
if "buildername" in props and "android" in props["buildername"]:
path = os.environ["PATH"]
path += ":/home/rustbuild/android-sdk-linux/platform-tools/"
path += ":/home/rustbuild/android-sdk-linux/tools/"
env["PATH"] = path
env["NO_BENCH"] = "1"
elif "buildername" in props and "linux" in props["buildername"]:
path = os.environ["PATH"]
path = "/home/rustbuild/gcc-4.7.4/bin:" + path
path = "/home/rustbuild/root64/bin:" + path
env["PATH"] = path
ld_path = "/home/rustbuild/gcc-4.7.4/lib"
ld_path = "/home/rustbuild/gcc-4.7.4/lib64:" + ld_path
env["LD_LIBRARY_PATH"] = ld_path
if "buildername" in props and "bsd" in props["buildername"]:
env["CXX"] = "clang++"
if "cargo" in props:
env["PLATFORM"] = props["platform"]
env["BITS"] = props["bits"]
env["HOME"] = "${PWD}/slave/cargo-" + props["platform"] + \
"-" + props["bits"] + "/build"
# if "cargo-nightly" in props and "platform" in props and \
# "linux" in props["platform"]:
# root = "/home/rustbuild/root" + props["bits"]
# env["CMAKE_PREFIX_PATH"] = root
# env["PKG_CONFIG_PATH"] = root + "/lib/pkgconfig"
# env["CFLAGS"] = "-I" + root + "/include " + \
# "-L" + root + "/lib"
return env
# property-based IRenderable helper
class ConfigCommand(object):
implements(IRenderable)
def getRenderingFor(self, props):
opts=["sharedstd", "valgrind", "helgrind",
"docs", "optimize", "optimize-tests",
"optimize-cxx", "optimize-llvm",
"debug", "pax-flags", "clang",
"inject-std-version", "llvm-static-stdcpp"]
if "cargo" in props:
s = "./configure --local-rust-root=$PWD/rustc"
else:
s = "../configure"
for opt in opts:
if opt in props:
if props_has_negative_key(props, opt):
s += " --disable-" + opt
else:
s += " --enable-" + opt
testing_android = "android" in props and props["android"] == True
# Set up the path to the android NDK
if testing_android:
s += " --android-cross-path=~/ndk_standalone"
s += " --target=arm-linux-androideabi"
s += " --disable-docs"
if "release-channel" in props:
s += " --release-channel=" + props["release-channel"]
if props["release-channel"] == "nightly":
# Our release builders build multiple hosts at once.
# This tells them to only include a single target in each artifact.
s += " --enable-dist-host-only"
if props["release-channel"] == "stable":
s += " --enable-dist-host-only"
# Cargo is still using this, but Rust is using the "release-channel" prop
if "nightly" in props:
s += " --enable-nightly"
if "build" in props:
if re.search("^[a-zA-Z0-9-_\.]+$", props["build"]):
s += " --build=" + props["build"]
if "hosts" in props:
if props["hosts"] == "all" and "platform" in props:
hosts = all_platform_hosts(props["platform"])
else:
hosts = [t for t in props["hosts"] \
if re.search("^[a-zA-Z0-9-_\.]+$", str(t))]
if hosts:
s += " --host=" + ",".join(hosts)
# --target is configured specially for android
if "targets" in props and not testing_android:
if props["targets"] == "all" and "platform" in props:
targets = all_platform_targets(props["platform"])
else:
targets = [t for t in props["targets"] \
if re.search("^[a-zA-Z0-9-_\.]+$", str(t))]
if targets:
s += " --target=" + ",".join(targets)
# This works around a bug (that I don't know the cause of) where
# $PWD is set to a windows-style path, causing Rust's configure script
# to see the source dir as a windows-style path, then using that path
# to call LLVM's configure script, which pukes on the windows-style
# value of $0 by failing to generate AsmPrinters. Calling cd -P
# sets $PWD properly.
if "buildername" in props and "win" in props["buildername"]:
s = "(cd -P . && " + s + ")"
return ['sh', '-c', s]
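# For illustration, with only the properties
#   {"platform": "linux", "optimize": True, "hosts": "all",
#    "release-channel": "nightly"}
# set, the command above renders as
#   ['sh', '-c', "../configure --enable-optimize --release-channel=nightly"
#                " --enable-dist-host-only"
#                " --host=i686-unknown-linux-gnu,x86_64-unknown-linux-gnu"]
# (real builds carry more properties, so the real command line is longer).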
# Checks whether all dist artifacts for all platforms have been uploaded from
# the slaves to the buildmaster and have matching commit ids
class DistSync(BuildStep):
stagingDir = None
platforms = []
def __init__(self, stagingDir=None, platforms=[], **kwargs):
BuildStep.__init__(self, **kwargs)
self.stagingDir = stagingDir
self.platforms = platforms
def start(self):
self.log = self.addLog('log')
if self.all_dist_builds_done():
self.finished(SUCCESS)
else:
self.finished(FAILURE)
self.step_status.setText(self.describe(done=True))
self.log.finish()
def all_dist_builds_done(self):
self.log.addEntry(STDOUT, 'checking commit ids of builds\n')
all_done = True
consensus_commit_id = None
for p in self.platforms:
plat_dir = self.stagingDir + "/" + p
self.log.addEntry(STDOUT, 'checking ' + plat_dir + '\n')
commit_id_file = plat_dir + "/commit-id"
try:
with open(commit_id_file, 'r') as f:
commit_id = f.read()
self.log.addEntry(STDOUT, 'commit-id: ' + commit_id + '\n')
if consensus_commit_id:
all_done = all_done and commit_id == consensus_commit_id
else:
consensus_commit_id = commit_id
except IOError:
self.log.addEntry(STDOUT, 'no commit-id\n')
all_done = False
return all_done
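# For illustration: with platforms=["linux", "mac"] and
# stagingDir="tmp/dist/rust-nightly", this step succeeds only once
# tmp/dist/rust-nightly/linux/commit-id and tmp/dist/rust-nightly/mac/commit-id
# both exist and contain the same revision.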
# Runs a function on the master and sets a property to the return value
class SetPropertyFromFn(BuildStep):
property = None
extract_fn = None
def __init__(self, property=None, extract_fn=None, **kwargs):
BuildStep.__init__(self, **kwargs)
self.property = property
self.extract_fn = extract_fn
def start(self):
self.log = self.addLog('log')
prop_val = self.extract_fn()
properties = self.build.getProperties()
properties.setProperty(self.property, prop_val, 'SetPropertyFromFn', runtime=True)
self.step_status.setText(self.describe(done=True))
self.log.finish()
self.finished(SUCCESS)
def grab_slave_filename(rc, out, err):
for line in out.split('\n'):
if re.search("^[a-zA-Z0-9-_\.]+$", line):
return {"slave_filename": line}
return {"slave_filename": None}
def basic_buildfactory():
global git_source
f = BuildFactory()
# We wipe if the user requests it or if we can't find a config.stamp
# that's been touched in the past 2 days -- which suggests configure is failing
findcmd = "/usr/bin/find . -maxdepth 2 -name config.stamp -ctime -2 | wc -l"
f.addStep(SetPropertyFromCommand(command=["sh", "-c", findcmd],
property="freshconfig",
workdir=WORKDIR))
f.addStep(RemoveDirectory(WORKDIR,
doStepIf=should_wipe))
f.addStep(Git(repourl=git_source,
progress=True,
#clobberOnFailure=True,
retry=(5, 2), # Combat the flakiness. 2 retries, 5 seconds
retryFetch=True, # Combat the flakiness
mode='incremental',
workdir=WORKDIR))
return f
def checkout_and_configure_buildfactory(checking_android):
f = basic_buildfactory()
if checking_android:
# Restart Android emu and wait for it
f.addStep(ShellCommand(env=CommandEnv(),command=["killall", "-9", "emulator64-arm"],
flunkOnFailure=False))
f.addStep(ShellCommand(env=CommandEnv(),command=["bash", "-c", "nohup nohup emulator @test2 -no-window -partition-size 2047 0<&- &>/dev/null &"]))
f.addStep(ShellCommand(env=CommandEnv(),command=["adb", "wait-for-device"],
flunkOnFailure=False))
f.addStep(Configure(env=CommandEnv(),
haltOnFailure=True,
flunkOnFailure=True,
command=ConfigCommand(),
interruptSignal="TERM",
workdir=BUILD_WORKDIR))
return f
def just_tidy_buildfactory():
f = checkout_and_configure_buildfactory(False)
f.addStep(Compile(env=CommandEnv(),
name="tidy",
description="make tidy",
descriptionDone="tidy",
interruptSignal="TERM",
workdir=BUILD_WORKDIR,
command=[MakeCommand(), "tidy"]))
return f
def make_and_check_buildfactory(check, android, windows, parallel):
upload_benches = check != False
upload_stability = not android
checking_android = android == True and check != False
f = checkout_and_configure_buildfactory(checking_android)
# Temporary until we convince the build system
# to handle 0.x 0.x+1 co-occurrence in workspace
f.addStep(Compile(env=CommandEnv(),
name="clean",
description="make clean",
descriptionDone="cleaned",
workdir=BUILD_WORKDIR,
command=[MakeCommand(), "clean"]))
f.addStep(Compile(env=CommandEnv(),
name="clean-llvm",
doStepIf=should_clean_llvm,
description="make clean-llvm",
descriptionDone="cleaned llvm",
interruptSignal="TERM",
workdir=BUILD_WORKDIR,
command=[MakeCommand(), "clean-llvm"]))
# msys make is pretty buggy with parallelization
if windows or not parallel:
command = [MakeCommand()]
else:
command = [MakeCommand(), "-j2"]
f.addStep(Compile(env=CommandEnv(),
haltOnFailure=True,
flunkOnFailure=True,
timeout=3600,
interruptSignal="TERM",
command=command,
workdir=BUILD_WORKDIR))
f.addStep(Test(env=CommandEnv(),
doStepIf=check != False,
haltOnFailure=True,
flunkOnFailure=True,
interruptSignal="TERM",
command=CheckCommand(),
timeout=3600,
workdir=BUILD_WORKDIR))
# Upload stability metrics
if upload_stability:
master_dir = "tmp/stab-metrics/%(buildername)s/%(slavename)s"
s3_dir = "stab-metrics/%(got_revision)s/%(buildername)s/%(slavename)s"
# This fails on the windows bot occasionally because of locking issues
# and is not critical so don't halt/flunk
f.addStep(DirectoryUpload(name="stability slave upload",
slavesrc="doc",
masterdest=WithProperties(master_dir),
workdir=BUILD_WORKDIR,
haltOnFailure=False,
warnOnFailure=True,
flunkOnFailure=False))
tar_cmd = WithProperties("cd " + master_dir + " && " +
"tar czf stab.tar.gz */*.json")
f.addStep(MasterShellCommand(name="stability tar",
command=["sh", "-c", tar_cmd],
haltOnFailure=True,
warnOnFailure=True,
flunkOnFailure=False))
s3cmd = WithProperties("s3cmd put -P -r " + master_dir + "/stab.tar.gz"
+ " " + s3_addy + "/" + s3_dir + "/")
f.addStep(MasterShellCommand(name="stability s3 upload",
command=["sh", "-c", s3cmd],
haltOnFailure=True,
warnOnFailure=True,
flunkOnFailure=False))
f.addStep(MasterShellCommand(name="stability rm",
command=["rm", "-Rf",
WithProperties(master_dir)],
haltOnFailure=True,
warnOnFailure=True,
flunkOnFailure=False))
if upload_benches:
master_dir = "tmp/build-metrics/%(buildername)s/%(slavename)s"
s3_dir = "build-metrics/%(got_revision)s/%(buildername)s/%(slavename)s"
# This fails on the windows bot occasionally because of locking issues
# and is not critical so don't halt/flunk
f.addStep(DirectoryUpload(name="bench slave upload",
slavesrc="tmp",
masterdest=WithProperties(master_dir),
workdir=BUILD_WORKDIR,
haltOnFailure=False,
warnOnFailure=True,
flunkOnFailure=False))
tar_cmd = WithProperties("cd " + master_dir + " && " +
"tar czf bench.tar.gz *.json")
f.addStep(MasterShellCommand(name="bench tar",
command=["sh", "-c", tar_cmd],
haltOnFailure=True,
warnOnFailure=True,
flunkOnFailure=False))
s3cmd = WithProperties("s3cmd put -P -r " + master_dir + "/bench.tar.gz"
+ " " + s3_addy + "/" + s3_dir + "/")
f.addStep(MasterShellCommand(name="bench s3 upload",
command=["sh", "-c", s3cmd],
haltOnFailure=True,
warnOnFailure=True,
flunkOnFailure=False))
f.addStep(MasterShellCommand(name="bench rm",
command=["rm", "-Rf",
WithProperties(master_dir)],
haltOnFailure=True,
warnOnFailure=True,
flunkOnFailure=False))
return f
def cargo_basic_buildfactory():
global cargo_source
f = BuildFactory()
# We wipe if the user requests it or if we can't find a config.stamp
# that's been touched in the past 2 days -- which suggests configure is failing
f.addStep(RemoveDirectory("build",
doStepIf=should_wipe))
f.addStep(Git(repourl=cargo_source,
progress=True,
#clobberOnFailure=True,
retry=(5, 2), # Combat the flakiness. 2 retries, 5 seconds
retryFetch=True, # Combat the flakiness
mode='incremental',
submodules=True))
return f
def cargo_buildfactory():
f = cargo_basic_buildfactory()
f.addStep(ShellCommand(env=CommandEnv(),
command=["sh", ".travis.install.deps.sh"],
workdir=WORKDIR))
f.addStep(Configure(env=CommandEnv(),
haltOnFailure=True,
flunkOnFailure=True,
command=ConfigCommand(),
interruptSignal="TERM",
workdir=WORKDIR))
f.addStep(Compile(env=CommandEnv(),
name="clean",
description="make clean-all",
descriptionDone="cleaned",
workdir=WORKDIR,
command=[MakeCommand(), "clean-all"]))
f.addStep(Compile(env=CommandEnv(),
haltOnFailure=True,
flunkOnFailure=True,
timeout=3600,
interruptSignal="TERM",
command=[MakeCommand()],
workdir=WORKDIR))
f.addStep(Test(env=CommandEnv(),
doStepIf=True,
haltOnFailure=True,
flunkOnFailure=True,
interruptSignal="TERM",
command=TestCommand(),
timeout=3600,
workdir=WORKDIR))
return f
def cargo_nightly_buildfactory(platform, host):
global s3_cargo_addy
f = cargo_buildfactory()
f.addStep(Compile(env=CommandEnv(),
name="distcheck",
description="make install",
descriptionDone="installed",
workdir=WORKDIR,
command=[MakeCommand(), "distcheck"]))
local_dist_dir = "tmp/dist/cargo-nightly"
local_dist_platform_dir = local_dist_dir + "/" + host
# Delete local dist dir
rm_dist_cmd = "rm -rf " + local_dist_platform_dir
f.addStep(MasterShellCommand(name="rm dist dir",
command=["sh", "-c", rm_dist_cmd]))
tarball = "cargo-nightly-" + host + ".tar.gz"
f.addStep(FileUpload(slavesrc="target/" + host + "/release/dist/" + tarball,
masterdest=local_dist_platform_dir + "/" + tarball,
workdir=WORKDIR))
commit_id_cmd = "echo '%(got_revision)s' > " + local_dist_platform_dir + "/commit-id"
f.addStep(MasterShellCommand(name="stamp commit id",
command=["sh", "-c", WithProperties(commit_id_cmd)]))
all_cargo_hosts = []
for p in dist_platforms:
for h in all_platform_hosts(p):
all_cargo_hosts += [h]
f.addStep(DistSync(name="checking for synced dist builds",
stagingDir=local_dist_dir,
platforms=all_cargo_hosts,
haltOnFailure=True,
flunkOnFailure=False))
return finish_dist(f, local_dist_dir, all_cargo_hosts, s3_cargo_addy, "cargo-dist")
def snap3_buildfactory(platform):
global s3_addy
f = make_and_check_buildfactory(True, False, "win" in platform,
False)
hosts = all_platform_hosts(platform)
if hosts != None:
for host in hosts:
f.addStep(Compile(env=CommandEnv(),
name="make-snap-stage3",
command=[MakeCommand(),
"snap-stage3-H-" + host],
haltOnFailure=True,
flunkOnFailure=True,
timeout=3600,
interruptSignal="TERM",
workdir=BUILD_WORKDIR))
f.addStep(SetPropertyFromCommand(command=["sh", "-c", "ls rust-stage0-*.tar.bz2"],
haltOnFailure=True,
flunkOnFailure=True,
extract_fn=grab_slave_filename,
workdir=BUILD_WORKDIR))
f.addStep(FileUpload(slavesrc=WithProperties("%(slave_filename:-none)s"),
masterdest=WithProperties("tmp/%(slave_filename:-none)s"),
haltOnFailure=True,
flunkOnFailure=True,
workdir=BUILD_WORKDIR))
s3cmd = WithProperties("s3cmd put -P tmp/%(slave_filename:-none)s "
+ s3_addy + "/stage0-snapshots/")
f.addStep(MasterShellCommand(name="s3-upload",
command=["sh", "-c", s3cmd],
haltOnFailure=True,
flunkOnFailure=True))
f.addStep(ShellCommand(command=["rm",
WithProperties("%(slave_filename:-none)s")],
workdir=BUILD_WORKDIR))
f.addStep(MasterShellCommand(command=["rm",
WithProperties("tmp/%(slave_filename:-none)s")]))
return f
# The only purpose of this buildfactory is to trigger the nightly
# builders all on the same revision
def rust_distsnap_trigger_buildfactory(scheduler_names):
# Need to check out the source to get the revision (I think)
f = basic_buildfactory()
f.addStep(Trigger(schedulerNames=scheduler_names))
return f
def cargo_distsnap_trigger_buildfactory(scheduler_names):
# Need to check out the source to get the revision (I think)
f = cargo_basic_buildfactory()
f.addStep(Trigger(schedulerNames=scheduler_names))
return f
def distsnap_buildfactory(platform, channel_label):
global s3_addy
# Don't run check because distcheck is going to do it all again
f = make_and_check_buildfactory(False, False, "win" in platform,
False)
f.addStep(Compile(env=CommandEnv(),
name="distcheck",
command=[MakeCommand(), "distcheck"],
timeout=3600,
interruptSignal="TERM",
workdir=BUILD_WORKDIR))
# Artifacts from each channel go in their own directory
local_dist_dir = "tmp/dist/rust-" + channel_label
# Artifacts from each platform go in their own dir since multiple
# platforms may produce artifacts with the same name. We'll
# combine them all right before the final upload.
local_dist_platform_dir = local_dist_dir + "/" + platform
# Delete local dist dir for this platform
rm_dist_cmd = "rm -rf " + local_dist_platform_dir + "/*"
f.addStep(MasterShellCommand(name="rm dist dir",
command=["sh", "-c", rm_dist_cmd]))
# Upload artifacts from slave
f.addStep(DirectoryUpload(slavesrc="dist",
masterdest=local_dist_platform_dir,
workdir=BUILD_WORKDIR))
# All remaining steps happen on the buildmaster
# Add the commit-id file to indicate which commit this is
# for. This will be checked by the DistSync buildstep later to
# determine when to upload everything at once.
commit_id_cmd = "echo '%(got_revision)s' > " + local_dist_platform_dir + "/commit-id"
f.addStep(MasterShellCommand(name="stamp commit id",
command=["sh", "-c", WithProperties(commit_id_cmd)]))
# Check whether dists from all builders have finished by examining the commit-id file
f.addStep(DistSync(name="checking for synced dist builds",
stagingDir=local_dist_dir,
platforms=dist_platforms,
haltOnFailure=True,
flunkOnFailure=False))
# Because the above check that all platforms have finished is
# haltOnFailure but not flunkOnFailure, all builds but the last
# will stop here, successfully. The final build will proceed with
# all the work of consolidating and uploading the artifacts.
# These next few steps deal with uploading docs. We're going to
# take the docs from the linux builder, put them on s3, then
# delete the docs before uploading the rest of the dist artifacts.
linux_doc_dir = local_dist_dir + "/linux/doc"
# Figure out the 'package name' for subsequent steps. Package name
# is something like 'nightly', 'beta', or '1.0.0', basically
# either a channel name or a version, depending on how the
# makefiles configure a given channel.
def grab_package_name(doc_dir):
from os import listdir
from os.path import isdir
for f in listdir(doc_dir):
if isdir(doc_dir + "/" + f):
return f
raise Exception("unable to grab package name")
f.addStep(SetPropertyFromFn(name="extracting package name",
property="package_name",
extract_fn=lambda : grab_package_name(linux_doc_dir)))
# This is exactly like the previous step of grabbing
# `package_name` but substituting 'nightly' with 'master' - for
# legacy reasons nightly docs are uploaded to 'master'.
# FIXME #17398 don't do this
def grab_doc_package_name(doc_dir):
name = grab_package_name(doc_dir)
if name == 'nightly':
return 'master'
else:
return name
f.addStep(SetPropertyFromFn(name="extracting doc package name",
property="doc_package_name",
extract_fn=lambda : grab_doc_package_name(linux_doc_dir)))
# Sync the doc folder from the 'linux' staging dir
s3cmd = WithProperties("s3cmd sync -P --no-progress --delete-removed " + \
linux_doc_dir + "/%(package_name)s/ " + \
s3_addy + "/doc/%(doc_package_name)s/",)
f.addStep(MasterShellCommand(name="upload docs",
command=["sh", "-c", s3cmd]))
# Delete the doc folders from all the dist_platform directories
all_doc_dirs = [(local_dist_dir + "/" + p + "/doc") for p in dist_platforms]
rm_doc_dirs_cmd = "rm -Rf " + " ".join(all_doc_dirs)
f.addStep(MasterShellCommand(name="rm doc dirs",
command=["sh", "-c", rm_doc_dirs_cmd]))
# We want our source tarballs to come from the linux bot. Delete any others
non_linux_platforms = [p for p in dist_platforms if p != 'linux']
non_linux_src_tarballs = [(local_dist_dir + "/" + p + "/rust-%(package_name)s.tar.gz")
for p in non_linux_platforms]
rm_src_tarballs_cmd = "rm -Rf " + " ".join(non_linux_src_tarballs)
f.addStep(MasterShellCommand(name="rm non-linux src tarballs",
command=["sh", "-c", WithProperties(rm_src_tarballs_cmd)]))
# Upload everything that's left
return finish_dist(f, local_dist_dir, dist_platforms, s3_addy, "dist")
def finish_dist(f, local_dist_dir, dist_subdirs, s3_addy, remote_dist_dir):
# Delete the commit-id files
all_commit_ids = [(local_dist_dir + "/" + p + "/commit-id") for p in dist_subdirs]
rm_commit_ids_cmd = "rm -f " + " ".join(all_commit_ids)
f.addStep(MasterShellCommand(name="rm commit-id",
command=["sh", "-c", rm_commit_ids_cmd]))
# Consolidate everything that's left under one directory for final upload. There
# should be no duplicate artifacts across platforms at this point.
final_dist_dir = local_dist_dir + "/final"
all_dist_wildcards = [(local_dist_dir + "/" + p + "/*") for p in dist_subdirs]
mkdir_final_cmd = "mkdir -p " + final_dist_dir
mv_final_cmd = "mv -f " + " ".join(all_dist_wildcards) + " " + final_dist_dir + "/"
consolidate_cmd = mkdir_final_cmd + " && " + mv_final_cmd
f.addStep(MasterShellCommand(name="consolidate artifacts",
command=["sh", "-c", WithProperties(consolidate_cmd)]))
# Generate SHA 256 checksums for everything remaining
sha256_cmd = "for i in " + final_dist_dir + "/* ; do sha256sum $i > $i.sha256; done"
f.addStep(MasterShellCommand(name="checksumming",
command=["sh", "-c", WithProperties(sha256_cmd)]))
# Just log what we're about to upload for reference
ls_cmd = "ls -l " + final_dist_dir
f.addStep(MasterShellCommand(name="listing artifacts",
command=["sh", "-c", WithProperties(ls_cmd)]))
# Upload everything to S3, first to the archive
s3dir = s3_addy + "/" + remote_dist_dir + "/`date +'%Y-%m-%d'`/"
s3cmd = "s3cmd put -P --cf-invalidate --no-progress " + final_dist_dir + "/* " + s3dir
f.addStep(MasterShellCommand(name="s3 archive upload",
command=["sh", "-c", s3cmd]))
# Then to the primary dist directory
s3dir = s3_addy + "/" + remote_dist_dir + "/"
s3cmd = "s3cmd put -P --cf-invalidate --no-progress " + final_dist_dir + "/* " + s3dir
f.addStep(MasterShellCommand(name="s3 primary upload",
command=["sh", "-c", s3cmd]))
# Delete the dist directory so the next run starts fresh
f.addStep(MasterShellCommand(name="wipe dist dir",
command=["sh", "-c", "rm -Rf " + local_dist_dir]))
return f
c['builders'] = []
def platform_slaves(p):
# The android builder has one slave, with the same name
if "-x-" in p:
return [p]
p = p.split("-")[0]
return [slave.slavename
for slave in c['slaves']
if p in slave.slavename and slave in auto_slaves]
def platform_snap_slaves(p):
p = p.split("-")[0]
return [slave.slavename
for slave in snap_slaves
if p in slave.slavename]
# FIXME: The linux AMI instances are using valgrind 3.7 and we need 3.8+
# This rule limits which bots we run the valgrinding dist snapshot on.
def platform_dist_slaves(p):
p = p.split("-")[0]
return [slave.slavename
for slave in dist_slaves
if p in slave.slavename]
def nextSlave(builder, available_slavebuilders):
if available_slavebuilders and len(available_slavebuilders) > 0:
s = sorted(available_slavebuilders, key=lambda s: s.slave.slavename)
return s[0]
return None
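# For illustration: nextSlave sorts the available slavebuilders by slave
# name and always returns the first, so a pool like ["linux-64-1",
# "linux-64-2"] (placeholder names) will always prefer "linux-64-1" when it
# is idle, keeping work concentrated on a stable subset of slaves.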
for p in try_platforms:
# Builder that does incremental-make-check-lite runs,
# host=target only, no valgrind, no perf.
c['builders'].append(BuilderConfig(
mergeRequests=False,
name="try-" + p,
category="try",
nextSlave=nextSlave,
properties={"platform":p,
"branch":"try",
"valgrind": False,
"docs": False,
"check": "check-lite",
"verbose": True},
slavenames=platform_slaves(p),
factory=make_and_check_buildfactory(True, False,
"win" in p, True)))
for p in auto_platforms:
opt_compiler = True
opt_tests = True
if "-nopt-c" in p:
opt_compiler = False
if "-nopt-t" in p:
opt_tests = False
vg = False
if "-vg" in p:
vg = True
shard = False
m = re.search("-(\d+\.\d+)", p)
if m:
shard = m.group(1)
chk = True
if "-all" in p:
chk = "check-lite"
if "bsd" in p:
chk = "check-lite"
if not opt_compiler:
chk = False
android = False
if "-x-android" in p:
android = True
# Not checking android for now
chk = False
if "-x-android-t" in p:
# Only test android, not the host
chk = "check-stage2-T-arm-linux-androideabi-H-x86_64-unknown-linux-gnu"
c['builders'].append(BuilderConfig(
mergeRequests=True,
name="auto-" + p,
category="auto",
slavenames=platform_slaves(p),
nextSlave=nextSlave,
properties={"platform":p,
"branch":"auto",
"valgrind": vg,
"optimize": opt_compiler,
"optimize-tests": opt_tests,
"android": android,
"build": auto_platform_triple(p),
"hosts": auto_platform_host(p),
"targets": auto_platform_target(p),
"check": chk},
factory=make_and_check_buildfactory(chk, android,
"win" in p, True)))
for p in snap_platforms:
c['builders'].append(BuilderConfig(
mergeRequests=True,
name="snap3-" + p,
category="util-snap3",
slavenames=platform_snap_slaves(p),
nextSlave=nextSlave,
properties={"platform":p,
"branch":"snap-stage3",
"hosts": "all",
"valgrind": False,
"inject-std-version": False,
"llvm-static-stdcpp": p == 'linux',
"check": True},
factory=snap3_buildfactory(p)))
for p in dist_platforms:
# Builder that does whatever's needed to make releases.
c['builders'].append(BuilderConfig(
mergeRequests=True,
name="dist2-" + p,
category="util-dist",
properties={"platform":p,
"branch":"dist-snap",
"hosts": "all",
"valgrind": False,
"check": True,
"llvm-static-stdcpp": p == 'linux',
"release-channel": "stable"},
nextSlave=nextSlave,
slavenames=platform_dist_slaves(p),
factory=distsnap_buildfactory(p, 'stable')))
# Nightlies
c['builders'].append(BuilderConfig(
mergeRequests=True,
name="nightly-" + p,
category="util-dist",
properties={"platform":p,
"branch":"master",
"hosts": "all",
"valgrind": False,
"check": True,
"llvm-static-stdcpp": p == 'linux',
"release-channel": "nightly"},
nextSlave=nextSlave,
slavenames=platform_dist_slaves(p),
factory=distsnap_buildfactory(p, 'nightly')))
# Nightlies
for host in all_platform_hosts(p):
bits = "32" if "i686" in host else "64"
c['builders'].append(BuilderConfig(
mergeRequests=True,
name="nightly-cargo-" + p + "-" + bits,
category="util-dist",
properties={"platform":p,
"branch":"master",
"hosts": [host],
"check": True,
"wipe": True,
"cargo": True,
"cargo-nightly": True,
"optimize": True,
"bits": bits,
"nightly": True},
nextSlave=nextSlave,
slavenames=platform_dist_slaves(p),
factory=cargo_nightly_buildfactory(p, host)))
# The nightly trigger
c['builders'].append(BuilderConfig(
mergeRequests=True,
name="nightly-trigger",
category="util-dist",
nextSlave=nextSlave,
slavenames=[s.slavename for s in auto_slaves], # any slave; we just need to check out the source to get the current rev
factory=rust_distsnap_trigger_buildfactory(["nightly"])))
# The stable trigger
c['builders'].append(BuilderConfig(
mergeRequests=True,
name="dist2-trigger",
category="util-dist",
nextSlave=nextSlave,
slavenames=[s.slavename for s in auto_slaves], # any slave; we just need to check out the source to get the current rev
factory=rust_distsnap_trigger_buildfactory(["dist2"])))
# The cargo nightly trigger
c['builders'].append(BuilderConfig(
mergeRequests=True,
name="nightly-cargo-trigger",
category="util-dist",
nextSlave=nextSlave,
slavenames=[s.slavename for s in auto_slaves], # any slave; we just need to check out the source to get the current rev
factory=cargo_distsnap_trigger_buildfactory(["nightly-cargo"])))
for platform in cargo_platforms:
p = platform.split("-")[0]
bits = platform.split("-")[1]
triple = auto_platform_triple(platform)
slaves = [slave.slavename
for slave in c['slaves']
if p in slave.slavename and slave in auto_slaves]
# Builder that does incremental-make-check-lite runs,
# host=target only, no valgrind, no perf.
c['builders'].append(BuilderConfig(
mergeRequests=True,
name="cargo-" + platform,
category="cargo",
nextSlave=nextSlave,
properties={"platform":p,
"branch":"auto-cargo",
"verbose": True,
"cargo": True,
"bits": bits,
"build": triple},
slavenames=slaves,
factory=cargo_buildfactory()))
####### STATUS TARGETS
# 'status' is a list of Status Targets. The results of each build will be
# pushed to these targets. buildbot/status/*.py has a variety to choose from,
# including web pages, email senders, and IRC bots.
c['status'] = []
from buildbot.status import html
from buildbot.status.web import authz, auth
# we add one trivial user to prevent spambots from hitting the web UI
# (yes, this has happened)
users = [("rust", "rust")]
authz_cfg=authz.Authz(
auth=auth.BasicAuth(users),
# change any of these to True to enable; see the manual for more
# options
gracefulShutdown = 'auth',
forceBuild = 'auth', # use this to test your slave once it is set up
forceAllBuilds = 'auth',
pingBuilder = 'auth',
stopBuild = 'auth',
stopAllBuilds = 'auth',
cancelPendingBuild = 'auth',
showUsersPage = 'auth',
)
#c['status'].append(
# mail.MailNotifier(fromaddr="buildbot@rust-lang.org",
# builders=[],
# relayhost="smtp.example.org"))
c['status'].append(html.WebStatus(
revlink='http://github.com/mozilla/rust/commit/%s',
order_console_by_time=True,
#http_port="tcp:8010:interface=127.0.0.1",
http_port="tcp:8010:interface=127.0.0.1",
authz=authz_cfg))
from buildbot.status import words
c['status'].append(words.IRC(host="irc.mozilla.org",
port=6697,
useSSL=True,
nick="rust-buildbot",
channels=["#rust-bots"]))
# Status reporter that kills off builds that are redundant if a
# particular build has failed. We use this to kill associated 'auto'
# jobs if one of them fails.
#
# thanks to Brendan Cully <brendan@kublai.com>
from twisted.internet import defer
from twisted.python import log
from buildbot.status.base import StatusReceiverMultiService
from buildbot.status import results
from buildbot import interfaces
def samesource(ssa, ssb):
if ssa.ssid and ssa.ssid == ssb.ssid:
return True
if ssa.branch == ssb.branch and ssa.revision == ssb.revision \
and ssa.project == ssb.project and ssa.repository == ssb.repository \
and ssa.patch == ssb.patch:
return True
return False
class BSKiller(StatusReceiverMultiService):
def __init__(self, buildermatch=lambda _: True):
StatusReceiverMultiService.__init__(self)
self.buildermatch = buildermatch
self.builders = []
self.ctl = None
def startService(self):
StatusReceiverMultiService.startService(self)
self.parent.getStatus().subscribe(self)
self.ctl = interfaces.IControl(self.master)
def builderAdded(self, name, builder):
"""choose to subscribe to the given builder"""
if not self.buildermatch(name):
return False
self.builders.append(builder)
return self
def buildFinished(self, buildername, build, result):
if result in [results.FAILURE]:
return self.killallbuilds(build.getSourceStamps())
def stepFinished(self, build, step, results):
builderctl = self.ctl.getBuilder(build.getBuilder().getName())
builderctl.getBuild(build.getNumber()).stopBuild("no point in continuing")
@defer.inlineCallbacks
def killallbuilds(self, sourcestamps):
pending = []
for builder in self.builders:
checkpending = True
builderctl = self.ctl.getBuilder(builder.getName())
for build in builder.getCurrentBuilds():
if build.isFinished():
continue
log.msg('considering build %s:%d' % (builder.getName(),
build.getNumber()))
if not any([samesource(ss1, ss2)
for ss1 in sourcestamps
for ss2 in build.getSourceStamps()]):
log.msg('mismatched sourcestamps')
continue
log.msg('sourcestamps match')
checkpending = False
bldctl = builderctl.getBuild(build.getNumber())
if build.currentStep != None and ("git" in build.currentStep.getName()
or "configure" in build.currentStep.getName()):
build.subscribe(self)
else:
bldctl.stopBuild("no point in continuing")
if checkpending:
res = yield builderctl.getPendingBuildRequestControls()
brctls = dict((brc.brid, brc) for brc in res)
brs = yield builder.getPendingBuildRequestStatuses()
for br in brs:
ss = yield br.getSourceStamps()
if any([samesource(ss1, ss2) for ss1 in sourcestamps
for ss2 in ss]):
log.msg('cancelling pending build on ' + builder.getName())
brctl = brctls[br.brid]
brctl.cancel()
c['status'].append(BSKiller(lambda buildername: buildername.startswith('auto') and buildername not in nogate_builders))
####### PROJECT IDENTITY
# the 'title' string will appear at the top of this buildbot
# installation's html.WebStatus home page (linked to the
# 'titleURL') and is embedded in the title of the waterfall HTML page.
c['title'] = "Rust"
c['titleURL'] = "http://rust-lang.org"
# the 'buildbotURL' string should point to the location where the buildbot's
# internal web server (usually the html.WebStatus page) is visible. This
# typically uses the port number set in the Waterfall 'status' entry, but
# with an externally-visible host name which the buildbot cannot figure out
# without some help.
c['buildbotURL'] = "http://rust1.vm.labs.scl3.mozilla.com:8010/"
####### DB URL
# This specifies what database buildbot uses to store change and scheduler
# state. You can leave this at its default for all but the largest
# installations.
c['db_url'] = "sqlite:///state.sqlite"