Skip to content

Instantly share code, notes, and snippets.

@aburan28
Last active July 27, 2019 20:42
Show Gist options
  • Save aburan28/bb192df36e16fce003baa3f614d22311 to your computer and use it in GitHub Desktop.
Save aburan28/bb192df36e16fce003baa3f614d22311 to your computer and use it in GitHub Desktop.
<!--
~ Artifactory is a binaries repository manager.
~ Copyright (C) 2018 JFrog Ltd.
~
~ Artifactory is free software: you can redistribute it and/or modify
~ it under the terms of the GNU Affero General Public License as published by
~ the Free Software Foundation, either version 3 of the License, or
~ (at your option) any later version.
~
~ Artifactory is distributed in the hope that it will be useful,
~ but WITHOUT ANY WARRANTY; without even the implied warranty of
~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
~ GNU Affero General Public License for more details.
~
~ You should have received a copy of the GNU Affero General Public License
~ along with Artifactory. If not, see <http://www.gnu.org/licenses/>.
-->
<Context path="/access" docBase="${artifactory.home}/webapps/access.war">
<Parameter name="jfrog.access.bundled" value="true" override="true"/>
<!-- enable annotations scanning of access jar files -->
<JarScanner scanClassPath="false">
<JarScanFilter defaultPluggabilityScan="false" pluggabilityScan="access*" defaultTldScan="false"/>
</JarScanner>
</Context>
#cat /opt/jfrog/artifactory/bin/artifactory.sh
#!/bin/bash
#
# Startup script for Artifactory in Tomcat Servlet Engine
#
# Print a highlighted (red) error message framed by blank lines, then
# abort the whole script with exit status 1.
errorArtHome() {
    printf '\n'
    printf '\033[31m** %s\033[0m\n' "$1"
    printf '\n'
    exit 1
}
# Verify that ARTIFACTORY_HOME is set and points to an existing directory;
# aborts via errorArtHome otherwise. (Fixed message grammar: "does not exist".)
checkArtHome() {
    if [ -z "$ARTIFACTORY_HOME" ] || [ ! -d "$ARTIFACTORY_HOME" ]; then
        errorArtHome "ERROR: Artifactory home folder not defined or does not exist at $ARTIFACTORY_HOME"
    fi
}
# Verify that TOMCAT_HOME is set and points to an existing directory, then
# publish it as CATALINA_HOME for Tomcat's own scripts.
# (Fixed message grammar: "does not exist".)
checkTomcatHome() {
    if [ -z "$TOMCAT_HOME" ] || [ ! -d "$TOMCAT_HOME" ]; then
        errorArtHome "ERROR: Tomcat Artifactory folder not defined or does not exist at $TOMCAT_HOME"
    fi
    export CATALINA_HOME="$TOMCAT_HOME"
}
# Ensure $ARTIFACTORY_HOME/logs/catalina exists and that $TOMCAT_HOME/logs is
# a symlink pointing at it, so Tomcat's logs land under the Artifactory home.
# Quoted all path expansions so homes containing spaces do not break the calls.
createLogsLink() {
    mkdir -p "$ARTIFACTORY_HOME/logs/catalina" || errorArtHome "Could not create dir $ARTIFACTORY_HOME/logs/catalina"
    if [ ! -L "$TOMCAT_HOME/logs" ]; then
        ln -s "$ARTIFACTORY_HOME/logs/catalina" "$TOMCAT_HOME/logs" || \
            errorArtHome "Could not create link from $TOMCAT_HOME/logs to $ARTIFACTORY_HOME/logs/catalina"
    fi
}
# Count LISTEN sockets bound on the Tomcat shutdown port ($CATALINA_MGNT_PORT)
# and store the count in the global SHUTDOWN_PORT (0 when the port is free).
# Modernized: $(...) instead of backticks, quoted the port expansion.
findShutdownPort() {
    SHUTDOWN_PORT=$(netstat -vatn | grep LISTEN | grep -w "$CATALINA_MGNT_PORT" | wc -l)
}
# Probe the Artifactory process recorded in the $ARTIFACTORY_PID file.
# Sets globals: pidValue (PID read from the pid file, or empty) and
# javaPs (non-empty when 'ps' shows that PID as a java process).
# Modernized: $(...) instead of backticks, quoted all expansions.
isAlive() {
    pidValue=""
    javaPs=""
    if [ -e "$ARTIFACTORY_PID" ]; then
        pidValue=$(cat "$ARTIFACTORY_PID")
        if [ -n "$pidValue" ]; then
            javaPs=$(ps -p "$pidValue" | grep java)
        fi
    fi
}
# Check if conditions to run local METADATA are met
# Returns 0 (success) only when START_LOCAL_MDS is "true" AND the metadata
# start script exists; start/stop of the service is gated on this.
# Simplified the if/return-0/return-1 ladder to a direct test and quoted
# the script path so paths with spaces work.
runMetadata() {
    [ "${START_LOCAL_MDS}" == true ] && [ -f "${metadataScript}" ]
}
# Make the metadata service script executable and launch it with "start",
# but only when runMetadata says the service is enabled and present.
# Quoted the script path so paths with spaces work.
startMetadata() {
    if runMetadata; then
        chmod +x "${metadataScript}"
        "${metadataScript}" start
    fi
}
# Make the metadata service script executable and invoke it with "stop",
# but only when runMetadata says the service is enabled and present.
# Quoted the script path so paths with spaces work.
stopMetadata() {
    if runMetadata; then
        chmod +x "${metadataScript}"
        "${metadataScript}" stop
    fi
}
# Check if conditions to run local router are met
# Returns 0 (success) when the router start script exists; start/stop of
# the router is gated on this. Simplified the if/return ladder to a direct
# test and quoted the path.
runRouter() {
    [[ -f "${routerScript}" ]]
}
# Make the router script executable and SOURCE it (note the '.') with
# argument "start" — sourcing keeps any environment the router script
# exports visible to this shell. Quoted the script path.
startRouter() {
    if runRouter; then
        chmod +x "${routerScript}"
        . "${routerScript}" start
    fi
}
# Make the router script executable and run it (as a child process, unlike
# startRouter which sources it) with argument "stop". Quoted the path.
stopRouter() {
    if runRouter; then
        chmod +x "${routerScript}"
        "${routerScript}" stop
    fi
}
# Check if conditions to run local replicator are met
# Returns 0 (success) only when START_LOCAL_REPLICATOR is "true" AND the
# replicator start script exists; start/stop of the replicator is gated on
# this. Simplified the if/return ladder and quoted the path.
runReplicator() {
    [ "${START_LOCAL_REPLICATOR}" == true ] && [ -f "${replicatorScript}" ]
}
# Make the replicator script executable and launch it with "start", but
# only when runReplicator says it is enabled and present. Quoted the path.
startReplicator() {
    if runReplicator; then
        chmod +x "${replicatorScript}"
        "${replicatorScript}" start
    fi
}
# Make the replicator script executable and invoke it with "stop", but
# only when runReplicator says it is enabled and present. Quoted the path.
stopReplicator() {
    if runReplicator; then
        chmod +x "${replicatorScript}"
        "${replicatorScript}" stop
    fi
}
# Stop the Artifactory Tomcat instance, then the companion services.
# Strategy: if the shutdown port is listening, run Tomcat's shutdown script;
# otherwise (or if that fails) send SIGTERM to the recorded PID. While
# polling (~1s intervals, max ~30s) it re-sends SIGTERM after 10s and
# escalates to SIGKILL after 25s. The pid file is removed only on a clean
# stop. Always attempts to stop replicator, metadata and router at the end.
stop() {
    echo finding
    # The default CATALINA_MGNT_PORT is 8015
    CATALINA_MGNT_PORT=8015
    echo "Using the default catalina management port ($CATALINA_MGNT_PORT) to test shutdown"
    isAlive
    findShutdownPort
    if [ "$SHUTDOWN_PORT" -eq 0 ] && [ -z "$javaPs" ]; then
        echo "Artifactory Tomcat already stopped"
        RETVAL=0
    else
        echo "Stopping Artifactory Tomcat..."
        if [ "$SHUTDOWN_PORT" -ne 0 ]; then
            "$TOMCAT_HOME/bin/shutdown.sh"
            RETVAL=$?
        else
            RETVAL=1
        fi
        if [ "$RETVAL" -ne 0 ]; then
            echo "WARN: Artifactory Tomcat server shutdown script failed. Sending kill signal to $pidValue"
            if [ -n "$pidValue" ]; then
                kill "$pidValue"
                RETVAL=$?
            fi
        fi
        # Wait 2 seconds for process to die
        sleep 2
        findShutdownPort
        isAlive
        nbSeconds=1
        # Fixed precedence bug: '||' binds looser than '&&', so the original
        # condition was 'port busy OR (java alive AND under 30s)' — a port
        # that never frees up would loop forever. Braces restore the intended
        # '(port busy OR java alive) AND under 30s'.
        while { [ "$SHUTDOWN_PORT" -ne 0 ] || [ -n "$javaPs" ]; } && [ "$nbSeconds" -lt 30 ]; do
            if [ "$nbSeconds" -eq 10 ] && [ -n "$pidValue" ]; then
                # After 10 seconds try to kill the process
                echo "WARN: Artifactory Tomcat server shutdown not done after 10 seconds. Sending kill signal"
                kill "$pidValue"
                RETVAL=$?
            fi
            if [ "$nbSeconds" -eq 25 ] && [ -n "$pidValue" ]; then
                # After 25 seconds try to kill -9 the process
                echo "WARN: Artifactory Tomcat server shutdown not done after 25 seconds. Sending kill -9 signal"
                kill -9 "$pidValue"
                RETVAL=$?
            fi
            sleep 1
            nbSeconds=$((nbSeconds + 1))
            findShutdownPort
            isAlive
        done
        if [ "$SHUTDOWN_PORT" -eq 0 ] && [ -z "$javaPs" ]; then
            echo "Artifactory Tomcat stopped"
        else
            echo "ERROR: Artifactory Tomcat did not stop"
            RETVAL=1
        fi
    fi
    # Fixed: the original '[ $RETVAL=0 ]' is a single-word string test that is
    # ALWAYS true, so the pid file was removed even when the stop failed.
    [ "$RETVAL" -eq 0 ] && rm -f "$ARTIFACTORY_PID"
    stopReplicator
    stopMetadata
    stopRouter
}
# Start Artifactory (and companion services) via Tomcat's catalina.sh.
# No arguments  -> companions + foreground "catalina.sh run".
# "stop"        -> delegates to stop().
# anything else -> companions + "catalina.sh <args...>".
# Fixed: '[ -z "$@" ]' and '[ "$@" == "stop" ]' both break when more than
# one argument is passed (multiple words inside [ ]); replaced with '$#'
# and '$1' which are the intended checks.
start() {
    ## will source defaults file to prevent issue of loading wrong ARTIFACTORY_HOME variable
    ## which will be used for join.key generation by "Access" application
    if [[ -f "/etc/opt/jfrog/artifactory/default" ]]; then
        . /etc/opt/jfrog/artifactory/default
    fi
    JOIN_PATHS=$ARTIFACTORY_HOME
    if runMetadata; then
        export METADATA_HOME="$JOIN_PATHS/metadata"
        export JOIN_PATHS=$JOIN_PATHS:$METADATA_HOME
    fi
    export CATALINA_PID="$ARTIFACTORY_PID"
    [ -x "$TOMCAT_HOME/bin/catalina.sh" ] || chmod +x "$TOMCAT_HOME"/bin/*.sh
    if [ $# -eq 0 ]; then
        startRouter
        startReplicator
        startMetadata
        #default to catalina.sh run
        setCatalinaOpts
        "$TOMCAT_HOME/bin/catalina.sh" run
    else
        #create $ARTIFACTORY_HOME/run
        if [ -n "$ARTIFACTORY_PID" ]; then
            mkdir -p "$(dirname "$ARTIFACTORY_PID")" || \
                errorArtHome "Could not create dir for $ARTIFACTORY_PID"
        fi
        if [ "$1" == "stop" ]; then
            setCatalinaOpts
            stop
        else
            startRouter
            startReplicator
            startMetadata
            setCatalinaOpts
            # Start tomcat
            "$TOMCAT_HOME/bin/catalina.sh" "$@"
        fi
    fi
}
# Report status: exit 0 (printing the PID) when the pid file exists,
# otherwise dump the environment the startup would use and exit 1.
check() {
    # Quoted: with an empty ARTIFACTORY_PID the original '[ -f $VAR ]'
    # collapsed to '[ -f ]', which is a one-arg string test and always TRUE.
    if [ -f "$ARTIFACTORY_PID" ]; then
        echo "Artifactory is running, on pid=$(cat "$ARTIFACTORY_PID")"
        echo ""
        exit 0
    fi
    echo "Checking arguments to Artifactory: "
    echo "ARTIFACTORY_HOME = $ARTIFACTORY_HOME"
    echo "TOMCAT_HOME = $TOMCAT_HOME"
    echo "ARTIFACTORY_PID = $ARTIFACTORY_PID"
    echo "JAVA_HOME = $JAVA_HOME"
    echo "JAVA_OPTIONS = $JAVA_OPTIONS"
    echo
    exit 1
}
# Assemble the JVM options Tomcat is launched with and publish them via
# CATALINA_OPTS for catalina.sh. Includes the Artifactory home, forced
# UTF8 file encoding, JRuby tuning flags and the join-key search paths.
setCatalinaOpts() {
    local opts="$JAVA_OPTIONS"
    opts="$opts -Dartifactory.home=$ARTIFACTORY_HOME"
    opts="$opts -Dfile.encoding=UTF8"
    opts="$opts -Djruby.bytecode.version=1.8"
    opts="$opts -Djruby.compile.invokedynamic=false"
    opts="$opts -Djfrog.join.key.paths=$JOIN_PATHS"
    export CATALINA_OPTS="$opts"
}
# Detect a java executable and enforce a minimum version of 1.8.
# Resolution order: $JAVA_HOME/bin/java if executable, else the first 'java'
# on PATH. If neither exists only a message is printed — execution continues
# (and will fail later when Tomcat starts).
# NOTE(review): the exit-code convention is deliberately inverted — the awk
# program exits 0 (success) when the version is TOO OLD, so '&& exit 99'
# aborts the whole script with status 99; a new-enough java makes awk exit 1
# and the script falls through.
# NOTE(review): '$2 < 1.8' compares the quoted version string (e.g.
# "1.8.0_171" or "11.0.1") against the number 1.8; this happens to accept
# current formats but is fragile — verify against future version schemes.
checkJavaVersion(){
if [[ -n "$JAVA_HOME" ]] && [[ -x "$JAVA_HOME/bin/java" ]]; then
echo "Found java executable in JAVA_HOME ($JAVA_HOME)"
_java="$JAVA_HOME/bin/java"
# 'type -p java' both tests for java on PATH and prints its path
elif type -p java; then
_java=java
else
echo "No java found"
fi
if [[ "$_java" ]]; then
# Extract the version (second "-delimited field of 'java -version' output).
"$_java" -version 2>&1| \
awk -F\" '/version/{\
if ($2 < 1.8) {\
printf "%s is too old must be at least java 1.8\n", $2;\
exit 0;\
} else exit 1}' && exit 99
fi
}
# --- Script entry: resolve all locations relative to this script's dir ---
artBinDir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
export ARTIFACTORY_HOME="$(cd "$(dirname "${artBinDir}")" && pwd)"
export REPLICATOR_DATA="${ARTIFACTORY_HOME}/replicator"
export METADATA_DATA="${ARTIFACTORY_HOME}/metadata"
export ROUTER_DATA="${ARTIFACTORY_HOME}/router"
artDefaultFile="$artBinDir/artifactory.default"
replicatorScript="${artBinDir}/replicator/replicator.sh"
metadataScript="${artBinDir}/metadata/metadata.sh"
routerScript="${artBinDir}/../router/bin/router.sh"
# Source the defaults file; quoted so a home path with spaces works.
# (Message fixed: a sourced file needs to be readable, not executable.)
. "$artDefaultFile" || errorArtHome "ERROR: $artDefaultFile does not exist or is not readable"
# Support old REPLICATOR_ENABLED
if [ "${REPLICATOR_ENABLED}" == true ]; then
export START_LOCAL_REPLICATOR=true
fi
# 'check' sub-command: report status and exit without starting anything
if [ "x$1" = "xcheck" ]; then
check
fi
# Extra termination steps needed
# Signal handler: announce the signal, then stop the companion services
# in the same order the stop() path uses.
terminate () {
    echo "Caught termination signal"
    local svc
    for svc in stopReplicator stopMetadata stopRouter; do
        "$svc"
    done
}
# Catch Ctrl+C and other termination signals to try graceful shutdown
trap terminate SIGINT SIGTERM SIGHUP
# Abort early (exit 99) when no java >= 1.8 is available
checkJavaVersion
# Validate required directories before doing any work
checkArtHome
checkTomcatHome
# Redirect Tomcat logs under $ARTIFACTORY_HOME/logs/catalina via symlink
createLogsLink
# Dispatch to start/stop with the original command-line arguments
start "$@"
#
#
# Artifactory is a binaries repository manager.
# Copyright (C) 2018 JFrog Ltd.
#
# Artifactory is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Artifactory is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Artifactory. If not, see <http://www.gnu.org/licenses/>.
#
#
###############################################################################
# Use this file to override system-level properties used by artifactory. #
# Artifactory-specific properties beginning with the "artifactory." prefix #
# will be handled internally by artifactory - you should change them only if #
# you know what you are doing. #
# All other properties will become normal (jvm-wide) system properties, so #
# this file can be used as an alternative for specifying command-line #
# -Dparam=val parameters. #
###############################################################################
## Comma separated list of disabled addons
#artifactory.addons.disabled=
## Name of alternate application context class to use
#artifactory.applicationContextClass=null
# Enable adding the session id to the URL
#artifactory.servlet.supportUrlSessionTracking=false
## Indicates whether a different instance of Artifactory can request remote artifacts from this instance
#artifactory.artifactoryRequestsToGlobalCanRetrieveRemoteArtifacts=false
## Disable the download access to the global 'repo'
artifactory.repo.global.disabled=true
## Number of seconds for fs items to idle in the cache
#artifactory.fsitem.cache.idleTimeSecs=1200
## Number of seconds to wait between running the storage garbage collector. Use the UI to configure as cron exp
#artifactory.gc.intervalSecs=14400
## Number of milliseconds the garbage collector should wait between the scanning of each node
#artifactory.gc.sleepBetweenNodesMillis=20
## Number of milliseconds to wait before starting to sleep between garbage collector node scanning iterations
#artifactory.gc.scanStartSleepingThresholdMillis=20000
## Number of milliseconds to sleep between garbage collector node scanning iterations
#artifactory.gc.scanSleepBetweenIterationsMillis=200
## Number of milliseconds to work between garbage collector datastore file scanning sleep invocations
#artifactory.gc.fileScanSleepIterationMillis=1000
## Number of milliseconds to sleep during lengthy garbage collector datastore file scanning
#artifactory.gc.fileScanSleepMillis=250
## The maximum binary record cache entries. Applicable only for v2 gc
#artifactory.gc.maxCacheEntries=10000
## Number of seconds to wait until timing out while waiting for an item lock to be acquired
#artifactory.locks.timeoutSecs=120
## Whether to print detailed debug information on lock timeouts
#artifactory.locks.debugTimeouts=false
## Number of seconds to wait between each refresh of the system logs viewer
#artifactory.logs.viewRefreshRateSecs=10
## The maximum number of seconds to wait when blocking on a concurrent download from the same repository, before
## starting a parallel download
# artifactory.repo.concurrentDownloadSyncTimeoutSecs=900
## Indicates if all stored archives should be indexed (even if already done) upon system startup
#artifactory.search.content.forceArchiveIndexing=false
## Maximum number of excerpt fragments to return for each result when searching archive content through the UI
#artifactory.search.content.maxFragments=500
## Maximum number of characters for each fragment
#artifactory.search.content.maxFragmentsSize=5000
## Maximum number of results to return when searching through the UI
#artifactory.search.maxResults=500
## The backend limit of maximum results to return from sql queries issued by users. Should be higher than maxResults.
#artifactory.search.userQueryLimit=1000
## The minimum number of characters allowed for an archive content query.
#artifactory.search.archive.minQueryLength=3
## The maximum number of seconds that should be spent on a pattern search
#artifactory.search.pattern.timeoutSecs=30
## Number of seconds for authentications to idle in the cache
#artifactory.security.authentication.cache.idleTimeSecs=300
## Minimal number of seconds that should be the difference between each user last access timestamp
#artifactory.security.userLastAccessUpdatesResolutionSecs=60
## If Login Remember Me should be disabled and users will have to input their credentials on each login.
#artifactory.security.disableRememberMe=false
## Lifespan of the remember me cookie
#artifactory.security.rememberMe.lifetimeSecs=1209600
## Caches blocked user in sake of performance improvement
## and takes load off authentication mechanism/db
#artifactory.security.useFrontCacheForBlockedUsers=true
## Login dynamically blocked for increasing amount of time
## since third incorrect login, algorithm is:
## (INCORRECT_ATTEMPTS-3) * loginBlockDelay (millis)
##
## note: delay may not exceed 5000 (5 seconds)
##
#artifactory.security.loginBlockDelay=500
## Path to alternate Spring configuration file
#artifactory.spring.configDir=null
## Number of lock timeouts to retry while waiting for a task to complete
#artifactory.task.completionLockTimeoutRetries=100
## Whether logging and processing of traffic is active
#artifactory.traffic.collectionActive=false
## Number of seconds to wait between each version information update query
#artifactory.versioningQueryIntervalSecs=43200
## The substring by which a remote host is identified as Maven's central host
#artifactory.mvn.central.hostPattern=.maven.org
## The maximum frequency in seconds that a remote index on Maven central host can be queried for updates
#artifactory.mvn.central.indexerMaxQueryIntervalSecs=86400
## Maximum concurrent workers to calculate maven metadata
#artifactory.mvn.metadata.calculation.workers=8
## Fully qualified name of a maven metadata version comparator to determine the latest and release versions
#artifactory.mvn.metadataVersionsComparatorFqn=org.artifactory.maven.versioning.VersionNameMavenMetadataVersionComparator
## Fully qualified name of a maven metadata timestamp comparator to determine the latest snapshot versions
#artifactory.mvn.metadataSnapshotComparatorFqn=org.artifactory.maven.snapshot.TimestampSnapshotComparator
## Disable requests with version tokens (SNAPSHOT, [RELEASE], [INTEGRATION] which retrieves the latest unique if exists
#artifactory.request.disableVersionTokens=false
## Determine if should sort and retrieve the latest [RELEASE] version by files date created (default is by version string comparator)
#artifactory.request.searchLatestReleaseByDateCreated=false
## Add additional xml mime type file extensions (*.myextension). Separated by ","
#artifactory.xmlAdditionalMimeTypeExtensions=myextension1,myextension2
## Max number of folders to scan deeply for items used as build artifact or dependencies before deleting to warn of
#artifactory.build.maxFoldersToScanForDeletionWarnings=2
## Size for the derby page cache
derby.storage.pageCacheSize=500
## Disable the Derby JMX management service
derby.module.mgmt.jmx=org.apache.derby.impl.services.jmxnone.NoManagementService
## Log all errors/messages of any severity (will list deadlocks)
derby.stream.error.logSeverityLevel=0
## Log all executed statements along with their txid
derby.language.logStatementText=false
## Log all deadlocks
#derby.locks.monitor=true
## Writes a stack trace of all threads involved in lock problems (not just the victims) to the log
#derby.locks.deadlockTrace=true
## Threshold for the number rows touched above which to auto-escalate to table-level locking from row-level locking
#derby.locks.escalationThreshold=5000
## Defines the maximum size of text to parse through the text highlighting script
#artifactory.ui.syntaxColoringMaxTextSizeBytes=512000
## Defines the default chroot for UI file\dir selectors that browse machine Artifactory was installed on
#artifactory.ui.chroot=/home/bob
## Defines the maximum number of files to retain when maintaining a rolling file policy
#artifactory.file.roller.maxFileToRetain=10
## Number of milliseconds to work between system backup file export sleep invocations
#artifactory.backup.fileExportSleepIterationMillis=2000
## Number of milliseconds to sleep during lengthy system backup file exports
#artifactory.backup.fileExportSleepMillis=250
## Number of seconds to check for updates of plugin script files (0 - do not refresh updates scripts)
#artifactory.plugin.scripts.refreshIntervalSecs=0
## Send the Accept-Encoding:gzip header to remote repositories and handle gzip stream responses
#artifactory.http.acceptEncoding.gzip=true
# use the Expect continue directive
#artifactory.http.useExpectContinue=false
# The lower-limit of a filtered resource size for which a performance warning will be displayed
#filtering.resourceSizeKb=64
# Whether to search for an existing resource under a different name before requesting a remote artifact
#artifactory.repo.remote.checkForExistingResourceOnRequest=true
# Comma separated list of global excludes to apply on all repositories
#artifactory.repo.includeExclude.globalExcludes=**/*~,**/#*#,**/.#*,**/%*%,**/._*,**/CVS,**/CVS/**,**/.cvsignore,**/SCCS,**/SCCS/**,**/vssver.scc,**/.svn,**/.svn/**,**/.DS_Store
## A list of archive file names that may contain textual license information.\
#artifactory.archive.licenseFile.names=license,LICENSE,license.txt,LICENSE.txt,LICENSE.TXT
## Number of seconds for dynamic metadata to be cached
#artifactory.mvn.dynamicMetadata.cacheRetentionSecs=10
## A list of custom types (custom file extensions) to use when resolving maven artifacts
#artifactory.mvn.custom.types=tar.gz,tar.bz2
## Determines the maximum number of rows to display per result page
#artifactory.ui.search.maxRowsPerPage=20
## Maximum number of results to return when performing NuGet searches without specifying a limit
#artifactory.nuget.search.maxResults=100
## If true, all requests to the NuGet API require authentication even if anonymous access is enabled.
#artifactory.nuget.forceAuthentication=false
## Disable filename token in response Content-Disposition header
#artifactory.response.disableContentDispositionFilename=false
## Comma separated list of supported archive extensions for archive bundled deploy
#artifactory.request.explodedArchiveExtensions=zip,tar,tar.gz,tgz
## Disable \ enable the username auto complete in the login page (values : "off" "on").
#artifactory.useUserNameAutoCompleteOnLogin=on
## The interval, in seconds, for flushing aggregated statistics to the storage
#artifactory.stats.flushIntervalSecs=30
## The amount of indices to pre fetch by IdGenerator
#artifactory.db.idGenerator.fetch.amount=1000
## When set, the Bintray Settings section in the User Profile page in the UI will be hidden.
#artifactory.bintray.ui.hideUploads=true
## Hides the encrypted password field and the maven settings snippet from the profile page
#artifactory.ui.hideEncryptedPassword=true
## Hides checksum files in simple and list browsing
#artifactory.ui.hideChecksums=true
## Maximum number repositories to import in parallel (default is one less than the available processors)
#artifactory.import.max.parallelRepos
## Idle connection monitor interval (in seconds)
#artifactory.repo.http.idleConnectionMonitorInterval=10
## Disables idle HTTP connections monitoring (idle connections are closed automatically when #idleConnectionMonitorInterval expires)
#artifactory.repo.http.disableIdleConnectionMonitoring=false
## Time to wait for content collection accomplishing (in minutes)
#artifactory.support.core.bundle.contentCollectionAwaitTimeout=60
## Artifactory by default blocks concurrent execution of Support content collection,
## however one can configure time to wait for next available execution slot before
## withdraw (in seconds)
#artifactory.support.core.bundle.waitForSlotBeforeWithdraw=600
## Max (previously created) bundles to keep
#artifactory.support.core.bundle.maxBundles=5
## The latest deployed npm package will be tag with the 'latest' tag
#artifactory.npm.tag.tagLatestByPublish=true
## Add package MD5 checksum to Debian Packages file
#artifactory.debian.metadata.calculateMd5InPackagesFiles=true
## Whitelist loopback prefixes URL as a remote repository URL (e.g. localhost) separated by ','. default all loopback URLs are blocked
#artifactory.remote.repo.url.whitelist.prefix=null
## Block any site/link local (10/8|172.16/12|192.168/16|169.254/16 prefix) or unresolvable URL as a remote repository URL (default: false)
#artifactory.remote.repo.url.strict.policy=false
<!--
~ Artifactory is a binaries repository manager.
~ Copyright (C) 2018 JFrog Ltd.
~
~ Artifactory is free software: you can redistribute it and/or modify
~ it under the terms of the GNU Affero General Public License as published by
~ the Free Software Foundation, either version 3 of the License, or
~ (at your option) any later version.
~
~ Artifactory is distributed in the hope that it will be useful,
~ but WITHOUT ANY WARRANTY; without even the implied warranty of
~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
~ GNU Affero General Public License for more details.
~
~ You should have received a copy of the GNU Affero General Public License
~ along with Artifactory. If not, see <http://www.gnu.org/licenses/>.
-->
<Context crossContext="true" path="/artifactory" docBase="${artifactory.home}/webapps/artifactory.war">
</Context>
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# List of comma-separated packages that start with or equal this string
# will cause a security exception to be thrown when
# passed to checkPackageAccess unless the
# corresponding RuntimePermission ("accessClassInPackage."+package) has
# been granted.
package.access=sun.,org.apache.catalina.,org.apache.coyote.,org.apache.jasper.,org.apache.tomcat.
#
# List of comma-separated packages that start with or equal this string
# will cause a security exception to be thrown when
# passed to checkPackageDefinition unless the
# corresponding RuntimePermission ("defineClassInPackage."+package) has
# been granted.
#
# by default, no packages are restricted for definition, and none of
# the class loaders supplied with the JDK call checkPackageDefinition.
#
package.definition=sun.,java.,org.apache.catalina.,org.apache.coyote.,\
org.apache.jasper.,org.apache.naming.,org.apache.tomcat.
#
#
# List of comma-separated paths defining the contents of the "common"
# classloader. Prefixes should be used to define what is the repository type.
# Path may be relative to the CATALINA_HOME or CATALINA_BASE path or absolute.
# If left as blank,the JVM system loader will be used as Catalina's "common"
# loader.
# Examples:
# "foo": Add this folder as a class repository
# "foo/*.jar": Add all the JARs of the specified folder as class
# repositories
# "foo/bar.jar": Add bar.jar as a class repository
#
# Note: Values are enclosed in double quotes ("...") in case either the
# ${catalina.base} path or the ${catalina.home} path contains a comma.
# Because double quotes are used for quoting, the double quote character
# may not appear in a path.
common.loader="${catalina.base}/lib","${catalina.base}/lib/*.jar","${catalina.home}/lib","${catalina.home}/lib/*.jar"
#
# List of comma-separated paths defining the contents of the "server"
# classloader. Prefixes should be used to define what is the repository type.
# Path may be relative to the CATALINA_HOME or CATALINA_BASE path or absolute.
# If left as blank, the "common" loader will be used as Catalina's "server"
# loader.
# Examples:
# "foo": Add this folder as a class repository
# "foo/*.jar": Add all the JARs of the specified folder as class
# repositories
# "foo/bar.jar": Add bar.jar as a class repository
#
# Note: Values may be enclosed in double quotes ("...") in case either the
# ${catalina.base} path or the ${catalina.home} path contains a comma.
# Because double quotes are used for quoting, the double quote character
# may not appear in a path.
server.loader=
#
# List of comma-separated paths defining the contents of the "shared"
# classloader. Prefixes should be used to define what is the repository type.
# Path may be relative to the CATALINA_BASE path or absolute. If left as blank,
# the "common" loader will be used as Catalina's "shared" loader.
# Examples:
# "foo": Add this folder as a class repository
# "foo/*.jar": Add all the JARs of the specified folder as class
# repositories
# "foo/bar.jar": Add bar.jar as a class repository
# Please note that for single jars, e.g. bar.jar, you need the URL form
# starting with file:.
#
# Note: Values may be enclosed in double quotes ("...") in case either the
# ${catalina.base} path or the ${catalina.home} path contains a comma.
# Because double quotes are used for quoting, the double quote character
# may not appear in a path.
shared.loader=
# Default list of JAR files that should not be scanned using the JarScanner
# functionality. This is typically used to scan JARs for configuration
# information. JARs that do not contain such information may be excluded from
# the scan to speed up the scanning process. This is the default list. JARs on
# this list are excluded from all scans. The list must be a comma separated list
# of JAR file names.
# The list of JARs to skip may be over-ridden at a Context level for individual
# scan types by configuring a JarScanner with a nested JarScanFilter.
# The JARs listed below include:
# - Tomcat Bootstrap JARs
# - Tomcat API JARs
# - Catalina JARs
# - Jasper JARs
# - Tomcat JARs
# - Common non-Tomcat JARs
# - Test JARs (JUnit, Cobertura and dependencies)
tomcat.util.scan.StandardJarScanFilter.jarsToSkip=\
bootstrap.jar,commons-daemon.jar,tomcat-juli.jar,\
annotations-api.jar,el-api.jar,jsp-api.jar,servlet-api.jar,websocket-api.jar,\
jaspic-api.jar,\
catalina.jar,catalina-ant.jar,catalina-ha.jar,catalina-storeconfig.jar,\
catalina-tribes.jar,\
jasper.jar,jasper-el.jar,ecj-*.jar,\
tomcat-api.jar,tomcat-util.jar,tomcat-util-scan.jar,tomcat-coyote.jar,\
tomcat-dbcp.jar,tomcat-jni.jar,tomcat-websocket.jar,\
tomcat-i18n-en.jar,tomcat-i18n-es.jar,tomcat-i18n-fr.jar,tomcat-i18n-ja.jar,\
tomcat-juli-adapters.jar,catalina-jmx-remote.jar,catalina-ws.jar,\
tomcat-jdbc.jar,\
tools.jar,\
commons-beanutils*.jar,commons-codec*.jar,commons-collections*.jar,\
commons-dbcp*.jar,commons-digester*.jar,commons-fileupload*.jar,\
commons-httpclient*.jar,commons-io*.jar,commons-lang*.jar,commons-logging*.jar,\
commons-math*.jar,commons-pool*.jar,\
jstl.jar,taglibs-standard-spec-*.jar,\
geronimo-spec-jaxrpc*.jar,wsdl4j*.jar,\
ant.jar,ant-junit*.jar,aspectj*.jar,jmx.jar,h2*.jar,hibernate*.jar,httpclient*.jar,\
jmx-tools.jar,jta*.jar,log4j*.jar,mail*.jar,slf4j*.jar,\
xercesImpl.jar,xmlParserAPIs.jar,xml-apis.jar,\
junit.jar,junit-*.jar,hamcrest-*.jar,easymock-*.jar,cglib-*.jar,\
objenesis-*.jar,ant-launcher.jar,\
cobertura-*.jar,asm-*.jar,dom4j-*.jar,icu4j-*.jar,jaxen-*.jar,jdom-*.jar,\
jetty-*.jar,oro-*.jar,servlet-api-*.jar,tagsoup-*.jar,xmlParserAPIs-*.jar,\
xom-*.jar
# Default list of JAR files that should be scanned that overrides the default
# jarsToSkip list above. This is typically used to include a specific JAR that
# has been excluded by a broad file name pattern in the jarsToSkip list.
# The list of JARs to scan may be over-ridden at a Context level for individual
# scan types by configuring a JarScanner with a nested JarScanFilter.
tomcat.util.scan.StandardJarScanFilter.jarsToScan=\
log4j-web*.jar,log4j-taglib*.jar,log4javascript*.jar,slf4j-taglib*.jar
# String cache configuration.
tomcat.util.buf.StringCache.byte.enabled=true
#tomcat.util.buf.StringCache.char.enabled=true
#tomcat.util.buf.StringCache.trainThreshold=500000
#tomcat.util.buf.StringCache.cacheSize=5000
# This system property is deprecated. Use the relaxedPathChars relaxedQueryChars
# attributes of the Connector instead. These attributes permit a wider range of
# characters to be configured as valid.
# Allow for changes to HTTP request validation
# WARNING: Using this option may expose the server to CVE-2016-6816
#tomcat.util.http.parser.HttpParser.requestTargetAllow=|
#!/bin/bash
#
# An entrypoint script for Artifactory to allow custom setup before server starts
#
# Defaults: each ': ${VAR:=value}' keeps a value already present in the
# environment and only falls back to the documented default otherwise.
: ${ART_PRIMARY_BASE_URL:=http://artifactory-node1:8081/artifactory}
: ${ARTIFACTORY_USER_NAME:=artifactory}
: ${ARTIFACTORY_USER_ID:=1030}
: ${ARTIFACTORY_HOME:=/opt/jfrog/artifactory}
: ${ARTIFACTORY_DATA:=/var/opt/jfrog/artifactory}
: ${REPLICATOR_DATA:=${ARTIFACTORY_DATA}/replicator}
: ${METADATA_DATA:=${ARTIFACTORY_DATA}/metadata}
: ${REPLICATOR_ETC_FOLDER:=${REPLICATOR_DATA}/etc}
: ${METADATA_ETC_FOLDER:=${METADATA_DATA}/etc}
: ${ACCESS_ETC_FOLDER:=${ARTIFACTORY_DATA}/access/etc}
# Derived locations for Artifactory's own configuration under the data dir
ART_ETC=$ARTIFACTORY_DATA/etc
DB_PROPS=${ART_ETC}/db.properties
ARTIFACTORY_MASTER_KEY_FILE=${ART_ETC}/security/master.key
# presumably container mount points for user-supplied extra configuration
# — TODO confirm against the Dockerfile / docs
: ${ARTIFACTORY_EXTRA_CONF:=/artifactory_extra_conf}
: ${ACCESS_EXTRA_CONF:=/access_extra_conf}
: ${REPLICATOR_EXTRA_CONF:=/replicator_extra_conf}
# ulimit thresholds consumed by checkULimits below
: ${RECOMMENDED_MAX_OPEN_FILES:=32000}
: ${MIN_MAX_OPEN_FILES:=10000}
: ${RECOMMENDED_MAX_OPEN_PROCESSES:=1024}
export ARTIFACTORY_PID=${ARTIFACTORY_HOME}/run/artifactory.pid
# NOTE(review): server.xml-related defaults — usage not visible in this
# chunk; verify where these are consumed before changing them
DEFAULT_SERVER_XML_ARTIFACTORY_PORT=8081
DEFAULT_SERVER_XML_ARTIFACTORY_MAX_THREADS=200
DEFAULT_SERVER_XML_ACCESS_MAX_THREADS=50
logger() {
    # Emit one timestamped log line: "<date> [<line> <file>] <message>".
    DATE_TIME=$(date +'%Y-%m-%d %H:%M:%S')
    # Callers may pre-set CONTEXT; otherwise derive it from the call site.
    [ -n "$CONTEXT" ] || CONTEXT=$(caller)
    MESSAGE=$1
    CONTEXT_LINE=$(awk '{print $1}' <<< "$CONTEXT")
    CONTEXT_FILE=$(awk -F"/" '{print $NF}' <<< "$CONTEXT")
    printf "%s %05s %s %s\n" "$DATE_TIME" "[$CONTEXT_LINE" "$CONTEXT_FILE]" "$MESSAGE"
    # Clear so the next call re-derives its own context
    CONTEXT=
}
errorExit () {
    # Log the failure and terminate the script with a non-zero status.
    logger "ERROR: $1"
    echo
    exit 1
}
warn () {
    # Non-fatal problem: log it and keep going.
    logger "WARNING: $1"
}
# Print on container startup information about Dockerfile location
printDockerFileLocation() {
    # Fixed grammar in the user-facing message ("can found" -> "can be found")
    logger "Dockerfile for this image can be found inside the container."
    logger "To view the Dockerfile: 'cat /docker/artifactory-pro/Dockerfile.artifactory'."
}
# Check the max open files and open processes set on the system.
# Aborts when open-file limit is below MIN_MAX_OPEN_FILES; otherwise only
# warns when below the recommended values.
checkULimits () {
    logger "Checking open files and processes limits"
    CURRENT_MAX_OPEN_FILES=$(ulimit -n)
    logger "Current max open files is $CURRENT_MAX_OPEN_FILES"
    # 'ulimit -n' may report the string "unlimited"; compare numerically only otherwise.
    # (Quoted the first operand — unquoted it breaks '[' if ever empty.)
    if [ "${CURRENT_MAX_OPEN_FILES}" != "unlimited" ] && [ "$CURRENT_MAX_OPEN_FILES" -lt "$RECOMMENDED_MAX_OPEN_FILES" ]; then
        if [ "$CURRENT_MAX_OPEN_FILES" -lt "$MIN_MAX_OPEN_FILES" ]; then
            errorExit "Max number of open files $CURRENT_MAX_OPEN_FILES, is too low. Cannot run Artifactory!"
        fi
        warn "Max number of open files $CURRENT_MAX_OPEN_FILES is low!"
        warn "You should add the parameter '--ulimit nofile=${RECOMMENDED_MAX_OPEN_FILES}:${RECOMMENDED_MAX_OPEN_FILES}' to your 'docker run' command."
    fi
    CURRENT_MAX_OPEN_PROCESSES=$(ulimit -u)
    logger "Current max open processes is $CURRENT_MAX_OPEN_PROCESSES"
    if [ "$CURRENT_MAX_OPEN_PROCESSES" != "unlimited" ] && [ "$CURRENT_MAX_OPEN_PROCESSES" -lt "$RECOMMENDED_MAX_OPEN_PROCESSES" ]; then
        warn "Max number of processes $CURRENT_MAX_OPEN_PROCESSES is too low!"
        # Docker's process-count ulimit flag is spelled 'nproc' (was wrongly 'noproc')
        warn "You should add the parameter '--ulimit nproc=${RECOMMENDED_MAX_OPEN_PROCESSES}:${RECOMMENDED_MAX_OPEN_PROCESSES}' to your 'docker run' command."
    fi
}
# Rewrite Tomcat's server.xml from server.xml.template when any SERVER_XML_*
# environment variable is set: each known placeholder token in the template is
# replaced with the env value (or its DEFAULT_* fallback), the live server.xml
# is backed up once as server.xml.orig, and the filled template is copied over.
configureServerXml () {
logger "Customising Tomcat server.xml if needed"
# List of all available variables with default values if needed
server_xml_variables="""
SERVER_XML_ARTIFACTORY_PORT=${DEFAULT_SERVER_XML_ARTIFACTORY_PORT}
SERVER_XML_ARTIFACTORY_MAX_THREADS=${DEFAULT_SERVER_XML_ARTIFACTORY_MAX_THREADS}
SERVER_XML_ACCESS_MAX_THREADS=${DEFAULT_SERVER_XML_ACCESS_MAX_THREADS}
SERVER_XML_ARTIFACTORY_EXTRA_CONFIG
SERVER_XML_ACCESS_EXTRA_CONFIG
SERVER_XML_EXTRA_CONNECTOR
"""
if env | grep ^SERVER_XML > /dev/null; then
local server_xml=${ARTIFACTORY_HOME}/tomcat/conf/server.xml
local server_xml_template=${ARTIFACTORY_HOME}/server.xml.template
[ -f ${server_xml} ] || errorExit "${server_xml} not found"
[ -f ${server_xml_template} ] || errorExit "${server_xml_template} not found"
# Loop over all variables and replace with final value
for v in ${server_xml_variables}; do
# v is "KEY=default" or a bare "KEY" (no default -> token replaced with empty)
key=$(echo ${v} | awk -F= '{print $1}')
default=$(echo ${v} | awk -F= '{print $2}')
# Indirect expansion: read the env var whose name is in $key
value=${!key}
# Set final value (fall back to default if exists)
final_value=
if [ ! -z "${value}" ]; then
final_value=${value}
elif [ ! -z "${default}" ]; then
final_value=${default}
fi
# Log only if an actual value is found (not default)
if [ ! -z "${value}" ]; then
logger "Replacing ${key} with ${final_value}"
fi
# NOTE: sed edits the template in place, so the template accumulates the
# substitutions; safe only because each token appears once per run
sed -i "s,${key},${final_value},g" ${server_xml_template} || errorExit "Updating ${key} in ${server_xml_template} failed"
done
# Save original and replace with the template
if [ ! -f ${server_xml}.orig ]; then
logger "Saving ${server_xml} as ${server_xml}.orig"
mv -f ${server_xml} ${server_xml}.orig || errorExit "Moving ${server_xml} to ${server_xml}.orig failed"
fi
cp -f ${server_xml_template} ${server_xml} || errorExit "Copying ${server_xml_template} to ${server_xml} failed"
else
logger "No Tomcat server.xml customizations found"
fi
}
# Copy user-supplied configuration files (mounted at ARTIFACTORY_EXTRA_CONF)
# into Artifactory's etc directory.
addExtraConfFiles () {
    logger "Adding extra configuration files to ${ARTIFACTORY_HOME}/etc if any exist"
    # Proceed only when the mount point exists and is non-empty
    if [ -d "${ARTIFACTORY_EXTRA_CONF}" ] && [ -n "$(ls -A ${ARTIFACTORY_EXTRA_CONF})" ]; then
        logger "Adding files from ${ARTIFACTORY_EXTRA_CONF} to ${ARTIFACTORY_HOME}/etc"
        cp -rfv ${ARTIFACTORY_EXTRA_CONF}/* ${ARTIFACTORY_HOME}/etc || errorExit "Copy files from ${ARTIFACTORY_EXTRA_CONF} to ${ARTIFACTORY_HOME}/etc failed"
    fi
}
# Copy user-supplied Access configuration files (mounted at ACCESS_EXTRA_CONF)
# into the Access etc directory.
addExtraAccessConfFiles () {
    logger "Adding extra configuration files to ${ACCESS_ETC_FOLDER} if any exist"
    # Proceed only when the mount point exists and is non-empty
    if [ -d "${ACCESS_EXTRA_CONF}" ] && [ -n "$(ls -A ${ACCESS_EXTRA_CONF})" ]; then
        logger "Adding files from ${ACCESS_EXTRA_CONF} to ${ACCESS_ETC_FOLDER}"
        cp -rfv ${ACCESS_EXTRA_CONF}/* ${ACCESS_ETC_FOLDER} || errorExit "Copy files from ${ACCESS_EXTRA_CONF} to ${ACCESS_ETC_FOLDER} failed"
    fi
}
# Copy user-supplied Replicator configuration files (mounted at
# REPLICATOR_EXTRA_CONF) into the Replicator etc directory.
addExtraReplicatorConfFiles () {
    logger "Adding extra configuration files to ${REPLICATOR_ETC_FOLDER} if any exist"
    # Proceed only when the mount point exists and is non-empty
    if [ -d "${REPLICATOR_EXTRA_CONF}" ] && [ -n "$(ls -A ${REPLICATOR_EXTRA_CONF})" ]; then
        logger "Adding files from ${REPLICATOR_EXTRA_CONF} to ${REPLICATOR_ETC_FOLDER}"
        cp -rfv ${REPLICATOR_EXTRA_CONF}/* ${REPLICATOR_ETC_FOLDER} || errorExit "Copy files from ${REPLICATOR_EXTRA_CONF} to ${REPLICATOR_ETC_FOLDER} failed"
    fi
}
# Probe a directory for read+write access by the current user:
# write a scratch file, read it back, remove it.
# Returns 0 when both operations succeed, 1 otherwise.
testReadWritePermissions () {
    local probe_dir=$1
    [ -d ${probe_dir} ] || errorExit "'${probe_dir}' is not a directory"
    local probe_file=${probe_dir}/test-permissions
    # Write probe
    if echo test > ${probe_file} 1> /dev/null 2>&1; then
        # Write succeeded. Testing read...
        if cat ${probe_file} > /dev/null; then
            rm -f ${probe_file}
            return 0
        fi
    fi
    return 1
}
# Test directory has read/write permissions for current user
# Checks the directory itself plus each first-level sub-directory (covers
# upgrades where the uid changed); on any failure prints the stat details and
# aborts via errorExit.
testDirectoryPermissions () {
local dir_to_check=$1
local error=false
[ -d ${dir_to_check} ] || errorExit "'${dir_to_check}' is not a directory"
# Describe who we are running as, for the log/error messages
local id_str="'${ARTIFACTORY_USER_NAME}' (id ${ARTIFACTORY_USER_ID})"
local u_id=$(id -u)
if [ "${u_id}" != ${ARTIFACTORY_USER_ID} ]; then
id_str="id ${u_id}"
fi
logger "Testing directory ${dir_to_check} has read/write permissions for user ${id_str}"
if testReadWritePermissions ${dir_to_check}; then
# Checking also sub directories (in cases of upgrade + uid change)
local file_list=$(ls -1 ${dir_to_check})
if [ ! -z "${file_list}" ]; then
for d in ${file_list}; do
if [ -d ${dir_to_check}/${d} ]; then
testReadWritePermissions ${dir_to_check}/${d} || error=true
fi
done
fi
else
error=true
fi
if [ "${error}" == true ]; then
# -L follows symlinks so mounted links are described by their target
local stat_data=$(stat -Lc "Directory: %n, permissions: %a, owner: %U, group: %G" ${dir_to_check})
logger "###########################################################"
logger "${dir_to_check} DOES NOT have proper permissions for user ${id_str}"
logger "${stat_data}"
logger "Mounted directory must have read/write permissions for user ${id_str}"
logger "###########################################################"
errorExit "Directory ${dir_to_check} has bad permissions for user ${id_str}"
fi
logger "Permissions for ${dir_to_check} are good"
}
# In case data dirs are missing or not mounted, need to create them.
# etc/ is special: extra conf files are only seeded into a freshly created etc/.
setupDataDirs () {
    logger "Setting up Artifactory data directories if missing"
    if [ ! -d ${ARTIFACTORY_DATA}/etc ]; then
        mkdir -pv ${ARTIFACTORY_DATA}/etc || errorExit "Failed creating ${ARTIFACTORY_DATA}/etc"
        # Add extra conf files to a newly created etc/ only!
        addExtraConfFiles
    fi
    # Remaining directories are created unconditionally when absent
    local d
    for d in \
        ${ARTIFACTORY_DATA}/data \
        ${ARTIFACTORY_DATA}/logs \
        ${ARTIFACTORY_DATA}/backup \
        ${ARTIFACTORY_DATA}/access \
        ${ARTIFACTORY_DATA}/replicator \
        ${ARTIFACTORY_DATA}/metadata \
        ${ARTIFACTORY_HOME}/run \
        ${ARTIFACTORY_HOME}/etc/plugins \
        ${REPLICATOR_DATA}; do
        [ -d ${d} ] || mkdir -pv ${d} || errorExit "Creating ${d} failed"
    done
}
# In case Access data dirs are missing or not mounted, need to create them.
setupAccessDataDirs () {
    logger "Setting up Access data directories if missing"
    [ -d ${ACCESS_ETC_FOLDER} ] || mkdir -pv ${ACCESS_ETC_FOLDER} || errorExit "Failed creating $ACCESS_ETC_FOLDER"
    # Seed extra Access conf files (the helper itself guards on the mount dir)
    addExtraAccessConfFiles
}
# In case Replicator data dirs are missing or not mounted, need to create them.
setupReplicatorDataDirs () {
    logger "Setting up Replicator data directories if missing"
    [ -d ${REPLICATOR_ETC_FOLDER} ] || mkdir -pv ${REPLICATOR_ETC_FOLDER} || errorExit "Failed creating $REPLICATOR_ETC_FOLDER"
    # Seed extra Replicator conf files (the helper itself guards on the mount dir)
    addExtraReplicatorConfFiles
}
# In case data dirs are missing or not mounted, need to create them
# NOTE(review): addExtraMetadataConfFiles is NOT defined anywhere in this
# script (unlike the Access/Replicator equivalents), and this function is
# never invoked from the startup flow below — calling it as-is would fail
# with "command not found". Confirm against the full image sources before
# wiring it into the main flow.
setupMetadataDataDirs () {
logger "Setting up Metadata data directories if missing"
if [ ! -d ${METADATA_ETC_FOLDER} ]; then
mkdir -pv ${METADATA_ETC_FOLDER} || errorExit "Failed creating $METADATA_ETC_FOLDER"
fi
# Add extra conf files to a newly created Metadata etc/ only!
addExtraMetadataConfFiles
}
# Generate an artifactory.config.import.yml if parameters passed
# Only if artifactory.config.import.yml does not already exist!
# Inputs (env): ART_LICENSE (mandatory once any of the three is set),
# ART_BASE_URL, AUTO_GEN_REPOS (comma-separated repo types).
prepareArtConfigYaml () {
local artifactory_config_import_yml=${ARTIFACTORY_DATA}/etc/artifactory.config.import.yml
if [ ! -f ${artifactory_config_import_yml} ]; then
if [ -n "$AUTO_GEN_REPOS" ] || [ -n "$ART_BASE_URL" ] || [ -n "$ART_LICENSE" ]; then
# Make sure license is provided (must be passed in Pro)
if [ -z "$ART_LICENSE" ]; then
errorExit "To use the feature of auto configuration, you must pass a valid Artifactory license as an ART_LICENSE environment variable!"
fi
logger "Generating ${artifactory_config_import_yml}"
# Build each optional YAML fragment only when its value was supplied
[ -n "$ART_LICENSE" ] && LIC_STR="licenseKey: $ART_LICENSE"
[ -n "$ART_BASE_URL" ] && BASE_URL_STR="baseUrl: $ART_BASE_URL"
[ -n "$AUTO_GEN_REPOS" ] && GEN_REPOS_STR="repoTypes:"
cat <<EY1 > "$artifactory_config_import_yml"
version: 1
GeneralConfiguration:
${LIC_STR}
${BASE_URL_STR}
EY1
if [ -n "$GEN_REPOS_STR" ]; then
cat <<EY2 >> "$artifactory_config_import_yml"
OnboardingConfiguration:
${GEN_REPOS_STR}
EY2
# One YAML list entry per comma-separated repo type
for repo in $(echo ${AUTO_GEN_REPOS} | tr ',' ' '); do
cat <<EY3 >> "$artifactory_config_import_yml"
- ${repo}
EY3
done
fi
fi
fi
}
# Verify a file is owned by the given user and group; abort via errorExit on
# mismatch. (Despite the old comment, this only checks — it never chowns.)
checkOwner () {
    local file_to_check=$1
    local user_to_check=$2
    local group_to_check=$3
    logger "Checking permissions on $file_to_check"
    local actual_user actual_group
    # stat -L follows symlinks; %U/%G give the owning user and group names
    read -r actual_user actual_group <<< "$(stat -Lc "%U %G" ${file_to_check})"
    if [[ ${actual_user} == "$user_to_check" ]] && [[ ${actual_group} == "$group_to_check" ]]; then
        logger "$file_to_check is already owned by $user_to_check:$group_to_check."
    else
        errorExit "${file_to_check} is not owned by ${user_to_check}"
    fi
}
# Write Access bootstrap credentials from ACCESS_USER/ACCESS_PASSWORD once:
# only when both are set and Access has not been bootstrapped yet
# (no access.creds file exists).
setAccessCreds() {
    # Default: only allow bootstrap-cred logins from localhost
    ACCESS_SOURCE_IP_ALLOWED=${ACCESS_SOURCE_IP_ALLOWED:-127.0.0.1}
    ACCESS_CREDS_FILE=${ART_ETC}/security/access/keys/access.creds
    BOOTSTRAP_CREDS_FILE=${ACCESS_ETC_FOLDER}/bootstrap.creds
    if [ -n "${ACCESS_USER}" ] && [ -n "${ACCESS_PASSWORD}" ] && [ ! -f ${ACCESS_CREDS_FILE} ]; then
        logger "Creating bootstrap.creds using ACCESS_USER and ACCESS_PASSWORD env variables"
        mkdir -p ${ACCESS_ETC_FOLDER} || errorExit "Creating ${ACCESS_ETC_FOLDER} failed"
        echo "${ACCESS_USER}@${ACCESS_SOURCE_IP_ALLOWED}=${ACCESS_PASSWORD}" > ${BOOTSTRAP_CREDS_FILE}
        # Credentials file must be private to the owner
        chmod 600 ${BOOTSTRAP_CREDS_FILE} || errorExit "Setting permission on ${BOOTSTRAP_CREDS_FILE} failed"
    fi
}
# Seed etc/security/master.key from the ARTIFACTORY_MASTER_KEY env variable
# (only when no key file exists yet) and lock its permissions down to 600.
setMasterKey() {
    ARTIFACTORY_SECURITY_FOLDER=${ART_ETC}/security
    if [ -n "${ARTIFACTORY_MASTER_KEY}" ] && [ ! -f "${ARTIFACTORY_MASTER_KEY_FILE}" ]; then
        logger "Creating master.key using ARTIFACTORY_MASTER_KEY environment variable"
        mkdir -pv ${ARTIFACTORY_SECURITY_FOLDER} || errorExit "Creating folder ${ARTIFACTORY_SECURITY_FOLDER} failed"
        echo "${ARTIFACTORY_MASTER_KEY}" > "${ARTIFACTORY_MASTER_KEY_FILE}"
    fi
    # Whether pre-existing or just created, the key must be owner-only
    if [ -f "${ARTIFACTORY_MASTER_KEY_FILE}" ]; then
        chmod 600 ${ARTIFACTORY_MASTER_KEY_FILE} || errorExit "Setting permission on ${ARTIFACTORY_MASTER_KEY_FILE} failed"
    fi
}
# Verify read/write access to all mounted data locations.
testPermissions () {
    # ARTIFACTORY_DATA folder
    testDirectoryPermissions ${ARTIFACTORY_DATA}
    # HA data/backup dirs are only checked when present (HA mode)
    local ha_dir
    for ha_dir in "${HA_DATA_DIR}" "${HA_BACKUP_DIR}"; do
        if [ -d "${ha_dir}" ]; then
            testDirectoryPermissions ${ha_dir}
        fi
    done
}
# Wait for primary node if needed
# Polls ART_PRIMARY_BASE_URL every 4 seconds until an HTTP 2xx or 4xx status
# is returned. NOTE: there is no timeout — this blocks forever if the primary
# never comes up.
waitForPrimaryNode () {
logger "Waiting for primary node to be up"
logger "Running wget --server-response --quiet $ART_PRIMARY_BASE_URL/webapp/#/login 2>&1 | awk '/^  HTTP/{print \$2}'"
# wget prints response headers on stderr; the awk pulls the HTTP status code.
# 4xx is accepted too, since it still proves the server is answering.
while [[ ! $(wget --server-response --quiet $ART_PRIMARY_BASE_URL/webapp/#/login 2>&1 | awk '/^  HTTP/{print $2}') =~ ^(2|4)[0-9]{2} ]]; do
logger "."
sleep 4
done
logger "Primary node ($ART_PRIMARY_BASE_URL) is up!"
}
# Wait for existence of etc/security/master.key (created by the primary HA
# node), then restrict its permissions and verify its ownership.
# NOTE: there is no timeout — this blocks until the file appears.
waitForMasterKey () {
    logger "Waiting for $ARTIFACTORY_MASTER_KEY_FILE"
    while [ ! -f ${ARTIFACTORY_MASTER_KEY_FILE} ]; do
        logger "."
        sleep 4
    done
    logger "$ARTIFACTORY_MASTER_KEY_FILE exists! Checking ${ARTIFACTORY_USER_NAME} is owner"
    # Lock the key down to owner read/write before checking ownership.
    # (Error message fixed: chmod sets permissions, not the owner.)
    chmod 600 ${ARTIFACTORY_MASTER_KEY_FILE} || errorExit "Setting permission on ${ARTIFACTORY_MASTER_KEY_FILE} failed"
    checkOwner ${ARTIFACTORY_MASTER_KEY_FILE} ${ARTIFACTORY_USER_NAME} ${ARTIFACTORY_USER_NAME}
}
# Wait for DB port to be accessible
# Args: $1 - db.properties file, $2 - DB type (postgresql|mysql|mariadb|oracle|mssql)
# Parses host:port out of the JDBC url= line and probes it via /dev/tcp.
# RETURN CONVENTION IS INVERTED: returns 1 when the DB is reachable, 0 on
# timeout (30s). The caller in setDBType relies on this with [ $? -eq 1 ].
waitForDB () {
local PROPS_FILE=$1
local DB_TYPE=$2
[ -f "$PROPS_FILE" ] || errorExit "$PROPS_FILE does not exist"
local DB_HOST_PORT=
local TIMEOUT=30
local COUNTER=0
# Extract DB host and port
# Each sed isolates "host:port" from the type-specific JDBC URL shape;
# tr turns it into "host/port" for the /dev/tcp/<host>/<port> probe below
case "${DB_TYPE}" in
postgresql|mysql|mariadb)
DB_HOST_PORT=$(grep -e '^url=' "$PROPS_FILE" | sed -e 's,^.*:\/\/\(.*\)\/.*,\1,g' | tr ':' '/')
;;
oracle)
DB_HOST_PORT=$(grep -e '^url=' "$PROPS_FILE" | sed -e 's,.*@\(.*\):.*,\1,g' | tr ':' '/')
;;
mssql)
DB_HOST_PORT=$(grep -e '^url=' "$PROPS_FILE" | sed -e 's,^.*:\/\/\(.*\);databaseName.*,\1,g' | tr ':' '/')
;;
*)
errorExit "DB_TYPE $DB_TYPE not supported"
;;
esac
logger "Waiting for DB $DB_TYPE to be ready on $DB_HOST_PORT within $TIMEOUT seconds"
while [ $COUNTER -lt $TIMEOUT ]; do
# bash's /dev/tcp pseudo-device: succeeds iff a TCP connection opens
(</dev/tcp/$DB_HOST_PORT) 2>/dev/null
if [ $? -eq 0 ]; then
logger "DB $DB_TYPE up in $COUNTER seconds"
return 1
else
logger "."
sleep 1
fi
let COUNTER=$COUNTER+1
done
return 0
}
# Set up Artifactory HA (the original header comment "Set and configure DB
# type" was a copy-paste error). Triggered when HA_NODE_ID or HA_IS_PRIMARY is
# set: fills in defaults for missing HA_* values, waits for the primary node
# and (optionally) the shared master.key on secondary nodes, installs any
# /tmp/art*.lic license, and writes or refreshes $ART_ETC/ha-node.properties.
setupHA () {
# Check if HA (if one HA_XXX is set)
if [ -n "$HA_NODE_ID" ] || [ -n "$HA_IS_PRIMARY" ]; then
logger "Detected an Artifactory HA setup"
if [ -z "$HA_NODE_ID" ]; then
logger "HA_NODE_ID not set. Generating"
HA_NODE_ID="node-$(hostname)"
logger "HA_NODE_ID set to $HA_NODE_ID"
fi
if [ -z "$HA_IS_PRIMARY" ]; then
errorExit "To setup Artifactory HA, you must set the HA_IS_PRIMARY environment variable"
fi
if [ -z "$HA_DATA_DIR" ]; then
warn "HA_DATA_DIR is not set, Artifactory will use local data folder"
HA_DATA_DIR="$ARTIFACTORY_DATA/data"
fi
if [ -z "$HA_BACKUP_DIR" ]; then
warn "HA_BACKUP_DIR is not set, Artifactory will use local backup folder"
HA_BACKUP_DIR="$ARTIFACTORY_DATA/backup"
fi
if [ -z "$HA_MEMBERSHIP_PORT" ]; then
HA_MEMBERSHIP_PORT=10002
fi
if [ -z "${HA_HOST_IP}" ]; then
HA_HOST_IP=$(hostname -i)
fi
logger "HA_HOST_IP is set to ${HA_HOST_IP}"
if [ -z "$HA_CONTEXT_URL" ]; then
warn "HA_CONTEXT_URL is missing, using HA_HOST_IP as context url"
HA_CONTEXT_URL=http://$HA_HOST_IP:8081/artifactory
fi
# If this is not the primary node, make sure the primary node's URL is passed and wait for it before proceeding
# (regex match: any value containing "false" counts as non-primary)
if [[ $HA_IS_PRIMARY =~ false ]]; then
logger "This is not the primary node. Must wait for primary node before starting"
waitForPrimaryNode
fi
# Wait for etc/security/master.key (only on non-primary nodes)
if [[ $HA_IS_PRIMARY =~ false ]] && [ "$HA_WAIT_FOR_MASTER_KEY" == "true" ]; then
logger "HA_WAIT_FOR_MASTER_KEY set. Waiting for ${ARTIFACTORY_MASTER_KEY_FILE} existence"
waitForMasterKey
fi
# Install license file if exists in /tmp
if ls /tmp/art*.lic 1> /dev/null 2>&1; then
logger "Found /tmp/art*.lic. Using it..."
cp -v /tmp/art*.lic $ART_ETC/artifactory.lic
fi
# Start preparing the HA setup if not already exists
if [ ! -f "$ART_ETC/ha-node.properties" ]; then
logger "Preparing $ART_ETC/ha-node.properties"
cat <<EOF > "$ART_ETC/ha-node.properties"
node.id=$HA_NODE_ID
context.url=$HA_CONTEXT_URL
membership.port=$HA_MEMBERSHIP_PORT
primary=$HA_IS_PRIMARY
hazelcast.interface=$HA_HOST_IP
EOF
if [ -n "$HA_DATA_DIR" ] && [ -n "$HA_BACKUP_DIR" ] ; then
echo "artifactory.ha.data.dir=$HA_DATA_DIR" >> "$ART_ETC/ha-node.properties"
echo "artifactory.ha.backup.dir=$HA_BACKUP_DIR" >> "$ART_ETC/ha-node.properties"
fi
else
# Update existing for the case the IP changed
logger "$ART_ETC/ha-node.properties already exists. Making sure properties with IP are updated correctly"
sed -i "s,^context.url=.*,context.url=$HA_CONTEXT_URL,g" $ART_ETC/ha-node.properties || errorExit "Updating $ART_ETC/ha-node.properties with context.url failed"
sed -i "s,^hazelcast.interface=.*,hazelcast.interface=$HA_HOST_IP,g" $ART_ETC/ha-node.properties || errorExit "Updating $ART_ETC/ha-node.properties with hazelcast.interface failed"
fi
logger "Content of $ART_ETC/ha-node.properties:"
cat $ART_ETC/ha-node.properties; echo
fi
}
# Apply DB connection-pool sizing to db.properties from the environment.
# DB_POOL_MAX_ACTIVE drives pool.max.active; DB_POOL_MAX_IDLE drives
# pool.max.idle (defaults to 1/10 of max.active when unset). No-op when
# DB_POOL_MAX_ACTIVE is not set.
setMaxDBConnections () {
logger "Setting DB max connections if needed"
if [ ! -z "${DB_POOL_MAX_ACTIVE}" ]; then
# Check DB_POOL_MAX_ACTIVE is a valid positive integer
[[ ${DB_POOL_MAX_ACTIVE} =~ ^[0-9]+$ ]] || errorExit "DB_POOL_MAX_ACTIVE (${DB_POOL_MAX_ACTIVE}) is not a valid number"
logger "Setting pool.max.active=${DB_POOL_MAX_ACTIVE}"
# Just in case file already has the property
# (update in place if present, otherwise append)
grep pool.max.active ${DB_PROPS} > /dev/null 2>&1
if [ $? -eq 0 ]; then
sed -i "s,pool.max.active=.*,pool.max.active=${DB_POOL_MAX_ACTIVE},g" ${DB_PROPS} || errorExit "Updating pool.max.active=${DB_POOL_MAX_ACTIVE} in ${DB_PROPS} failed"
else
echo "pool.max.active=${DB_POOL_MAX_ACTIVE}" >> ${DB_PROPS} || errorExit "Writing pool.max.active=${DB_POOL_MAX_ACTIVE} to ${DB_PROPS} failed"
fi
if [ ! -z "${DB_POOL_MAX_IDLE}" ]; then
# Check DB_POOL_MAX_IDLE is a valid positive integer
[[ ${DB_POOL_MAX_IDLE} =~ ^[0-9]+$ ]] || errorExit "DB_POOL_MAX_IDLE (${DB_POOL_MAX_IDLE}) is not a valid number"
# Make sure DB_POOL_MAX_ACTIVE > DB_POOL_MAX_IDLE
[ ${DB_POOL_MAX_ACTIVE} -gt ${DB_POOL_MAX_IDLE} ] || errorExit "DB_POOL_MAX_ACTIVE (${DB_POOL_MAX_ACTIVE}) must be higher than DB_POOL_MAX_IDLE (${DB_POOL_MAX_IDLE})"
logger "Setting pool.max.idle=${DB_POOL_MAX_IDLE}"
else
logger "DB_POOL_MAX_IDLE not set. Setting pool.max.idle to 1/10 of pool.max.active"
DB_POOL_MAX_IDLE=$(( ${DB_POOL_MAX_ACTIVE} / 10 ))
logger "Setting pool.max.idle=${DB_POOL_MAX_IDLE}"
fi
# Just in case file already has the property
# (same update-or-append pattern as above)
grep pool.max.idle ${DB_PROPS} > /dev/null 2>&1
if [ $? -eq 0 ]; then
sed -i "s,pool.max.idle=.*,pool.max.idle=${DB_POOL_MAX_IDLE},g" ${DB_PROPS} || errorExit "Updating pool.max.idle=${DB_POOL_MAX_IDLE} in ${DB_PROPS} failed"
else
echo "pool.max.idle=${DB_POOL_MAX_IDLE}" >> ${DB_PROPS} || errorExit "Writing pool.max.idle=${DB_POOL_MAX_IDLE} to ${DB_PROPS} failed"
fi
else
logger "Not needed. DB_POOL_MAX_ACTIVE not set"
fi
}
# Check DB type configurations before starting Artifactory
# Seeds ${DB_PROPS} from the bundled template for $DB_TYPE (on first start
# only) and patches username/password/url/host/port from the environment.
# If ${DB_PROPS} already exists it must match $DB_TYPE, otherwise abort.
setDBConf () {
# Set DB_HOST
# (default: the DB type name, which is the service name in docker-compose)
if [ -z "$DB_HOST" ]; then
DB_HOST=$DB_TYPE
fi
logger "DB_HOST is set to $DB_HOST"
logger "Checking if need to copy $DB_TYPE configuration"
# If already exists, just make sure it's configured for postgres
if [ -f ${DB_PROPS} ]; then
logger "${DB_PROPS} already exists. Making sure it's set to $DB_TYPE... "
grep type=$DB_TYPE ${DB_PROPS} > /dev/null
if [ $? -eq 0 ]; then
logger "${DB_PROPS} already set to $DB_TYPE"
else
errorExit "${DB_PROPS} already exists and is set to a DB different than $DB_TYPE"
fi
else
NEED_COPY=true
fi
# On a new install and startup, need to make the initial copy before Artifactory starts
if [ "$NEED_COPY" == "true" ]; then
logger "Copying $DB_TYPE configuration... "
cp ${ARTIFACTORY_HOME}/misc/db/$DB_TYPE.properties ${DB_PROPS} || errorExit "Copying $ARTIFACTORY_HOME/misc/db/$DB_TYPE.properties to ${DB_PROPS} failed"
# Template URLs point at localhost; swap in the real DB host
sed -i "s/localhost/$DB_HOST/g" ${DB_PROPS}
# Set custom DB parameters if specified
if [ ! -z "$DB_USER" ]; then
logger "Setting DB_USER to $DB_USER"
sed -i "s/username=.*/username=$DB_USER/g" ${DB_PROPS}
fi
if [ ! -z "$DB_PASSWORD" ]; then
logger "Setting DB_PASSWORD to **********"
sed -i "s/password=.*/password=$DB_PASSWORD/g" ${DB_PROPS}
fi
# Set the URL depending on what parameters are passed
if [ ! -z "$DB_URL" ]; then
logger "Setting DB_URL to $DB_URL (ignoring DB_HOST and DB_PORT if set)"
# Escape any & signs (so sed will not get messed up)
DB_URL=$(echo -n ${DB_URL} | sed "s|&|\\\\&|g")
sed -i "s|url=.*|url=$DB_URL|g" ${DB_PROPS}
else
if [ ! -z "$DB_PORT" ]; then
logger "Setting DB_PORT to $DB_PORT"
# Each awk extracts the current port from the type-specific JDBC URL shape
case "$DB_TYPE" in
mysql|postgresql|mariadb)
oldPort=$(grep -E "(url).*" ${DB_PROPS} | awk -F":" '{print $4}' | awk -F"/" '{print $1}')
;;
oracle)
oldPort=$(grep -E "(url).*" ${DB_PROPS} | awk -F":" '{print $5}')
;;
mssql)
oldPort=$(grep -E "(url).*" ${DB_PROPS} | awk -F":" '{print $4}' | awk -F";" '{print $1}')
;;
esac
sed -i "s/$oldPort/$DB_PORT/g" ${DB_PROPS}
fi
if [ ! -z "$DB_HOST" ]; then
logger "Setting DB_HOST to $DB_HOST"
# Each awk extracts the current host from the type-specific JDBC URL shape
case "$DB_TYPE" in
mysql|postgresql|mssql|mariadb)
oldHost=$(grep -E "(url).*" ${DB_PROPS} | awk -F"//" '{print $2}' | awk -F":" '{print $1}')
;;
oracle)
oldHost=$(grep -E "(url).*" ${DB_PROPS} | awk -F"@" '{print $2}' | awk -F":" '{print $1}')
;;
esac
sed -i "s/$oldHost/$DB_HOST/g" ${DB_PROPS}
fi
fi
fi
}
# Set and configure DB type
# Resolves the effective DB type (a mounted secret db.properties wins over the
# DB_TYPE env variable), runs setDBConf for external DBs, waits for the DB to
# accept connections (primary/standalone only), falls back to embedded Derby,
# and finally applies pool sizing via setMaxDBConnections.
setDBType () {
logger "Checking DB_TYPE"
if [ -f "${ART_ETC}/.secrets/.temp.db.properties" ]
then
SECRET_DB_PROPS_FILE="${ART_ETC}/.secrets/.temp.db.properties"
logger "Detected secret db.properties file at ${SECRET_DB_PROPS_FILE}. Secret file will override default db.properties file as well as environment variables."
DB_TYPE_FROM_SECRET=$(grep -E "(type).*" "$SECRET_DB_PROPS_FILE" | awk -F"=" '{ print $2 }')
# Only accept known DB types from the secret file
if [[ "$DB_TYPE_FROM_SECRET" =~ ^(postgresql|mysql|oracle|mssql|mariadb)$ ]]; then DB_TYPE=${DB_TYPE_FROM_SECRET} ; fi
fi
if [ ! -z "${DB_TYPE}" ] && [ "${DB_TYPE}" != derby ]; then
logger "DB_TYPE is set to $DB_TYPE"
NEED_COPY=false
DB_PROPS=${ART_ETC}/db.properties
if [ ! -z "$SECRET_DB_PROPS_FILE" ]
then
DB_PROPS=${SECRET_DB_PROPS_FILE}
logger "DB_PROPS set to: ${DB_PROPS}"
fi
setDBConf
# Wait for DB
# On slow systems, when working with docker-compose, the DB container might be up,
# but not ready to accept connections when Artifactory is already trying to access it.
# NOTE: waitForDB returns 1 on success (inverted convention), hence the -eq 1 check
if [[ ! "$HA_IS_PRIMARY" =~ false ]]; then
waitForDB "$DB_PROPS" "$DB_TYPE"
[ $? -eq 1 ] || errorExit "DB $DB_TYPE failed to start in the given time"
fi
fi
if [ -z "${DB_TYPE}" ] || [ "${DB_TYPE}" == derby ]; then
logger "Artifactory will use embedded Derby DB"
# In case db.properties is missing, create a Derby default db.properties
if [ ! -f ${DB_PROPS} ]; then
logger "${DB_PROPS} does not exist. Creating it with defaults"
cat << DB_P > "${DB_PROPS}"
# File auto generated by Docker entrypoint
type=derby
url=jdbc:derby:{db.home};create=true
driver=org.apache.derby.jdbc.EmbeddedDriver
DB_P
fi
fi
setMaxDBConnections
}
# Import any certificates mounted under /artifactory_extra_certs into the
# JVM's cacerts trust store (alias = file name); already-present aliases are
# skipped. Import failures only warn, they do not abort startup.
addCertsToJavaKeystore () {
logger "Adding extra certificates to Java keystore if exist"
local certs_dir=/artifactory_extra_certs
local certs_list=$(ls -1 ${certs_dir})
local cert=
if [ ! -z "${certs_list}" ]; then
# NOTE(review): single-level readlink — assumes 'java' on PATH is a one-hop
# symlink into the JDK's bin directory; a direct binary or chained symlink
# would break this. Confirm against the image layout.
local java_bin=$(dirname $(readlink $(which java)))
for d in ${certs_list}; do
cert=${certs_dir}/${d}
# Skip directories
[ -d ${cert} ] && continue
# Check if alias already installed
${java_bin}/keytool -list -cacerts -storepass changeit -alias ${d} > /dev/null 2>&1
if [ $? -ne 0 ]; then
logger "Adding ${cert} to Java cacerts"
${java_bin}/keytool -importcert -trustcacerts -noprompt -cacerts -storepass changeit -file ${cert} -alias "${d}"
[ $? -eq 0 ] || warn "Adding ${cert} failed!"
else
logger "Certificate ${d} already in Java cacerts"
fi
done
else
logger "No extra certificates found"
fi
}
# Append EXTRA_JAVA_OPTIONS to bin/artifactory.default exactly once.
# The untouched file is preserved as artifactory.default.origin, whose
# existence marks that the options were already applied.
addExtraJavaArgs () {
    logger "Adding EXTRA_JAVA_OPTIONS if exist"
    # Nothing to do when no options were given or they were already applied
    if [ -z "${EXTRA_JAVA_OPTIONS}" ] || [ -f ${ARTIFACTORY_HOME}/bin/artifactory.default.origin ]; then
        return 0
    fi
    logger "Adding EXTRA_JAVA_OPTIONS ${EXTRA_JAVA_OPTIONS}"
    cp -v ${ARTIFACTORY_HOME}/bin/artifactory.default ${ARTIFACTORY_HOME}/bin/artifactory.default.origin || errorExit "Copy ${ARTIFACTORY_HOME}/bin/artifactory.default to ${ARTIFACTORY_HOME}/bin/artifactory.default.origin failed"
    echo "export JAVA_OPTIONS=\"\$JAVA_OPTIONS ${EXTRA_JAVA_OPTIONS}\"" >> ${ARTIFACTORY_HOME}/bin/artifactory.default || errorExit "Update ${ARTIFACTORY_HOME}/bin/artifactory.default failed"
}
# Copy user-provided plugins (mounted at /tmp/plugins) into Artifactory's
# plugins directory. A missing or empty mount is not an error — previously an
# unconditional 'cp /tmp/plugins/*' printed an error on every plugin-less start.
addPlugins () {
    logger "Adding plugins if exist"
    if [ -d /tmp/plugins ] && [ -n "$(ls -A /tmp/plugins 2>/dev/null)" ]; then
        cp -fv /tmp/plugins/* ${ARTIFACTORY_HOME}/etc/plugins/ || warn "Copying plugins from /tmp/plugins failed"
    fi
}
# Gracefully stop Artifactory; invoked by the signal trap below on
# SIGINT/SIGTERM/SIGHUP so 'docker stop' shuts the server down cleanly.
terminate () {
echo -e "\nTerminating Artifactory"
${ARTIFACTORY_HOME}/bin/artifactory.sh stop
}
# Catch Ctrl+C and other termination signals to try graceful shutdown
trap terminate SIGINT SIGTERM SIGHUP
logger "Preparing to run Artifactory in Docker"
logger "Running as $(id)"
# Pre-flight checks and one-time setup, in dependency order
printDockerFileLocation
checkULimits
testPermissions
setupDataDirs
setupAccessDataDirs
setupReplicatorDataDirs
prepareArtConfigYaml
addPlugins
setAccessCreds
setMasterKey
setupHA
setDBType
configureServerXml
addCertsToJavaKeystore
addExtraJavaArgs
logger "Setup done. Running Artifactory"
# Run Artifactory as ARTIFACTORY_USER_NAME user
# Started in the background and then 'wait'ed on so the trap above can run
# the graceful-shutdown handler while the server is up; the PID is recorded
# in $ARTIFACTORY_PID for external tooling
exec ${ARTIFACTORY_HOME}/bin/artifactory.sh &
art_pid=$!
echo ${art_pid} > ${ARTIFACTORY_PID}
wait ${art_pid}
artifactory@8351c57923e6:/opt/jfrog/artifactory/tomcat/conf$ cat server.xml
<!--
~ Artifactory is a binaries repository manager.
~ Copyright (C) 2018 JFrog Ltd.
~
~ Artifactory is free software: you can redistribute it and/or modify
~ it under the terms of the GNU Affero General Public License as published by
~ the Free Software Foundation, either version 3 of the License, or
~ (at your option) any later version.
~
~ Artifactory is distributed in the hope that it will be useful,
~ but WITHOUT ANY WARRANTY; without even the implied warranty of
~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
~ GNU Affero General Public License for more details.
~
~ You should have received a copy of the GNU Affero General Public License
~ along with Artifactory. If not, see <http://www.gnu.org/licenses/>.
-->
<Server port="8015" shutdown="SHUTDOWN">
<Service name="Catalina">
<Connector port="8081" sendReasonPhrase="true" relaxedPathChars='[]' relaxedQueryChars='[]' maxThreads="200"/>
<!-- Must be at least the value of artifactory.access.client.max.connections -->
<Connector port="8040" sendReasonPhrase="true" maxThreads="50"/>
<Engine name="Catalina" defaultHost="localhost">
<Host name="localhost" appBase="webapps" startStopThreads="2"/>
</Engine>
</Service>
</Server>
<!--
~ Artifactory is a binaries repository manager.
~ Copyright (C) 2018 JFrog Ltd.
~
~ Artifactory is free software: you can redistribute it and/or modify
~ it under the terms of the GNU Affero General Public License as published by
~ the Free Software Foundation, either version 3 of the License, or
~ (at your option) any later version.
~
~ Artifactory is distributed in the hope that it will be useful,
~ but WITHOUT ANY WARRANTY; without even the implied warranty of
~ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
~ GNU Affero General Public License for more details.
~
~ You should have received a copy of the GNU Affero General Public License
~ along with Artifactory. If not, see <http://www.gnu.org/licenses/>.
-->
<Server port="8015" shutdown="SHUTDOWN">
<Service name="Catalina">
<Connector port="SERVER_XML_ARTIFACTORY_PORT" sendReasonPhrase="true" relaxedPathChars='[]' relaxedQueryChars='[]' maxThreads="SERVER_XML_ARTIFACTORY_MAX_THREADS" SERVER_XML_ARTIFACTORY_EXTRA_CONFIG/>
<!-- Must be at least the value of artifactory.access.client.max.connections -->
<Connector port="8040" sendReasonPhrase="true" maxThreads="SERVER_XML_ACCESS_MAX_THREADS" SERVER_XML_ACCESS_EXTRA_CONFIG/>
SERVER_XML_EXTRA_CONNECTOR
<Engine name="Catalina" defaultHost="localhost">
<Host name="localhost" appBase="webapps" startStopThreads="2"/>
</Engine>
</Service>
</Server>
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment