kylo-services
###
# #%L
# thinkbig-service-app
# %%
# Copyright (C) 2017 ThinkBig Analytics
# %%
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# #L%
###
###
# Other properties are in the services-app/application.properties
#
# Add extra profiles by setting "spring.profiles.active=<profile-name>" property on command line, e.g.
# -Dspring.profiles.active=hdp24,gmail,cdh
# Extra profiles will add to this set of profiles and override properties given in this file
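# For example, to activate the hdp24 and gmail profiles (hypothetical jar path; adjust for your install):
#   java -Dspring.profiles.active=hdp24,gmail -jar /opt/kylo/kylo-services/lib/kylo-service-app-<version>.jar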
### Indicate the NiFi version you are using with the correct spring profile.
### - For NiFi 1.0.x: nifi-v1
### - For NiFi 1.1.x: nifi-v1.1
### - For NiFi 1.2.x or 1.3.x or 1.4.x or 1.5.x: nifi-v1.2
spring.profiles.include=native,nifi-v1.2,auth-kylo,auth-simple,search-esr,jms-activemq
# Spring Datasource properties for spring batch and the default data source
# NOTE: the Cloudera default password for root access to MySQL is "cloudera"
#
spring.datasource.url=jdbc:mysql://localhost:3306/kylo
spring.datasource.username=root
spring.datasource.password=hadoop
spring.datasource.maxActive=30
spring.datasource.validationQuery=SELECT 1
spring.datasource.testOnBorrow=true
spring.datasource.driverClassName=org.mariadb.jdbc.Driver
spring.jpa.database-platform=org.hibernate.dialect.MySQL5InnoDBDialect
spring.jpa.open-in-view=true
#
#Postgres datasource configuration
#
#spring.datasource.url=jdbc:postgresql://localhost:5432/pipeline_db
#spring.datasource.driverClassName=org.postgresql.Driver
#spring.datasource.username=
#spring.datasource.password=
#spring.jpa.database-platform=org.hibernate.dialect.PostgreSQLDialect
# Entity-level access control. To enable, uncomment the line below and set its value to true.
# WARNING: Enabling entity access control is a one-way operation; you will not be able
# to set this property back to "false" once Kylo is started with this value as "true".
#security.entity.access.controlled=false
###
# Authentication settings:
#
# Currently available authentication/authorization Spring profiles:
# * auth-kylo - Users are authenticated if they exist in the Kylo user store and any
# groups associated with the user are retrieved for access control.
# This profile is usually used in conjunction with other auth profiles
# (auth-ldap, auth-ad, etc.)
# * auth-ldap - authenticates users using LDAP and optionally loads any associated
# user groups
# * auth-ad - authenticates users using Active Directory and loads any associated
# user groups
# * auth-simple - Uses authenticationService.username and authenticationService.password
# for authentication (development only)
# * auth-file - Uses users.properties and roles.properties files for authentication
# and role assignment (generally for development only)
## auth-file: If this profile is active then these optional properties may be used:
#security.auth.file.users=file:///Users/s/kylo/users.properties
#security.auth.file.groups=file:///Users/s/kylo/groups.properties
#security.auth.file.users=file:///opt/kylo/users.properties
#security.auth.file.groups=file:///opt/kylo/groups.properties
#security.auth.file.password.hash.enabled=false
#security.auth.file.password.hash.algorithm=MD5
#security.auth.file.password.hash.encoding=base64
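## A matching hash for the settings above could be generated like this (a sketch,
## assuming openssl is available; 'mypassword' is a placeholder):
#   echo -n 'mypassword' | openssl dgst -md5 -binary | base64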
## auth-simple: If this profile is active these authenticationService properties are used:
authenticationService.username=dladmin
#authenticationService.password={cipher}52fd39e4e4f7d0f6a91989efbfa870f1a543550401e6ab0b17f3059c1ada9b5f
authenticationService.password=thinkbig
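## A {cipher} value like the commented one above can be generated with the Spring Cloud CLI
## (an assumption; your install may provide its own encryption utility):
#   spring encrypt thinkbig --key <your-encrypt-key>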
## auth-ldap: If this profile is active then these properties should be uncommented and updated appropriately
#security.auth.ldap.server.uri=ldap://localhost:52389/dc=example,dc=com
#security.auth.ldap.server.authDn=
#security.auth.ldap.server.password=
### user DN patterns are separated by '|'
#security.auth.ldap.authenticator.userDnPatterns=uid={0},ou=people
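### e.g. two DN patterns to try in order (hypothetical OUs):
#security.auth.ldap.authenticator.userDnPatterns=uid={0},ou=people|uid={0},ou=serviceaccounts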
#security.auth.ldap.user.enableGroups=true
#security.auth.ldap.user.groupNameAttr=ou
#security.auth.ldap.user.groupsBase=ou=groups
## auth-ad: If this profile is active then these properties should be uncommented and updated appropriately
#security.auth.ad.server.uri=ldap://example.com/
#security.auth.ad.server.domain=test.example.com
#security.auth.ad.server.searchFilter=(&(objectClass=user)(sAMAccountName={1}))
#security.auth.ad.user.enableGroups=true
## group attribute patterns are separated by '|'
#security.auth.ad.user.groupAttributes=
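## e.g. memberOf is a common AD group-membership attribute (an assumption; check your directory schema):
#security.auth.ad.user.groupAttributes=memberOf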
#
# Server port
#
server.port=8420
# server.port=8443
##### Uncomment and set the properties below if SSL is needed
###
#server.ssl.key-store=
#server.ssl.key-store-password=
#server.ssl.key-store-type=jks
#server.ssl.trust-store=
#server.ssl.trust-store-password=
#server.ssl.trust-store-type=JKS
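## For testing, a self-signed keystore could be created like this (a sketch assuming
## a JDK keytool on the PATH; alias, path and password are placeholders):
#   keytool -genkeypair -alias kylo -keyalg RSA -keysize 2048 \
#     -keystore /opt/kylo/kylo-services/conf/keystore.jks -storepass changeit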
#
# General configuration - Note: Supported configurations include STANDALONE, BUFFER_NODE_ONLY, BUFFER_NODE, EDGE_NODE
#
application.mode=STANDALONE
#
# Turn on debug mode to display more verbose error messages in the UI
#
application.debug=true
#
# Prevents execution of batch jobs at startup. To run a job at startup, set this
# property to true and list the job name(s) in spring.batch.job.names.
#
spring.batch.job.enabled=false
spring.batch.job.names=
#spring.jpa.show-sql=true
#spring.jpa.hibernate.ddl-auto=validate
metadata.datasource.driverClassName=${spring.datasource.driverClassName}
metadata.datasource.url=${spring.datasource.url}
metadata.datasource.username=${spring.datasource.username}
metadata.datasource.password=${spring.datasource.password}
metadata.datasource.validationQuery=SELECT 1
metadata.datasource.testOnBorrow=true
hive.userImpersonation.enabled=false
hive.userImpersonation.cache.expiry.duration=4
#hive.userImpersonation.cache.expiry.time-unit can be any java.util.concurrent.TimeUnit value, e.g. SECONDS, MINUTES, HOURS, DAYS
hive.userImpersonation.cache.expiry.time-unit=HOURS
hive.datasource.driverClassName=org.apache.hive.jdbc.HiveDriver
hive.datasource.url=jdbc:hive2://localhost:10000/default
hive.datasource.username=kylo
hive.datasource.password=
hive.datasource.validationQuery=show tables 'test'
## Note: on Cloudera the Hive metastore URL should be /metastore instead of /hive
hive.metastore.datasource.driverClassName=org.mariadb.jdbc.Driver
hive.metastore.datasource.url=jdbc:mysql://localhost:3306/hive
#hive.metastore.datasource.url=jdbc:mysql://localhost:3306/metastore
hive.metastore.datasource.username=root
hive.metastore.datasource.password=hadoop
hive.metastore.datasource.validationQuery=SELECT 1
hive.metastore.datasource.testOnBorrow=true
modeshape.datasource.driverClassName=${spring.datasource.driverClassName}
modeshape.datasource.url=${spring.datasource.url}
modeshape.datasource.username=${spring.datasource.username}
modeshape.datasource.password=${spring.datasource.password}
modeshape.index.dir=/local
nifi.rest.host=localhost
nifi.rest.port=8079
###
# NiFi Https configuration below
#
# Follow directions here: http://kylo.readthedocs.io/en/v0.8.2/how-to-guides/ConfigureNiFiWithSSL.html to setup certificates and properties in NiFi
#
### The port should match the port found in the /opt/nifi/current/conf/nifi.properties (nifi.web.https.port)
#nifi.rest.port=9443
#nifi.rest.https=true
#nifi.rest.useConnectionPooling=false
#nifi.rest.truststorePath=
##### The truststore password below needs to match that found in the nifi.properties file (nifi.security.truststorePasswd)
#nifi.rest.truststorePassword=
#nifi.rest.truststoreType=JKS
#nifi.rest.keystorePath=
###value found in the .password file /opt/nifi/data/ssl/CN=kylo_OU=NIFI.password
#nifi.rest.keystorePassword=
#nifi.rest.keystoreType=PKCS12
#
kerberos.hive.kerberosEnabled=false
#kerberos.hive.hadoopConfigurationResources=/etc/hadoop/conf/core-site.xml,/etc/hadoop/conf/hdfs-site.xml
#kerberos.hive.kerberosPrincipal=hive/sandbox.hortonworks.com
#kerberos.hive.keytabLocation=/etc/security/keytabs/hive-thinkbig.headless.keytab
## Used to map NiFi controller service connections to the user interface.
## Naming convention for the property is nifi.service.NIFI_CONTROLLER_SERVICE_NAME.NIFI_PROPERTY_NAME
## Anything prefixed with nifi.service will be used by the UI. Replace spaces with underscores and make the name lowercase.
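## e.g. a controller service named "My Database" with a property "Database User" would map to
## the key nifi.service.my_database.database_user (illustrative service name)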
nifi.service.mysql.database_user=root
nifi.service.mysql.password=hadoop
#Kylo MySQL controller service configuration
nifi.service.kylo_mysql.database_user=root
nifi.service.kylo_mysql.password=hadoop
nifi.service.hive_thrift_service.database_connection_url=jdbc:hive2://localhost:10000/default
#nifi.service.hive_thrift_service.kerberos_principal=nifi
#nifi.service.hive_thrift_service.kerberos_keytab=/etc/security/keytabs/nifi.headless.keytab
#nifi.service.hive_thrift_service.hadoop_configuration_resources=/etc/hadoop/conf/core-site.xml,/etc/hadoop/conf/hdfs-site.xml
nifi.service.kylo_metadata_service.rest_client_url=http://localhost:8400/proxy/v1/metadata
nifi.service.kylo_metadata_service.rest_client_password=thinkbig
jms.client.id=thinkbig.feedmgr
## NiFi property override with static defaults
## Static property override supports 3 use cases:
# 1) Store properties in this file starting with the prefix defined in the PropertyExpressionResolver class (default = "config.")
# 2) Store properties in this file starting with "nifi.<PROCESSORTYPE>.<PROPERTY_KEY>", where PROCESSORTYPE and PROPERTY_KEY are all lowercase and spaces are substituted with underscores
# 3) Global property replacement: properties starting with "nifi.all_processors.<PROPERTY_KEY>" will globally replace the value when the template is instantiated
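## Examples of each use case appear later in this file: config.hive.schema (1),
## nifi.executesparkjob.sparkhome (2), and nifi.all_processors.kerberos_principal (3).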
##Below are Ambari configuration options for Hive Metastore and Spark location
config.hive.schema=hive
config.metadata.url=http://localhost:8400/proxy/v1/metadata
## Spark configuration
nifi.executesparkjob.sparkhome=/usr/hdp/current/spark-client
nifi.executesparkjob.sparkmaster=local
nifi.executesparkjob.driver_memory=1024m
nifi.executesparkjob.number_of_executors=1
nifi.executesparkjob.executor_cores=1
## Comment out the line below on Cloudera
config.spark.validateAndSplitRecords.extraJars=/usr/hdp/current/hive-webhcat/share/hcatalog/hive-hcatalog-core.jar
#config.spark.version=2
## Override the default HDFS locations below for feed tables and multi-tenancy.
# Root HDFS locations for new raw files
config.hdfs.ingest.root=/etl
# Root Archive location for new raw files
config.hdfs.archive.root=/archive
# Root HDFS location for Hive ingest processing tables (raw,valid,invalid)
config.hive.ingest.root=/model.db
# Root HDFS location for Hive profile table
config.hive.profile.root=/model.db
# Root HDFS location for Hive master table
config.hive.master.root=/app/warehouse
# Prefix to prepend to category system name for this environment (blank if none). Use for multi-tenancy
config.category.system.prefix=
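# e.g. with config.category.system.prefix=dev_ a category whose system name is "sales"
# would resolve to "dev_sales" (illustrative values)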
# Set the JMS server hostname for the Kylo hosted JMS server
config.elasticsearch.jms.url=tcp://localhost:61616
# Set the nifi home folder
config.nifi.home=/opt/nifi
#example of replacing global properties
#nifi.all_processors.kerberos_principal=nifi
#nifi.all_processors.kerberos_keytab=/etc/security/keytabs/nifi.headless.keytab
#nifi.all_processors.hadoop_configuration_resources=/etc/hadoop/conf/core-site.xml,/etc/hadoop/conf/hdfs-site.xml
##cloudera config
#config.hive.schema=metastore
#nifi.executesparkjob.sparkhome=/usr/lib/spark
## how often should SLAs be checked
sla.cron.default=0 0/5 * 1/1 * ? *
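# The default above is a Quartz cron expression (seconds minutes hours day-of-month month day-of-week year)
# and fires every 5 minutes.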
# Additional Hive UDFs for partition functions. Separate multiple functions with commas.
#kylo.metadata.udfs=
### Sqoop import configuration
# DB connection password and driver (format: nifi.service.<sqoop controller service name in NiFi>.<key>=<value>)
# Note: Ensure that the driver jar is available in below two locations:
# (a) sqoop's lib directory (e.g. /usr/hdp/current/sqoop-client/lib/)
# (b) kylo's lib directory, and owned by 'kylo' user (/opt/kylo/kylo-services/lib)
#nifi.service.sqoop-mysql-connection.password=
#nifi.service.sqoop-mysql-connection.database_driver_class_name=org.mariadb.jdbc.Driver
# Base HDFS landing directory
#config.sqoop.hdfs.ingest.root=/sqoopimport
## Uncomment the settings below for Gmail to work
#sla.mail.protocol=smtp
#sla.mail.host=smtp.gmail.com
#sla.mail.port=587
#sla.mail.smtpAuth=true
#sla.mail.starttls=true
# Login form authentication
#security.jwt.algorithm=HS256
security.jwt.key=1991a52b0a49e8e276d67ecc12280433
#security.rememberme.alwaysRemember=false
#security.rememberme.cookieDomain=localhost
#security.rememberme.cookieName=remember-me
#security.rememberme.parameter=remember-me
#security.rememberme.tokenValiditySeconds=1209600
#security.rememberme.useSecureCookie=
## If a job fails, tell the Operations Manager to query NiFi for bulletin information in an attempt to capture more logs about the failure
kylo.ops.mgr.query.nifi.bulletins=true
## When getting aggregate stats back for flows, if errors are detected Kylo will query NiFi in an attempt to capture matching bulletins.
## By default this data is stored in memory. Setting this to true will store the data in the MySQL table instead.
kylo.ops.mgr.stats.nifi.bulletins.persist=false
## If not persisting (the above flag is false), this is the limit on the number of error bulletins kept per feed.
## This is a rolling queue that keeps the last N errors per feed.
kylo.ops.mgr.stats.nifi.bulletins.mem.size=30
kylo.feed.mgr.cleanup.timeout=60000
## Align the process groups when saving your feed in NiFi. By default it is set to true.
#nifi.auto.align=false
# update database on kylo-services start
liquibase.enabled=true
liquibase.change-log=classpath:com/thinkbiganalytics/db/master.xml
jms.activemq.broker.url=tcp://localhost:61616
## *** Allow reindexing of feed history data. To enable this functionality, perform the below steps: ***
# 1. Set value of below property to true.
# 2. Import these Kylo feeds:
# (A) samples/feeds/nifi-1.3/history-reindexing/history_reindex_text_service_hs_v<version_number>.feed.zip
# (B) samples/feeds/nifi-1.3/history-reindexing/index_text_service_hs_v<version_number>.feed.zip
# Note: When importing the feeds via the Kylo UI, select YES for these 3 options:
# (a) Overwrite Feed: YES
# (b) Replace Feed Template: YES
# (c) Replace Reusable Template: YES
# 3. If using Solr for search, and upgrading Kylo from version 0.8.4, update the Solr plugin properties file as below:
# $ cp /opt/kylo/setup/plugins/search-solr/solrsearch.properties /opt/kylo/kylo-services/conf
# If your earlier solrsearch.properties had custom values, update the copied solrsearch.properties file with those.
search.history.data.reindexing.enabled=true