-
Pycapa is running on y138, while the KDC is running on y113. The following commands should be run on the KDC host (y113).

kadmin.local -q "addprinc -randkey fastcapa/y138.l42scl.hortonworks.com@EXAMPLE.COM"
kadmin.local -q "ktadd -k fastcapa.service.keytab fastcapa/y138.l42scl.hortonworks.com@EXAMPLE.COM"
scp fastcapa.service.keytab root@y138:/etc/security/keytabs/
-
Run these commands on a host where Kafka is installed. In this case y136.

kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer \
  --authorizer-properties zookeeper.connect=y113:2181 \
  --add \
  --allow-principal User:fastcapa \
  --topic pcap8 --group fastcapa
-
Follow the installation instructions.
-
Add the following properties to your configuration file under the [kafka-global] header.

security.protocol = SASL_PLAINTEXT
sasl.kerberos.keytab=/etc/security/keytabs/fastcapa.service.keytab
sasl.kerberos.principal=fastcapa/y138.l42scl.hortonworks.com@EXAMPLE.COM
-
Run Fastcapa.
fastcapa -l 0,1,2,3,4,5,6 --huge-dir /mnt/huge_1GB -- -t pcap8 -c /etc/fastcapa.ycluster -r 1024 -q 3 -x 32768
-
Snort is running on y137, while the KDC is running on y113. The following commands should be run on the KDC host (y113).

kadmin.local -q "addprinc -randkey snort/y137.l42scl.hortonworks.com@EXAMPLE.COM"
kadmin.local -q "ktadd -k snort.service.keytab snort/y137.l42scl.hortonworks.com@EXAMPLE.COM"
scp snort.service.keytab root@y137:/etc/security/keytabs/
Run these commands on a host where Kafka is installed. In this case y136.
-
Grant access to the new principal for the snort topic.

kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer \
  --authorizer-properties zookeeper.connect=y113:2181 \
  --add \
  --allow-principal User:snort \
  --topic snort \
  --group snort
-
Install Kafka, which is needed for the client utility kafka-console-producer.

yum -y install kafka
-
Setup JAAS.

[root@y137 ~]# cat ~/.java.login.config
KafkaClient {
  com.sun.security.auth.module.Krb5LoginModule required
  useTicketCache=false
  useKeyTab=true
  principal="snort/y137.l42scl.hortonworks.com@EXAMPLE.COM"
  keyTab="/etc/security/keytabs/snort.service.keytab"
  renewTicket=true
  debug=true
  serviceName="kafka"
  storeKey=true;
};
-
Install DAQ.

wget https://snort.org/downloads/snort/daq-2.0.6-1.src.rpm
rpmbuild --rebuild daq-2.0.6-1.src.rpm
yum -y install /root/rpmbuild/RPMS/x86_64/daq-2.0.6-1.x86_64.rpm
-
Install Snort.

wget https://snort.org/downloads/archive/snort/snort-2.9.8.0-1.src.rpm
rpmbuild --rebuild snort-2.9.8.0-1.src.rpm
yum -y install /root/rpmbuild/RPMS/x86_64/snort-2.9.8.0-1.x86_64.rpm
-
Install community rules and configure. Using interface enp129s0f0.

wget https://www.snort.org/downloads/community/community-rules.tar.gz
tar xvf community-rules.tar.gz
cp -r community-rules/community.rules /etc/snort/rules
touch /etc/snort/rules/white_list.rules
touch /etc/snort/rules/black_list.rules
touch /var/log/snort/alerts
chown -R snort:snort /etc/snort
sed -i 's/^# alert/alert/' /etc/snort/rules/community.rules
cp community-rules/snort.conf /etc/snort/snort.conf
echo "output alert_csv: /var/log/snort/alert.csv default" >> /etc/snort/snort.conf
sed -i "s/^ALERTMODE=.*$/ALERTMODE=/g" /etc/sysconfig/snort
sed -i "s/^NO_PACKET_LOG=.*$/NO_PACKET_LOG=1/" /etc/sysconfig/snort
sed -i "s/^INTERFACE=.*$/INTERFACE=enp129s0f0/" /etc/sysconfig/snort
-
Start Snort
service snortd start
-
Create Snort Producer script at /usr/bin/start-snort-producer.

#!/usr/bin/env bash
SNORT_ALERT_CSV_PATH=/var/log/snort/alert.csv
KAFKA_PROD=/usr/hdp/current/kafka-broker/bin/kafka-console-producer.sh
KAFKA_BROKER_LIST=y135.l42scl.hortonworks.com:6667,y136.l42scl.hortonworks.com:6667
SNORT_TOPIC=snort

tail -F $SNORT_ALERT_CSV_PATH | $KAFKA_PROD --broker-list $KAFKA_BROKER_LIST --topic $SNORT_TOPIC --security-protocol SASL_PLAINTEXT
-
Create service script at /etc/init.d/snort-producer.

#!/usr/bin/env bash
#
# Snort Kafka producer daemon
# chkconfig: 345 20 80
# description: Runs Snort Kafka producer
# processname: snort-producer
#
NAME=snort-producer
DESC="Executes Snort Kafka producer"
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_PATH="/tmp"
DAEMON="/usr/bin/start-snort-producer"
DAEMONOPTS="${@:2}"
LOGDIR=/var/log/snort-producer
LOGFILE=$LOGDIR/snort-producer.out

case "$1" in
  start)
    printf "%-50s" "Starting $NAME..."
    mkdir -p $LOGDIR
    rm -f $LOGFILE

    # kick-off the daemon
    cd $DAEMON_PATH
    PID=`$DAEMON $DAEMONOPTS > $LOGFILE 2>&1 & echo $!`
    if [ -z $PID ]; then
      printf "%s\n" "Fail"
    else
      echo $PID > $PIDFILE
      printf "%s\n" "Ok"
    fi
    ;;
  status)
    printf "%-50s" "Checking $NAME..."
    if [ -f $PIDFILE ]; then
      PID=`cat $PIDFILE`
      if [ -z "`ps axf | grep ${PID} | grep -v grep`" ]; then
        printf "%s\n" "Process dead but pidfile exists"
      else
        echo "Running"
      fi
    else
      printf "%s\n" "Service not running"
    fi
    ;;
  stop)
    printf "%-50s" "Stopping $NAME"
    PID=`cat $PIDFILE`
    PGID=`ps -o pgid= $PID | xargs`
    cd $DAEMON_PATH
    if [ -f $PIDFILE ]; then
      kill -- -$PGID
      printf "%s\n" "Ok"
      rm -f $PIDFILE
    else
      printf "%s\n" "pidfile not found"
    fi
    ;;
  restart)
    $0 stop
    $0 start
    ;;
  *)
    echo "Usage: $0 {status|start|stop|restart}"
    exit 1
esac
-
Start Snort producer.
service snort-producer start
-
Pycapa is running on y137, while the KDC is running on y113. The following commands should be run on the KDC host (y113).

kadmin.local -q "addprinc -randkey pycapa/y137.l42scl.hortonworks.com@EXAMPLE.COM"
kadmin.local -q "ktadd -k pycapa.service.keytab pycapa/y137.l42scl.hortonworks.com@EXAMPLE.COM"
scp pycapa.service.keytab root@y137:/etc/security/keytabs/
-
Run these commands on a host where Kafka is installed. In this case y136.

kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer \
  --authorizer-properties zookeeper.connect=y113:2181 \
  --add \
  --allow-principal User:pycapa \
  --topic pcap8 --group pycapa
-
Install system dependencies including the core development tools, Python libraries and header files, and Libpcap libraries and header files. On CentOS, you can install these requirements with the following command.
yum -y install "@Development tools" python-devel libpcap-devel
-
Install Librdkafka at your chosen $PREFIX.

wget https://github.com/edenhill/librdkafka/archive/v0.9.4.tar.gz -O - | tar -xz
cd librdkafka-0.9.4/
./configure --prefix=$PREFIX
make
make install
-
Install Pycapa.

pip install -r requirements.txt
python setup.py install
-
Run Pycapa.

pycapa --producer \
  --interface enp129s0f1 \
  --kafka-broker y135.l42scl.hortonworks.com:6667 \
  --kafka-topic pcap8 \
  --max-packets 10 \
  --log-level debug \
  -X security.protocol=SASL_PLAINTEXT \
  -X sasl.kerberos.keytab=/etc/security/keytabs/pycapa.service.keytab \
  -X sasl.kerberos.principal=pycapa/y137.l42scl.hortonworks.com@EXAMPLE.COM

pycapa --consumer \
  --kafka-broker y135.l42scl.hortonworks.com:6667 \
  --kafka-topic pcap8 \
  -X security.protocol=SASL_PLAINTEXT \
  -X sasl.kerberos.keytab=/etc/security/keytabs/pycapa.service.keytab \
  -X sasl.kerberos.principal=pycapa/y137.l42scl.hortonworks.com@EXAMPLE.COM \
  -X group.id=pycapa \
  | tshark -i -
-
Bro is running on y137, while the KDC is running on y113. The following commands should be run on the KDC host (y113).

kadmin.local -q "addprinc -randkey bro/y137.l42scl.hortonworks.com@EXAMPLE.COM"
kadmin.local -q "ktadd -k bro.service.keytab bro/y137.l42scl.hortonworks.com@EXAMPLE.COM"
scp bro.service.keytab root@y137:/etc/security/keytabs/
-
Run these commands on a host where Kafka is installed. In this case y136.

kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer \
  --authorizer-properties zookeeper.connect=y113:2181 \
  --add \
  --allow-principal User:bro \
  --topic bro
-
Install librdkafka on the host where Bro will run (y137).

wget https://github.com/edenhill/librdkafka/archive/v0.9.4.tar.gz -O - | tar -xz
cd librdkafka-0.9.4/
./configure --prefix=/usr --enable-sasl
make
make install
-
Install Bro on the host where it will run (y137).

wget https://www.bro.org/downloads/release/bro-2.4.1.tar.gz -O - | tar -xz
cd bro-2.4.1
./configure --prefix=/usr
make
make install
-
Configure Bro to listen on enp129s0f0.

sed -i 's/eth0/enp129s0f0/g' /usr/etc/node.cfg
-
Install config changes.
broctl install
-
Configure logs.
# Rotation interval in seconds for log files on manager (or standalone) node.
# A value of 0 disables log rotation.
LogRotationInterval = 3600
# Expiration interval for archived log files in LogDir. Files older than this
# will be deleted by "broctl cron". The interval is an integer followed by
# one of these time units: day, hr, min. A value of 0 means that logs
# never expire.
LogExpireInterval = 7 day
# Location of the log directory where log files will be archived each rotation
# interval.
#LogDir = /usr/bro/logs
LogDir = /metron1/bro/logs
# Location of the spool directory where files and data that are currently being
# written are stored.
#SpoolDir = /usr/bro/spool
SpoolDir = /metron1/bro/spool
-
Install the Bro Plugin on the host where it will run (y137).

cd incubator-metron/metron-sensors/bro-plugin-kafka
./configure --bro-dist=/root/bro-2.4.1 --install-root=/usr/lib/bro/plugins/ --with-librdkafka=/usr
make
make install
-
Add the following to /usr/share/bro/site/local.bro

@load Bro/Kafka/logs-to-kafka.bro
redef Kafka::logs_to_send = set(HTTP::LOG, DNS::LOG);
redef Kafka::topic_name = "bro";
redef Kafka::tag_json = T;
redef Kafka::kafka_conf = table(
    ["metadata.broker.list"] = "y134.l42scl.hortonworks.com:6667,y135.l42scl.hortonworks.com:6667,y136.l42scl.hortonworks.com:6667",
    ["security.protocol"] = "SASL_PLAINTEXT",
    ["sasl.kerberos.keytab"] = "/etc/security/keytabs/bro.service.keytab",
    ["sasl.kerberos.principal"] = "bro/y137.l42scl.hortonworks.com@EXAMPLE.COM",
    ["debug"] = "metadata"
);
-
Make sure the changes are installed.
broctl install
-
Replay packets from y138 -> y137 using enp129s0f0. Run the following command on y138.

tcpreplay -i enp129s0f0 --loop=0 --stats=5 --preload-pcap --mbps 100 example.pcap
-
See if data is hitting the bro topic in Kafka.

kafka-simple-consumer-shell.sh --broker-list y136:6667 --topic bro --security-protocol SASL_PLAINTEXT --partition 0 --offset -1
-
YAF is running on y137, while the KDC is running on y113. The following commands should be run on the KDC host (y113).

kadmin.local -q "addprinc -randkey yaf/y137.l42scl.hortonworks.com@EXAMPLE.COM"
kadmin.local -q "ktadd -k yaf.service.keytab yaf/y137.l42scl.hortonworks.com@EXAMPLE.COM"
scp yaf.service.keytab root@y137:/etc/security/keytabs/
-
Run these commands on a host where Kafka is installed. In this case y136.

kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer \
  --authorizer-properties zookeeper.connect=y113:2181 \
  --add \
  --allow-principal User:yaf \
  --topic yaf
-
Install libfixbuf.

wget http://tools.netsa.cert.org/releases/libfixbuf-1.7.1.tar.gz -O - | tar -xz
cd libfixbuf-1.7.1/
./configure
make
make install
-
Install yaf.

wget http://tools.netsa.cert.org/releases/yaf-2.8.0.tar.gz -O - | tar -xz
cd yaf-2.8.0/
./configure --enable-applabel --enable-plugins
make
make install
-
Create yaf start script.

[root@y137 yaf-2.8.0]# cat /usr/bin/start-yaf
#!/usr/bin/env bash
YAF_TOPIC=yaf
SNIFF_IFACE=enp129s0f0
KAFKA_BROKER=y134.l42scl.hortonworks.com:6667,y135.l42scl.hortonworks.com:6667,y136.l42scl.hortonworks.com:6667
YAF_BIN=/usr/local/bin/yaf
YAFSCII_BIN=/usr/local/bin/yafscii
KAFKA_PRODUCER=/usr/hdp/current/kafka-broker/bin/kafka-console-producer.sh

kinit -kt /etc/security/keytabs/yaf.service.keytab yaf/`hostname`@EXAMPLE.COM
if [ $? -eq 0 ]
then
  $YAF_BIN --in $SNIFF_IFACE --live pcap "${@:1}" | $YAFSCII_BIN --tabular | $KAFKA_PRODUCER --broker-list $KAFKA_BROKER --topic $YAF_TOPIC --security-protocol SASL_PLAINTEXT
  exit 0
else
  echo "Could not kinit" >&2
  exit 1
fi
-
Create yaf service script.

#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# yaf daemon
# chkconfig: 345 20 80
# description: Runs yaf - yet another flowmeter
# processname: yaf
#
NAME=yaf
DESC="Executes yaf - yet another flowmeter"
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
DAEMON_PATH=/tmp
DAEMON=/usr/bin/start-yaf
DAEMONOPTS="${@:2}"

case "$1" in
  start)
    printf "%-50s" "Starting $NAME..."

    # kick-off the daemon
    cd $DAEMON_PATH
    PID=`$DAEMON $DAEMONOPTS > /dev/null 2>&1 & echo $!`
    if [ -z $PID ]; then
      printf "%s\n" "Fail"
    else
      echo $PID > $PIDFILE
      printf "%s\n" "Ok"
    fi
    ;;
  status)
    printf "%-50s" "Checking $NAME..."
    if [ -f $PIDFILE ]; then
      PID=`cat $PIDFILE`
      if [ -z "`ps axf | grep ${PID} | grep -v grep`" ]; then
        printf "%s\n" "Process dead but pidfile exists"
      else
        echo "Running"
      fi
    else
      printf "%s\n" "Service not running"
    fi
    ;;
  stop)
    printf "%-50s" "Stopping $NAME"
    PID=`cat $PIDFILE`
    cd $DAEMON_PATH
    if [ -f $PIDFILE ]; then
      kill -HUP $PID
      killall $NAME
      printf "%s\n" "Ok"
      rm -f $PIDFILE
    else
      printf "%s\n" "pidfile not found"
    fi
    ;;
  restart)
    $0 stop
    $0 start
    ;;
  *)
    echo "Usage: $0 {status|start|stop|restart}"
    exit 1
esac
-
Define JAAS config.

[root@y137 ~]# cat ~/.java.login.config
KafkaClient {
  com.sun.security.auth.module.Krb5LoginModule required
  useTicketCache=false
  useKeyTab=true
  principal="yaf/y137.l42scl.hortonworks.com@EXAMPLE.COM"
  keyTab="/etc/security/keytabs/yaf.service.keytab"
  renewTicket=true
  debug=true
  serviceName="kafka"
  storeKey=true;
};
-
Start the service.
chmod 755 /etc/init.d/yaf service yaf start
wget https://github.com/luigirizzo/netmap/archive/v11.3.tar.gz -O - | tar -xz
cd netmap-11.3/
./configure --no-drivers
make
make install
export DPDK_HOME=/usr/local/dpdk
export PATH=$PATH:$DPDK_HOME/bin/:$DPDK_HOME/sbin:$DPDK_HOME/share/dpdk/tools/
export RTE_SDK=/usr/local/dpdk/share/dpdk
export RTE_TARGET=x86_64-native-linuxapp-gcc
wget http://dpdk.org/browse/apps/pktgen-dpdk/snapshot/pktgen-3.2.8.tar.gz -O - | tar -xz
cd pktgen-3.2.8
make
ifdown enp129s0f1
insmod /usr/local/dpdk/lib/modules/`uname -r`/extra/dpdk/igb_uio.ko
dpdk-devbind --bind=igb_uio "81:00.1"
ifdown enp0s8
modprobe uio_pci_generic
dpdk-devbind --bind=uio_pci_generic "00:08.0"
grep -i huge /proc/meminfo
echo 16384 > /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
echo 16384 > /sys/devices/system/node/node1/hugepages/hugepages-2048kB/nr_hugepages
mkdir /mnt/huge
mount -t hugetlbfs nodev /mnt/huge
cd pktgen-3.2.8
app/app/x86_64-native-linuxapp-gcc/pktgen \
-l 0-2 \
-n 761 \
--socket-mem=512,0 \
--proc-type auto \
-- \
-p 0x30 \
-m "1.0" \
-s 0:/root/example.pcap
Check for malformed packets
pycapa --consumer \
--kafka-broker y134.l42scl.hortonworks.com:6667 \
--kafka-topic pcap12 \
--kafka-offset begin \
-X security.protocol=SASL_PLAINTEXT \
-X sasl.kerberos.keytab=/etc/security/keytabs/metron.headless.keytab \
-X sasl.kerberos.principal=metron@EXAMPLE.COM \
-X group.id=metron \
| tshark -i - -Y malformed
Count number of packets across all partitions
kafka-run-class.sh \
kafka.tools.GetOffsetShell \
--broker-list y135:6667 \
--topic pcap12 \
--security-protocol PLAINTEXTSASL \
--time -1 | \
grep pcap12 | \
awk -F: '{p+=$3} END {print p}'