Skip to content

Instantly share code, notes, and snippets.

@jpcofr
Last active April 22, 2024 12:50
Show Gist options
  • Star 1 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save jpcofr/27db7dc903a61eae4f63ff4fa9133dde to your computer and use it in GitHub Desktop.
temporal
# List pods that are neither Running nor Succeeded (i.e. pending/failed/unknown),
# printing only the pod name and its phase.
kubectl get pods --field-selector=status.phase!=Running,status.phase!=Succeeded -o custom-columns=NAME:.metadata.name,STATUS:.status.phase
#!/bin/bash
# Create 2300 empty files named file_1.txt .. file_2300.txt in the
# current directory.
for idx in $(seq 1 2300); do
    touch "file_${idx}.txt"
done
# Copy every subdirectory of the current directory into /data/ inside
# pod "my-pod" in the "default" namespace.
# Fix: the original contained a typo — "ku bectl" instead of "kubectl" —
# which made the loop fail on every iteration.
bash -c 'for dir in */; do kubectl cp "${dir}" "default/my-pod:/data/${dir}"; done'
# List pods in the current namespace.
kubectl get pods
# Same, scoped to an explicit namespace.
kubectl get pods --namespace <namespace>
# csh/tcsh variant: copy each local subdirectory into the pod.
foreach dir (*/); kubectl cp "${dir}" "default/my-pod:/data/${dir}"; end
# bash variant, multi-line form of the same copy loop.
for dir in */; do
kubectl cp "${dir}" "default/my-pod:/data/${dir}"
done
# bash variant, one-liner form.
for dir in */; do kubectl cp "${dir}" "default/my-pod:/data/${dir}"; done
# Preview which files inside the tarball already exist at the destination
# (and would therefore be overwritten by an extraction).
tar -ztf yourfile.tgz | while read line; do if [ -e "/path/to/directory/$line" ]; then echo "$line exists and might be overwritten"; fi; done
# Extract, but keep destination files that are newer than the archive's copies.
tar -xzf /path/to/yourfile.tgz --keep-newer-files
# Print the first matching line, and — if there was more than one match —
# the last matching line as well.
grep 'pattern' file | awk 'NR==1; END{if (NR>1) print}'
# csh alias: run the application with its first argument, discarding all
# output, in the background (\!:1 is csh history substitution for arg 1).
alias runapp './yourApplication \!:1 >& /dev/null &'
# Count how often each IP address appears as source or destination.
tshark -r [input_file].pcap -T fields -e ip.src -e ip.dst | tr '\t' '\n' | sort | uniq -c
# Same, but just the unique IP list without counts.
tshark -r [input_file].pcap -T fields -e ip.src -e ip.dst | tr '\t' '\n' | sort | uniq
# csh alias: resolve the first argument to an absolute path and pipe it
# to a clipboard helper.
alias toclip 'readlink -f \!:1 | to_clipboard'
# Total resident memory (MB) of all processes owned by the current user.
ps -u $USER --no-headers -o rss | awk '{sum+=$1} END {print sum / 1024 " MB"}'
# Zip variants below store each file under "<parentdir>_<filename>".
# NOTE(review): interpolating {} inside an sh -c string is unsafe for
# hostile filenames (shell injection); prefer `sh -c '...' _ {}` passing
# the path as a positional argument.
find /path/to/files -type f -exec sh -c 'f="{}"; zip "output.zip" -j "$f" "$(dirname "$f" | sed "s#.*/##")_$(basename "$f")"' \;
find /path/to/files -type f -exec sh -c 'zip "output.zip" -j "{}" "$(basename $(dirname "{}"))_$(basename "{}")"' \;
# Variant: one zip archive per parent directory, named "<parentdir>.zip".
find /path/to/files -type f -exec sh -c 'f="{}"; d=$(basename $(dirname "$f")); zip "${d}.zip" -j "$f" "${d}_$(basename "$f")"' \;
# Variant: single output.zip with entry path "<parentdir>/<filename>".
find /path/to/files -type f -exec sh -c 'zip -j "output.zip" "$(basename $(dirname "{}"))/$(basename "{}")"' \;
-------------------------
data:text/html,%20%3Cbody%20contenteditable%20style=%22font:%202rem/1.5%20Courier;background-color:black;color:green;max-width:60rem;margin:0%20auto;padding:4rem;%22%3E
# Extract ISO-8601 timestamps with milliseconds and a negative UTC offset
# (years 2000-2099) from all .txt files in the current directory.
grep -oE '20[0-9]{2}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{3}-[0-9]{2}:[0-9]{2}' *.txt
# Same pattern for any 4-digit year, over another directory, sorted.
grep -oE '[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}\.[0-9]{3}-[0-9]{2}:[0-9]{2}' /path/to/files/* | sort
-------------------------
# Concatenate all matching files, emitting a blank line before each file
# (FNR==1 fires at the start of every input file; the bare `1` prints
# every line). Output is discarded here — redirect somewhere other than
# /dev/null to keep the result.
find /path/to/directory -type f -name "prefix*.txt" -exec awk 'FNR==1{print ""}1' {} + > /dev/null
import pyshark
import json

# Collect every JSON object found inside HTTP payloads of a packet capture.
# Replace 'your_capture_file.pcap' with the path to your capture file.
capture_file = 'your_capture_file.pcap'

# get_raw_packet() requires the raw bytes to be retained at capture time.
# NOTE(review): the original called FileCapture(capture_file) with no
# options, on which get_raw_packet() raises — confirm against the pyshark
# version in use.
capture = pyshark.FileCapture(capture_file, include_raw=True, use_json=True)

json_objects = []
for packet in capture:
    try:
        # Raw bytes of the whole packet.
        raw_data = packet.get_raw_packet()
        # Packet bytes are not guaranteed to be valid UTF-8; replace
        # undecodable bytes instead of aborting on the first bad packet
        # (the original str(raw, 'utf-8') raised for any binary payload).
        packet_data = raw_data.decode('utf-8', errors='replace')
        # Only bother parsing packets that advertise a JSON content type.
        if 'application/json' in packet_data:
            json_start = packet_data.find('{')
            json_end = packet_data.rfind('}')
            if json_start != -1 and json_end != -1:
                json_str = packet_data[json_start:json_end + 1]
                json_objects.append(json.loads(json_str))
    except json.JSONDecodeError:
        # Truncated/fragmented payloads produce invalid JSON; skip them
        # (narrowed from the original blanket `except Exception: pass`).
        continue

capture.close()
# json_objects now contains all the JSON objects found in the capture file.
#####################
from scapy.all import *

# Load the entire pcap file into memory as a list of packets.
packets = rdpcap('your_pcap_file.pcap')

for packet in packets:
    # haslayer('HTTP') only matches if scapy's HTTP layer is loaded
    # (scapy.layers.http); the bare name HTTP below comes from the same
    # module via the star import — TODO confirm for the scapy version used.
    if packet.haslayer('HTTP'):
        # NOTE(review): mainline scapy's HTTP layer exposes no `.json`
        # attribute; this presumably targets a custom/extended layer —
        # verify before relying on it.
        json_data = packet[HTTP].json
        # Do something with the JSON data.
        print(json_data)
##############
import pyshark
import json

# Open the packet capture file.
capture = pyshark.FileCapture('capture.pcapng')

for packet in capture:
    # pyshark packets support the `in` operator for layer tests;
    # haslayer() is a scapy API and does not exist on pyshark packets
    # (the original raised AttributeError on every packet).
    if 'http2' in packet:
        # NOTE(review): assumes the dissected HTTP/2 layer exposes a
        # `json` field — confirm against this capture's dissection.
        json_object = packet.http2.json
        # Parse the JSON text into a Python dictionary.
        python_dictionary = json.loads(json_object)
        print(python_dictionary)

capture.close()
##############
import pyshark
import json

# Keep only HTTP/2 traffic; DATA frames carry the message bodies.
cap = pyshark.FileCapture('your_pcap_file.pcap', display_filter='http2')

for packet in cap:
    # NOTE(review): http2.type is numeric in tshark (DATA frames are 0);
    # comparing against the string 'DATA' may never match — confirm and
    # adjust to == '0' if needed.
    if 'http2' in packet and packet.http2.type == 'DATA':
        payload = packet.http2.data.data
        # pyshark exposes field values as str (colon-separated hex for raw
        # byte fields), so the original payload.decode('utf-8') raised
        # AttributeError. Convert hex -> bytes -> text instead.
        payload_bytes = bytes.fromhex(payload.replace(':', ''))
        payload_str = payload_bytes.decode('utf-8')
        # Parse the frame body as JSON.
        payload_json = json.loads(payload_str)
        print(payload_json)

cap.close()
################################################
import pyshark
import json

# Pre-filter to HTTP/2 DATA frames so the loop needs no per-packet check.
# NOTE(review): tshark's http2.type is numeric (DATA == 0); the string
# comparison in this filter may match nothing — confirm and switch to
# display_filter='http2.type == 0' if so.
cap = pyshark.FileCapture('your_pcap_file.pcap', display_filter='http2.type == "DATA"')

for packet in cap:
    payload = packet.http2.data.data
    # pyshark field values are str (colon-separated hex for byte fields),
    # so the original .decode('utf-8') raised AttributeError — convert
    # the hex representation to real bytes first.
    payload_str = bytes.fromhex(payload.replace(':', '')).decode('utf-8')
    # Parse the frame body as JSON.
    payload_json = json.loads(payload_str)
    print(payload_json)

cap.close()
#!/bin/bash

# filter_json_lines FILE
# Copy every line of FILE that parses as valid JSON (per `jq .`) into
# FILE_filtered.txt, silently skipping invalid lines.
filter_json_lines() {
    # `local` keeps these from leaking into the caller's environment.
    local input_file="$1"
    local output_file="${input_file}_filtered.txt"

    # Create or truncate the output file.
    : > "$output_file"

    # IFS= and -r preserve leading whitespace and backslashes per line.
    while IFS= read -r line; do
        # Test jq's exit status directly instead of via $? (fragile if a
        # statement is ever inserted in between). printf instead of echo:
        # echo mangles backslash escapes and lines starting with "-".
        if printf '%s\n' "$line" | jq . >/dev/null 2>&1; then
            printf '%s\n' "$line" >> "$output_file"
        fi
    done < "$input_file"
}

# Entry point: the file to filter is the script's first argument.
filter_json_lines "$1"
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment