I hereby claim:
- I am fffaraz on github.
- I am fffaraz (https://keybase.io/fffaraz) on keybase.
- I have a public key ASB--MPs8LCDbwPJOxCnVZ0kxuv-x93mHx4oo3bBLS9hIwo
To claim this, I am signing this object:
@echo off
rem Update every repository in the immediate subfolders of the current
rem directory by running "git pull" inside each one.
rem NOTE(review): the original carried " | |" table-extraction residue at the
rem end of every line, which cmd parses as a broken pipeline; removed.
for /d %%D in (*) do (
    rem pushd/popd instead of cd/cd.. : quotes handle folder names with
    rem spaces, and the starting folder is restored even on failure.
    pushd "%%D"
    echo %%D
    git pull
    popd
)
// NOTE(review): truncated fragment — main() is never closed, and every line
// carries " | |" residue from a table extraction, so this cannot compile
// as-is. Code left byte-identical; comments only.
#include <iostream> | |
#include <random> | |
// NOTE(review): <bits/stdc++.h> is a non-portable libstdc++ internal header;
// the specific standard headers above should be preferred.
#include <bits/stdc++.h> | |
// NOTE(review): file-scope `using namespace std;` is discouraged.
using namespace std; | |
int main() | |
{ | |
// Seed a Mersenne Twister engine once from a nondeterministic hardware
// entropy source (the usual <random> setup idiom).
std::random_device m_device{}; | |
std::mt19937 m_generator{m_device()}; |
// NOTE(review): truncated Qt fragment — neither namespace is closed and every
// line carries " | |" extraction residue; will not compile as-is. Code left
// byte-identical; comments only.
#include <QCoreApplication> | |
#include <QFile> | |
#include <QDataStream> | |
#include <QDebug> | |
#include <iostream> | |
// NOTE(review): Ui is normally the uic-generated namespace; nesting Emoji
// inside it is unusual — possibly two separate snippets fused together.
namespace Ui { | |
namespace Emoji { | |
// U+FE0F (VARIATION SELECTOR-16): appended to a codepoint to request emoji
// (color) presentation rather than text presentation.
constexpr auto kPostfix = 0xFE0FU; | |
class One {}; |
#!/bin/bash
# Repeatedly run "git pull" in the current repository, pausing 30 seconds
# between attempts. Runs forever until interrupted (Ctrl-C).
# NOTE(review): removed the trailing "|" table-extraction residue that left an
# open pipeline and broke the script.
while true; do
    git pull
    sleep 30
done
I hereby claim:
To claim this, I am signing this object:
<?php | |
// NOTE(review): truncated fragment — the constructor is never closed and
// updateMonitor() is not visible; every line carries " | |" extraction
// residue. Code left byte-identical; comments only.
//
// FileAlterationMonitor: tracks the contents of a folder so later scans can
// detect added/removed files (baseline captured via updateMonitor()).
class FileAlterationMonitor | |
{ | |
// $scanFolder: path of the directory to watch.
// $initialFoundFiles: presumably the baseline file listing — TODO confirm
// against the missing updateMonitor() body.
private $scanFolder, $initialFoundFiles; | |
// Store the target folder and take the initial snapshot immediately.
public function __construct($scanFolder) | |
{ | |
$this->scanFolder = $scanFolder; | |
$this->updateMonitor(); |
<?php | |
// Serves a gzip "bomb" to known scanner user-agents / WordPress-probing URLs
// (technique from the linked article). Truncated fragment: sendBomb(),
// startswith() and $url are defined in the cut-off remainder; lines carry
// " | |" extraction residue. Code left byte-identical; comments only.
// https://blog.haschek.at/2017/how-to-defend-your-website-with-zip-bombs.html | |
// dd if=/dev/zero bs=1M count=10240 | gzip > 10G.gzip | |
// NOTE(review): lower() is not a PHP builtin — the source article uses
// strtolower(); verify a lower() helper exists in the missing part of this file.
$agent = lower($_SERVER['HTTP_USER_AGENT']); | |
//check for nikto, sql map or "bad" subfolders which only exist on wordpress | |
// Match scanner signatures in the UA, or WordPress-only path prefixes that a
// non-WordPress site would never serve legitimately.
if (strpos($agent, 'nikto') !== false || strpos($agent, 'sqlmap') !== false || startswith($url,'wp-') || startswith($url,'wordpress') || startswith($url,'wp/')) | |
{ | |
sendBomb(); |
<!-- NOTE(review): truncated Game-of-Life page — the <script> body is cut off
     and every line carries " | |" extraction residue. Markup left
     byte-identical; comments only. -->
<!-- saved from http://www.cc.gatech.edu/grads/b/bhroleno/ --> | |
<html> | |
<!-- NOTE(review): <title> belongs inside <head>, not before it. -->
<title>Conway's Game of Life</title> | |
<head> | |
<style type="text/css"> | |
/* NOTE(review): "virtical-align" is a typo for "vertical-align" — browsers
   silently drop the unknown property. */
body{ font-family: serif; font-size:1.00em; text-align: center; margin: 10px 0 30px 0; virtical-align: middle } | |
</style> | |
<script type="text/javascript"> | |
// Current and scratch generation grids (40 rows; columns presumably allocated
// in the cut-off initialization code — TODO confirm).
var cells = new Array(40); | |
var tmpCells = new Array(40); |
# NOTE(review): truncated fragment — the final `if` has no body and the
# original indentation was flattened by extraction; lines also carry " | |"
# residue. Code left byte-identical; comments/annotation only.
from bs4 import BeautifulSoup | |
import requests | |
# Fetch a YouTube playlist page and walk its anchor tags, collecting links
# that point at individual videos ('/watch?' paths).
def getPlaylistLinks(url: str): | |
sourceCode = requests.get(url).text | |
soup = BeautifulSoup(sourceCode, 'html.parser') | |
domain = 'https://www.youtube.com' | |
# dir="ltr" anchors are the video-title links in YouTube's (old) playlist markup.
for link in soup.find_all("a", {"dir": "ltr"}): | |
href = link.get('href') | |
# Relative /watch? hrefs are videos; presumably joined with `domain` in the
# cut-off body — TODO confirm.
if href.startswith('/watch?'):