Skip to content

Instantly share code, notes, and snippets.

View rvvvt's full-sized avatar
💭
hail, traveler.

rvvvt rvvvt

💭
hail, traveler.
View GitHub Profile
@rvvvt
rvvvt / .htaccess
Created February 8, 2018 17:38 — forked from ScottPhillips/.htaccess
Common .htaccess Redirects
# Common 301 (permanent) redirect recipes for Apache .htaccess.
# NOTE: "domain.com" and the page names are placeholders — substitute real values.
#301 Redirects for .htaccess
#Redirect a single page:
Redirect 301 /pagename.php http://www.domain.com/pagename.html
#Redirect an entire site:
Redirect 301 / http://www.domain.com/
#Redirect an entire site to a sub folder
Redirect 301 / http://www.domain.com/subfolder/
@rvvvt
rvvvt / linkedin_extract.py
Created June 10, 2018 06:44 — forked from lobstrio/linkedin_extract.py
Scraping Linkedin profiles information through Selenium Python
# python package
import csv
import time
import random
import sys
import os
# selenium package
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import requests
from bs4 import BeautifulSoup
import time
# Desktop-Chrome User-Agent header, presumably sent with search requests so the
# scraper is not served the bot/legacy page — TODO confirm in the truncated code below.
USER_AGENT = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}

def fetch_results(search_term, number_results, language_code):
    """Validate the inputs for a search-results fetch.

    NOTE(review): the gist preview is truncated here — only the argument
    validation is visible; the actual request/parsing logic is not shown,
    and `number_results` / `language_code` usage cannot be documented.
    """
    # NOTE(review): `assert` is stripped under `python -O`; raising TypeError
    # would be safer for input validation.
    assert isinstance(search_term, str), 'Search term must be a string'
    assert isinstance(number_results, int), 'Number of results must be an integer'
@rvvvt
rvvvt / linkedin.js
Created January 7, 2019 09:41 — forked from liveashish/linkedin.js
Send auto invite on Linkedin
// Click every LinkedIn "connect" button on the page, staggering the clicks
// one second apart so they do not all fire at once.
var scheduleClick = function (delayIndex, btn) {
    setTimeout(function () {
        jQuery(btn).trigger('click');
    }, delayIndex * 1000);
};
$("button[class='mn-person-card__person-btn-ext button-secondary-medium']").each(scheduleClick);
@rvvvt
rvvvt / logic.py
Created January 7, 2019 19:41
Python Flask Email Checker
import dns.resolver
import socket
import smtplib
import re
# Set of allowed file extensions
# (presumably validates uploaded file names; only plain-text list formats are
# accepted — TODO confirm against the upload handler, which is not visible here)
ALLOWED_EXTENSIONS = {'txt', 'csv'}
# Extract all email addresses from a string with a regular expression
# NOTE(review): the extraction code this comment describes is truncated
# out of this snippet.
@rvvvt
rvvvt / google_search_bs.py
Created January 18, 2019 15:54 — forked from yxlao/google_search_bs.py
Google search with BeautifulSoup
import requests
from bs4 import BeautifulSoup
search_url_prefix = "https://www.google.com/search?q="

def get_first_result(search_str):
    """Return the displayed URL of the first Google result for *search_str*.

    Args:
        search_str: The raw (unencoded) search query.

    Returns:
        The text of the first ``<cite>`` element on the results page, or
        ``None`` when the page contains no ``<cite>`` element.

    Raises:
        requests.HTTPError: if Google answers with a non-2xx status.
    """
    from urllib.parse import quote_plus  # local import keeps the snippet self-contained
    # Bug fix: the raw query used to be concatenated straight into the URL, so
    # spaces and special characters produced a malformed request. Encode it.
    search_url = search_url_prefix + quote_plus(search_str)
    # A timeout prevents the call from hanging forever on a stalled connection.
    r = requests.get(search_url, timeout=10)
    r.raise_for_status()  # fail loudly instead of silently parsing an error page
    soup = BeautifulSoup(r.text, "html.parser")
    cite = soup.find('cite')
    # Robustness: previously this raised AttributeError when no result matched.
    return cite.text if cite is not None else None
@rvvvt
rvvvt / cdh-scrape.py
Created January 26, 2019 09:22 — forked from meg-codes/cdh-scrape.py
A basic web-scrape script designed to look for bad links on a particular site
#!/usr/bin/env python
# Script to scrape all links from a site, compile counts of each link, status
# codes of access and output the results as a CSV
#
# There's absolutely no reason this shouldn't be pulled into an OOP paradigm
# per se, but I left it functionalized because that can be easier for multitasking.
#
# Requirements:
# requests, bs4
@rvvvt
rvvvt / requests.py
Created January 26, 2019 22:39 — forked from Chairo/requests.py
requests multi-threading
# -*- coding:utf-8 -*-
import requests
from time import sleep
from threading import Thread
# Polling interval in seconds — presumably how long the monitoring loop sleeps
# between progress checks; its use is in the truncated part of the gist.
UPDATE_INTERVAL = 0.01

class URLThread(Thread):
    # One worker thread per URL, carrying its own timeout/redirect policy.
    # NOTE(review): the gist preview is truncated here — the rest of
    # __init__ (attribute assignments) and the run() method are not visible.
    def __init__(self, url, timeout=10, allow_redirects=True):
        super(URLThread, self).__init__()
@rvvvt
rvvvt / vk_ip_async.py
Created February 25, 2019 15:11 — forked from colyk/vk_ip_async.py
async
import requests
import asyncio
from bs4 import BeautifulSoup
# Collected proxies — presumably filled by the scraping code that follows
# this truncated excerpt.
proxy_list = []

def get_html(URL):
    # Fetch the page at URL.
    # NOTE(review): the snippet is cut off mid-function — the body of the
    # status-200 branch and the return statement are not visible, so the
    # success handling cannot be documented here.
    r = requests.get(URL)
    # print(r.request.headers)
    if(r.status_code == 200):
@rvvvt
rvvvt / spamhaha.py
Created March 19, 2019 03:17 — forked from liveashish/spamhaha.py
Sarahah spam bot: A life saver! 🔥 💪
import time
import requests
users_to_attack = ['USER_NAME', ] # placeholder usernames; targets of the spam loop
def sarahah_post(user, msg):
    # Open a session against the target user's sarahah.com profile page.
    # NOTE(review): the gist preview is truncated — the POST that actually
    # submits `msg` (and any CSRF-token extraction from `home`) is not visible.
    s = requests.Session()
    homeurl = 'https://' + user + '.sarahah.com/'
    home = s.get(homeurl)