#! /usr/bin/env ruby
require 'nokogiri'
require 'open-uri'
require 'byebug'
class Food
  def initialize(d, d1, d2, s)
    @date = d
    @dish1 = d1
    @dish2 = d2
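The listing cuts the Food initializer short. Below is a minimal sketch of how such a menu scraper could feed it with Nokogiri and open-uri; the URL, the CSS selectors, and the meaning of the fourth argument are assumptions, not anything from the gist.

require 'nokogiri'
require 'open-uri'

# MENU_URL and the selectors below are placeholders, not from the gist.
MENU_URL = 'https://example.com/cafeteria/menu'

doc = Nokogiri::HTML(URI.open(MENU_URL))

foods = doc.css('table.menu tr').map do |row|
  cells = row.css('td').map { |td| td.text.strip }
  next if cells.size < 4
  # The fourth argument is a guess at what `s` stands for in the cut-off initializer.
  Food.new(cells[0], cells[1], cells[2], cells[3])
end.compact

foods.each { |f| p f }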
@tesths
tesths / kr.sh
Last active December 6, 2017 04:46
Korean VPS setup script
# Provision the VPS: RVM (from the rael-gc PPA) plus Node.js.
# Optionally switch the Korean Ubuntu mirror back to the main archive first:
#sudo sed -i 's/kr.archive.ubuntu.com/archive.ubuntu.com/g' /etc/apt/sources.list
sudo apt-get -y update
sudo apt-get install -y software-properties-common
sudo apt-add-repository -y ppa:rael-gc/rvm
sudo apt-get -y update
sudo apt-get -y install rvm nodejs
# Load RVM into the current shell and let the `test` user use it.
source /etc/profile.d/rvm.sh
sudo gpasswd -a test rvm
require 'mina/rails'
require 'mina/git'
# require 'mina/rbenv' # for rbenv support. (https://rbenv.org)
# require 'mina/rvm' # for rvm support. (https://rvm.io)
# Basic settings:
# domain - The hostname to SSH to.
# deploy_to - Path to deploy into.
# repository - Git repo to clone from. (needed by mina/git)
# branch - Branch name to deploy. (needed by mina/git)
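The preview stops at the settings comment block; the values mina expects look roughly like this. The host, path, repository, and branch here are placeholders, not the gist's actual deployment target.

set :application_name, 'myapp'                        # placeholder project name
set :domain,           'example.com'                  # hostname to SSH to
set :deploy_to,        '/var/www/myapp'               # path to deploy into
set :repository,       'git@example.com:user/myapp.git' # repo cloned by mina/git
set :branch,           'master'                       # branch to deploy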
@tesths
tesths / ins.rb
Last active April 3, 2018 04:06
An Instagram spider
require 'json'
require 'httparty'
require 'telegram/bot'
require 'byebug'
token = ''
bot = Telegram::Bot::Client.new(token)
def requestIns
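The preview ends at the method definition. A small sketch of the sending side follows, assuming the goal is to push a scraped image into a chat with the telegram-bot gem; the chat id and the example URL are placeholders.

# CHAT_ID is a placeholder; `bot` is the client built above.
CHAT_ID = -100_123_456_789

def send_post(bot, image_url, caption)
  bot.api.send_photo(chat_id: CHAT_ID, photo: image_url, caption: caption)
rescue Telegram::Bot::Exceptions::ResponseError => e
  puts "Telegram error: #{e.message}"
end

# send_post(bot, 'https://example.com/photo.jpg', 'new post')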
from PIL import Image
import os, sys

# Collect every file sitting next to this script before resizing.
dir_path = os.path.dirname(os.path.realpath(__file__))
dirs = os.listdir(dir_path)
print(os.getcwd())
print(dirs)

def resize():
require 'json'
require 'httparty'
require 'telegram/bot'
require 'byebug'

def request_ins
  begin
    puts "request start at " + Time.now.to_s
    response = HTTParty.get('https://www.instagram.com/twicetagram/', timeout: 60)
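The body of request_ins is cut off here. A sketch of the parsing it presumably does next, lifting the window._sharedData JSON out of the profile HTML and reading the newest post; the key names follow Instagram's old shared-data layout and may no longer match.

def latest_post(html)
  raw = html.scan(/window\._sharedData = (.*?);/).dig(0, 0)
  return nil unless raw

  user = JSON.parse(raw).dig('entry_data', 'ProfilePage', 0, 'graphql', 'user')
  node = user&.dig('edge_owner_to_timeline_media', 'edges', 0, 'node')
  node && { shortcode: node['shortcode'], image_url: node['display_url'] }
end

# latest_post(response.body) => { shortcode: "...", image_url: "..." }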
@tesths
tesths / story.rb
Created August 6, 2018 08:26
Instagram story downloader
require 'httparty'
require 'json'

response = HTTParty.get('https://www.instagram.com/xx/', timeout: 60)
response = response.body.scan(/window\._sharedData = (.*?);/)[0][0]
json = JSON.parse(response)
user_id = json['entry_data']['ProfilePage'][0]['graphql']['user']['id']
puts user_id
url = 'https://i.instagram.com/api/v1/feed/user/' + user_id + '/reel_media/'
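The preview ends at the reel_media URL; the sketch below guesses at how the rest of the download goes. This private endpoint only answers requests that look like the official app, so the user agent and the file naming are assumptions, and the response keys reflect Instagram's private API at the time the gist was written.

require 'open-uri'

# Pretend to be the Instagram app; without a plausible UA (and usually a
# logged-in session cookie) the private endpoint refuses the request.
headers = { 'User-Agent' => 'Instagram 10.26.0 (iPhone; iOS 11.0; en_US)' }
reel = JSON.parse(HTTParty.get(url, headers: headers, timeout: 60).body)

(reel['items'] || []).each_with_index do |item, i|
  media = item['video_versions'] ? item['video_versions'][0] : item['image_versions2']['candidates'][0]
  ext   = item['video_versions'] ? 'mp4' : 'jpg'
  File.binwrite("story_#{i}.#{ext}", URI.open(media['url']).read)
end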
@tesths
tesths / gongzhonghao.json
Last active November 14, 2018 02:47
Web Scraper crawler for WeChat official accounts (公众号)
{
  "_id": "gongzhonghao",
  "startUrl": [
    "https:AAA"
  ],
  "selectors": [
    {
      "id": "total",
      "type": "SelectorElementScroll",
      "parentSelectors": [
@tesths
tesths / weibo.json
Created November 14, 2018 02:47
Web Scraper crawler for Weibo
{
  "_id": "weibo",
  "startUrl": [
    "https://weibo.com/SanaChina961229?is_search=0&visible=0&is_all=1&is_tag=0&profile_ftype=1&page=[501-588]"
  ],
  "selectors": [
    {
      "id": "real-content",
      "type": "SelectorElementScroll",
      "parentSelectors": [
@tesths
tesths / sana.json
Created November 16, 2018 12:29
SanaChina961229 Web Scraper
{
  "_id": "weibo",
  "startUrl": [
    "https://weibo.com/SanaChina961229?is_search=0&visible=0&is_all=1&is_tag=0&profile_ftype=1&page=[1-2]"
  ],
  "selectors": [
    {
      "id": "real-content",
      "type": "SelectorElementScroll",
      "parentSelectors": [