Intento de descripción del funcionamiento de algunos Comandos en TidalCycles (y en otros lados también)
-
Algoritmo Euclideano
-
Mixer Ritchse/Lil' Data/ Kindhom / Otres?
-
La Doble Booleana ft Martín Karadagian
import numpy as np | |
import cv2 | |
# Cell states for the cellular-automaton grid, chosen as grayscale pixel
# values so the grid can be displayed directly with cv2.
ON = 255
OFF = 0
# Grid side length.
N = 200


def rule(neighbours, rules):
    """Return the next state of a cell.

    Parameters:
        neighbours: the cell's live-neighbour count (or current
            neighbourhood key, depending on the caller).
        rules: container of neighbour counts for which the cell is ON.

    Returns:
        ON (255) when ``neighbours`` is in ``rules``, OFF (0) otherwise.

    NOTE(review): the original excerpt fell through and returned None on
    a miss; given the ON/OFF pixel constants, an explicit OFF is the
    intended miss value — this makes the dead state explicit.
    """
    if neighbours in rules:
        return ON
    return OFF
from psychopy import visual, core, event | |
import datetime | |
import pandas as pd | |
# Colours — hex strings in the form PsychoPy's visual stimuli accept
# for their color parameters.
gray = '#969696'   # mid gray, used as a neutral background
black = '#000000'
white = '#FFFFFF'
# Window parameters
# NOTE(review): fragment is truncated here — the window setup that this
# header announces is not visible in this excerpt.
import requests | |
from bs4 import BeautifulSoup | |
import math | |
import sys | |
def number_of_results(text):
    """Query Google Search for *text* and locate the result-count div.

    The desktop-Chrome User-Agent and the ``gl=us`` parameter force the
    full (non-mobile, US-English) results page so the ``resultStats``
    element is present.

    NOTE(review): this excerpt is truncated — ``res`` is located but
    never parsed or returned in the visible lines; presumably the hit
    count is extracted from ``res.text`` further down. Confirm against
    the full file. Scraping Google's HTML is brittle and may break when
    the page markup changes.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
    r = requests.get("https://www.google.com/search?q="+text.replace(" ","+"),params={"gl":"us"},headers=headers)
    soup = BeautifulSoup(r.text, "lxml")
    res = soup.find("div", {"id": "resultStats"})
# Access this file at: https://bit.ly/2OyyWr2
# (Mind upper/lower case in the URL)
# Tempo and scale
# NOTE(review): ``Scale`` and ``Clock`` are not defined in this excerpt —
# presumably provided by a FoxDot live-coding session (`from FoxDot import *`);
# confirm against the full file.
print(help(Scale))
Scale.default = Scale.minor
print(Clock.bpm)
# What instruments are available
from bs4 import BeautifulSoup | |
import requests | |
import os | |
# Base URL of the lyrics site this scraper targets.
letras_url = "https://www.letras.com"


def descargar_cancion(url_path, destination_path):
    """Fetch the lyrics page at ``letras_url + url_path`` and parse it.

    NOTE(review): truncated excerpt — ``destination_path`` is unused in
    the visible lines; presumably the extracted lyrics are written to it
    further down. Confirm against the full file.
    """
    url = f"{letras_url}{url_path}"
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
### Keybase proof

I hereby claim:

* I am mathigatti on github.
* I am mathigatti (https://keybase.io/mathigatti) on keybase.
* I have a public key ASC6UTYH0FvHsHHVGVrZYm9uCr8gWfWencCBnDnCZuok-Qo

To claim this, I am signing this object:
import math | |
from es_lemmatizer import lemmatize | |
import spacy | |
from nltk.tokenize import word_tokenize | |
from nltk import ngrams, FreqDist | |
from tqdm import tqdm | |
from collections import defaultdict | |
import unidecode | |
import sys | |
import os |
from bs4 import BeautifulSoup | |
import requests | |
import os | |
def descargar_noticia(url):
    """Fetch a news-article page at *url* and parse it with BeautifulSoup.

    NOTE(review): truncated excerpt — ``noticia`` is initialised as the
    empty accumulator string, but the lines that extract and append the
    article text are not visible here. Confirm against the full file.
    """
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    noticia = ""