Alan Nichol amn41

@amn41
amn41 / gpt.sh
Created March 2, 2023 08:40
ChatGPT on the command line: add this to your .bashrc/.zshrc
function gpt() {
  local url="https://api.openai.com/v1/chat/completions"
  local model="gpt-3.5-turbo"
  # Build the JSON body with jq so quotes in the prompt are escaped correctly
  local body
  body=$(jq -n --arg model "$model" --arg content "$1" \
    '{model: $model, messages: [{role: "user", content: $content}]}')
  local headers="Content-Type: application/json"
  local auth="Authorization: Bearer ${OPENAI_API_KEY}"
  # -s: silent; jq -r extracts just the assistant's reply text
  curl -s -H "$headers" -H "$auth" -d "$body" "$url" \
    | jq -r '.choices[0].message.content'
}
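
Usage, once OPENAI_API_KEY is exported in the same shell (the prompt below is just an example):

export OPENAI_API_KEY="sk-..."
gpt "explain the difference between bashrc and zshrc"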
@amn41
amn41 / README.md
Last active May 28, 2020 13:33
GitHub Actions workflow to comment on a Rasa repo PR with cross-validation results

Comment on a GitHub PR with Rasa NLU cross-validation results


Instructions

Save the YAML file at .github/workflows/comment_crossval_results.yml.

Add the format_results.py script at the root of your repo.
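
The workflow YAML and the script itself aren't included in this preview. As a rough sketch, assuming the workflow runs Rasa's NLU cross-validation and that the per-intent scores land in results/intent_report.json (both assumptions, not shown in the gist), format_results.py might turn the report into a markdown table for the PR comment:

# Hypothetical sketch, not the gist's actual script: convert Rasa's
# cross-validation intent report into a markdown table for a PR comment.
import json

def format_results(report_path="results/intent_report.json"):
    with open(report_path) as f:
        report = json.load(f)
    rows = ["| intent | precision | recall | f1 |", "| --- | --- | --- | --- |"]
    for intent, scores in sorted(report.items()):
        if not isinstance(scores, dict):
            continue  # skip scalar summary entries such as "accuracy"
        rows.append("| {} | {:.3f} | {:.3f} | {:.3f} |".format(
            intent, scores["precision"], scores["recall"], scores["f1-score"]))
    return "\n".join(rows)

if __name__ == "__main__":
    print(format_results())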

import streamlit as st
import numpy as np
import pandas as pd
import json
import requests

@st.cache
def get_auth_token(host, user, pw):
    # Only runs on a cache miss; Streamlit reuses the cached token afterwards
    st.write("cache miss token!")
    url = f"{host}/api/auth"
    # (completion based on the uncached variant of this function below)
    payload = {"username": user, "password": pw}
    response = requests.post(url, json=payload)
    return response.json()["access_token"]
import json
import requests

def get_auth_token(host, user, pw):
    url = f"{host}/api/auth"
    payload = {"username": user, "password": pw}
    response = requests.post(url, json=payload)
    try:
        token = response.json()["access_token"]
        return token
    except (ValueError, KeyError):
        # body wasn't JSON or carried no token; surface the raw response
        raise RuntimeError(f"authentication failed: {response.text}")
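
A hypothetical call, with placeholder host and credentials (only the /api/auth path comes from the function above):

token = get_auth_token("http://localhost:5002", "me", "secret")
headers = {"Authorization": f"Bearer {token}"}  # reuse on subsequent requests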
import numpy as np
from sklearn.svm import SVC
from sklearn.decomposition import PCA
# cross_validation and grid_search were removed from scikit-learn in 0.20;
# both helpers now live in sklearn.model_selection
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import pickle
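
These imports point at a PCA-plus-SVM model tuned with grid search; a minimal sketch under that assumption (the dataset and parameter grid are illustrative, not from the gist):

from sklearn.datasets import load_digits
from sklearn.pipeline import Pipeline

X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# Reduce dimensionality first, then classify; tune C and gamma by grid search
pipe = Pipeline([("pca", PCA(n_components=30)), ("svc", SVC())])
params = {"svc__C": [0.1, 1, 10], "svc__gamma": ["scale", 0.01]}
grid = GridSearchCV(pipe, params, cv=3)
grid.fit(X_train, y_train)
print(classification_report(y_test, grid.predict(X_test)))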
"""Implements the long-short term memory character model.
This version vectorizes over multiple examples, but each string
has a fixed length."""
from __future__ import absolute_import
from __future__ import print_function
from builtins import range
from os.path import dirname, join
import numpy as np
import numpy.random as npr
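
The docstring above says the model vectorizes over a batch of fixed-length strings; a minimal sketch of one such encoding (the function name and array layout are my assumptions, not the gist's code):

def strings_to_one_hot(strings, alphabet_size=128):
    # Encode equal-length ASCII strings as a
    # (string_length, batch_size, alphabet_size) one-hot array
    length = len(strings[0])
    assert all(len(s) == length for s in strings)
    out = np.zeros((length, len(strings), alphabet_size))
    for b, s in enumerate(strings):
        for t, ch in enumerate(s):
            out[t, b, ord(ch)] = 1.0
    return out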
import numpy as np

vocab_file = "/path/to/vocab_file"
vectors_file = "/path/to/vectors_file"
embed = Embedding(vocab_file, vectors_file)

cuisine_refs = ["mexican", "chinese", "french", "british", "american"]
threshold = 0.2
text = "I want to find an indian restaurant"

def sum_vecs(embed, text):
    # Sum the embeddings of every in-vocabulary token in the text
    tokens = text.split(' ')
    vec = np.zeros(embed.W.shape[1])
    for idx, term in enumerate(tokens):
        if term in embed.vocab:
            vec = vec + embed.W[embed.vocab[term], :]
    return vec
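
The cuisine_refs and threshold variables suggest scoring the sentence against the reference cuisines; one way to do that is a normalized-centroid dot product (the helper name is mine, a sketch rather than the gist's code):

def get_centroid(embed, examples):
    # Average direction of the reference phrases, normalized to unit length
    C = np.sum([sum_vecs(embed, e) for e in examples], axis=0)
    return C / np.linalg.norm(C)

C = get_centroid(embed, cuisine_refs)
vec = sum_vecs(embed, text)
score = np.dot(vec / np.linalg.norm(vec), C)
is_cuisine_query = score > threshold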
@amn41
amn41 / embedding.py
Last active October 17, 2017 01:19
import numpy as np

class Embedding(object):
    def __init__(self, vocab_file, vectors_file):
        with open(vocab_file, 'r') as f:
            words = [x.rstrip().split(' ')[0] for x in f.readlines()]
        with open(vectors_file, 'r') as f:
            vectors = {}
            for line in f:
                vals = line.rstrip().split(' ')
                vectors[vals[0]] = [float(x) for x in vals[1:]]
        # completion sketch: build the vocab index and weight matrix (W)
        # that sum_vecs above expects
        self.vocab = {w: i for i, w in enumerate(words)}
        self.W = np.zeros((len(words), len(next(iter(vectors.values())))))
        for word, v in vectors.items():
            if word in self.vocab:
                self.W[self.vocab[word], :] = v
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from flask import Blueprint, request, jsonify
import requests
from rasa_dm.channels.channel import UserMessage, OutputChannel