Sunil Kumar Dash (sunilkumardash9)

import openai
import gradio as gr

messages = []  # chat history shared globally across the callbacks

def add_text(history, text):
    global messages  # messages (a list of chat dicts) is defined globally
    history = history + [(text, '')]
    messages = messages + [{"role": 'user', 'content': text}]
    return history, ""
def generate_response(history, model):
    global messages, cost
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0.2,
    )
    response_msg = response.choices[0].message.content
    # the preview is truncated here; a minimal completion appends the assistant
    # reply to the shared message list and to the chat history
    messages = messages + [{"role": 'assistant', 'content': response_msg}]
    history[-1] = (history[-1][0], response_msg)
    return history
with gr.Blocks() as demo:
    radio = gr.Radio(value='gpt-3.5-turbo', choices=['gpt-3.5-turbo', 'gpt-4'], label='models')
    chatbot = gr.Chatbot(value=[], elem_id="chatbot").style(height=650)
    with gr.Row():
        with gr.Column(scale=0.90):
            txt = gr.Textbox(
                show_label=False,
                placeholder="Enter text and press enter",
            ).style(container=False)
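The preview cuts off at the textbox. A minimal sketch of how the callbacks above could be hooked to the UI and launched; the .submit()/.then() chaining and the demo.launch() call are assumptions, not part of the original gist.

    # sketch (assumption): on Enter, add the user message, then fetch the model reply
    txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
        generate_response, [chatbot, radio], [chatbot]
    )

demo.launch()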
from fastapi import FastAPI
from pydantic import BaseModel
from joblib import load
import pandas as pd
import json
import uvicorn
app = FastAPI()
model = load('my-model2')
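The gist preview stops after loading the model. A rough sketch of the request schema and prediction route that would typically follow; the field name, the '/predict' path, and the port are assumptions, and the real schema presumably lists every HR feature rather than just the one shown in the companion Streamlit preview.

class Employee(BaseModel):
    # assumed request body; only the field visible in the Streamlit preview is listed
    satisfaction_level: float

@app.post('/predict')
def predict(data: Employee):
    # turn the request body into a one-row DataFrame and run the loaded model
    df = pd.DataFrame([data.dict()])
    prediction = model.predict(df)[0]
    return {'prediction': int(prediction)}

if __name__ == '__main__':
    uvicorn.run(app, host='0.0.0.0', port=8000)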
import streamlit as st
import requests
import json
from requests import ConnectionError
st.title('HR-analytics App') #title to be shown
st.image('office.jpg') #add an image
st.header('Enter the employee data:') #header to be shown in app
satisfaction_level = st.number_input('satisfaction level',min_value=0.00, max_value=1.00)
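Only the first input field is visible in the preview. A minimal sketch of posting the collected value to the FastAPI service and handling a dropped connection; the endpoint URL and payload keys are assumptions.

# sketch (assumption): send the form values to the prediction endpoint on click
if st.button('Predict'):
    payload = {'satisfaction_level': satisfaction_level}  # add the remaining inputs the same way
    try:
        res = requests.post('http://localhost:8000/predict', json=payload)
        st.success(f"Prediction: {res.json()}")
    except ConnectionError:
        st.error('Could not reach the prediction service. Is the FastAPI app running?')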
from fastapi import FastAPI
from joblib import load
import regex as re
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from spamClassify import my_classifier
#from sklearn.feature_extraction.text import TfidfVectorizer
#from xgboost import XGBRFClassifier
app = FastAPI()
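The spam-classifier preview ends right after creating the app. A hedged sketch of the preprocessing helper and route that the imports suggest; the cleaning steps, the '/classify' path, and my_classifier's interface (taking a cleaned string, returning a label) are assumptions.

def preprocess(text: str) -> str:
    # keep letters only, lowercase, and drop English stop words
    text = re.sub(r'[^a-zA-Z ]', ' ', text.lower())
    tokens = [w for w in text.split() if w not in ENGLISH_STOP_WORDS]
    return ' '.join(tokens)

@app.get('/classify')
def classify(message: str):
    cleaned = preprocess(message)
    label = my_classifier(cleaned)  # assumed to return 'spam' or 'ham'
    return {'message': message, 'label': label}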
import numpy as np
from numpy import log, dot, e, shape
import matplotlib.pyplot as plt

def standardize(X_tr):
    # scale each feature column to zero mean and unit variance, in place
    for i in range(shape(X_tr)[1]):
        X_tr[:, i] = (X_tr[:, i] - np.mean(X_tr[:, i])) / np.std(X_tr[:, i])

def initialize(self, X):
    # zero weight vector (including bias) and X with a prepended column of ones
    weights = np.zeros((shape(X)[1] + 1, 1))
    X = np.c_[np.ones((shape(X)[0], 1)), X]
    return weights, X
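Only standardize and initialize appear in the preview. A hedged sketch of the sigmoid and a plain gradient-descent fit that would typically complete this from-scratch logistic regression; the learning rate, iteration count, and function names are assumptions.

def sigmoid(z):
    # logistic function; e is imported from numpy above
    return 1 / (1 + e ** (-z))

def fit(X, y, alpha=0.001, iterations=400):
    # plain gradient descent on the cross-entropy loss, reusing initialize() above
    weights, X = initialize(None, X)  # self is unused in this module-level sketch
    y = y.reshape(len(y), 1)
    for _ in range(iterations):
        y_hat = sigmoid(dot(X, weights))
        weights = weights - alpha * dot(X.T, y_hat - y)
    return weights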
from sklearn.datasets import make_classification
X, y = make_classification(n_features=4)

# splitting into train and test sets
from sklearn.model_selection import train_test_split
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.15)
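A short usage sketch showing how the synthetic split could feed the routines above; the 0.5 decision threshold and accuracy check are assumptions about how the gist evaluates the model.

# sketch (assumption): standardize both splits, then train and score the model
standardize(X_tr)
standardize(X_te)
weights = fit(X_tr, y_tr)
_, X_te_b = initialize(None, X_te)  # add the bias column to the test features
preds = (sigmoid(dot(X_te_b, weights)) > 0.5).astype(int).flatten()
print('test accuracy:', (preds == y_te).mean())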