Skip to content

Instantly share code, notes, and snippets.

View bharatc9530's full-sized avatar
:octocat:
python is ❤️

bharat choudhary bharatc9530

:octocat:
python is ❤️
View GitHub Profile
@bharatc9530
bharatc9530 / AboutMe.gif
Last active August 21, 2021 18:28
About Me 😎
AboutMe.gif
@bharatc9530
bharatc9530 / iris.py
Last active June 29, 2020 11:13
model
# Load the Iris dataset and separate it into features and target.
import pandas as pd
import numpy as np
import pickle

df = pd.read_csv('iris.data')
# First four columns are the flower measurements; the last column is the
# species label (kept 2-D here, matching the original slice `4:`).
X = df.iloc[:, 0:4].to_numpy()
y = df.iloc[:, 4:].to_numpy()
# Flask app setup: create the application and load the trained Iris
# classifier once at import time so every request reuses the same model.
from sklearn.preprocessing import LabelEncoder
from flask import Flask, render_template, request
import pickle
import numpy as np

app = Flask(__name__)

# Use a context manager so the pickle file handle is closed; the original
# `pickle.load(open('iri.pkl', 'rb'))` leaked the open handle.
# NOTE(review): pickle.load runs arbitrary code — only load trusted model files.
with open('iri.pkl', 'rb') as _model_file:
    model = pickle.load(_model_file)
@app.route('/')
@bharatc9530
bharatc9530 / home.html
Created June 29, 2020 12:10
templates
<!-- Input form for the Iris predictor. POSTs the four measurement fields
     back to the Flask `home` view. (Fragment is truncated in the gist.)
     Fix: HTML attributes are whitespace-separated — the commas the original
     placed between attributes were invalid markup; bgcolor is now quoted. -->
<html>
<body bgcolor="#d4a3ae">
<center>
<h1> IRIS FLOWER DETECTION </h1><br>
<form method="POST" action="{{url_for('home')}}">
<b> First value : <input type="text" name='a' placeholder="enter 1"> <br><br>
Second value : <input type="text" name='b' placeholder="enter 2"> <br><br>
<!-- Jinja2 result page: renders the predicted species based on the `data`
     value supplied by the Flask view; 0 maps to Iris-setosa.
     (Fragment is truncated in the gist — closing tags/branches not shown.) -->
<html>
<body bgcolor=#9d3bc4>
<center>
<h1> PREDICTION : </h1>
{%if data == 0%}
<h1>Iris-setosa</h1>
@bharatc9530
bharatc9530 / hemorrhage.png
Last active August 3, 2020 08:37
Intracranial hemorrhage (ICH)
hemorrhage.png
# Sentiment-classification preprocessing: load the review corpus, encode the
# labels as integer category codes, split train/test, and integer-encode the
# documents for a Keras Embedding layer.
import pandas as pd
import numpy as np
from numpy import array
from keras.preprocessing.text import one_hot, Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.embeddings import Embedding

# Shuffle the rows so the train/test split below is randomized.
df = pd.read_csv('Review.csv').sample(frac=1).reset_index(drop=True)

# Fix: Series.astype() has no `inplace` parameter (passing it raises a
# TypeError on modern pandas); assign the category codes back explicitly.
df['sentiment'] = df['sentiment'].astype('category').cat.codes
df.head()

docs = df['review']
labels = array(df['sentiment'])

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(docs, labels, test_size=0.40)

# Fit the tokenizer on the full corpus to size the vocabulary.
t = Tokenizer()
t.fit_on_texts(docs)
vocab_size = len(t.word_index) + 1
# integer encode the documents
print(vocab_size)
# NOTE(review): one_hot() hashes each word and ignores the fitted Tokenizer's
# word_index, so hash collisions can merge distinct words. If consistent
# encoding is wanted, t.texts_to_sequences(...) would use the fitted
# vocabulary — confirm before changing, as it alters the encoded values.
# Raw strings avoid the invalid '\]' escape in the filter list.
X_train = [one_hot(d, vocab_size, filters=r'!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True, split=' ') for d in X_train]
X_test = [one_hot(d, vocab_size, filters=r'!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True, split=' ') for d in X_test]