# Train on the training split, validating on the held-out validation split
history = model.fit(x_train, y_train, validation_data=(x_val, y_val), batch_size=100, epochs=10, verbose=1)

# Evaluate the trained model on the test split
test_loss, test_acc = model.evaluate(x_test, y_test)
print('Test accuracy:', test_acc)
print('Test loss:', test_loss)
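
# A possible follow-up (not in the original gist): plot the training curves stored in `history`
import matplotlib.pyplot as plt
plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()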
# CNN for the 80x80 RGB ship images
import tensorflow as tf
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu', input_shape=(80, 80, 3)),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(45, activation='relu'),
    tf.keras.layers.Dense(2, activation='softmax')  # output layer for the two one-hot classes (ship / no ship)
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
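
# Optional sanity check (not in the original gist): print layer output shapes and parameter counts
model.summary()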
# Train / validation / test split (20% held out for testing, then 20% of the remaining data for validation)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.20)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.20)
# Scale the pixel values from the 0-255 range down to 0-1:
images = np.array(data['data']).astype('uint8')
x = images / 255
# One-hot encode the label / target feature:
import tensorflow as tf
y = tf.keras.utils.to_categorical(data['labels'], num_classes=2)
# Reshape the flat pixel vectors into 80x80 RGB images
# (assuming the usual shipsnet.json layout: all red values, then all green, then all blue per image)
channels = 3
x = x.reshape(-1, channels, 80, 80).transpose(0, 2, 3, 1)
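
# Optional visual check (not in the original gist): display one reshaped image with its label
import matplotlib.pyplot as plt
plt.imshow(x[0])
plt.title('label: {}'.format(data['labels'][0]))
plt.show()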
# Load the shipsnet dataset
import pandas as pd
import numpy as np
import json
with open('./shipsnet.json') as f:
    data = json.load(f)
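
# Optional check (not in the original gist): confirm the JSON keys and the number of samples
print(data.keys())
print('images:', len(data['data']), 'labels:', len(data['labels']))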
# Accuracy on the held-out test split
from sklearn.metrics import accuracy_score
y_pred = model.predict(x_test)
print(accuracy_score(y_test, y_pred))
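
# A fuller evaluation (not in the original gist): per-class precision/recall and the confusion matrix
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))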
# Encode the target labels stored in data['v1'] as integers
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(data['v1'])
# Data split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.20, random_state = 1)
# Building the Naive Bayes model:
from sklearn.naive_bayes import MultinomialNB
model = MultinomialNB().fit(x_train, y_train)
# Vectorize the message text with TF-IDF
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_vectorizer = TfidfVectorizer()
x = tfidf_vectorizer.fit_transform(data['text'])
# Show the TF-IDF matrix as a pandas DataFrame
feature_names = tfidf_vectorizer.get_feature_names_out()  # get_feature_names() was removed in newer scikit-learn
x = pd.DataFrame(x.toarray(), columns=feature_names)
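
# A possible end-to-end check (not in the original gist): score a new message with the fitted
# vectorizer and Naive Bayes model, then map the predicted integer back to its original label
sample = tfidf_vectorizer.transform(['Congratulations! You have won a free prize, reply now'])
print(le.inverse_transform(model.predict(sample)))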
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords', quiet=True)  # make sure the stopword list is available

stop_words = set(stopwords.words('english'))
stemmer = nltk.stem.PorterStemmer()

def preprocess_text(msg):
    # convert the message to lowercase
    msg = msg.lower()
    # remove stopwords
    msg = [word for word in msg.split() if word not in stop_words]
    # stem each remaining word
    msg = " ".join([stemmer.stem(word) for word in msg])
    return msg
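
# How preprocess_text would fit into the pipeline (not in the original gist): clean the raw
# messages before fitting the TF-IDF vectorizer
cleaned = [preprocess_text(m) for m in data['text']]
x = tfidf_vectorizer.fit_transform(cleaned)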