
<!DOCTYPE html>
<html lang="en" dir="ltr">
<head>
<meta charset="utf-8">
<title>Result</title>
</head>
<body>
<h1>Prediction</h1>
{{ result }}
</body>
</html>
<!DOCTYPE html>
<html lang="en" dir="ltr">
<head>
<meta charset="utf-8">
<title>Home page</title>
</head>
<body>
<h1>Titanic survival prediction</h1>
<form action="{% url 'result' %}">
{% csrf_token %}
<!-- one input per model feature (names assumed to match getPredictions below);
     pclass is shown, and sex, age, sibsp, parch, fare and embarked follow the same pattern -->
<input type="number" name="pclass" placeholder="Passenger class (1-3)">
<input type="submit" value="Predict">
</form>
</body>
</html>
import pickle
from django.shortcuts import render

# our home page view
def home(request):
    return render(request, 'index.html')

# custom method for generating predictions
def getPredictions(pclass, sex, age, sibsp, parch, fare, C, Q, S):
    # load the fitted model and scaler (the file names are assumptions)
    model = pickle.load(open('titanic_survival_ml_model.sav', 'rb'))
    scaler = pickle.load(open('scaler.sav', 'rb'))
    prediction = model.predict(scaler.transform([[pclass, sex, age, sibsp, parch, fare, C, Q, S]]))
    return 'survived' if prediction[0] == 1 else 'not survived'
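index.html submits the form to the result route, so views.py also needs a matching result view. A minimal sketch, assuming the form sends its fields via GET under the names used above and that the embarkation port arrives as a single "embarked" field:

def result(request):
    # read the form fields (names assumed to match the index.html inputs)
    pclass = int(request.GET['pclass'])
    sex = int(request.GET['sex'])
    age = int(request.GET['age'])
    sibsp = int(request.GET['sibsp'])
    parch = int(request.GET['parch'])
    fare = float(request.GET['fare'])
    embarked = request.GET['embarked']
    # one-hot encode the embarkation port into the C, Q, S flags
    C, Q, S = int(embarked == 'C'), int(embarked == 'Q'), int(embarked == 'S')
    result = getPredictions(pclass, sex, age, sibsp, parch, fare, C, Q, S)
    return render(request, 'result.html', {'result': result})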
# present by default
from django.contrib import admin
from django.urls import path
# add this to import our views file
from Titanic_Survial_Prediction_Web import views

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.home, name='home'),
    path('result/', views.result, name='result'),
]
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score

# load dataset (the file name is an assumption)
titanic_data = pd.read_csv('titanic.csv')
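The snippet stops after loading. A sketch of the rest of the training pipeline, assuming the standard Kaggle Titanic column names, ending with the pickled model and scaler that getPredictions loads:

import pickle

# keep the features used by the web form; one-hot encode Embarked
features = titanic_data[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']].copy()
features['Sex'] = features['Sex'].map({'male': 0, 'female': 1})
features['Age'] = features['Age'].fillna(features['Age'].median())
features = pd.get_dummies(features, columns=['Embarked'])  # adds Embarked_C/Q/S
labels = titanic_data['Survived']

x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.2, random_state=42)

# scale features to [0, 1] and fit a logistic regression
scaler = MinMaxScaler()
x_train_scaled = scaler.fit_transform(x_train)
x_test_scaled = scaler.transform(x_test)
model = LogisticRegression()
model.fit(x_train_scaled, y_train)

# evaluate on the held-out split
predictions = model.predict(x_test_scaled)
print(accuracy_score(y_test, predictions))
print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))

# persist the artifacts the Django view loads
pickle.dump(model, open('titanic_survival_ml_model.sav', 'wb'))
pickle.dump(scaler, open('scaler.sav', 'wb'))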
from spacy.matcher import PhraseMatcher
from scipy import spatial

# method for searching a keyword in the text
def search_for_keyword(keyword, doc_obj, nlp):
    phrase_matcher = PhraseMatcher(nlp.vocab)
    phrase_list = [nlp(keyword)]
    # spaCy v2 signature; in v3 use phrase_matcher.add("Text Extractor", phrase_list)
    phrase_matcher.add("Text Extractor", None, *phrase_list)
    matched_items = phrase_matcher(doc_obj)
    # return the sentence containing each match
    matched_text = []
    for match_id, start, end in matched_items:
        matched_text.append(doc_obj[start:end].sent.text)
    return matched_text
# convert a keyword to a vector
def createKeywordsVectors(keyword, nlp):
    doc = nlp(keyword)  # convert to a document object
    return doc.vector
# method to find cosine similarity
def cosineSimilarity(vect1, vect2):
    # scipy gives cosine distance, so subtract from 1 to get similarity
    return 1 - spatial.distance.cosine(vect1, vect2)
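Together these two helpers can rank a document's sentences by semantic similarity to a keyword. A minimal sketch (the helper name and the 0.5 threshold are assumptions, not part of the original):

def getSimilarSentences(keyword, doc_obj, nlp, threshold=0.5):
    keyword_vector = createKeywordsVectors(keyword, nlp)
    similar_sentences = []
    for sent in doc_obj.sents:
        score = cosineSimilarity(keyword_vector, sent.vector)
        if score >= threshold:
            similar_sentences.append((sent.text, score))
    # highest-scoring sentences first
    return sorted(similar_sentences, key=lambda pair: pair[1], reverse=True)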
import spacy
import PyPDF2

# spacy english model (large)
nlp = spacy.load('en_core_web_lg')

# method for reading a pdf file
def readPdfFile(filename, folder_name):
    # storing path of the PDF-Documents folder (folder layout assumed)
    file_path = folder_name + '/' + filename
    # PyPDF2 pre-3.0 API; concatenate the text of every page
    reader = PyPDF2.PdfFileReader(open(file_path, 'rb'))
    return ''.join(reader.getPage(i).extractText() for i in range(reader.numPages))
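A short usage example under the assumptions above; the file name, folder name, and search term are placeholders:

pdf_text = readPdfFile('sample.pdf', 'PDF-Documents')
doc_obj = nlp(pdf_text)
for sentence in search_for_keyword('machine learning', doc_obj, nlp):
    print(sentence)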
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
%matplotlib inline
# method for generating captions
def generate_captions(model, image, word_index, max_caption_length, index_word):
    # word_index and index_word are the tokenizer's word-to-id and id-to-word maps
    # the caption starts from the <start> token
    input_text = '<start>'
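    # --- continuation sketch, not in the original: greedy decoding ---
    # assumes numpy as np and keras' pad_sequences are imported, and a model
    # that takes [image_features, padded_sequence] and predicts the next word
    for _ in range(max_caption_length):
        sequence = [word_index[w] for w in input_text.split() if w in word_index]
        sequence = pad_sequences([sequence], maxlen=max_caption_length)
        next_id = np.argmax(model.predict([image, sequence], verbose=0))
        next_word = index_word.get(int(next_id))
        if next_word is None or next_word == '<end>':
            break
        input_text += ' ' + next_word
    # drop the <start> token before returning the caption
    return input_text.replace('<start>', '').strip()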
# get training data (batch size 32)
train_data = create_training_data(train_image_captions, train_image_features, tokenizer, max_caption_len, vocab_length, 32)
# initialize model
model = create_model(max_caption_len, vocab_length)
steps_per_epoch = len(train_image_captions) // 32
# compile model
model.compile(optimizer='adam', loss='categorical_crossentropy')
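The snippet compiles the model but never trains it. A minimal training call, assuming train_data is a batch generator and using an epoch count of 20 (an assumption):

# train model; train_data yields ([image_features, input_sequence], next_word) batches
model.fit(train_data, steps_per_epoch=steps_per_epoch, epochs=20)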