######
Triton Inference Server provides a cloud and edge inferencing solution optimized for both CPUs and GPUs.
Triton supports HTTP/REST and gRPC protocols that allow remote clients to request inferencing for any
model being managed by the server.
######
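As a rough sketch of what such a remote request looks like, the snippet below uses the tritonclient Python package over HTTP. The model name "my_model" and the tensor names "input__0"/"output__0" are placeholders; the real names, shapes, and dtypes depend on the model configuration loaded by the server.

import numpy as np
import tritonclient.http as httpclient

# connect to a Triton server listening on the default HTTP port
client = httpclient.InferenceServerClient(url="localhost:8000")

# describe the input tensor (name/shape/dtype must match the model config)
inp = httpclient.InferInput("input__0", [1, 3], "FP32")
inp.set_data_from_numpy(np.array([[0.1, 0.3, 0.5]], dtype=np.float32))

# send the request and read the output tensor back as a numpy array
result = client.infer(model_name="my_model", inputs=[inp])
print(result.as_numpy("output__0"))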
################### INSTALL DOCKER ########################
# SET UP THE REPOSITORY
#!/bin/bash
## This gist contains instructions for installing CUDA v10.2 and cuDNN 8.1.1 on Ubuntu 18.04
### steps ####
# verify the system has a CUDA-capable GPU
# download and install the NVIDIA CUDA toolkit and cuDNN
# set up environment variables
# verify the installation
###
# Requirements:
# OS: Ubuntu 18.04 LTS
# Python >= 3.8
# CUDA: 10.2
# cuDNN: 8.1.1
# Download TensorRT 7.2.3 for Linux and CUDA 10.2 from https://developer.nvidia.com/nvidia-tensorrt-7x-download for Ubuntu 18.04
# or from this link https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.2.3/local_repos/nv-tensorrt-repo-ubuntu1804-cuda10.2-trt7.2.3.4-ga-20210226_1-1_amd64.deb
# Open a terminal
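As a quick sanity check for the "verify the installation" step, the CUDA runtime and TensorRT versions can be queried from Python. This assumes libcudart is on the loader path and the tensorrt wheel shipped with the TensorRT package has been installed; it is a sketch, not part of the official install procedure.

import ctypes
import tensorrt as trt

# query the CUDA runtime version via libcudart (10020 corresponds to CUDA 10.2)
libcudart = ctypes.CDLL("libcudart.so")
version = ctypes.c_int()
libcudart.cudaRuntimeGetVersion(ctypes.byref(version))
print("CUDA runtime:", version.value)

# the TensorRT Python bindings report their own version (expected 7.2.3.x here)
print("TensorRT:", trt.__version__)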
def Network(iteration):
    X, Y, W1, W2, b1, b2 = initialization()
    for i in range(iteration):
        Z1, A1, Z2, A2, error = forward_prop(X, Y, W1, W2, b1, b2)
        da1, da2, dz1, dz2, dw1, dw2, db1, db2 = backward_prop(X, Y, W1, W2, b1, b2, Z1, A1, Z2, A2)
        W1, W2, b1, b2 = update_parameters(W1, W2, b1, b2, dw1, dw2, dz1, dz2, db1, db2)
    return W1, W2, b1, b2
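A hypothetical run of the training loop above, assuming the helper functions defined later in this gist (initialization, forward_prop, backward_prop, update_parameters) are in scope first:

W1, W2, b1, b2 = Network(1000)  # train for 1000 iterations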
def update_parameters(w1, w2, bias1, bias2, dw1, dw2, dz1, dz2, db1, db2, learning_rate=0.5):
    # gradient descent step; db1/db2 equal dz1/dz2 for this network (see backward_prop)
    w1 = w1 - (learning_rate * dw1)
    bias1 = bias1 - (learning_rate * db1)
    w2 = w2 - (learning_rate * dw2)
    bias2 = bias2 - (learning_rate * db2)
    return w1, w2, bias1, bias2
def backward_prop(x, y, w1, w2, bias1, bias2, z1, a1, z2, a2):
    # output layer gradients (sigmoid derivative and cross-entropy error)
    da2 = a2 * (1 - a2)
    dz2 = (a2 - y)
    dw2 = np.multiply(a1.T, dz2)
    db2 = dz2
    # hidden layer gradients
    da1 = a1 * (1 - a1)
    dz1 = np.multiply(np.dot(w2.T, dz2), da1)
    dw1 = np.multiply(x.T, dz1)
    db1 = dz1
    return da1, da2, dz1, dz2, dw1, dw2, db1, db2
def forward_prop(x, y, w1, w2, bias1, bias2):
    Z1 = np.dot(w1, x) + bias1
    A1 = sigmoid(Z1)
    Z2 = np.dot(w2, A1) + bias2
    A2 = sigmoid(Z2)
    error = compute_error(A2, y)
    return Z1, A1, Z2, A2, error
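forward_prop relies on a sigmoid helper that is not shown in this excerpt; a minimal definition consistent with how it is used here (and with the a * (1 - a) derivative in backward_prop) would be:

def sigmoid(z):
    # element-wise logistic function
    return 1.0 / (1.0 + np.exp(-z))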
def compute_error(a3, Y):
    # binary cross-entropy cost averaged over the examples
    m = Y.shape[0]
    logprobs = np.multiply(-np.log(a3), Y) + np.multiply(-np.log(1 - a3), 1 - Y)
    cost = 1. / m * np.sum(logprobs)
    return cost
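For intuition, a single prediction of 0.9 against a target of 1 gives a cost of -log(0.9):

print(compute_error(np.array([[0.9]]), np.array([[1.0]])))  # ~0.105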
import numpy as np

def initialization():
    # input vector
    X = np.array([.10, .30, .50]).reshape(3, 1)
    # the original gist is truncated here; the target and the remaining
    # weight/bias values below are illustrative placeholders only
    Y = np.array([[1.0]])
    W1 = np.array([
        [0.15, 0.20, 0.25],
        [0.30, 0.35, 0.40]])
    W2 = np.array([[0.45, 0.50]])
    b1, b2 = np.zeros((2, 1)), np.zeros((1, 1))
    return X, Y, W1, W2, b1, b2
def softmax(x):
    """Compute softmax values for each set of scores in x."""
    # subtract the max for numerical stability before exponentiating
    e_x = np.exp(x - np.max(x))
    s = e_x / e_x.sum()
    return s
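A quick check that the outputs form a probability distribution:

probs = softmax(np.array([1.0, 2.0, 3.0]))
print(probs, probs.sum())  # the values are positive and sum to 1.0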