Skip to content

Instantly share code, notes, and snippets.

00:00.000 --> 00:04.960
It's no secret that OpenAI is not open, going by the plain meaning of the word.
00:04.960 --> 00:07.880
Most of their tech is not open source and not open to the public.
00:07.880 --> 00:11.000
In fact, to use their AI, the only thing that opens is your wallet.
00:11.000 --> 00:14.440
# Base image: pinned CPython 3.12.3 (full image, not -slim — TODO confirm slim is not sufficient).
FROM python:3.12.3
# All subsequent COPY/RUN paths resolve inside /app.
WORKDIR /app
# Copy the dependency manifest alone first so the pip-install layer is
# cached and only rebuilt when requirements.txt changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Application code goes in last; editing it does not invalidate the pip layer.
COPY app.py .
# Student records, one list per student:
#   [enrollment_no, name, branch, subject_codes, marks_a, marks_b, marks_c]
# The three mark lists align index-wise with subject_codes
# (e.g. marks_a[0] is the first subject's mark). What each of the three
# mark lists represents (internal/external/practical?) is not shown
# here — TODO confirm against the full script.
students = [
[12102130600101, 'Bhavya', 'CS', ['PWP','DAA','CAD'] , [80, 85, 90], [70, 75, 80], [90, 95, 100]],
[12102130600102, 'Dhruv', 'IT', ['PWP','DAA','CAD'] , [75, 80, 85], [65, 70, 75], [85, 90, 95]],
[12102130600103, 'Dhara', 'ECE', ['PWP','DAA','CAD'] , [85, 90, 95], [75, 80, 85], [95, 100, 100]],
[12102130600104, 'Kayur', 'ME', ['PWP','DAA','CAD'] , [70, 75, 80], [60, 65, 70], [80, 85, 90]],
]
# Accumulator filled by the loop below (loop body not visible in this
# chunk); presumably keyed by enrollment number — verify against the caller.
student_dict = {}
for student in students:
#include <iostream>
#include <cuda_runtime.h>
const int N = 256; // Matrix size (N x N)
// Kernel function for matrix multiplication
__global__ void matrixMultiply(float* A, float* B, float* C, int n) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
#include <iostream>
#include <vector>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
#include "kernel.h"
#include "kernel.cu"
#include "dev_array.h"
#include <math.h>
#include<stdio.h>
#include<cuda.h>
// Element-wise vector addition: z = x + y, one thread per element.
//
// Launch contract: this kernel has NO bounds guard, so the launch
// configuration must cover the arrays exactly —
// gridDim.x * blockDim.x must equal the array length, or out-of-range
// threads will read/write past the buffers.
__global__ void arradd(int *x,int *y, int *z)
{
    // Flat global thread index: block offset plus lane within the block.
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    z[gid] = x[gid] + y[gid];
}
@Bhavya031
Bhavya031 / matix.c
Created September 11, 2023 04:36
CUDA matrix multiplication
#include <stdio.h>
#include <cuda.h>
#define N 3 // Matrix size (3x3)
__global__ void matrixMultiply(int *a, int *b, int *c)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
@Bhavya031
Bhavya031 / matix.c
Created September 11, 2023 04:34
cuda matrix multiplication
#include <stdio.h>
// Matrix dimensions
#define N 4
#define M 4
#define P 4
// CUDA kernel for matrix multiplication
__global__ void matrixMul(int *a, int *b, int *c) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
/*
* Copyright (c) 2018 SUSE LLC
*
* This software is licensed to you under the GNU General Public License,
* version 2 (GPLv2). There is NO WARRANTY for this software, express or
* implied, including the implied warranties of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
* along with this software; if not, see
* http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*