Skip to content

Instantly share code, notes, and snippets.

View ruslanmv's full-sized avatar
😃

Ruslan Magana Vsevolodovna ruslanmv

😃
View GitHub Profile
@ruslanmv
ruslanmv / Exploring a Python Package or Framework.md
Created May 4, 2025 21:52
Tutorial: Exploring a Python Package or Framework

Tutorial: Exploring a Python Package or Framework

When working with a new Python framework or library (let's call it a "package"), you often need to understand its structure: what modules it contains, what classes and functions are available, and how they are organized. This tutorial shows you several ways to do this using Python's built-in tools.

We'll use a hypothetical framework named beeai_framework as our main example, based on your input.

Prerequisites

  1. Python Installed: You need a working Python interpreter.
  2. Package Installed: The package you want to explore must be installed in your Python environment (e.g., using pip install package_name).
@ruslanmv
ruslanmv / Distillation.py
Created January 31, 2025 00:36
Distillation Demo
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer
from datasets import load_dataset
# Load teacher (DeepSeek-R1) and student (Granite-8B)
teacher_name = "deepseek-ai/deepseek-llm-7b-base" # Replace with actual DeepSeek-R1 HF path
student_name = "ibm-granite/granite-3.1-8b-instruct"
# Load models and tokenizers
teacher = AutoModelForCausalLM.from_pretrained(teacher_name, torch_dtype=torch.bfloat16)
@ruslanmv
ruslanmv / app.py
Created January 31, 2025 00:33
DeepSeek Chatbot
import gradio as gr
import os
import spaces
from transformers import GemmaTokenizer, AutoModelForCausalLM
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread
# Set an environment variable
HF_TOKEN = os.environ.get("HF_TOKEN", None)
@ruslanmv
ruslanmv / qa.py
Created May 30, 2024 13:21
watsonx question answering
# Import necessary libraries
import os
from ibm_watson_machine_learning.foundation_models import Model
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes, DecodingMethods
def get_model(model_type, api_key, project_id, max_tokens, min_tokens, decoding, temperature, top_k, top_p):
generate_params = {
GenParams.MAX_NEW_TOKENS: max_tokens,
GenParams.MIN_NEW_TOKENS: min_tokens,
@ruslanmv
ruslanmv / ollama-export.sh
Created May 24, 2024 10:45
Ollama Model Export Script
#!/bin/bash
# Ollama Model Export Script
# Usage: bash ollama-export.sh vicuna:7b
set -e  # abort on the first failing command so a partial export is never left behind
echo "Ollama Model Export Script"
echo ""
@ruslanmv
ruslanmv / notebook_to_markdown.py
Last active May 13, 2024 08:17
Saving the Notebook to markdown
pip install "notebook<7"
### Saving the Notebook to markdown
import IPython
#from IPython.lib import kernel
from ipykernel.kernelbase import Kernel
%%javascript
var kernel = IPython.notebook.kernel;
var thename = window.document.getElementById("notebook_name").innerHTML;
var command = "theNotebook = " + "'"+thename+"'";
@ruslanmv
ruslanmv / parallel.py
Created May 9, 2024 20:08
Parallel execution in a Jupyter notebook with PyTorch
import os
import torch
import torch.multiprocessing as mp
# Distributed training setup
def init_distributed(rank, world_size):
    """Initialize the torch.distributed process group for this worker.

    Intended to be called once per spawned process (one per GPU).

    Args:
        rank: Integer rank of this process within the group (0-based).
        world_size: Total number of processes participating in training.
    """
    # Rendezvous endpoint shared by all workers; hard-coded to localhost,
    # so this assumes single-node multi-GPU training — TODO confirm.
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "12345"
    # Only the lead process logs, to avoid duplicated output from every rank.
    if rank == 0:
        print("Initializing distributed process group...")
    # NOTE(review): the 'nccl' backend requires CUDA GPUs; use 'gloo' for CPU-only runs.
    torch.distributed.init_process_group(backend='nccl', world_size=world_size, rank=rank)
@ruslanmv
ruslanmv / finetune.py
Last active May 9, 2024 16:39
Finetuning Llama 3 on Multiple GPUs
from datasets import load_dataset
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, AutoTokenizer
from peft import LoraConfig
from trl import SFTTrainer
from transformers import TrainingArguments
from accelerate import PartialState
# Load the dataset
dataset_name = "ruslanmv/ai-medical-dataset"
@ruslanmv
ruslanmv / download.py
Last active April 5, 2024 14:15
Download Files from Jupyter Notebook
from IPython.display import FileLink
!tar -czvf lm-evaluation-harness.tar.gz lm-evaluation-harness
def create_download_link(filename):
    """Return an IPython FileLink for *filename* so the notebook renders it as a clickable download link."""
    link = FileLink(filename)
    return link
download_link = create_download_link("lm-evaluation-harness.tar.gz")
display(download_link)
@ruslanmv
ruslanmv / app.py
Created March 26, 2024 07:54
Medical Chatbot with Langchain with a Custom LLM
from datasets import load_dataset
from IPython.display import clear_output
import pandas as pd
import re
from dotenv import load_dotenv
import os
from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
from ibm_watson_machine_learning.foundation_models.utils.enums import DecodingMethods
from langchain.llms import WatsonxLLM