Skip to content

Instantly share code, notes, and snippets.

View ruslanmv's full-sized avatar
😃

Ruslan Magana Vsevolodovna ruslanmv

😃
View GitHub Profile
@ruslanmv
ruslanmv / download.py
Last active April 5, 2024 14:15
Download Files from Jupyter Notebook
from IPython.display import FileLink
!tar -czvf lm-evaluation-harness.tar.gz lm-evaluation-harness
def create_download_link(filename):
    """Wrap *filename* in a FileLink so the notebook renders a clickable download link."""
    link = FileLink(filename)
    return link
# Build a download link for the archive created above and render it inline
# in the notebook output (display() is provided by the IPython runtime).
download_link = create_download_link("lm-evaluation-harness.tar.gz")
display(download_link)
@ruslanmv
ruslanmv / app.py
Created March 26, 2024 07:54
Medical Chatbot with Langchain with a Custom LLM
from datasets import load_dataset
from IPython.display import clear_output
import pandas as pd
import re
from dotenv import load_dotenv
import os
from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
from ibm_watson_machine_learning.foundation_models.utils.enums import DecodingMethods
from langchain.llms import WatsonxLLM
@ruslanmv
ruslanmv / app.py
Created February 19, 2024 17:24
Gemini App LLM
import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import os
from langchain_google_genai import GoogleGenerativeAIEmbeddings
import google.generativeai as genai
from langchain_community.vectorstores import FAISS
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts import PromptTemplate
@ruslanmv
ruslanmv / app.py
Created February 17, 2024 19:10
Run Python commands with a custom Python environment in the background from a Jupyter Notebook
# Build a tiny wrapper script that runs any command inside the "automatic"
# conda environment (usage: ./run <command> [args...]).
# Fixes vs. the original:
#  - plain string literal instead of an f-string with no placeholders
#    (ruff F541) — braces in the shell text can no longer be misparsed;
#  - the shebang is now the FIRST line of the file (the original had a
#    leading newline, which makes "#!" ineffective when exec'd directly).
script_content = '''#!/bin/bash
source activate automatic
"$@"
'''
# Write the script to a file. NOTE: it is not marked executable here —
# run it via `bash run <cmd>` or chmod +x it afterwards.
script_filename = 'run'
with open(script_filename, 'w') as script_file:
    script_file.write(script_content)
@ruslanmv
ruslanmv / app.py
Created February 17, 2024 18:34
Create python environment with venv
import subprocess
import sys
import venv
import os

# NOTE(review): subprocess, sys and os are unused in this visible snippet —
# presumably used later in the full gist; confirm before removing.
# Create a virtual environment in ./venv, bootstrapping pip into it
# (with_pip=True runs ensurepip, so this call downloads/installs pip).
venv.create("venv", with_pip=True)
@ruslanmv
ruslanmv / background_env_jupyter.py
Created February 17, 2024 18:26
If you need to run multiple commands within the same environment, you might want to combine them into a shell script and execute the script:
# Build a shell script that activates the target conda environment and then
# runs the payload script.
# NOTE(review): `conda_environment_name` must already be defined in the
# enclosing scope — it is not shown in this snippet; confirm.
# NOTE(review): the leading newline puts "#!/bin/bash" on line 2, so the
# shebang is ineffective if the script is exec'd directly; also
# `conda activate` needs an initialized shell (`conda init`/`source`) —
# verify how the script is invoked.
script_content = f'''
#!/bin/bash
conda activate {conda_environment_name}
python your_script.py
'''
# Write the script to a file so it can be launched in the background.
script_filename = 'activate_and_run.sh'
with open(script_filename, 'w') as script_file:
@ruslanmv
ruslanmv / show_images_from_df.py
Created February 11, 2024 17:45
Print Pictures in Table from Pandas DF
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import HTML
from PIL import Image
from io import BytesIO
import base64
# Load the image-collection metadata from <current_dir>/images/collection.csv.
# NOTE(review): `os` and `current_dir` are not defined in this visible
# snippet — presumably set up earlier in the full script; confirm.
df_path = os.path.join(current_dir, "images","collection.csv")
df = pd.read_csv(df_path)
@ruslanmv
ruslanmv / api.py
Created February 4, 2024 17:11
Function as an API
To expose the `generate_story_and_speech` function as an API, you can use Flask to create a simple API. Here's an example of how you can do it:
1. Install Flask: `pip install Flask`
2. Create a new file, say `api.py`, and add the following code:
```python
from flask import Flask, request, jsonify
import gradio as gr
from your_module import generate_story_and_speech
@ruslanmv
ruslanmv / requirements.py
Created February 4, 2024 13:53
Save requirements.txt from Google Colab or PC with GPU or CPU
import os
import subprocess
import sys
import shutil
def is_google_colab():
    """Return True when running inside Google Colab (detected via its injected module)."""
    loaded_modules = sys.modules
    return 'google.colab' in loaded_modules
def is_nvidia_smi_available():
    """Return True when the ``nvidia-smi`` binary can be found on PATH."""
    located = shutil.which("nvidia-smi")
    return located is not None
@ruslanmv
ruslanmv / app.py
Created February 3, 2024 23:45
Text to Speech
from transformers import VitsModel, AutoTokenizer
import torch
# Cache model weights under ./cache so repeated runs avoid re-downloading.
cache_dir = "./cache" # weights and tokenizer files are cached here
# Facebook MMS English text-to-speech (VITS) checkpoint and its tokenizer.
model = VitsModel.from_pretrained("facebook/mms-tts-eng", cache_dir=cache_dir)
tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-eng", cache_dir=cache_dir)
text = "some example text in the English language"
# Tokenize into PyTorch tensors for the model's forward pass.
inputs = tokenizer(text, return_tensors="pt")
# Inference only — disable autograd to save memory and compute.
with torch.no_grad():
    output = model(**inputs).waveform