Skip to content

Instantly share code, notes, and snippets.

View lordlinus's full-sized avatar

Sunil Sattiraju lordlinus

View GitHub Profile
import re
from enum import Enum
import streamlit as st
import os
import json
import asyncio
import concurrent.futures
from typing import Optional, List, Dict, Any
from datetime import datetime, date
@lordlinus
lordlinus / app.py
Created April 9, 2025 14:46
Voice model to output JSON
import streamlit as st
import base64
import os
import json
from openai import AzureOpenAI
from dotenv import load_dotenv
from datetime import date, datetime
from typing import Optional
from pydantic import BaseModel, Field
import tempfile
@lordlinus
lordlinus / databricks_cluster_deployment.sh
Created April 5, 2021 05:02
Bash script to deploy Databricks Cluster and other dependencies
#! /usr/bin/bash
# Environment setup for deploying a Databricks cluster via ARM:
# exports the service-principal credentials consumed by later az/terraform steps.
set -o errexit
set -o nounset
set -o pipefail

# BUG FIX: shell assignments must not have a space after '='.
# The original 'export ARM_SUBSCRIPTION_ID= XXXX…' exported an EMPTY variable
# and then tried to execute the placeholder as a command, aborting under errexit.
export ARM_SUBSCRIPTION_ID=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
export ARM_TENANT_ID=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
export ARM_CLIENT_ID=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
export ARM_CLIENT_SECRET=XXXXXXXXXXXXX
export MANAGEMENT_RESOURCE_ENDPOINT="https://management.core.windows.net/" # This is Fixed value (DO NOT CHANGE)
@lordlinus
lordlinus / databricks_cluster_deployment.yml
Created April 5, 2021 04:59
Azure devops yaml pipeline to deploy Azure Databricks cluster ( end to end )
# Azure DevOps pipeline to build Databricks cluster
# This pipeline depends on "variable-group-01" to provide the below variables
# ARM_CLIENT_ID
# ARM_SUBSCRIPTION_ID
# ARM_TENANT_ID
# mySecret NOTE: This variable is mapped to ARM_CLIENT_SECRET below
trigger:
- starter
// Databricks notebook source
// Setup cell for a GraphFrames notebook: pulls in GraphFrame plus the Spark SQL
// function/type helpers used by later cells (not visible in this fragment).
import org.graphframes.GraphFrame
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.functions.struct
// GraphFrames' iterative algorithms require a checkpoint directory; `sc` is the
// SparkContext implicitly provided by the Databricks notebook runtime.
sc.setCheckpointDir("/dbfs/cp")
// COMMAND ----------
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
// Fix SQL queries being returned as literals
// Registers a custom JDBC dialect for Databricks JDBC URLs. The default dialect
// wraps identifiers in quotes, which (per the comment above) caused queries to
// come back as literals; this dialect overrides quoteIdentifier to pass column
// names through unmodified.
import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcDialects}
JdbcDialects.registerDialect(new JdbcDialect() {
// Claim any connection string of the form jdbc:databricks:... (case-insensitive).
override def canHandle(url: String): Boolean = url.toLowerCase.startsWith("jdbc:databricks:")
// Return the column name as-is instead of the quoted default.
override
def quoteIdentifier(column: String): String = column
})
import time
from queue import Queue
from threading import Thread
table_list = [
"table1",
"table2",
"table3",
"table4",
"table5",
import pandas as pd
import os
folder_path = "/os/folder/path"
summaryDF = pd.DataFrame()
data = pd.concat(
[
pd.read_excel(os.path.join(folder_path, f), sheet_name="Data")
for f in os.listdir(folder_path)
],
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.