To push container images to GHCR, you need a personal access token (PAT) — see below for how to create one.
- Get PAT (personal access token)
Personal Settings > Developer settings > Personal access tokens
#------------------------------------------------------------------#
#- Clear-GlobalWindowsCache                                        #
#------------------------------------------------------------------#
Function Clear-GlobalWindowsCache {
    # Purge system-wide temp/cache locations via the external
    # Remove-CacheFiles helper (defined elsewhere in this script).
    'C:\Windows\Temp', "C:\`$Recycle.Bin", "C:\Windows\Prefetch" |
        ForEach-Object { Remove-CacheFiles $_ }

    # Clear Internet Explorer browsing data via InetCpl.cpl.
    # NOTE(review): 255 and 4351 are ClearMyTracksByProcess flag values
    # (delete-all variants) — confirm against Microsoft's InetCpl docs.
    C:\Windows\System32\rundll32.exe InetCpl.cpl, ClearMyTracksByProcess 255
    C:\Windows\System32\rundll32.exe InetCpl.cpl, ClearMyTracksByProcess 4351
}
To push container images to GHCR, you need a personal access token (PAT) — see below for how to create one.
Personal Settings > Developer settings > Personal access tokens
Enter this in the search box along with your search terms:
Get all gists from the user santisbon.
user:santisbon
Find all gists with a .yml extension.
extension:yml
Find all gists with HTML files.
language:html
from langchain.chat_models import ChatOpenAI | |
from langchain.prompts import ChatPromptTemplate | |
from langchain.schema.output_parser import StrOutputParser | |
import requests | |
from bs4 import BeautifulSoup | |
from langchain.schema.runnable import RunnablePassthrough, RunnableLambda | |
from langchain.utilities import DuckDuckGoSearchAPIWrapper | |
import json | |
# Presumably the number of web-search results fetched per generated
# research question — confirm against the search-wrapper usage.
RESULTS_PER_QUESTION = 3
from langchain.chains.openai_functions import create_structured_output_runnable | |
from langchain.chat_models import ChatOpenAI | |
from langchain.prompts import ChatPromptTemplate | |
from langchain.pydantic_v1 import BaseModel, Field | |
class Insight(BaseModel):
    """Structured-output schema carrying a single free-text insight."""

    # Free-text insight produced by the model.
    insight: str = Field(description="""insight""")


# GPT-4 Turbo preview chat model instance.
chat_model = ChatOpenAI(model_name="gpt-4-1106-preview")
from langchain.prompts import PromptTemplate | |
from langchain.chat_models import ChatAnthropic | |
from langchain.schema.output_parser import StrOutputParser | |
#### ROUTER | |
# This is the router - responsible for choosing what to do
chain = PromptTemplate.from_template("""Given the user question below, classify it as either being about `weather` or `other`. | |
Do not respond with more than one word. |
from langchain.agents import load_tools | |
from langchain.agents import initialize_agent | |
from langchain.agents import AgentType | |
from langchain.llms import OpenAI | |
# Deterministic (temperature 0) completions from the instruct model.
llm = OpenAI(model="gpt-3.5-turbo-instruct", temperature=0)

from metaphor_python import Metaphor

# Metaphor search client — API key is intentionally blank in this snippet.
client = Metaphor("")
from langchain.prompts import ChatPromptTemplate | |
from langchain.chat_models import ChatOpenAI | |
from langchain.schema.output_parser import StrOutputParser | |
from langchain.vectorstores import Chroma | |
from langchain.embeddings import OpenAIEmbeddings | |
from langchain.schema.runnable import RunnablePassthrough | |
from langchain.schema.runnable import RunnableMap | |
from langchain.schema import format_document | |
from typing import AsyncGenerator |
from langchain.chat_models import ChatOpenAI | |
from pydantic import BaseModel, Field | |
from langchain.document_loaders import UnstructuredURLLoader | |
from langchain.chains.openai_functions import create_extraction_chain_pydantic | |
class LLMItem(BaseModel):
    """Pydantic schema for a product item used by the extraction chain."""

    # Short, human-readable product name.
    title: str = Field(description="The simple and concise title of the product")
    # Longer free-text description of the product.
    description: str = Field(description="The description of the product")
def main(): |
from langchain.document_loaders import YoutubeLoader | |
from langchain.indexes import VectorstoreIndexCreator | |
urls = [ | |
("https://www.youtube.com/watch?v=fP6vRNkNEt0", "Prompt Injection"), | |
("https://www.youtube.com/watch?v=qWv2vyOX0tk", "Low Code-No Code"), | |
("https://www.youtube.com/watch?v=k8GNCCs16F4", "Agents In Production"), | |
("https://www.youtube.com/watch?v=1gRlCjy18m4", "Agents"), | |
("https://www.youtube.com/watch?v=fLn-WqliEQU", "Output Parsing"), | |
("https://www.youtube.com/watch?v=ywT-5yKDtDg", "Document QA"), | |
("https://www.youtube.com/watch?v=GrCFyyyAxCU", "SQL"), |