Skip to content

Instantly share code, notes, and snippets.

@sam2332
Last active July 2, 2024 15:43
Show Gist options
  • Save sam2332/c087dcc6a8a1f65b1ab3c3e09647fa70 to your computer and use it in GitHub Desktop.
Save sam2332/c087dcc6a8a1f65b1ab3c3e09647fa70 to your computer and use it in GitHub Desktop.
StableWallpapers!
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"##Magic Logging Bootstrap\n",
"import logging\n",
"import sys,os\n",
"logging.basicConfig(\n",
" level=logging.INFO,\n",
" format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',\n",
" datefmt='%Y-%m-%d %H:%M:%S'\n",
")\n",
"\n",
"logger = logging.getLogger(__name__)\n",
"\n",
"import psutil\n",
"import os\n",
"import random\n",
"from bs4 import BeautifulSoup\n",
"from queue import Queue\n",
"import os\n",
"\n",
"import time\n",
"import requests\n",
"import subprocess\n",
"import json\n",
"import base64\n",
"\n",
"import requests\n",
"import jsonpickle\n"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"def make_message(role, content):\n",
" return {\"role\": role, \"content\": content}\n",
"def QueryOllama(query,system_message,max_tokens=100,temperature=0.9):\n",
" system_message = \"\"\"\n",
" You will respond in json in the following format:\n",
" {\"Title\":\"\"}\n",
" AND\n",
" \"\"\"+system_message\n",
" messages = [\n",
" make_message(\"system\", system_message),\n",
" make_message(\"user\", query),\n",
" ]\n",
" data = Ollama(messages, \"dolphin-mistral:latest\", max_tokens, temperature, True)\n",
" data = json.loads(data['content'])\n",
" return data['Title']\n",
"def Ollama(messages, model, max_tokens, temperature, return_json):\n",
" # Create the URL for the chat API endpoint\n",
" url = f\"http://localhost:11434/api/chat\"\n",
" # Prepare the payload with all possible fields\n",
" payload = {\n",
" \"model\": model,\n",
" \"messages\": messages,\n",
" \"options\": {\n",
" \"temperature\": temperature,\n",
" \"num_predict\":max_tokens,\n",
" },\n",
" \"stream\": False,\n",
" \"max_tokens\": max_tokens,\n",
" \"keep_alive\": \"1m\",\n",
" }\n",
" if return_json:\n",
" payload[\"format\"] = \"json\"\n",
" logging.info(f\"Sending chat request to {url} with payload: {payload}\")\n",
" # Send the request to the external chat API\n",
" response = requests.post(url, json=payload)\n",
" logging.info(\n",
" f\"Recieved {response.status_code} response from chat request: {response.text}\"\n",
" )\n",
" # Check if the request was successful\n",
" if response.status_code == 200:\n",
" # Return the response from the external service\n",
" res_data = response.json()\n",
" while res_data[\"message\"][\"content\"].strip() != res_data[\"message\"][\"content\"]:\n",
" res_data[\"message\"][\"content\"] = res_data[\"message\"][\"content\"].strip()\n",
"\n",
" logging.info(f\"Recieved response from chat request: {res_data}\")\n",
" return res_data['message']\n",
" else:\n",
" # Return an error if something went wrong\n",
"        # HTTPException comes from FastAPI, which is never imported in this notebook;\n",
"        # use a built-in exception so the failure path raises the intended error, not a NameError.\n",
"        raise RuntimeError(\n",
"            f\"Error {response.status_code} processing chat request with external model\"\n",
"        )"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"class cache_response:\n",
" def __init__(self, cache_file):\n",
" self.cache_file = cache_file\n",
" self.cache = {}\n",
" if os.path.exists(cache_file):\n",
" with open(cache_file, 'r') as f:\n",
" self.cache = jsonpickle.decode(f.read())\n",
" logger.info(\"Loaded %d cache entries from %s\", len(self.cache), cache_file)\n",
"\n",
" def __getitem__(self, key):\n",
" logger.info(\"Getting %s from cache\", key)\n",
" return self.cache[key]\n",
"\n",
" def __setitem__(self, key, value):\n",
" logger.info(\"Caching %s\", key)\n",
" self.cache[key] = value\n",
" self.save()\n",
"\n",
" def __contains__(self, key):\n",
" logger.info(\"Checking if %s is in cache\", key) \n",
" return key in self.cache\n",
"\n",
" def __delitem__(self, key):\n",
" logger.info(\"Deleting %s\", key)\n",
" del self.cache[key]\n",
"\n",
" def save(self):\n",
" with open(self.cache_file, 'w') as f:\n",
" f.write(jsonpickle.encode(self.cache))\n",
" \n"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"\n",
"def getRandomSubject(nodes=None,use_llm=False):\n",
" if nodes is None:\n",
" oddity_modifiers=[]\n",
"\n",
" oddity_modifiers.extend([\n",
" \"Glowing\", \"exciting\",\"neon\",\"vibrant\",\"colorful\",\"bright\",\"dark\",\"mysterious\",\"strange\",\"weird\",\"odd\",\"unusual\",\"unique\",\"rare\",\"special\",\"magical\",\"fantastic\",\"amazing\",\"incredible\",\"awesome\",\"cool\",\"hot\",\"cold\",\"warm\",\"fuzzy\",\"soft\",\"smooth\",\"rough\",\"sharp\",\"hard\",\"heavy\",\"light\",\"fast\",\"slow\",\"quick\",\"easy\",\"simple\",\"complex\",\"difficult\",\"challenging\",\"dangerous\",\"safe\",\"secure\",\"protected\",\"harmless\",\"melting\",\"freezing\",\"boiling\",\"evaporating\",\"condensing\",\"solid\",\"liquid\",\"gas\",\"plasma\",\"elemental\",\"chemical\",\"biological\",\"organic\",\"inorganic\",\"synthetic\",\"artificial\",\"natural\",\"real\",\"imaginary\",\"virtual\",\"digital\",\"analog\",\"analogous\",\"similar\",\"different\",\"opposite\",\"contrary\",\"contradictory\",\"paradoxical\",\"ironic\",\"sarcastic\",\"satirical\",\"humorous\",\"funny\",\"comical\",\"hilarious\",\"ridiculous\",\"absurd\",\"nonsensical\",\"silly\",\"stupid\",\"crazy\",\"mad\",\"insane\",\"lunatic\",\"psychotic\",\"deranged\",\"demented\",\"delusional\",\"hallucinatory\",\"paranoid\",\"schizophrenic\",\"neurotic\",\"obsessive\",\"compulsive\",\"addictive\",\"habitual\",\"chronic\",\"acute\",\"severe\",\"intense\",\"extreme\",\"excessive\",\"recursive\",\"circular\",\"linear\",\"wavy\",\"zigzag\", \"Spiral\",\"polygonal\",\"polyhedral\",\"spherical\",\"cylindrical\",\"conical\",\"pyramidal\",\"prismatic\"\n",
" ])\n",
"\n",
"\n",
" subjects = []\n",
" #animals\n",
" subjects.extend( [\n",
" \"dog\",\"cat\",\"bird\",\"fish\",\"rabbit\",\"hamster\",\"guinea pig\",\"turtle\",\"snake\",\"lizard\",\"frog\",\"toad\",\"salamander\",\"newt\",\"axolotl\",\"scorpion\",\"clam\",\"snail\",\"slug\",\"octopus\",\"squid\",\"jellyfish\",\"starfish\",\"sea urchin\",\"sea cucumber\",\"coral\",\"anemone\",\"sponge\",\"plankton\",\"krill\",\"shark\",\"whale\",\"dolphin\",\"seal\",\"sea lion\",\"walrus\",\"sea otter\",\"penguin\",\"seagull\",\"pelican\",\"albatross\",\"swan\",\"goose\",\"duck\",\"heron\",\"crane\",\"stork\",\"flamingo\",\"eagle\",\"hawk\",\"falcon\"\n",
" ])\n",
" #plants\n",
" subjects.extend([\n",
" \"tree\",\"flower\",\"bush\",\"shrub\",\"grass\",\"weed\",\"moss\",\"fern\",\"algae\",\"fungus\",\"mushroom\",\"lichen\",\"cactus\",\"succulent\",\"orchid\",\"lily\",\"rose\",\"daisy\",\"tulip\",\"sunflower\",\"daffodil\",\"dandelion\",\"violet\",\"poppy\",\"lilac\",\"iris\",\"lily of the valley\",\"honeysuckle\",\"lavender\",\"peony\",\"chrysanthemum\",\"marigold\",\"zinnia\",\"cosmos\",\"aster\",\"dahlia\",\"gladiolus\",\"carnation\",\"hyacinth\",\"hybrid\",\"hibiscus\",\"azalea\",\"rhododendron\",\"camellia\",\"magnolia\",\"gardenia\",\"jasmine\",\"lantana\",\"bougainvillea\"\n",
" ])\n",
" #fruits\n",
" subjects.extend([\n",
" \"apple\",\"banana\",\"orange\",\"grape\",\"strawberry\",\"blueberry\",\"raspberry\",\"blackberry\",\"kiwi\",\"pineapple\",\"watermelon\",\"cantaloupe\",\"honeydew\",\"mango\",\"papaya\",\"guava\",\"passion fruit\",\"dragon fruit\",\"lychee\",\"longan\",\"rambutan\",\"durian\",\"jackfruit\",\"breadfruit\",\"coconut\",\"date\",\"fig\",\"kiwi\",\"lemon\",\"lime\",\"pomegranate\",\"avocado\",\"\",\"pear\",\"plum\",\"cherry\",\"apricot\",\"nectarine\",\"persimmon\",\"quince\",\"cranberry\",\"currant\",\"gooseberry\",\"elderberry\",\"boysenberry\",\"mulberry\",\"loganberry\",\"cloudberry\",\"lingonberry\",\"bilberry\",\"huckleberry\",\"barberry\",\"goji berry\",\"acai berry\",\"blueberry\",\"blackberry\",\"raspberry\",\"strawberry\",\"cranberry\",\"cherry\",\"plum\",\"\",\"apricot\",\"nectarine\",\"pear\",\"apple\",\"orange\",\"lemon\",\"lime\",\"grapefruit\",\"pomelo\",\"tangerine\",\"mandarin\",\"clementine\",\"kumquat\",\"persimmon\",\"pomegranate\",\"fig\",\"date\",\"olive\",\"grape\",\"banana\",\"pineapple\",\"mango\",\"papaya\",\"guava\",\"kiwi\",\"passion fruit\",\"dragon fruit\",\"lychee\",\"rambutan\",\"longan\",\"durian\",\"jackfruit\",\"breadfruit\",\"coconut\",\"avocado\",\"kiwi\",\"papaya\",\"guava\",\"passion fruit\",\"dragon fruit\",\"lychee\",\"longan\",\"rambutan\",\"durian\",\"jackfruit\",\"breadfruit\",\"coconut\",\"date\",\"fig\",\"kiwi\",\"lemon\",\"lime\",\"pomegranate\",\"avocado\",\"\",\"pear\",\"plum\",\"cherry\",\"apricot\",\"nectarine\",\"persimmon\",\"quince\",\"cranberry\",\"currant\",\"gooseberry\"\n",
" ])\n",
" #adults\n",
" subjects.extend([\n",
" \"a curious adult\", \"a happy adult\",\"a slutty woman\"\n",
" ])\n",
" #couples\n",
" subjects.extend([\n",
" \"a happy couple\", \"a loving couple\",\n",
" ])\n",
"\n",
"\n",
" actions = [\n",
" \"playing\",\"sleeping\",\"eating\",\"walking\",\"running\",\"jumping\",\"\",\"laughing\",\"singing\",\"dancing\",\"reading\",\"writing\",\"drawing\",\"painting\",\"cooking\",\"cleaning\",\"driving\",\"flying\",\"swimming\",\"diving\",\"climbing\",\"hiking\",\"camping\",\"fishing\",\"hunting\",\"skiing\",\"snowboarding\",\"skating\",\"surfing\",\"sailing\",\"boating\",\"rowing\",\"rafting\",\"kayaking\",\"canoeing\",\"biking\",\"cycling\",\"riding\",\"racing\",\"competing\",\"winning\",\"losing\",\"succeeding\",\"failing\",\"helping\",\"saving\",\"rescuing\",\"protecting\",\"defending\",\"attacking\",\"fighting\",\"arguing\",\"debating\",\"discussing\",\"talking\",\"speaking\",\"listening\",\"hearing\",\"seeing\",\"watching\",\"looking\",\"staring\",\"gazing\",\"smiling\",\"frowning\",\"\",\"laughing\",\"screaming\",\"yelling\",\"shouting\",\"whispering\",\"singing\",\"dancing\",\"playing\",\"working\",\"studying\",\"learning\",\"teaching\",\"training\",\"coaching\",\"mentoring\",\"guiding\",\"leading\",\"following\",\"supporting\",\"encouraging\",\"motivating\",\"inspiring\",\"listening\",\"hearing\",\"seeing\",\"watching\",\"looking\",\"staring\",\"gazing\",\"smiling\",\"frowning\",\"\",\"laughing\",\"screaming\",\"yelling\",\"shouting\",\"whispering\",\"singing\",\"dancing\",\"playing\",\"working\",\"studying\",\"learning\",\"teaching\",\"training\",\"coaching\",\"mentoring\",\"guiding\",\"leading\",\"following\",\"supporting\",\"encouraging\",\"motivating\",\"inspiring\",\"rescuing\",\"protecting\"\n",
" ]\n",
" oddity = random.choice(oddity_modifiers)\n",
" subject = random.choice(subjects)\n",
" action = random.choice(actions)\n",
" idea = f\"{oddity},{subject},{action}\"\n",
" logger.info(f\"Generated idea: {idea}\")\n",
" else:\n",
" idea = nodes\n",
" if use_llm:\n",
" llm_idea = QueryOllama(\n",
" idea,\n",
" system_message=\"make these items into a unique comma seperated title please, dont say title, rng: \"+str(time.time()),\n",
" max_tokens=35\n",
" )\n",
" llm_out=[]\n",
" for element in llm_idea.split():\n",
" if element is None:\n",
" continue\n",
" if element == \"\":\n",
" continue\n",
" llm_out.append(element)\n",
" llm_idea = \" \".join(llm_out)\n",
" logger.info(f\"Generated by llm: {llm_idea}\")\n",
" return idea, llm_idea\n",
" return idea,idea"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"\n",
"from PIL import Image, ImageDraw, ImageFont\n",
"import numpy as np\n",
"import io\n",
"import os\n",
"import time\n",
"def generate_wallpaper(uprompt:str, seed=None):\n",
" instance = \"http://localhost:7778\"\n",
" model = \"newdawnxl_31Bf16Version\"\n",
" wall_paper_width = 1920\n",
" wall_paper_height = 1080\n",
" gen_image_width = 1024\n",
" gen_image_height = 1024\n",
" nprompt = \"cartoon,ugly,nsfw,softcore,hardcore, nudity, low quality, bad wallpaper, student project, bad design, render, low resolution, low quality, bad quality, bad resolution, bad lighting, bad composition, bad colors, bad contrast, bad focus, bad framing, bad exposure, bad saturation, bad white balance, bad shadows, bad highlights, bad reflections, bad refractions, bad textures, bad materials, bad geometry, bad topology, bad modeling, bad rigging, bad animation, bad simulation, bad rendering, bad compositing, bad editing, bad post processing, bad retouching, bad color grading, bad effects, bad filters, bad noise, bad artifacts, bad distortion, bad blur, bad sharpening, bad denoising, bad chromatic aberration, bad vignetting, bad motion blur, bad depth of field, bad bokeh, bad lens flare, bad light leak, bad grain, bad banding, bad posterization, bad dithering, bad compression, bad pixelation, bad aliasing, bad moire, bad halftone, bad quantization, bad interpolation, bad resampling, bad upscaling, bad downscaling, bad aspect ratio, bad resolution, bad orientation, bad alignment, bad symmetry, bad balance, bad proportion, bad scale, bad perspective, bad depth, bad volume, bad shape, bad form, bad structure, bad anatomy, bad proportions, bad composition, bad layout, bad framing, bad cropping\"\n",
" steps = 32\n",
" cfg = 13\n",
" if seed is None:\n",
" useed = random.randint(0, 1000000000000)\n",
" else:\n",
" useed = seed\n",
" logging.error(requests.post(f\"{instance}/sdapi/v1/refresh-checkpoints\").json())\n",
" change_res = requests.post(\n",
" f\"{instance}/sdapi/v1/options\",\n",
" data=json.dumps({\"sd_model_checkpoint\": model}),\n",
" )\n",
" logging.error(change_res.text)\n",
" change_res_json = change_res.json()\n",
" if change_res_json is not None:\n",
" if \"error\" in change_res_json:\n",
" raise Exception(change_res_json[\"error\"])\n",
" url = f\"{instance}/sdapi/v1/txt2img\"\n",
"\n",
"\n",
" payload = {\n",
" \"prompt\": uprompt,\n",
" \"negative_prompt\": nprompt,\n",
" \"steps\": steps,\n",
" \"restore_faces\": True,\n",
" \"batch_size\": 1,\n",
" \"cfg\": cfg,\n",
" \"seed\": useed,\n",
" \"width\": gen_image_width,\n",
" \"height\": gen_image_height,\n",
" }\n",
"\n",
" logging.info(f\"Sending request to {url} for prompt {uprompt} \")\n",
" response = requests.post(url, json=payload)\n",
" result = response.json()\n",
"\n",
" image_data = base64.b64decode(result[\"images\"][0].split(\",\", 1)[0])\n",
"\n",
"\n",
" # add prompt to bottom left corner\n",
" #white background black text\n",
" # Simulated image loading and prompt setup\n",
"\n",
" image = Image.open(io.BytesIO(image_data)) # make sure 'image_data' contains your image data\n",
" \n",
" return image, uprompt, useed\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"def make_wallpaper(nodes:str, uprompt: str, useed: int):\n",
" top_margin=12\n",
" wall_paper_width = 1920\n",
" wall_paper_height = 1080\n",
" gen_image_width = 1024\n",
" gen_image_height = 1024\n",
" image, uprompt, useed = generate_wallpaper(uprompt, useed)\n",
" width, height = image.size\n",
"\n",
" # Function to interpolate between two colors\n",
" def interpolate_color(c1, c2, factor):\n",
" return tuple(int(c1[i] + (c2[i] - c1[i]) * factor) for i in range(3))\n",
"\n",
" # Function to create a gradient line\n",
" def gradient_line(draw, start_pos, end_pos, start_color, end_color, horizontal=True):\n",
" x1, y1 = start_pos\n",
" x2, y2 = end_pos\n",
" length = x2 - x1 if horizontal else y2 - y1\n",
" for i in range(length):\n",
" factor = i / length\n",
" color = interpolate_color(start_color, end_color, factor)\n",
" if horizontal:\n",
" draw.line([(x1 + i, y1), (x1 + i, y2)], fill=color)\n",
" else:\n",
" draw.line([(x1, y1 + i), (x2, y1 + i)], fill=color)\n",
"\n",
" # Sample colors from the edges\n",
" def sample_edge_colors(img, num_samples=10):\n",
" top_colors = [img.getpixel((x, 0)) for x in np.linspace(0, img.width-1, num_samples, dtype=int)]\n",
" bottom_colors = [img.getpixel((x, img.height-1)) for x in np.linspace(0, img.width-1, num_samples, dtype=int)]\n",
" left_colors = [img.getpixel((0, y)) for y in np.linspace(0, img.height-1, num_samples, dtype=int)]\n",
" right_colors = [img.getpixel((img.width-1, y)) for y in np.linspace(0, img.height-1, num_samples, dtype=int)]\n",
" return top_colors, bottom_colors, left_colors, right_colors\n",
"\n",
" top, bottom, left, right = sample_edge_colors(image)\n",
"\n",
" avg_color = tuple(int(sum(x) / len(x)) for x in zip(*top, *bottom, *left, *right))\n",
" wallpaper_image = Image.new(\"RGB\", (wall_paper_width, wall_paper_height), avg_color)\n",
" # Create a new wallpaper images\n",
" draw = ImageDraw.Draw(wallpaper_image)\n",
"\n",
" # Center the original image in the middle\n",
" center_x = (wall_paper_width - width) // 2\n",
" center_y = (wall_paper_height - height) // 2\n",
" center_y += top_margin\n",
" wallpaper_image.paste(image, (center_x, center_y))\n",
"\n",
" # Add prompt and seed to the bottom left corner\n",
" draw_wallpaper = ImageDraw.Draw(wallpaper_image)\n",
" font = ImageFont.load_default()\n",
"\n",
" text = f\"Seed: {useed}\"\n",
" text2 = f\"Prompt: {uprompt}\"\n",
"    # textsize() is deprecated and removed in Pillow 10; textbbox() returns (left, top, right, bottom)\n",
"    l, t, r, b = draw_wallpaper.textbbox((0, 0), text, font=font)\n",
"    text_width, text_height = r - l, b - t\n",
"    l, t, r, b = draw_wallpaper.textbbox((0, 0), text2, font=font)\n",
"    text2_width, text2_height = r - l, b - t\n",
" text_position = (10, wall_paper_height - text_height - 10)\n",
" text2_position = (10, wall_paper_height - text_height - text2_height - 20)\n",
"\n",
"\n",
" # Fill area bottom left with white size of text\n",
" rect = (0, wall_paper_height - text_height - text2_height - 40, max(text_width, text2_width) + 20, wall_paper_height)\n",
" draw_wallpaper.rectangle(rect, fill=\"white\")\n",
"\n",
" draw_wallpaper.text(text_position, text, font=font, fill=\"black\")\n",
" draw_wallpaper.text(text2_position, text2, font=font, fill=\"black\")\n",
"\n",
" # Save the wallpaper\n",
" wallpaper_dir = os.path.expanduser(\"~/Pictures/Wallpapers\")\n",
" wallpaper_dir = os.path.join(wallpaper_dir, time.strftime(\"%Y-%m-%d\"))\n",
" wallpaper_dir = os.path.join(wallpaper_dir, nodes.replace(\" \", \"_\").replace(\",\", \"_\").replace(\".\", \"_\").replace(\"?\", \"_\").replace(\"!\", \"_\"))\n",
" output_file = os.path.join(wallpaper_dir, f\"{useed}.png\")\n",
" os.makedirs(wallpaper_dir, exist_ok=True)\n",
"\n",
" wallpaper_image.save(output_file)\n",
" print(f\"Wallpaper saved to {output_file}\") \n",
" os.system(f\"gsettings set org.gnome.desktop.background picture-uri 'file://{output_file}'\")\n",
" os.system(f\"gsettings set org.gnome.desktop.background picture-uri-dark 'file://{output_file}'\")"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"\n",
"#if in vscode or jupyter, keep the window open else sysexit 0\n",
"if \"VSCODE_PID\" in os.environ:\n",
" pass\n",
"else:\n",
" try:\n",
" while True:\n",
" seed = random.randint(0, 1000000000000)\n",
" nodes,llm = getRandomSubject()\n",
" for useed in range(seed, seed+100, 10):\n",
" nodes,llm = getRandomSubject(nodes=nodes)\n",
" make_wallpaper(nodes,llm,useed)\n",
" time.sleep(300)\n",
" except KeyboardInterrupt:\n",
" sys.exit(0)\n",
" except Exception as e:\n",
" logger.error(e)\n",
" sys.exit(1)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"ename": "Exception",
"evalue": "Done",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mException\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[16], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mDone\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
"\u001b[0;31mException\u001b[0m: Done"
]
}
],
"source": [
"raise Exception(\"Done\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"2024-07-02 09:43:21.060 INFO 4282373082 - getRandomSubject: Generated idea: vibrant,rhododendron,following\n",
"2024-07-02 09:43:21.063 INFO 1914763091 - Ollama: Sending chat request to http://localhost:11434/api/chat with payload: {'model': 'dolphin-mistral:latest', 'messages': [{'role': 'system', 'content': '\\n You will respond in json in the following format:\\n {\"Title\":\"\"}\\n AND\\n make these items into a unique comma seperated title please, dont say title, rng: 1719927801.0631666'}, {'role': 'user', 'content': 'vibrant,rhododendron,following'}], 'options': {'temperature': 0.9, 'num_predict': 35}, 'stream': False, 'max_tokens': 35, 'keep_alive': '1m', 'format': 'json'}\n",
"2024-07-02 09:43:23.105 INFO 1914763091 - Ollama: Recieved 200 response from chat request: {\"model\":\"dolphin-mistral:latest\",\"created_at\":\"2024-07-02T13:43:23.076516895Z\",\"message\":{\"role\":\"assistant\",\"content\":\"{\\\"Title\\\": \\\"Vibrant Rhododendrons Following\\\"}\"},\"done\":true,\"total_duration\":1933828463,\"load_duration\":1508383887,\"prompt_eval_count\":91,\"prompt_eval_duration\":77562000,\"eval_count\":15,\"eval_duration\":346428000}\n",
"2024-07-02 09:43:23.106 INFO 1914763091 - Ollama: Recieved response from chat request: {'model': 'dolphin-mistral:latest', 'created_at': '2024-07-02T13:43:23.076516895Z', 'message': {'role': 'assistant', 'content': '{\"Title\": \"Vibrant Rhododendrons Following\"}'}, 'done': True, 'total_duration': 1933828463, 'load_duration': 1508383887, 'prompt_eval_count': 91, 'prompt_eval_duration': 77562000, 'eval_count': 15, 'eval_duration': 346428000}\n",
"2024-07-02 09:43:23.108 INFO 4282373082 - getRandomSubject: Generated by llm: Vibrant Rhododendrons Following\n",
"2024-07-02 09:43:23.109 INFO 1914763091 - Ollama: Sending chat request to http://localhost:11434/api/chat with payload: {'model': 'dolphin-mistral:latest', 'messages': [{'role': 'system', 'content': '\\n You will respond in json in the following format:\\n {\"Title\":\"\"}\\n AND\\n make these items into a unique comma seperated title please, dont say title, rng: 1719927803.109372'}, {'role': 'user', 'content': 'vibrant,rhododendron,following'}], 'options': {'temperature': 0.9, 'num_predict': 35}, 'stream': False, 'max_tokens': 35, 'keep_alive': '1m', 'format': 'json'}\n",
"2024-07-02 09:43:23.682 INFO 1914763091 - Ollama: Recieved 200 response from chat request: {\"model\":\"dolphin-mistral:latest\",\"created_at\":\"2024-07-02T13:43:23.608133549Z\",\"message\":{\"role\":\"assistant\",\"content\":\"{\\\"Title\\\":\\\"Vibrant Rhododendrons in Bloom\\\"}\"},\"done\":true,\"total_duration\":419747091,\"load_duration\":621677,\"prompt_eval_count\":31,\"prompt_eval_duration\":72655000,\"eval_count\":15,\"eval_duration\":344545000}\n",
"2024-07-02 09:43:23.683 INFO 1914763091 - Ollama: Recieved response from chat request: {'model': 'dolphin-mistral:latest', 'created_at': '2024-07-02T13:43:23.608133549Z', 'message': {'role': 'assistant', 'content': '{\"Title\":\"Vibrant Rhododendrons in Bloom\"}'}, 'done': True, 'total_duration': 419747091, 'load_duration': 621677, 'prompt_eval_count': 31, 'prompt_eval_duration': 72655000, 'eval_count': 15, 'eval_duration': 344545000}\n",
"2024-07-02 09:43:23.684 INFO 4282373082 - getRandomSubject: Generated by llm: Vibrant Rhododendrons in Bloom\n",
"2024-07-02 09:43:23.867 ERROR 3662477535 - generate_wallpaper: None\n",
"2024-07-02 09:43:23.959 ERROR 3662477535 - generate_wallpaper: null\n",
"2024-07-02 09:43:23.960 INFO 3662477535 - generate_wallpaper: Sending request to http://localhost:7778/sdapi/v1/txt2img for prompt Vibrant Rhododendrons in Bloom \n",
"/tmp/ipykernel_413708/1729695487.py:56: DeprecationWarning: textsize is deprecated and will be removed in Pillow 10 (2023-07-01). Use textbbox or textlength instead.\n",
" text_width, text_height = draw_wallpaper.textsize(text, font)\n",
"/tmp/ipykernel_413708/1729695487.py:57: DeprecationWarning: textsize is deprecated and will be removed in Pillow 10 (2023-07-01). Use textbbox or textlength instead.\n",
" text2_width, text2_height = draw_wallpaper.textsize(text2, font)\n",
"/tmp/ipykernel_413708/1729695487.py:61: DeprecationWarning: textsize is deprecated and will be removed in Pillow 10 (2023-07-01). Use textbbox or textlength instead.\n",
" text3_width, text3_height = draw_wallpaper.textsize(text3, font)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Wallpaper saved to /home/srudloff/Pictures/Wallpapers/2024-07-02/vibrant_rhododendron_following/429936433502.png\n"
]
},
{
"ename": "KeyboardInterrupt",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[10], line 7\u001b[0m\n\u001b[1;32m 5\u001b[0m nodes,llm \u001b[38;5;241m=\u001b[39m getRandomSubject(nodes\u001b[38;5;241m=\u001b[39mnodes)\n\u001b[1;32m 6\u001b[0m make_wallpaper(nodes,llm,useed)\n\u001b[0;32m----> 7\u001b[0m \u001b[43mtime\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msleep\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m7\u001b[39;49m\u001b[43m)\u001b[49m\n",
"\u001b[0;31mKeyboardInterrupt\u001b[0m: "
]
}
],
"source": [
"while True:\n",
" seed = random.randint(0, 1000000000000)\n",
" nodes,llm = getRandomSubject()\n",
" for useed in range(seed, seed+100, 10):\n",
" nodes,llm = getRandomSubject(nodes=nodes)\n",
" make_wallpaper(nodes,llm,useed)\n",
" time.sleep(7)\n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"seed = random.randint(0, 1000000000000)\n",
"nodes, subject = getRandomSubject()\n",
"make_wallpaper(nodes, subject, seed)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cur_hex = random.randint(0, 16777215)\n",
"for i in range(25):\n",
" #adapt a hex color at the end of the prompt and update it in a loop \n",
" hex_color_str = \"#\"+hex(cur_hex)[2:] \n",
" cur_hex+=256\n",
" uprompt = f\"{subject},{hex_color_str}\"\n",
" make_wallpaper(nodes, uprompt, seed)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"while True:\n",
" seed = random.randint(0, 1000000000000)\n",
" raw,llm = getRandomSubject()\n",
" cur_hex = random.randint(0, 16777215)\n",
" for i in range(25):\n",
" #adapt a hex color at the end of the prompt and update it in a loop \n",
" hex_color_str = \"#\"+hex(cur_hex)[2:] \n",
" cur_hex+=256\n",
" uprompt = f\"{subject},{hex_color_str}\"\n",
"\n",
" make_wallpaper(raw+f\",{hex_color_str}\", uprompt, seed)\n",
" time.sleep(5)\n",
" time.sleep(15)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment