#Stay Standalone
A short script to prevent internal links in a "webapp" added to the iPhone home screen from opening in Safari instead of navigating internally.
#Stay Standalone
A short script to prevent internal links in a "webapp" added to the iPhone home screen from opening in Safari instead of navigating internally.
#! -*- Coding: utf-8 -*- | |
from gevent import monkey | |
monkey.patch_all() | |
import gevent | |
import time | |
from envoy import run | |
from sys import exit, argv | |
import subprocess | |
import pip |
#!/usr/bin/python | |
import ctypes | |
import ctypes.util | |
import os | |
import re | |
# Matches strings composed entirely of NUL characters (the empty string
# also matches, since '*' permits zero repetitions).
null_re = re.compile('^\0*$')
# 64-bit signed integer used as the file-offset type in ctypes calls.
# NOTE(review): presumably mirrors the platform's off_t (64-bit with
# large-file support) — confirm against the foreign functions that use it.
c_off_t = ctypes.c_int64
#! /usr/bin/env python | |
# this script filters output from ipython notebooks, for use in git repos | |
# http://stackoverflow.com/questions/18734739/using-ipython-notebooks-under-version-control | |
# | |
# put this file in a `bin` directory in your home directory, then run the following commands: | |
# | |
# chmod a+x ~/bin/ipynb_output_filter.py | |
# echo -e "*.ipynb \t filter=dropoutput_ipynb" >> ~/.gitattributes | |
# git config --global core.attributesfile ~/.gitattributes | |
# git config --global filter.dropoutput_ipynb.clean ~/bin/ipynb_output_filter.py |
#!/bin/python | |
#Based on: EERTREE: An Efficient Data Structure for Processing Palindromes in Strings | |
#by Mikhail Rubinchik and Arseny M. Shur https://arxiv.org/pdf/1506.04862.pdf | |
#Useful: https://medium.com/@alessiopiergiacomi/eertree-or-palindromic-tree-82453e75025b#.ofgt2r7xn | |
#Port of https://github.com/zimpha/algorithmic-library/blob/master/string-utility/eertree.cc | |
#to python by TimSC. zimpha kindly allowed me to release this code under the CC0 license. | |
from __future__ import print_function |
The following test uses mypy to check the correctness of Python code that uses
type annotations, which were largely introduced in Python 3.5 and 3.6.
The two tests check all package code and all test code in the tests
directory, respectively.
Testing frameworks such as 'nose2' will automatically run the tests and will fail if any typing error
is detected (see step #3 below for more on this).
Tests live in the tests
directory, which should be at the top level of your repository alongside your module directory. The test class defines an __init__
that sets self.pkgname
to match your library's package name. This assumes one package per repo, so the test would need to be changed if this isn't how the repo structure works.{ pkgs ? import <nixpkgs> {} }: | |
let | |
# To use this shell.nix on NixOS your user needs to be configured as such: | |
# users.extraUsers.adisbladis = { | |
# subUidRanges = [{ startUid = 100000; count = 65536; }]; | |
# subGidRanges = [{ startGid = 100000; count = 65536; }]; | |
# }; |
This guide provides updated instructions for pairing Bluetooth devices (such as keyboards or mice) in a dual-boot environment with Linux Ubuntu and Windows 10/11, incorporating community feedback and suggestions.
#!/usr/bin/env -S bash -c 'nix-shell --pure $0 -A env' | |
# Usage: | |
# 1. run directly to enter bash (inside venv): `./venv-py37.nix` | |
# 2. build a standalone executable: `nix bundle -f ./venv-py37.nix` #this not works yet since it cause nested `unshare -U` call | |
# 3. run bash with extra arguments: `nix run -f ./venv-py37.nix '' -- -c 'python --version'` | |
# More: | |
# 1. commit id of nixpkgs can be found here: https://lazamar.co.uk/nix-versions/?channel=nixpkgs-unstable&package=python3 | |
let |
You are an expert prompt engineer. Your task is to deeply understand what I want, and in return respond with a well crafted prompt that, if fed to a separate AI, will get me exactly the result I want. | |
The prompt follows this rough outline, and makes sure to include each part as needed: | |
1. A persona. At the start, you write something to the effect of "Act as an expert in ..." This primes the LLM to respond from info relating to experts in the specific field.
2. The task. This part of the prompt involves exhaustively laying out the task for the LLM. It is critical this part is specific and clear. This is the most important part of the prompt. | |
3. Context. Make sure to include *any* context that is needed for the LLM to accurately and reliably respond as needed.
4. Response format. Outline the ideal response format for this prompt. | |
5. Examples. This step is optional, but if examples would be beneficial, include them. | |
6. Input. If needed, leave a space in the prompt for any input data. This should be highlighted.