- Update HISTORY.rst
- Update the version number in my_project/__init__.py
- Update the version number in setup.py
- Install the package again for local development, but with the new version number:
  python setup.py develop
- Run the tests:
  python setup.py test
"""
exp    ::= term | exp + term | exp - term
term   ::= factor | factor * term | factor / term
factor ::= number | ( exp )
"""


class Calculator:
    """Holds the parser state for the grammar above: the token stream
    and a cursor on the token currently being examined."""

    def __init__(self, tokens):
        # Keep the full token sequence and start the cursor on its first
        # element (an empty sequence is a caller error, as in the original).
        self._tokens = tokens
        self._current = tokens[0]
my_project/__init__.py
setup.py
python setup.py develop
python setup.py test
A binary search tree (BST) can be represented by a linked data structure in which
each node stores a key, its associated data, and references to its left and right
subtrees.
A BST has the following properties:
MergeSort is a recursive sorting algorithm that uses O(n log n)
comparisons in the worst case. To sort an array of n elements, we perform the following three steps in sequence:
There are two common merge sort implementations: top-down (recursive) and bottom-up (iterative). The latter is more efficient and more widely used.
async def do_select(pool, i):
    """Run a trivial ``SELECT 10`` on a connection borrowed from *pool*.

    Modernized from the deprecated ``@asyncio.coroutine`` / ``yield from``
    generator style, which was removed in Python 3.11; each ``await`` below
    is the direct native-coroutine equivalent of the original ``yield from``.

    :param pool: connection pool (aiopg-style); awaiting it is assumed to
        yield a context manager that provides a connection — confirm against
        the pool API in use.
    :param i: task index supplied by the caller; unused in the body.
    """
    with (await pool) as conn:
        cur = await conn.cursor()
        await cur.execute("SELECT 10")
        await cur.close()
@asyncio.coroutine |
/** Hive/Pig/Cascading/Scalding-style inner join which will perform a map-side/replicated/broadcast | |
* join if the "small" relation has fewer than maxNumRows, and a reduce-side join otherwise. | |
* @param big the large relation | |
* @param small the small relation | |
* @param maxNumRows the maximum number of rows that the small relation can have to be a
* candidate for a map-side/replicated/broadcast join | |
* @return a joined RDD with a common key and a tuple of values from the two | |
* relations (the big relation value first, followed by the small one) | |
*/ | |
private def optimizedInnerJoin[A : ClassTag, B : ClassTag, C : ClassTag] |
import sys
# AWS Glue generated-script boilerplate: transform library, argument
# parsing, and the Spark/Glue contexts used by ETL jobs.
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.dynamicframe import DynamicFrame
from awsglue.job import Job
# Resolve the command-line job parameters; 'JOB_NAME' is the parameter
# Glue passes for job bookkeeping (Job.init/commit).
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
# NOTE(review): this block is byte-for-byte identical to an earlier block in
# this file — likely a copy/paste or extraction artifact; confirm before
# removing either copy.
import sys
# AWS Glue generated-script boilerplate: transform library, argument
# parsing, and the Spark/Glue contexts used by ETL jobs.
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.dynamicframe import DynamicFrame
from awsglue.job import Job
# Resolve the command-line job parameters; 'JOB_NAME' is the parameter
# Glue passes for job bookkeeping (Job.init/commit).
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
import asyncio | |
import injections | |
import aiopg.sa | |
from aiohttp import web | |
@injections.has | |
class SiteHandler: | |
# this is only place holder, actual connection |
# Build script for the cpp_shim test executable, linked against libtorch
# and OpenCV.
cmake_minimum_required(VERSION 3.0 FATAL_ERROR)
project(cpp_shim)
# Relative path to an unpacked libtorch distribution so that
# find_package(Torch) can locate TorchConfig.cmake without the caller
# passing -DCMAKE_PREFIX_PATH.
set(CMAKE_PREFIX_PATH ../libtorch)
find_package(Torch REQUIRED)
find_package(OpenCV REQUIRED)
add_executable(testing main.cpp)
# Echo which OpenCV install was picked up, for build-log debugging.
message(STATUS "OpenCV library status:")
message(STATUS " config: ${OpenCV_DIR}")