sudo apt-get install python3-pip
sudo pip3 install virtualenv
/*
PinReadWrite.ino
//20170512 initial version
//20170517 uS timing (was mS), vars are longs, i2C start/stop, and clock IN data w/.
//20201002 Returns valid JSON.
Simple Arduino script to set pins high, low, input, pull up, or analog/servo,
clock out data with timing, and read all or a single pin back via serial IO.
Written for the tiny-circuits.com TinyDuino in the end effector of the
Dexter robot from HDRobotic.com, but generally useful to turn the Arduino
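Since the 20201002 revision returns valid JSON over serial, a host-side reader might look like the following Python sketch. The serial port name, baud rate, and the 'r' command byte are assumptions for illustration only; they are not documented by the fragment above.

import json
import serial  # pyserial

# port name, baud rate, and the command byte are assumptions; adjust for your setup
with serial.Serial('/dev/ttyACM0', 9600, timeout=2) as port:
    port.write(b'r')                               # hypothetical "read pins" command
    line = port.readline().decode('utf-8').strip()
    pins = json.loads(line)                        # the sketch header says it returns valid JSON
    print(pins)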
#!/bin/bash
#
# EDIT: this script is outdated, please see https://forums.developer.nvidia.com/t/pytorch-for-jetson-nano-version-1-6-0-now-available
#
sudo apt-get install python-pip
# upgrade pip
pip install -U pip
pip --version
# pip 9.0.1 from /home/ubuntu/.local/lib/python2.7/site-packages (python 2.7)
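A quick way to confirm the result, assuming PyTorch was installed per the forum thread linked above (this check is not part of the original script):

import torch

print(torch.__version__)          # installed PyTorch version
print(torch.cuda.is_available())  # True if the Jetson GPU is visible to PyTorch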
# For more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_gui/py_video_display/py_video_display.html
import cv2
import numpy as np

# Playing video from file:
# cap = cv2.VideoCapture('vtest.avi')
# Capturing video from webcam:
cap = cv2.VideoCapture(0)
currentFrame = 0
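The snippet stops here; a hedged continuation of the capture loop in the spirit of the linked OpenCV tutorial (display each frame, count frames, quit on 'q'):

while True:
    # capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        break
    # display the resulting frame and keep a running frame count
    cv2.imshow('frame', frame)
    currentFrame += 1
    # quit on 'q'
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# when everything is done, release the capture
cap.release()
cv2.destroyAllWindows()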
import socket

hostname, sld, tld, port = 'www', 'integralist', 'co.uk', 80
target = '{}.{}.{}'.format(hostname, sld, tld)

# create an ipv4 (AF_INET) socket object using the tcp protocol (SOCK_STREAM)
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# connect the client
# client.connect((target, port))
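The connect call is left commented out in the original; a hedged sketch of how the snippet could continue once it is enabled (the HTTP request text is an assumption, not from the original):

client.connect((target, port))

# send a minimal HTTP/1.1 request; the exact request is an assumption
request = 'GET / HTTP/1.1\r\nHost: {}\r\nConnection: close\r\n\r\n'.format(target)
client.sendall(request.encode('utf-8'))

# read until the server closes the connection
response = b''
while True:
    chunk = client.recv(4096)
    if not chunk:
        break
    response += chunk
client.close()

print(response.decode('utf-8', errors='replace'))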
#!/usr/bin/env python
import paho.mqtt.client as mqtt
import RPi.GPIO as GPIO

def on_connect(client, userdata, rc):
    #print ("Connected with rc: " + str(rc))
    client.subscribe("kwf/demo/led")

def on_message(client, userdata, msg):
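    # Continuation sketch, not from the original gist: it assumes the payload is
    # b"on"/b"off", the LED is wired to BCM pin 17, and a paho-mqtt version whose
    # on_connect callback takes (client, userdata, rc).
    if msg.payload == b"on":
        GPIO.output(17, GPIO.HIGH)
    else:
        GPIO.output(17, GPIO.LOW)

GPIO.setmode(GPIO.BCM)
GPIO.setup(17, GPIO.OUT)

client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("localhost", 1883, 60)  # broker address is an assumption
client.loop_forever()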
'''
Author: Brian Oliver II
Instagram: bolo_ne3
License:
MIT License
Copyright (c) 2016 Brian Oliver II
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
With the availability of huge amounts of data for research and powerful machines to run your code on, machine learning and neural networks are gaining a foothold again and impacting our everyday lives more than ever. With major players like Google open-sourcing parts of their machine learning systems, such as the TensorFlow software library for numerical computation, anyone interested in getting started with machine learning and neural networks has plenty of options to choose from. Caffe, a deep learning framework developed by the Berkeley Vision and Learning Center (BVLC) and its contributors, comes into play with a fresh cup of coffee.
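As a quick taste of what working with Caffe looks like from Python (pycaffe), here is a minimal sketch; the prototxt and caffemodel paths, and the 'data' blob name, are placeholders rather than anything from this post:

import caffe

caffe.set_mode_cpu()
# model definition and pretrained weights; file paths are placeholders
net = caffe.Net('deploy.prototxt', 'model.caffemodel', caffe.TEST)
print(net.blobs['data'].data.shape)  # shape of the (assumed) input blob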
The following section is divided into two parts. Caffe's documentation suggests
# Note – this is not a bash script (some of the steps require reboot)
# I named it .sh just so Github does correct syntax highlighting.
#
# This is also available as an AMI in us-east-1 (virginia): ami-cf5028a5
#
# The CUDA part is mostly based on this excellent blog post:
# http://tleyden.github.io/blog/2014/10/25/cuda-6-dot-5-on-aws-gpu-instance-running-ubuntu-14-dot-04/

# Install various packages
sudo apt-get update
#!/usr/bin/python3
# required libraries
import sys
import ssl
import paho.mqtt.client as mqtt

# called while client tries to establish connection with the server
def on_connect(mqttc, obj, flags, rc):
    if rc == 0:
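        # Continuation sketch, not from the original snippet: the topic name,
        # broker host, and CA certificate path below are placeholder assumptions.
        print("Connected to the broker")
        mqttc.subscribe("demo/topic", qos=0)   # topic name is an assumption
    else:
        print("Connection refused, rc=" + str(rc))
        sys.exit(1)

# called when a message arrives on a subscribed topic
def on_message(mqttc, obj, msg):
    print(msg.topic + " " + str(msg.payload))

mqttc = mqtt.Client()
mqttc.on_connect = on_connect
mqttc.on_message = on_message

# enable TLS towards the broker; the CA file path is a placeholder
mqttc.tls_set("/path/to/ca.crt", tls_version=ssl.PROTOCOL_TLSv1_2)
mqttc.connect("broker.example.com", 8883, 60)  # hypothetical broker, standard MQTT-over-TLS port
mqttc.loop_forever()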