Look at LSB init scripts for more information.
Copy to /etc/init.d
:
# replace "$YOUR_SERVICE_NAME" with your service's name (where it is not obvious enough)
/* Function: al_transform_rotation | |
*/ | |
void al_transform_rotation(const ALLEGRO_TRANSFORM *trans, float* rotation) | |
{ | |
float t = *rotation; | |
*rotation = t - atan2f(trans->m[0][1], trans->m[0][0]); | |
} |
/* Releases resources held by a bitmap-source record. For BS_NORMAL sources
 * with a name_to_bitmap tree, iterates the AA-tree to free each string key.
 * NOTE(review): truncated in this chunk — the while-loop body and the rest
 * of the function are not visible here.
 */
static void bitmap_source_release_content(ALLEGRO_GC_BITMAPSOURCE *bitmap_source) | |
{ | |
if ( bitmap_source->type == BS_NORMAL ) | |
{ | |
if ( bitmap_source->name_to_bitmap ) | |
{ | |
/* free string key */ | |
AL_AATREE_ITERATOR* iterator = al_aa_begin(bitmap_source->name_to_bitmap); | |
while (iterator && al_aa_next(iterator)) |
Look at LSB init scripts for more information.
Copy to /etc/init.d
:
# replace "$YOUR_SERVICE_NAME" with your service's name (where it is not obvious enough)
http://pin.it/zuSovQT
//From http://stackoverflow.com/questions/111102/how-do-javascript-closures-work | |
function sayHello2(name) {
  // Build the greeting once; the function returned below closes over it,
  // so callers can still print it after sayHello2 has returned.
  var text = 'Hello ' + name; // Local variable
  var greet = function () {
    console.log(text);
  };
  return greet;
}
// Demonstrate the closure: say2 keeps access to the `text` variable created
// inside the sayHello2('Bob') call even after that call has returned.
var say2 = sayHello2('Bob'); | |
say2(); // logs "Hello Bob" |
import android.media.MediaCodec; | |
import android.media.MediaCodecInfo; | |
import android.media.MediaFormat; | |
import android.util.Log; | |
import java.io.File; | |
import java.io.FileInputStream; | |
import java.io.FileNotFoundException; | |
import java.io.FileOutputStream; |
// Split a positive MIDI note number into its octave (midiNote / 12) and its
// pitch class within the octave (midiNote % 12), then normalize the pitch
// class depending on octave parity.
// NOTE(review): truncated fragment — the else branch for odd octaves and the
// enclosing method are not visible in this chunk.
if (midiNote > 0) | |
{ | |
int octave = midiNote / 12; | |
int noteNum = midiNote % 12; | |
int normalizedNote; | |
if (octave % 2 == 0) | |
{ | |
// even octave: pitch class is used as-is
normalizedNote = noteNum; | |
} |
#!/bin/bash
# For every directory directly inside the current one (including "."),
# print "<dir>:<TAB>" followed by the number of regular files anywhere
# beneath it.
find -maxdepth 1 -type d | while read -r dir; do
    printf '%s:\t' "$dir"
    find "$dir" -type f | wc -l
done
""" | |
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy) | |
BSD License | |
""" | |
import numpy as np | |
# data I/O
# Read the training corpus; the `with` block guarantees the file handle is
# closed (the original `open(...).read()` left it open until GC).
with open('input.txt', 'r') as f:
    data = f.read()  # should be simple plain text file
# Vocabulary: every distinct character appearing in the corpus.
# NOTE(review): set iteration order varies between runs, so the char<->index
# mapping is not reproducible; sorted(set(data)) would fix that, but it would
# change the mapping, so behavior is left as-is.
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
import audio_io | |
import numpy | |
# Load both recordings, then flatten each one to a 1-D sample array.
audio1, _ = audio_io.ReadWavFile('voice_nodelay.wav')
audio2, _ = audio_io.ReadWavFile('voice_mic_blend3.wav')
audio1 = audio1.ravel()
audio2 = audio2.ravel()
# Truncate all signals same length, then pad to avoid boundary effects. |