この文章ではCNN実装であるCaffeを用いて,特徴ベクトルの抽出やパラメータの学習を行うための方法について説明する.
以下の作業を行いたいのであれば,Caffeを用いることが望ましい.
- CNNを利用した画像の多クラス分類
- CNNによる特徴ベクトルの抽出
- CNNの転移学習
- Stacked Autoencoder
using UnityEngine; | |
using System.Collections; | |
using System.Collections.Generic; | |
public class DrawDraggingLine : MonoBehaviour | |
{ | |
//ドラッグ中に真. | |
private bool isDragging = false; | |
//線の座標リスト. | |
private List<Vector3> linePoints = new List<Vector3> (); |
; | |
; Apple Wireless Keyboard Eject Key and Function Key remapper for Windows. | |
; You can get the latest version of this file at: | |
; https://gist.github.com/TheZoc/a913642e9bedf62ba3ef | |
; | |
;========================================================================================================================================== | |
; Please, if you make a significant change, fix or would like to improve this script, | |
; I'd really appreciate if you can contact me so we can merge both works :) | |
;========================================================================================================================================== | |
; |
{ | |
"name": "LEDPatternTest", | |
"description": "Testing WS2801 LED Strip on Intel Edison", | |
"version": "0.0.0", | |
"main": "main.js", | |
"engines": { | |
"node": ">=0.10.0" | |
}, | |
"dependencies": { "mqtt": "^0.3.11" } | |
} |
import re

# Matches hashtags in free text. The leading noncapturing group requires the
# start of the string or a preceding whitespace character, so fragment anchors
# such as "example.com/#section" are NOT captured. The character class accepts
# both the ASCII "#" and the fullwidth "＃" (U+FF03); without the fullwidth
# hashmark, hashtags in Asian-language text would be missed.
# Group 1 captures the tag body: one or more word characters (Unicode-aware
# via re.UNICODE, so CJK and kana characters match \w).
# NOTE: the pattern is a raw string — with a plain string literal, "\s" and
# "\w" are invalid string escapes and raise a warning on modern Python.
hashtag_re = re.compile(r"(?:^|\s)[#＃](\w+)", re.UNICODE)
var fs = require('fs'); | |
var filePath = process.argv[2]; | |
var content = null; | |
fs.readFile(filePath, 'utf8', function (err, data) { | |
if (err) { | |
console.log('Error: ' + err); | |
return; | |
} |
The regex patterns in this gist are intended only to match web URLs -- http, | |
https, and naked domains like "example.com". For a pattern that attempts to | |
match all URLs, regardless of protocol, see: https://gist.github.com/gruber/249502 | |
# Single-line version: | |
(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|s |
# -*- coding: utf-8 -*- | |
import datetime | |
from numpy import asarray, ceil | |
import pandas | |
import rpy2.robjects as robjects | |
def stl(data, ns, np=None, nt=None, nl=None, isdeg=0, itdeg=1, ildeg=1, | |
nsjump=None, ntjump=None, nljump=None, ni=2, no=0, fulloutput=False): |
#!/usr/bin/env python | |
# -*- coding: utf-8 -*- | |
import csv | |
import codecs | |
import numpy as np | |
import MeCab | |
from sklearn.feature_extraction.text import TfidfVectorizer | |
from sklearn.cluster import KMeans, MiniBatchKMeans |