Author: https://github.com/seanorama
Note: This was tested on HDP 3.1. It may not work with other Spark/YARN distributions.
var mediaJSON = { "categories" : [ { "name" : "Movies",
    "videos" : [
        { "description" : "Big Buck Bunny tells the story of a giant rabbit with a heart bigger than himself. When one sunny day three rodents rudely harass him, something snaps... and the rabbit ain't no bunny anymore! In the typical cartoon tradition he prepares the nasty rodents a comical revenge.\n\nLicensed under the Creative Commons Attribution license\nhttp://www.bigbuckbunny.org",
            "sources" : [ "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4" ],
            "subtitle" : "By Blender Foundation",
            "thumb" : "images/BigBuckBunny.jpg",
            "title" : "Big Buck Bunny"
        },
        { "description" : "The first Blender Open Movie from 2006",
            "sources" : [ "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ElephantsDream.mp4" ],
:%s/\r//g 删除DOS方式的回车^M
:%s= *$== 删除行尾空白
:%s/^\(.*\)\n\1$/\1/ 删除重复行
:%s/^.\{-}pdf/new.pdf/ 只是删除第一个pdf
:%s/<!--\_.\{-}-->// 又是删除多行注释(咦?为什么要说「又」呢?)
:g/^\s*$/d 删除所有空行 :这个好用有没有人用过还有其他的方法吗?
Author: https://github.com/seanorama
Note: This was tested on HDP 3.1. It may not work with other Spark/YARN distributions.
###############################################################################
# Helpful Docker commands and code snippets
###############################################################################
### CONTAINERS ###
docker stop $(docker ps -a -q) # stop ALL containers
docker rm -f $(docker ps -a -q) # force-remove ALL containers
docker rm -f $(sudo docker ps --before="container_id_here" -q) # can also filter, e.g. only containers created before a given one
# exec into a running container
import android.os.Build; | |
import java.lang.reflect.Method; | |
public class Device { | |
/**
 * @return The device's serial number, visible to the user in {@code Settings > About phone/tablet/device > Status
 * > Serial number}, or {@code null} if the serial number couldn't be found
 */
public static String getSerialNumber() { |
$ ab -n 10 -c 2 -p /Users/post_file.txt -T "multipart/form-data; boundary=1234567890" http://localhost/upload
post_file.txt (use CRLF line-endings, with a blank line between the headers and the body):
--1234567890
Content-Disposition: form-data; filename="file.png"
Content-Type: application/octet-stream
Content-Transfer-Encoding: binary

<base64 encoded file>
import pandas as pd

# Spark context: create a local SparkContext for the applyParallel helper below.
# NOTE(review): table-extraction residue (" | |") after each statement was a
# SyntaxError; removed. Assumes no SparkContext already exists — SparkContext()
# raises if one is active in this process.
import pyspark

sc = pyspark.SparkContext()

# apply parallel
def applyParallel(dfGrouped, func): | |
# rdd with the group of dataframes |
curl http://spark-cluster-ip:6066/v1/submissions/status/driver-20151008145126-0000
/**
 * Change the process definition ID of a running process instance.
 * @param processInstanceId the ID of the process instance to migrate
 * @param processDefinitionId the ID of the target process definition
 */
@Transactional | |
public void changeProcessDefinitionId(String processInstanceId, String processDefinitionId) { | |
long count = repositoryService.createProcessDefinitionQuery().processDefinitionId(processDefinitionId).count(); | |
if (count == 0) { | |
throw new ServiceException("指定的流程定义不存在!"); |
import java.io.BufferedReader; | |
import java.io.IOException; | |
import java.io.InputStreamReader; | |
import org.apache.http.HttpResponse; | |
import org.apache.http.client.methods.HttpPost; | |
import org.apache.http.entity.StringEntity; | |
import org.apache.http.impl.client.DefaultHttpClient; | |
public class TestLogin { |