I am attesting that this GitHub handle nivdul is linked to the Tezos account tz1VbDP9Mytydszm7o1EGx9v1mPyL68BeNiW for tzprofiles
sig:edsigtuSptFpj31FdpCxtt9Rii9PdGcrVu3CTTh3JCnFbubXRpN2qiaoEBGeaQnfNgDUAAYJ1ZS3wNPdt9zSatC1mM4yHUrEU66
@nivdul
nivdul / BubbleSortSwapExample.java
Created September 14, 2018 09:50
BubbleSortSwapExample
public class BubbleSortSwap {
  /**
   * Swap the elements at indexes a and b in an array.
   */
  static void swap(int[] arr, int a, int b) {
    int temp = arr[a];
    arr[a] = arr[b];
    arr[b] = temp;
  }
}
@nivdul
nivdul / BubbleSortExample.java
Last active September 14, 2018 09:07
BubbleSortExample
public class BubbleSortExample {
  static int[] bubbleSort(int[] arr) {
    int n = arr.length;
    int temp = 0;
    for (int i = 0; i < n; i++) {
      for (int j = 1; j < (n - i); j++) {
        if (arr[j - 1] > arr[j]) { temp = arr[j - 1]; arr[j - 1] = arr[j]; arr[j] = temp; } // swap out-of-order neighbours
      }
    }
    return arr;
  }
}
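A tiny usage sketch (hypothetical driver code, assuming java.util.Arrays is imported):
int[] sorted = BubbleSortExample.bubbleSort(new int[]{5, 1, 4, 2, 8});
System.out.println(Arrays.toString(sorted)); // prints [1, 2, 4, 5, 8]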
{"id":"572692378957430785",
"user":"Srkian_nishu :)",
"text":"@always_nidhi @YouTube no i dnt understand bt i loved the music nd their dance awesome all the song of this mve is rocking",
"place":"Orissa",
"country":"India"}
@nivdul
nivdul / wordcount.java
Last active January 19, 2021 15:03
wordcount
// create the Spark configuration and context
SparkConf conf = new SparkConf().setAppName("Wordcount").setMaster("local[*]");
JavaSparkContext sc = new JavaSparkContext(conf);
// load the data and create an RDD of strings
JavaRDD<String> tweets = sc.textFile("path_To_File");
JavaPairRDD<String, Integer> wordcount = tweets.flatMap(line -> Arrays.asList(line.split(" ")))
        // mapper step: emit a (word, 1) pair for each word
        .mapToPair(word -> new Tuple2<>(word, 1))
        // reducer step: sum the counts of each word
        .reduceByKey((x, y) -> x + y);
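One possible way to materialise the result, assuming the Spark 1.x Java API used above (the output path is a hypothetical placeholder):
// print a few counts on the driver, save the full result, then stop the context
wordcount.take(10).forEach(pair -> System.out.println(pair._1() + " : " + pair._2()));
wordcount.saveAsTextFile("path_To_Output");
sc.stop();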
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
int numClasses = 4;
String impurity = "gini";
int maxDepth = 9;
int maxBins = 32;
// create model
final DecisionTreeModel model = DecisionTree.trainClassifier(trainingData, numClasses, categoricalFeaturesInfo, impurity, maxDepth, maxBins);
// Evaluate model on training instances and compute training error
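The preview stops at the evaluation comment; a minimal sketch of one way to carry that step out with the MLlib API, assuming trainingData is the cached JavaRDD<LabeledPoint> produced by the split in the next gist:
// predict on the training set and compare each prediction with its label
JavaPairRDD<Double, Double> predictionAndLabel =
        trainingData.mapToPair(p -> new Tuple2<>(model.predict(p.features()), p.label()));
double trainErr = predictionAndLabel.filter(pl -> !pl._1().equals(pl._2())).count()
        / (double) trainingData.count();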
@nivdul
nivdul / gist:246dbe803a2345b7bf5b
Created April 19, 2015 15:12
Split data sets
// Split the data into two sets: training (60%) and test (40%).
JavaRDD<LabeledPoint>[] splits = data.randomSplit(new double[]{0.6, 0.4});
JavaRDD<LabeledPoint> trainingData = splits[0].cache();
JavaRDD<LabeledPoint> testData = splits[1];
@nivdul
nivdul / gist:77225c0efee45a860d30
Last active August 29, 2015 14:19
Average time between peaks
public Double computeAvgTimeBetweenPeak(JavaRDD<long[]> data) {
// define the maximum using the max function from MLlib
double[] max = this.summary.max().toArray();
// keep the timestamps of the data points whose value is greater than 0.9 * max,
// then sort them in ascending order
JavaRDD<Long> filtered_y = data.filter(record -> record[1] > 0.9 * max[1])
.map(record -> record[0])
.sortBy(time -> time, true, 1);
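The preview ends before the average gap is computed; one possible way to finish, using the fact that consecutive differences of a sorted sequence telescope (assumes at least two timestamps remain after filtering):
// with sorted timestamps t_1..t_n, the mean gap is (t_n - t_1) / (n - 1)
long n = filtered_y.count();
long first = filtered_y.first();
long last = filtered_y.top(1).get(0);
return (last - first) / (double) (n - 1);
}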
@nivdul
nivdul / gist:666310c767cb6ef97503
Created April 19, 2015 15:10
Average Resultant acceleration
/**
* @return Double resultant = 1/n * ∑ √(x² + y² + z²)
*/
public static double computeResultantAcc(JavaRDD<double[]> data) {
// first compute the sum of squares x² + y² + z² for each record,
// then take the square root: √(x² + y² + z²),
// and finish with a mean: 1/n * ∑ √(x² + y² + z²)
JavaRDD<Double> squared = data.map(record -> Math.pow(record[0], 2)
                                           + Math.pow(record[1], 2)
                                           + Math.pow(record[2], 2));
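The gist preview cuts off here; one possible way to finish the two remaining steps described in the comments above (a sketch, not necessarily the original implementation):
// take the square root of each sum of squares, then average over all records
JavaRDD<Double> resultant = squared.map(x -> Math.sqrt(x));
return resultant.reduce((a, b) -> a + b) / resultant.count();
}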
@nivdul
nivdul / gist:1ee82f923991fea93bc6
Created April 19, 2015 15:08
Average absolute difference
/**
* @return double[] [ (1 / n) * ∑ |b - mean_b|, for b in {x, y, z} ]
*/
public static double[] computeAvgAbsDifference(JavaRDD<double[]> data, double[] mean) {
// for each point compute the difference to the mean, component by component,
// then apply an absolute value: |x - mean|
JavaRDD<double[]> abs = data.map(record -> new double[]{Math.abs(record[0] - mean[0]),
                                                        Math.abs(record[1] - mean[1]),
                                                        Math.abs(record[2] - mean[2])});
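The preview stops before the averaging step; a minimal sketch of one way to finish it (sum the absolute differences component-wise, then divide by the record count):
// sum the absolute differences axis by axis, then divide by the number of records
double[] sum = abs.reduce((a, b) -> new double[]{a[0] + b[0], a[1] + b[1], a[2] + b[2]});
long n = abs.count();
return new double[]{sum[0] / n, sum[1] / n, sum[2] / n};
}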