This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
api.get('/route', function (request) { | |
/* .... Body of the router */ | |
return data.toString("base64") // 1. route body should return response in Base64 String format. | |
},{ // <-- 2. params required for binary response. | |
success: | |
{ | |
contentType: 'image/png', |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Module setup for an image-resizing API built on Claudia API Builder.
// `sh` (sharp) presumably performs the image processing — used further
// down this file. `req` fetches source images; `encoding: null` makes
// `request` deliver response bodies as raw Buffers instead of strings.
const sh = require("sharp");
const ApiBuilder = require("claudia-api-builder");
const req = require("request").defaults({ encoding: null });

// Build and export the API in one assignment; the original assigned
// module.exports twice (here and again on a later line), which was
// redundant.
const api = module.exports = new ApiBuilder();
/* | |
Router to resize the image |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/*
 * Searches a large sorted sequence of strings using binary search.
 *
 * @param searchTerm the string to search for
 * @param StringSeq the sorted sequence of strings to search
 * @param position offset added to the returned index (defaults to 0)
 * @return the position of searchTerm in the sequence, or -1 if not found
 * */
def findEff(searchTerm:String,StringSeq:Seq[String],position:Int = 0):Int = { |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Summary statistics over the parsed access-log DataFrame (`logs`,
// built earlier in this file from the `log` case class).

// Largest value of the `size` column (global aggregate, no grouping key).
logs.groupBy().max("size").show()
// Smallest value of the `size` column.
logs.groupBy().min("size").show()
// Mean value of the `size` column.
logs.groupBy().avg("size").show()
// Hit count per HTTP response code. The original prefixed this with
// .select("response"), which is redundant: groupBy("response").count()
// already produces only the response and count columns.
logs.groupBy("response").count().show()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// One parsed access-log record; field order mirrors the capture groups
// of the parsing regex used elsewhere in this file.
case class log(
    ip: String,
    date: String,
    hour: Int,
    min: Int,
    sec: Int,
    methodType: String,
    uri: String,
    protocol: String,
    response: Int,
    size: Int)

// Parses a raw log line into a `log` record using the supplied regex.
// Exactly like the original refutable-pattern version, a line that does
// not match throws scala.MatchError — callers are expected to pre-filter
// their input. `assertInt` is defined elsewhere in this file; it
// presumably converts the size field safely — TODO confirm.
def parseLog(line: String, logRegex: scala.util.matching.Regex): log =
  line match {
    case logRegex(ip, date, hour, min, sec, methodType, uri, protocol, response, size) =>
      log(ip, date, hour.toInt, min.toInt, sec.toInt,
          methodType, uri, protocol, response.toInt, assertInt(size))
  }
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Regex capturing, in order: ip, date, hour, min, sec, method, uri,
// protocol (text before the "/version"), response code, size.
val logRegex = """^(\S+) - - \[(\S+):(\S+):(\S+):(\S+) -\S+] "(\S+) (\S+) (\S+)\/\S+ (\S+) (\S+)""".r

// Load the raw log into the Spark context.
// NOTE(review): "..FILE_PATH" is a placeholder — point this at the real
// access_log before running.
val logFile = sc.textFile("..FILE_PATH/access_log")

// Keep only lines the regex fully matches, parse each into a `log`
// record, and convert to a DataFrame. Using the precompiled
// java.util.regex Matcher avoids recompiling the pattern on every line,
// which the original `line.matches(logRegex.toString)` did; Matcher's
// matches() keeps the same whole-string-match semantics.
val logs = logFile
  .filter(line => logRegex.pattern.matcher(line).matches())
  .map(line => parseLog(line, logRegex))
  .toDF()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Regex capturing, in order: ip, date, hour, min, sec, method, uri,
// protocol (text before the "/version"), response code, size.
val logRegex = """^(\S+) - - \[(\S+):(\S+):(\S+):(\S+) -\S+] "(\S+) (\S+) (\S+)\/\S+ (\S+) (\S+)""".r

// Load the raw log into the Spark context.
val logFile = sc.textFile("../hacks/spark/feelers/loganalyzer/access_log")

// Keep only lines the regex fully matches, parse each into a `log`
// record, and convert to a DataFrame. Using the precompiled
// java.util.regex Matcher avoids recompiling the pattern on every line,
// which the original `line.matches(logRegex.toString)` did; Matcher's
// matches() keeps the same whole-string-match semantics.
val logs = logFile
  .filter(line => logRegex.pattern.matcher(line).matches())
  .map(line => parseLog(line, logRegex))
  .toDF()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Regex capturing, in order: ip, date, hour, min, sec, method, uri,
// protocol (text before the "/version"), response code, size.
val logRegex = """^(\S+) - - \[(\S+):(\S+):(\S+):(\S+) -\S+] "(\S+) (\S+) (\S+)\/\S+ (\S+) (\S+)""".r

// Load the raw log into the Spark context.
val logFile = sc.textFile("../hacks/spark/feelers/loganalyzer/access_log")

// Keep only lines the regex fully matches, parse each into a `log`
// record, and convert to a DataFrame. Using the precompiled
// java.util.regex Matcher avoids recompiling the pattern on every line,
// which the original `line.matches(logRegex.toString)` did; Matcher's
// matches() keeps the same whole-string-match semantics.
val logs = logFile
  .filter(line => logRegex.pattern.matcher(line).matches())
  .map(line => parseLog(line, logRegex))
  .toDF()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Regex for one access-log line. Capture groups, in order:
// ip, date, hour, min, sec, method, uri, protocol (text before the
// "/version"), response code, size. The timezone and the protocol
// version are matched but not captured.
val logRegex = """^(\S+) - - \[(\S+):(\S+):(\S+):(\S+) -\S+] "(\S+) (\S+) (\S+)\/\S+ (\S+) (\S+)""".r
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// Regex for one access-log line. Capture groups, in order:
// ip, date, hour, min, sec, method, uri, protocol (text before the
// "/version"), response code, size. The timezone and the protocol
// version are matched but not captured.
val logRegex = """^(\S+) - - \[(\S+):(\S+):(\S+):(\S+) -\S+] "(\S+) (\S+) (\S+)\/\S+ (\S+) (\S+)""".r