A "Best of the Best Practices" (BOBP) guide to developing in Python.
- "Build tools for others that you want to be built for you." - Kenneth Reitz
- "Simplicity is always better than functionality." - Pieter Hintjens
## R setup (https://github.com/hoxo-m/easyRFM)
# easyRFM is not on CRAN, so install it from GitHub via devtools.
install.packages("devtools") # if you have not installed the "devtools" package
devtools::install_github("hoxo-m/easyRFM")
library(easyRFM)
## setwd("~/Desktop")  # avoid setwd() in scripts; prefer relative paths

## Load / View Data
# Generate 30,000 synthetic transactions over 2014-2015; the fixed seed
# makes the sample reproducible across runs.
data <- rfm_generate_data(30000, begin = "2014-01-01", end = "2015-12-31", seed = 123)
summary(data$payment)
## Twitter API credentials (placeholders — substitute your own values).
## NOTE(review): never commit real secrets to version control; prefer
## environment variables via Sys.getenv().
consumer_key    <- "your-consumer-key"
consumer_secret <- "your-consumer-secret"
access_token    <- "your-access-token"
access_secret   <- "your-access-secret"
## Load the requisite libraries into R
library("xlsx")
library("choroplethr")
library("choroplethrAdmin1")
library("ggplot2")

# get_admin1_regions() returns a two-column data frame: column 1 repeats the
# country name ("india") on every row, column 2 holds the region names.
indianregions <- get_admin1_regions("india")
var landGrid = [ | |
{ | |
"X": "1", | |
"Y": "1" | |
}, | |
{ | |
"X": "1", | |
"Y": "2" | |
}, | |
{ |
import pandas as pd
import time
from nltk.sentiment import SentimentIntensityAnalyzer

# Record the start time so the scoring run can be timed later.
t0 = time.time()

# Load the top-100 words to score.
# NOTE(review): hard-coded user-specific path — adjust for your machine.
top_100 = pd.read_csv('/Users/brit.cava/Desktop/TabPy/top100.csv')
text = top_100['Word']

# VADER-based sentiment analyzer from NLTK; words are passed through it
# to obtain polarity scores.
sid = SentimentIntensityAnalyzer()
###############################################################################
# Batch script for setting up fun Python libraries for Computational Methods
# in the Civic Sphere 2017
#
# Should be run after installing Anaconda 4.2+/Python 3.5+ via
# https://www.continuum.io/downloads
#
# Doesn't include libraries that are installed as dependencies
# (e.g. numpy via pandas)
###############################################################################
###### Yes
# SCRIPT_REAL is a function in Tableau which returns a result from an external
# service script. It is in this function that we pass the Python code.
SCRIPT_REAL("from nltk.sentiment import SentimentIntensityAnalyzer | |
text = _arg1 #you have to use _arg1 to reference the data column you're analyzing, in this case [Word]. It gets word further down after the , | |
scores = [] #this is a python list where the scores will get stored | |
sid = SentimentIntensityAnalyzer() #this is a class from the nltk (Natural Language Toolkit) library. We'll pass our words through this to return the score | |
for word in text: # this loops through each row in the column you pass via _arg1; in this case [Word] | |
ss = sid.polarity_scores(word) #passes the word through the sentiment analyzer to get the score |
# Load packages: rvest (scraping), magrittr (pipes), dplyr/purrr/tidyr
# (data wrangling), lubridate (dates), ggplot2/scales (plotting).
library(rvest)
library(magrittr)
library(dplyr)
library(purrr)
library(lubridate)
library(tidyr)
library(ggplot2)
library(scales)

# Placeholder — replace with your actual project directory. Prefer relative
# paths or RStudio projects over setwd() in shared scripts.
setwd("...working directory...")
Code Complete by Steve McConnell
Jeff Atwood (Coding Horror)
https://blog.codinghorror.com/code-reviews-just-do-it/
Measuring Defect Potentials and Defect Removal Efficiency
http://rbcs-us.com/site/assets/files/1337/measuring-defect-potentials-and-defect-removal-efficiency.pdf
Expectations, Outcomes, and Challenges Of Modern Code Review
https://www.microsoft.com/en-us/research/publication/expectations-outcomes-and-challenges-of-modern-code-review/