# ---- moments.R ----
first_moment<-function(s_0,r,TotalTime) | |
{ | |
(s_0*(exp(r*TotalTime)-1))/(r*TotalTime) | |
} | |
second_moment<-function(s_0,r,s,TotalTime) | |
{ | |
part1 = (2*s_0^2)/(r*(r+s^2)*(2*r+s^2)*TotalTime^2) | |
part2 = r*exp((2*r+s^2)*TotalTime)-(2*r+s^2)*exp(r*TotalTime)+r+s^2 |
# ---- simulationStudy.R ----
library(DEoptim) | |
# Parameters | |
mu_= 0.05 | |
ss_= 0.3 | |
lambda_= 25 | |
mu2_=0.01 | |
sigma_=0.1 | |
TotalTime = 50 | |
delta=1/252 |
# ---- simulateJump.R ----
simulateJump=function(mu_,ss_,lambda_,mu2_,sigma_,TotalTime,delta) | |
{ | |
Sn=0 | |
times <- c(0) | |
while(Sn <= TotalTime) | |
{ | |
n <- length(times) | |
u <- runif(1) | |
expon <- -log(u)/lambda_ | |
Sn <- times[n]+expon |
# ---- jumpDiffusionNegativeLogLikelihood.R ----
# This is the code for the negative log-likelihood function
# for the jump-diffusion model.
nll_jumps<-function(w) | |
{ | |
mu=w[1] # drift | |
s=(w[2]) # diffsuive vol | |
lam=(w[3]) # jump frequency | |
mu_j=w[4] # jump size |
# ---- Create corpus.R ----
library(httr) | |
library(httpuv) | |
library(RColorBrewer) | |
library(twitteR) | |
library(tm) | |
library(wordcloud) | |
library(base64enc) | |
library(devtools) | |
library(ROAuth) |
# ---- wordcloud.R ----
texts_from_tweets=cleanTweets(texts_from_tweets) | |
tweets_corpus = Corpus(VectorSource(texts_from_tweets)) | |
tweets_corpus_cleaned<- tm_map(tweets_corpus, function(x)removeWords(x,stopwords())) | |
# create document term matrix applying some transformations | |
add_more_stopwords=c("reports","report","opinion","column","can","will","still","read","wants","says","national","world","sport","life","video", | |
"lunchbox","comment","must","miss","new","editorial","popular", "cartoon","international","national","politics","companies", | |
"business","day","top","week","markets","economy","subscribe" , "case","missed","ahead","editor","premium","tomorrow","stories", | |
"click","keep","needs","interview","moneyweb","year","soapbox","news","expo","register","today","need","now","podcast","lineup", | |
"bafana","line-up","reader","question","wednesday","conversation","money","company","writes","how", |
# ---- Clean Tweet.R ----
# Clean the tweets by removing unwanted symbols/characters.
cleanTweets <- function(tweets) | |
{ | |
tweets = gsub("@", "", tweets) | |
tweets = gsub("@\\w+", " ", tweets) | |
tweets = gsub("https", "", tweets) | |
tweets <- gsub("[ |\t]{2,}", " ", tweets) | |
tweets <- gsub("[ |\t]{2,}", " ", tweets) | |
tweets <- gsub("amp", " ", tweets) | |
tweets <- gsub("^ ", "", tweets) |
# ---- Get the Tweets.R ----
#This code reads in the tweets from the timelines of the three news websites | |
#Note that there is a limit of 3200 tweets that can be extracted from a users timeline. | |
fin24Tweets <- userTimeline('Fin24', n=3200) | |
bdLiveTweets <- userTimeline('BDliveSA', n=3200) | |
moneyWebTweets <- userTimeline('Moneyweb', n=3200) |
# ---- Twitter Authentication.R ----
#Creating a twitter authentication in R | |
#The infomation can be found under the "Keys and Acces Tokens" tab of your twitter aplication. | |
api_key <- "Enter your API key here" #This is called the: Consumer Key (API Key) | |
api_secret <- "Enter your secret API key here" #This is called the: Consumer Secret (API Secret) | |
access_token <- "Enter your access token here" #This is called the: Access Token | |
access_token_secret <- "Enter your secret access token here" #This is called the: Access Token Secret | |
#Run this code to set up the twitter authentication. | |
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret) |
# ---- Fitting a Jump Diffusion.r ----
########## This code calculates non-parametric estimates of the jump-diffusion parameters ##########
simulateJump=function(mu_,ss_,lambda_,mu2_,sigma_,TotalTime,delta) | |
{ | |
Sn=0 | |
times <- c(0) | |
while(Sn <= TotalTime) | |
{ | |
n <- length(times) | |
u <- runif(1) | |
expon <- -log(u)/lambda_ |
# (End of excerpt; "Newer"/"Older" pagination links removed from the source listing.)