Skip to content

Instantly share code, notes, and snippets.

View WilsonMongwe's full-sized avatar

Wilson Mongwe WilsonMongwe

View GitHub Profile
# First moment of the time-averaged asset price under a model with
# initial price s_0 and continuously compounded drift r (this matches
# (1/T) * integral_0^T s_0*exp(r*t) dt in closed form).
#
# Args:
#   s_0:       initial asset price
#   r:         continuously compounded drift rate
#   TotalTime: averaging horizon T
#
# Returns: s_0 * (exp(r*T) - 1) / (r*T).
# The expression has a removable singularity at r*T = 0 (the original
# code returned NaN there); the limit is s_0, returned explicitly.
first_moment <- function(s_0, r, TotalTime) {
  x <- r * TotalTime
  if (isTRUE(all.equal(x, 0))) {
    # lim_{x -> 0} (exp(x) - 1) / x = 1, so the moment collapses to s_0.
    return(s_0)
  }
  (s_0 * (exp(x) - 1)) / x
}
# Second moment of the time-averaged price under the same model as
# first_moment, with diffusive volatility s added as a parameter.
# NOTE(review): this definition is truncated in this view -- part1 and
# part2 are computed, but the end of the function body (presumably
# combining them, e.g. part1 * part2) and the closing brace are not
# visible here. Confirm against the full gist.
second_moment<-function(s_0,r,s,TotalTime)
{
# Normalising prefactor of the closed-form second-moment expression.
part1 = (2*s_0^2)/(r*(r+s^2)*(2*r+s^2)*TotalTime^2)
# Exponential terms of the closed-form second-moment expression.
part2 = r*exp((2*r+s^2)*TotalTime)-(2*r+s^2)*exp(r*TotalTime)+r+s^2
# DEoptim provides the differential-evolution optimiser used elsewhere
# in this gist for model calibration.
library(DEoptim)

# Model parameters ----
mu_     <- 0.05    # drift
ss_     <- 0.3     # diffusive volatility
lambda_ <- 25      # jump intensity
mu2_    <- 0.01    # NOTE(review): presumably jump-size mean -- confirm
sigma_  <- 0.1     # NOTE(review): presumably jump-size volatility -- confirm
TotalTime <- 50    # simulation horizon
delta   <- 1 / 252 # time step: one trading day
# Simulates jump arrival times of a Poisson process with intensity
# lambda_ over [0, TotalTime]: inter-arrival times are sampled as
# Exponential(lambda_) via the inverse CDF, -log(U)/lambda_, and
# accumulated until the horizon is passed.
# NOTE(review): this definition is truncated in this view -- the rest of
# the while-loop body and the diffusion/return portion of the simulation
# are not visible here. Confirm against the full gist.
simulateJump=function(mu_,ss_,lambda_,mu2_,sigma_,TotalTime,delta)
{
Sn=0            # running arrival time of the most recent jump
times <- c(0)   # jump times accumulated so far (path starts at t = 0)
while(Sn <= TotalTime)
{
n <- length(times)
u <- runif(1)
# Exponential(lambda_) inter-arrival time via inverse-CDF sampling.
expon <- -log(u)/lambda_
Sn <- times[n]+expon
# Negative log-likelihood function for the jump-diffusion model.
# The parameter vector w (as used by the optimiser) is unpacked into
# named model parameters below.
# NOTE(review): this definition is truncated in this view -- the
# likelihood computation and return value are not visible here.
nll_jumps<-function(w)
{
mu=w[1] # drift
s=(w[2]) # diffusive volatility
lam=(w[3]) # jump frequency
mu_j=w[4] # jump size
@WilsonMongwe
WilsonMongwe / Create corpus.R
Created October 8, 2017 07:06
Analyzing a South African financial news corpus from Twitter
library(httr)
library(httpuv)
library(RColorBrewer)
library(twitteR)
library(tm)
library(wordcloud)
library(base64enc)
library(devtools)
library(ROAuth)
# Clean the raw tweet text and build a tm corpus from it.
# NOTE(review): cleanTweets and texts_from_tweets are defined in other
# fragments of this gist; the snippet order here is scrambled by the
# page extraction.
texts_from_tweets=cleanTweets(texts_from_tweets)
tweets_corpus = Corpus(VectorSource(texts_from_tweets))
# Remove standard English stopwords from every document in the corpus.
tweets_corpus_cleaned<- tm_map(tweets_corpus, function(x)removeWords(x,stopwords()))
# Extra stopwords to remove before creating the document-term matrix:
# domain-specific terms (news-section labels, site names, days, filler
# words) on top of the standard English stopword list.
# NOTE(review): this character vector is truncated in this view -- the
# closing parenthesis of c(...) is not visible here.
add_more_stopwords=c("reports","report","opinion","column","can","will","still","read","wants","says","national","world","sport","life","video",
"lunchbox","comment","must","miss","new","editorial","popular", "cartoon","international","national","politics","companies",
"business","day","top","week","markets","economy","subscribe" , "case","missed","ahead","editor","premium","tomorrow","stories",
"click","keep","needs","interview","moneyweb","year","soapbox","news","expo","register","today","need","now","podcast","lineup",
"bafana","line-up","reader","question","wednesday","conversation","money","company","writes","how",
# Clean tweet text by removing handles, URL fragments and stray
# symbols/whitespace via successive gsub substitutions.
# NOTE(review): this definition is truncated in this view -- any
# remaining substitutions and the return of `tweets` are not visible.
# Also note the first gsub strips every "@" character, so the "@\\w+"
# pattern on the next line can no longer match a handle -- this looks
# like a latent ordering bug; confirm against the full gist.
cleanTweets <- function(tweets)
{
tweets = gsub("@", "", tweets)
tweets = gsub("@\\w+", " ", tweets)
tweets = gsub("https", "", tweets)
# Collapse runs of spaces/tabs into a single space (applied twice).
tweets <- gsub("[ |\t]{2,}", " ", tweets)
tweets <- gsub("[ |\t]{2,}", " ", tweets)
# Remove "amp", presumably the leftover of HTML-escaped "&amp;".
tweets <- gsub("amp", " ", tweets)
# Trim a single leading space.
tweets <- gsub("^ ", "", tweets)
# Read the tweets from the timelines of the three news websites.
# Note the Twitter API limit: at most 3200 tweets can be extracted from
# a user's timeline.
fin24Tweets <- userTimeline('Fin24', n=3200)
bdLiveTweets <- userTimeline('BDliveSA', n=3200)
moneyWebTweets <- userTimeline('Moneyweb', n=3200)
# Create a Twitter authentication in R.
# The information can be found under the "Keys and Access Tokens" tab of
# your Twitter application.
api_key <- "Enter your API key here" #This is called the: Consumer Key (API Key)
api_secret <- "Enter your secret API key here" #This is called the: Consumer Secret (API Secret)
access_token <- "Enter your access token here" #This is called the: Access Token
access_token_secret <- "Enter your secret access token here" #This is called the: Access Token Secret
# Run this code to set up the Twitter OAuth authentication (twitteR).
setup_twitter_oauth(api_key,api_secret,access_token,access_token_secret)
@WilsonMongwe
WilsonMongwe / Fitting a Jump Diffusion.r
Last active September 30, 2017 07:44
Calibration of financial models
########## Non-parametric estimation of the jump-diffusion parameters ##########
# Simulates jump arrival times of a Poisson process with intensity
# lambda_: inter-arrival times are sampled as Exponential(lambda_) via
# the inverse CDF, -log(U)/lambda_.
# NOTE(review): this definition is truncated at the end of this view --
# the loop body continues past the last visible line. Confirm against
# the full gist.
simulateJump=function(mu_,ss_,lambda_,mu2_,sigma_,TotalTime,delta)
{
Sn=0            # running arrival time of the most recent jump
times <- c(0)   # jump times accumulated so far (path starts at t = 0)
while(Sn <= TotalTime)
{
n <- length(times)
u <- runif(1)
# Exponential(lambda_) inter-arrival time via inverse-CDF sampling.
expon <- -log(u)/lambda_