Interview tidbits
package main

/* For a given number N, two goroutines (a "foo" goroutine and a "bar" goroutine)
are each capable of printing "foo" and "bar" respectively.
The purpose of this program is to print "foobar" N times.
*/

import (
	"fmt"
	"sync"
)

const (
	N = 15
)

func main() {
	fooStream := make(chan string, N)
	for i := 0; i < N; i++ {
		fooStream <- "foo"
	}
	barStream := make(chan string, N)
	for i := 0; i < N; i++ {
		barStream <- "bar"
	}
	close(fooStream)
	close(barStream)
	stream := make(chan string, N)
	defer close(stream)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// relay every "foo" onto the shared stream
		for val := range fooStream {
			stream <- val
		}
	}()
	wg.Add(1)
	go func() {
		defer wg.Done()
		// pair each "bar" with a "foo" pulled off the shared stream
		for val := range barStream {
			foo := <-stream
			fmt.Printf("%s%s\n", foo, val)
		}
	}()
	wg.Wait()
}
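/* An alternative sketch (illustrative, not from the original gist): the same
N x "foobar" output with two unbuffered channels acting as a baton pass
between the goroutines, so no N-sized buffers are needed. */
package main

import (
	"fmt"
	"sync"
)

func main() {
	const n = 15
	fooTurn := make(chan struct{})
	barTurn := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { // the "foo" goroutine
		defer wg.Done()
		for i := 0; i < n; i++ {
			<-fooTurn // wait for our turn
			fmt.Print("foo")
			barTurn <- struct{}{} // hand over to "bar"
		}
	}()
	go func() { // the "bar" goroutine
		defer wg.Done()
		for i := 0; i < n; i++ {
			<-barTurn
			fmt.Println("bar")
			if i+1 < n {
				fooTurn <- struct{}{} // hand back to "foo" for the next round
			}
		}
	}()
	fooTurn <- struct{}{} // kick off the first round
	wg.Wait()
}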
/*
author: kneerunjun@gmail.com
Cloning a linked list where each node has a next + arbit pointer
https://www.geeksforgeeks.org/a-linked-list-with-next-and-arbit-pointer/
*/
package main

import (
	"fmt"
	"math/rand"
)

type Node struct {
	Val      string
	Next     *Node
	Arbit    *Node
	CloneMap *Node // original -> clone mapping, used only while cloning
}

// AsLineItems flattens the list values into a slice, in list order.
func (n *Node) AsLineItems() []string {
	result := []string{}
	for currNode := n; currNode != nil; currNode = currNode.Next {
		result = append(result, currNode.Val)
	}
	return result
}

// CloneList makes a deep copy of the list, including the Arbit pointers.
func CloneList(ll *Node) (*Node, error) {
	result := NewLinkedList(ll.AsLineItems()...)
	// First pass: record each original node's clone on the original itself.
	x, y := ll, result
	for x != nil {
		x.CloneMap = y
		x = x.Next
		y = y.Next
	}
	// Second pass: the clone's Arbit is the clone of the original's Arbit.
	x, y = ll, result
	for y != nil {
		y.Arbit = x.Arbit.CloneMap
		y = y.Next
		x = x.Next
	}
	return result, nil
}

// PrintLL gives a dainty format print of the linked list.
func PrintLL(ll *Node) {
	for currNode := ll; currNode != nil; currNode = currNode.Next {
		if currNode.Arbit != nil {
			fmt.Printf("(%s)(>>%s)->", currNode.Val, currNode.Arbit.Val)
		} else {
			fmt.Printf("(%s)(>>)->", currNode.Val)
		}
	}
}

// NewLinkedList builds a list from the given values, with random Arbit links.
func NewLinkedList(items ...string) *Node {
	nodes := make([]Node, len(items))
	// Making the sequential connections and pushing values
	for i, v := range items {
		nodes[i].Val = v
		if i+1 < len(items) {
			nodes[i].Next = &nodes[i+1]
		}
	}
	// Now making the arbit connections
	for i := range items {
		nodes[i].Arbit = &nodes[rand.Intn(len(nodes))]
	}
	return &nodes[0]
}

func main() {
	ll := NewLinkedList("Micheal", "Toby", "Jim", "Pam", "Dwight", "Erin", "Andy", "Kevin", "Creed")
	fmt.Printf("Here is your original linked list ..\n")
	PrintLL(ll)
	clone, err := CloneList(ll)
	if err != nil {
		fmt.Printf("failed to clone the list %s", err)
		return
	}
	fmt.Printf("\nHere is your cloned one ...\n")
	PrintLL(clone)
}
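/* A side note (a sketch of the O(1) extra-space variant described in the
geeksforgeeks reference above, not part of the original gist): instead of a
CloneMap field, interleave the clone nodes into the original list, wire up the
Arbit pointers, then split the two lists apart. Assumes a Node type without
the CloneMap field. */
package main

import "fmt"

type Node struct {
	Val   string
	Next  *Node
	Arbit *Node
}

// CloneInterleaved deep-copies a list with Next and Arbit pointers using O(1) extra space.
func CloneInterleaved(head *Node) *Node {
	if head == nil {
		return nil
	}
	// Step 1: insert a copy of each node right after its original.
	for x := head; x != nil; x = x.Next.Next {
		x.Next = &Node{Val: x.Val, Next: x.Next}
	}
	// Step 2: the clone's Arbit is the node right after the original's Arbit.
	for x := head; x != nil; x = x.Next.Next {
		if x.Arbit != nil {
			x.Next.Arbit = x.Arbit.Next
		}
	}
	// Step 3: detach the clone nodes, restoring the original list.
	cloneHead := head.Next
	for x := head; x != nil; x = x.Next {
		clone := x.Next
		x.Next = clone.Next
		if clone.Next != nil {
			clone.Next = clone.Next.Next
		}
	}
	return cloneHead
}

func main() {
	a := &Node{Val: "Jim"}
	b := &Node{Val: "Pam"}
	a.Next, a.Arbit, b.Arbit = b, b, a
	clone := CloneInterleaved(a)
	fmt.Println(clone.Val, clone.Arbit.Val) // Jim Pam
}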
package main

import (
	"fmt"
	"sync"
)

func main() {
	stream := make(chan interface{}, 30)
	allStrings := []string{"Micheal", "Jim", "Pam", "Kevin", "Oscar", "Angela", "Dwight", "Creed", "Andy", "Meredith"}
	for _, val := range allStrings {
		stream <- val
	}
	allNumbers := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	for _, val := range allNumbers {
		stream <- val
	}
	allBools := []bool{false, false, true, true, true, false, false, false, true, true}
	for _, val := range allBools {
		stream <- val
	}
	close(stream)
	totalReceived := 0
	for val := range workerPool(10, stream) {
		totalReceived = totalReceived + val
	}
	fmt.Printf("total values received %d", totalReceived)
}

// workerPool fans out maxWorkers goroutines over the input stream and
// reports one count per value handled on the returned channel.
func workerPool(maxWorkers int, strm chan interface{}) chan int {
	result := make(chan int, 100)
	go func() {
		var wg sync.WaitGroup
		defer close(result)
		for i := 0; i < maxWorkers; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for strmVal := range strm {
					fmt.Printf("Type of the data %T\n", strmVal)
					result <- 1
				}
			}()
		}
		wg.Wait()
	}()
	return result
}
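/* A side note (illustrative sketch, not from the original gist): inside the
worker, a type switch is the idiomatic way to branch on the concrete type
behind the interface{} value instead of only printing it with %T: */
package main

import "fmt"

// describe reports what concrete type hides behind the empty interface.
func describe(v interface{}) string {
	switch x := v.(type) {
	case string:
		return fmt.Sprintf("string of length %d", len(x))
	case int:
		return fmt.Sprintf("int with value %d", x)
	case bool:
		return fmt.Sprintf("bool set to %t", x)
	default:
		return fmt.Sprintf("unhandled type %T", x)
	}
}

func main() {
	for _, v := range []interface{}{"Pam", 7, true} {
		fmt.Println(describe(v))
	}
}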
package main

/* =========================
Author   : kneerunjun@gmail.com
Location : Pune
Date     : 18-NOV-2022
Assignment on xkcd comics download demonstrating concurrency techniques in Go.
The JSON comics manifest is downloaded using an http client.
The performance should depend on how many workers are started.
========================= */

import (
	"encoding/json"
	"fmt"
	"io"
	"math/rand"
	"net/http"
	"sync"
	"time"
)

const (
	MAX_WORKERS = 32 // direct correlation to the performance
	MAX_JOBS    = 50 // change this to bump up the number of jobs that get fired
)

func main() {
	fmt.Println("starting xkcd fetch..")
	jobs := make(chan int, MAX_JOBS)
	for i := 0; i < MAX_JOBS; i++ {
		// We know there are about 2500 comics
		// https://xkcd.com/json.html
		// Ids of the comics hence range from 1-2499
		// each job is equivalent to the comic id that is used to download
		jobs <- rand.Intn(2499) + 1 // +1 since comic ids start at 1, not 0
	}
	close(jobs) // closing the channel does not mean you cannot read from it
	// closing only means you can no longer write to that channel
	// https://stackoverflow.com/questions/29269850/why-do-i-receive-values-from-a-closed-channel
	// explains that you can still receive on a closed channel
	// we write MAX_JOBS values on the channel, and close it
	// All the values on the channel can still be read despite it being closed.
	// once all the values are read, the channel yields zero values since it is closed.
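	// A minimal illustration of that behaviour (not part of this program):
	//
	//	ch := make(chan int, 1)
	//	ch <- 42
	//	close(ch)
	//	v, ok := <-ch // v == 42, ok == true: the buffered value is still readable
	//	v, ok = <-ch  // v == 0, ok == false: the channel is drained and closed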
	start := time.Now()
	// Now that it is the worker_pool's responsibility to close the results channel
	// we can happily range over the results channel.
	// remember how in our earlier version this was not possible
	for result := range worker_pool(MAX_WORKERS, jobs) {
		fmt.Println(result["num"], result["safe_title"])
	}
	end := time.Now()
	fmt.Printf("%f seconds\n", (end.Sub(start)).Seconds())
}
// worker_pool : spins up the requested number of workers, hands back the
// results channel, and closes it once all the workers are done.
func worker_pool(workers int, jobs chan int) chan map[string]interface{} {
	results := make(chan map[string]interface{}, len(jobs))
	go func() {
		var wg sync.WaitGroup
		for i := 0; i < workers; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				worker(jobs, results)
			}()
		}
		wg.Wait()
		close(results)
	}()
	return results
}
// worker : takes 2 channels, one to read the job (comic id) from and another to pass back the result
// More workers means better performance, up to a point where it peaks out.
func worker(jobs <-chan int, result chan<- map[string]interface{}) {
	for val := range jobs {
		data, err := FetchJsonComic(val)
		if err != nil {
			fmt.Println(err)
			continue // skip sending a nil result for a failed fetch
		}
		result <- data
	}
}
// FetchJsonComic : uses http to fetch the JSON transcript of the comic over the internet
// comicID : each comic has a unique id that can be attached as a url param
// Refer here https://xkcd.com/json.html
// https://xkcd.com/614/info.0.json gets comic number 614 from the web
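// A minimal usage sketch (illustrative, not from the original gist):
//
//	comic, err := FetchJsonComic(614)
//	if err != nil {
//		// handle the error
//	}
//	fmt.Println(comic["num"], comic["safe_title"])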
func FetchJsonComic(comicID int) (map[string]interface{}, error) {
	url := fmt.Sprintf("https://xkcd.com/%d/info.0.json", comicID)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to form the request, aborting")
	}
	client := http.Client{
		Timeout: 5 * time.Second,
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to send request, check network connection %s", url)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("http error getting the comic, status %d", resp.StatusCode)
	}
	// unmarshal: read the body only if the status was 200 OK
	byt, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading the response %s", err)
	}
	result := map[string]interface{}{} // the map onto which we unmarshal the response
	if err := json.Unmarshal(byt, &result); err != nil {
		return nil, fmt.Errorf("error unmarshaling data %s", err)
	}
	return result, nil
}
/*
=========================
Author   : kneerunjun@gmail.com
Location : Pune
Date     : 18-NOV-2022
Assignment on xkcd comics download demonstrating concurrency techniques in Go.
The JSON comics manifest is downloaded using an http client.
The performance should depend on how many workers are started.
Once the above logic is achieved we need to implement rate limiting:
a constraint on the number of times a resource is accessed per unit time,
so that the number of requests fired at xkcd is limited to a finite number in a given unit of time.
=========================
*/
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"sync"
	"time"
)

const (
	MAX_JOBS    = 25 // total number of fetch downloads
	MAX_WORKERS = 10 // total number of worker threads
	RATE_LIMIT  = 3  // max number of times the resource is accessed in unit time
	RATE_PERSEC = 10 // unit time of rate limiting, in seconds
)
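// Back-of-envelope throughput (an estimate, ignoring network latency): with
// RATE_LIMIT = 3 tokens refilled every RATE_PERSEC = 10 seconds, the 25 jobs
// need about ceil(25/3) = 9 windows, so a run should take roughly 80-90 seconds.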
func main() {
	// Load the jobs channel to the maximum
	jobs := make(chan int, MAX_JOBS)
	interrupt := make(chan bool)
	for i := 1; i <= MAX_JOBS; i++ {
		// jobs <- rand.Intn(2499)
		jobs <- i
	}
	close(jobs) // we can still read from the closed channel later
	// worker_pool returns a channel that can be read for results from the downloads
	for result := range worker_pool(jobs, MAX_WORKERS, interrupt) {
		fmt.Println(result["num"], result["safe_title"])
	}
	close(interrupt) // stops the token bucket observer goroutine
}
// worker_pool : spins up a number of worker threads plus
// a distinct goroutine that maintains the token bucket
func worker_pool(jobs chan int, maxTh int, interrupt chan bool) chan map[string]interface{} {
	result := make(chan map[string]interface{}, MAX_JOBS)
	bucket := make(chan int, RATE_LIMIT)
	go func(interrupt chan bool, bucket chan int) {
		// Rate limiting observer thread
		defer close(bucket)
		// fill up the bucket to start with
		for i := 0; i < RATE_LIMIT; i++ {
			bucket <- 1
		}
		for {
			select {
			case <-time.After(RATE_PERSEC * time.Second):
				// refill the bucket once it has been drained
				if len(bucket) == 0 {
					for i := 0; i < RATE_LIMIT; i++ {
						bucket <- 1
					}
				}
			case <-interrupt:
				return
			}
		}
	}(interrupt, bucket)
	go func() {
		var wg sync.WaitGroup
		for i := 0; i < maxTh; i++ {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()
				worker_thread(jobs, result, bucket)
			}(i)
		}
		wg.Wait()
		close(result)
	}()
	return result
}
func worker_thread(jobs chan int, result chan map[string]interface{}, tokenBucket <-chan int) {
	for index := range jobs {
		<-tokenBucket // blocks only when the bucket is empty, until it is replenished
		res, err := FetchJsonComic(index)
		if err != nil {
			fmt.Printf("failed to get json comic #%d: %s\n", index, err)
			continue // skip sending a nil result for a failed fetch
		}
		result <- res
	}
}
func FetchJsonComic(comicID int) (map[string]interface{}, error) {
	url := fmt.Sprintf("https://xkcd.com/%d/info.0.json", comicID)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to form the request, aborting")
	}
	client := http.Client{
		Timeout: 5 * time.Second,
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("failed to send request, check network connection %s", url)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("http error getting the comic, status %d", resp.StatusCode)
	}
	// unmarshal: read the body only if the status was 200 OK
	byt, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("error reading the response %s", err)
	}
	result := map[string]interface{}{} // the map onto which we unmarshal the response
	if err := json.Unmarshal(byt, &result); err != nil {
		return nil, fmt.Errorf("error unmarshaling data %s", err)
	}
	return result, nil
}
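/* A side note (sketch, not from the original gist): the hand-rolled token
bucket above releases tokens in bursts of RATE_LIMIT. A time.Ticker from the
standard library can express a smoother variant, one token per tick: */
package main

import (
	"fmt"
	"time"
)

func main() {
	tokens := make(chan struct{}, 3) // bucket capacity of 3
	go func() {
		ticker := time.NewTicker(time.Second) // one token per second
		defer ticker.Stop()
		for range ticker.C {
			select {
			case tokens <- struct{}{}:
			default: // bucket already full, drop the token
			}
		}
	}()
	for i := 1; i <= 5; i++ {
		<-tokens // block until a token is available
		fmt.Println("request", i, "fired at", time.Now().Format("15:04:05"))
	}
}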