
@SaoYan
Last active May 24, 2024 19:30
A Tour of Go Exercise: Web Crawler
package main

import (
	"fmt"
	"sync"
)

/*
This solution uses channels to force each goroutine to wait for its child goroutines to exit.
*/

type SafeCounter struct {
	v   map[string]bool
	mux sync.Mutex
}

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

var cnt SafeCounter = SafeCounter{v: make(map[string]bool)}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, exit chan bool) {
	// Fetch URLs in parallel.
	// Don't fetch the same URL twice.
	if depth <= 0 {
		exit <- true
		return
	}
	cnt.mux.Lock()
	_, ok := cnt.v[url]
	if !ok {
		cnt.v[url] = true
		cnt.mux.Unlock()
	} else {
		cnt.mux.Unlock()
		exit <- true
		return
	}
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		exit <- true
		return
	}
	fmt.Printf("found: %s %q\n", url, body)
	e := make(chan bool)
	for _, u := range urls {
		go Crawl(u, depth-1, fetcher, e)
	}
	// wait for all child goroutines to exit
	for i := 0; i < len(urls); i++ {
		<-e
	}
	exit <- true
}

func main() {
	exit := make(chan bool)
	go Crawl("https://golang.org/", 4, fetcher, exit)
	<-exit
}

// fakeFetcher is a Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
	body string
	urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}
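
The synchronization pattern used above, shown in isolation: the parent starts one goroutine per child and then receives exactly that many values from a channel before signalling its own exit. A minimal sketch with illustrative names (worker, done) that are not part of the original gist:

package main

import "fmt"

// worker stands in for one recursive Crawl call; it signals on done when finished.
func worker(id int, done chan bool) {
	fmt.Println("worker", id, "finished")
	done <- true
}

func main() {
	done := make(chan bool)
	n := 3
	for i := 0; i < n; i++ {
		go worker(i, done)
	}
	// Receive exactly n signals, one per child goroutine, before moving on.
	for i := 0; i < n; i++ {
		<-done
	}
	fmt.Println("all children exited")
}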
package main

import (
	"fmt"
	"sync"
)

/*
This solution uses WaitGroup to force each goroutine to wait for its child goroutines to exit.
*/

type SafeCounter struct {
	v   map[string]bool
	mux sync.Mutex
}

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

var cnt SafeCounter = SafeCounter{v: make(map[string]bool)}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, wg *sync.WaitGroup) {
	// Fetch URLs in parallel.
	// Don't fetch the same URL twice.
	if depth <= 0 {
		wg.Done()
		return
	}
	cnt.mux.Lock()
	_, ok := cnt.v[url]
	if !ok {
		cnt.v[url] = true
		cnt.mux.Unlock()
	} else {
		cnt.mux.Unlock()
		wg.Done()
		return
	}
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		wg.Done()
		return
	}
	fmt.Printf("found: %s %q\n", url, body)
	var wg_ sync.WaitGroup
	for _, u := range urls {
		wg_.Add(1)
		go Crawl(u, depth-1, fetcher, &wg_)
	}
	wg_.Wait()
	wg.Done()
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1) // Add before launching the goroutine so Wait cannot return early.
	go Crawl("https://golang.org/", 4, fetcher, &wg)
	wg.Wait()
}

// fakeFetcher is a Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
	body string
	urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}
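
The same fan-out written with sync.WaitGroup alone, which is what the second solution does at each recursion level. A minimal sketch with illustrative names, using the common defer wg.Done() idiom rather than the explicit Done calls above:

package main

import (
	"fmt"
	"sync"
)

func main() {
	urls := []string{"a", "b", "c"}
	var wg sync.WaitGroup
	for _, u := range urls {
		wg.Add(1) // count each child before starting it
		go func(u string) {
			defer wg.Done() // decrement even if the goroutine returns early
			fmt.Println("processed", u)
		}(u)
	}
	wg.Wait() // blocks until every child has called Done
}

Passing u as an argument avoids capturing the loop variable, which matters on Go versions before 1.22.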
@curbol

curbol commented Jun 14, 2023

package main

import (
	"fmt"
	"sync"
)

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

type crawlResponse struct {
	url string
	depth int
	
	body string
	urls []string
	err error
}

type crawlState struct {
	mu sync.Mutex
	found map[string]any
}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher) {
	ch := make(chan crawlResponse, 10)
	state := crawlState{
		found: map[string]any{url:true},
	}
	
	var wg sync.WaitGroup
	wg.Add(1)
	go ReadUrl(url, depth, fetcher, ch)
	
	go func() {
		for r := range ch {
			if r.err != nil {
				fmt.Println(r.err)
				wg.Done()
				continue
			}
			fmt.Printf("found: %s %q\n", r.url, r.body)

			if r.depth > 0 {
				for _, u := range r.urls {
					_, ok := state.found[u]
					if ok {
						continue
					}
					state.mu.Lock()
					state.found[u] = true
					state.mu.Unlock()

					wg.Add(1)
					go ReadUrl(u, r.depth - 1, fetcher, ch)
				}
			}
			wg.Done()
		}
	}()
	
	wg.Wait()
	close(ch)
}

func ReadUrl(url string, depth int, fetcher Fetcher, ch chan crawlResponse) {
	body, urls, err := fetcher.Fetch(url)
	ch <- crawlResponse{url, depth, body, urls, err}
}

func main() {
	Crawl("https://golang.org/", 4, fetcher)
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
	body string
	urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}

@varian97

varian97 commented Aug 21, 2023

Using WaitGroup, and I am not a big fan of global variables, so the visited-URL checker is passed in explicitly.

package main

import (
	"fmt"
	"sync"
)

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, checker *urlChecker, wg *sync.WaitGroup) {
	defer wg.Done()

	if depth <= 0 {
		return
	}

	checker.Set(url)
	body, urls, err := fetcher.Fetch(url)

	if err != nil {
		fmt.Println(err)
		return
	}

	fmt.Printf("found: %s %q\n", url, body)
	for _, u := range urls {
		isExists := checker.Get(u)
		if !isExists {
			wg.Add(1)
			go Crawl(u, depth-1, fetcher, checker, wg)
		}
	}

	return
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)

	checker := &urlChecker{visited: make(map[string]bool)}
	Crawl("https://golang.org/", 4, fetcher, checker, &wg)

	wg.Wait()
}

type urlChecker struct {
	mut sync.Mutex
	visited map[string]bool
}

func (checker *urlChecker) Set(url string) {
	checker.mut.Lock()
	checker.visited[url] = true
	checker.mut.Unlock()
}

func (checker *urlChecker) Get(url string) bool {
	checker.mut.Lock()
	defer checker.mut.Unlock()
	_, ok := checker.visited[url]
	return ok
}

type fakeResult struct {
	body string
	urls []string
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}

@Rickard80

Rickard80 commented Nov 1, 2023

Using only what has been learned in previous lessons (select, <-exit, Mutex). I didn't change the main function, but refactored the Crawl function to make the code more readable.

package main

import (
	"fmt"
	"sync"
)

// ADDED (3): SET UP MAP STRUCT FOR FETCHED URLS (mutual exclusion)
type FetchedUrls struct {
	mu sync.Mutex
	crawled map[string]bool
}

func (f *FetchedUrls) Add(key string) {
	f.mu.Lock()	
	f.crawled[key] = true
	f.mu.Unlock()
}
func (f *FetchedUrls) Exists(key string) bool {
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.crawled[key]
}

// ADDED (1): STRUCT FOR MAIN CHANNEL
type FakeResponse struct {
	body string
	url string
	err error
}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher) {
	fetchedUrls := FetchedUrls{crawled: map[string]bool{}}
	ch	 := make(chan FakeResponse)
	exit := make(chan bool)
	
	go crawlHelper(url, depth, fetcher, &fetchedUrls, ch, exit)
	
	for {			// infinite loop ...
		select {	// ... with a listener
			case fakeResponse := <-ch:
				if fakeResponse.err != nil {
					fmt.Println(fakeResponse.err)
				} else {
					fmt.Printf("found: %s %q\n", fakeResponse.url, fakeResponse.body)
				}	
			case <-exit:
				return	// terminate listening to all goroutines
		}
	}
}

// ADDED (1): RECURSIVE LOOP
func crawlHelper(url string, depth int,
				 fetcher Fetcher, fetchedUrls *FetchedUrls,
				 ch chan FakeResponse, exit chan bool) {
	fetchedUrls.Add(url)

	body, urls, err := fetcher.Fetch(url)
	ch <- FakeResponse{body, url, err}
		
	if depth > 1 {
		children := 0
		
		for _, u := range urls {
			if !fetchedUrls.Exists(u) {		// only create a new goroutine if url hasn't been fetched
				children++
				go crawlHelper(u, depth-1, fetcher, fetchedUrls, ch, exit)					
			}
		}

		for i := 0; i < children; i++ {
			<-exit		// collect exit calls to prevent calling parent directly
		}
	}
	
	exit <- true			// call parent when this goroutine and its children are done
}

func main() {
	Crawl("https://golang.org/", 4, fetcher)
}

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
	body string
	urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
 }

@Fleyderer

Just a struct with a map + mutex, plus a channel that gets populated with results.

package main

import (
	"fmt"
	"sync"
)

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

type SafeCrawl struct {
	crawled map[string]int
	mu sync.Mutex
}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func CrawlRecursive(url string, depth int, fetcher Fetcher, ch chan string, sc SafeCrawl) {
	sc.mu.Lock()
	if depth <= 0 || sc.crawled[url] != 0 {
		sc.mu.Unlock()
		return
	}
	sc.crawled[url] = 1
	sc.mu.Unlock()
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	ch <- fmt.Sprintf("found: %s %q", url, body)
	for _, u := range urls {
		CrawlRecursive(u, depth-1, fetcher, ch, sc)
	}
}

func Crawl(url string, depth int, fetcher Fetcher, ch chan string) {
	sc := SafeCrawl{crawled : make(map[string]int)}
	CrawlRecursive(url, depth, fetcher, ch, sc)
	close(ch)
}

func main() {
	ch := make(chan string)
	go Crawl("https://golang.org/", 4, fetcher, ch)
	for crawled := range ch {
		fmt.Println(crawled)
	}
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
	body string
	urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}

@vzsoares

vzsoares commented Apr 14, 2024

@Fleyderer
I'd say you need to call CrawlRecursive with go every time in order to make it properly concurrent.
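
For reference, a minimal runnable sketch of that suggestion: launch each recursive call with go, and thread a *sync.WaitGroup through so the caller can still wait for the whole tree. The names here (crawlConcurrent, the toy links map) are illustrative only, and the visited-URL map/mutex from the solution above is omitted for brevity:

package main

import (
	"fmt"
	"sync"
)

// crawlConcurrent launches one goroutine per child link, so siblings are
// fetched in parallel instead of one after another.
func crawlConcurrent(url string, depth int, links map[string][]string, wg *sync.WaitGroup) {
	defer wg.Done()
	if depth <= 0 {
		return
	}
	fmt.Println("visiting", url)
	for _, u := range links[url] {
		wg.Add(1)
		go crawlConcurrent(u, depth-1, links, wg) // the go keyword is the point here
	}
}

func main() {
	links := map[string][]string{"root": {"a", "b"}, "a": {"c"}}
	var wg sync.WaitGroup
	wg.Add(1)
	go crawlConcurrent("root", 2, links, &wg)
	wg.Wait()
}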

@vzsoares

package main

import (
	"fmt"
	"sync"
)

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

type urlCache struct {
	sync.RWMutex
	m map[string]bool
}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, wg *sync.WaitGroup, cache *urlCache) {
	if depth <= 0 {
		return
	}
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("found: %s %q\n", url, body)
	for _, u := range urls {
		cache.RLock()
		_, ok := cache.m[u]
		cache.RUnlock()
		if ok {
			continue
		}
		cache.Lock()
		cache.m[u] = true
		cache.Unlock()
		wg.Add(1)
		go func() {
			defer wg.Done()
			Crawl(u, depth-1, fetcher, wg, cache)
		}()

	}
	return
}

func main() {
	wg := sync.WaitGroup{}
	cache := urlCache{m: make(map[string]bool)}
	wg.Add(1)
	go func() {
		defer wg.Done()
		Crawl("https://golang.org/", 4, fetcher, &wg, &cache)
	}()
	wg.Wait()
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
	body string
	urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}
