@SaoYan
Last active May 24, 2024 19:30
A Tour of Go Exercise: Web Crawler
package main

import (
	"fmt"
	"sync"
)

/*
This solution uses channels to force each goroutine to wait for its child goroutines to exit.
*/

type SafeCounter struct {
	v   map[string]bool
	mux sync.Mutex
}

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

var cnt = SafeCounter{v: make(map[string]bool)}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, exit chan bool) {
	// Fetch URLs in parallel.
	// Don't fetch the same URL twice.
	if depth <= 0 {
		exit <- true
		return
	}
	cnt.mux.Lock()
	_, ok := cnt.v[url]
	if !ok {
		cnt.v[url] = true
		cnt.mux.Unlock()
	} else {
		// release the lock before the (possibly blocking) channel send
		cnt.mux.Unlock()
		exit <- true
		return
	}
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		exit <- true
		return
	}
	fmt.Printf("found: %s %q\n", url, body)
	e := make(chan bool)
	for _, u := range urls {
		go Crawl(u, depth-1, fetcher, e)
	}
	// wait for all child goroutines to exit
	for i := 0; i < len(urls); i++ {
		<-e
	}
	exit <- true
}

func main() {
	exit := make(chan bool)
	go Crawl("https://golang.org/", 4, fetcher, exit)
	<-exit
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
	body string
	urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}
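
Before the WaitGroup variant below, it may help to see the fan-in pattern the channel solution relies on in isolation: the parent starts N goroutines, each child sends exactly one value on a shared channel, and the parent receives N values before signalling its own completion. The following is only a minimal sketch of that pattern; the work function, worker IDs, and worker count are placeholders, not part of the exercise.

package main

import "fmt"

// work stands in for one unit of crawling; it signals completion on done.
func work(id int, done chan<- bool) {
	fmt.Println("worker", id, "finished")
	done <- true // exactly one send per goroutine
}

func main() {
	const n = 3
	done := make(chan bool)
	for i := 0; i < n; i++ {
		go work(i, done)
	}
	// Receive one value per goroutine; this loop only finishes
	// once every child has signalled completion.
	for i := 0; i < n; i++ {
		<-done
	}
	fmt.Println("all workers done")
}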
package main

import (
	"fmt"
	"sync"
)

/*
This solution uses a WaitGroup to force each goroutine to wait for its child goroutines to exit.
*/

type SafeCounter struct {
	v   map[string]bool
	mux sync.Mutex
}

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

var cnt = SafeCounter{v: make(map[string]bool)}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, wg *sync.WaitGroup) {
	// Fetch URLs in parallel.
	// Don't fetch the same URL twice.
	if depth <= 0 {
		wg.Done()
		return
	}
	cnt.mux.Lock()
	_, ok := cnt.v[url]
	if !ok {
		cnt.v[url] = true
		cnt.mux.Unlock()
	} else {
		cnt.mux.Unlock()
		wg.Done()
		return
	}
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		wg.Done()
		return
	}
	fmt.Printf("found: %s %q\n", url, body)
	var childWG sync.WaitGroup
	for _, u := range urls {
		childWG.Add(1)
		go Crawl(u, depth-1, fetcher, &childWG)
	}
	childWG.Wait()
	wg.Done()
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1) // Add before starting the goroutine that will call Done
	go Crawl("https://golang.org/", 4, fetcher, &wg)
	wg.Wait()
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
	body string
	urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}
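
The WaitGroup variant depends on the rule that Add must be called before the goroutine that will eventually call Done is started, which is why main above calls wg.Add(1) before go Crawl(...). Below is a stripped-down sketch of the same parent/child wait, with a placeholder work function standing in for the recursive call; the defer wg.Done() idiom shown here covers every return path automatically, which the solution above handles with explicit Done calls instead.

package main

import (
	"fmt"
	"sync"
)

// work stands in for one recursive Crawl call.
func work(id int, wg *sync.WaitGroup) {
	defer wg.Done() // runs on every return path
	fmt.Println("worker", id, "finished")
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1) // register the child before starting it
		go work(i, &wg)
	}
	wg.Wait() // blocks until every registered child has called Done
	fmt.Println("all workers done")
}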
@Fleyderer

Just a struct with a map + mutex, and a channel populated with the results

package main

import (
	"fmt"
	"sync"
)

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

type SafeCrawl struct {
	crawled map[string]int
	mu sync.Mutex
}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func CrawlRecursive(url string, depth int, fetcher Fetcher, ch chan string, sc *SafeCrawl) {
	sc.mu.Lock()
	if depth <= 0 || sc.crawled[url] != 0 {
		sc.mu.Unlock() // release the lock on the early return as well
		return
	}
	sc.crawled[url] = 1
	sc.mu.Unlock()
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	ch <- fmt.Sprintf("found: %s %q", url, body)
	for _, u := range urls {
		CrawlRecursive(u, depth-1, fetcher, ch, sc)
	}
}

func Crawl(url string, depth int, fetcher Fetcher, ch chan string) {
	// pass a pointer so every call shares the same mutex and map
	sc := &SafeCrawl{crawled: make(map[string]int)}
	CrawlRecursive(url, depth, fetcher, ch, sc)
	close(ch)
}

func main() {
	ch := make(chan string)
	go Crawl("https://golang.org/", 4, fetcher, ch)
	for crawled := range ch {
		fmt.Println(crawled)
	}
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
	body string
	urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}
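
The for crawled := range ch loop in main above only terminates because Crawl closes the channel once the recursion has returned. Reduced to a minimal producer/consumer sketch (the produce function and its canned results are placeholders, not code from the thread):

package main

import "fmt"

// produce sends a fixed set of results and then closes the channel;
// closing is what lets the consumer's range loop terminate.
func produce(ch chan<- string) {
	for i := 0; i < 3; i++ {
		ch <- fmt.Sprintf("result %d", i)
	}
	close(ch)
}

func main() {
	ch := make(chan string)
	go produce(ch)
	for msg := range ch { // exits when ch is closed and drained
		fmt.Println(msg)
	}
}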

@vzsoares

vzsoares commented Apr 14, 2024

@Fleyderer
I'd say you need to call CrawlRecursive with go every time in order to properly make it concurrent
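
One way to apply that suggestion, sketched here as an illustration rather than code from the thread: launch each recursive call in its own goroutine and close the results channel only after the whole tree has finished. CrawlConcurrent and the WaitGroup plumbing are assumed names; the sketch reuses the SafeCrawl type, Fetcher, and main from the snippet above, and keeps Crawl's signature so main works unchanged.

// Sketch: a concurrent drop-in for Crawl/CrawlRecursive above.
func CrawlConcurrent(url string, depth int, fetcher Fetcher, ch chan string, sc *SafeCrawl, wg *sync.WaitGroup) {
	defer wg.Done()
	sc.mu.Lock()
	if depth <= 0 || sc.crawled[url] != 0 {
		sc.mu.Unlock()
		return
	}
	sc.crawled[url] = 1
	sc.mu.Unlock()
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	ch <- fmt.Sprintf("found: %s %q", url, body)
	for _, u := range urls {
		wg.Add(1)
		go CrawlConcurrent(u, depth-1, fetcher, ch, sc, wg) // each child in its own goroutine
	}
}

func Crawl(url string, depth int, fetcher Fetcher, ch chan string) {
	sc := &SafeCrawl{crawled: make(map[string]int)}
	var wg sync.WaitGroup
	wg.Add(1)
	go CrawlConcurrent(url, depth, fetcher, ch, sc, &wg)
	wg.Wait() // wait for the whole tree of goroutines
	close(ch) // then let main's range loop finish
}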

@vzsoares

package main

import (
	"fmt"
	"sync"
)

type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}

type urlCache struct {
	sync.RWMutex
	m map[string]bool
}

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher, wg *sync.WaitGroup, cache *urlCache) {
	if depth <= 0 {
		return
	}
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("found: %s %q\n", url, body)
	for _, u := range urls {
		// Check and claim the URL under a single lock so two goroutines
		// cannot both decide it is new.
		cache.Lock()
		if cache.m[u] {
			cache.Unlock()
			continue
		}
		cache.m[u] = true
		cache.Unlock()
		wg.Add(1)
		go func(u string) { // pass u so each goroutine gets its own copy
			defer wg.Done()
			Crawl(u, depth-1, fetcher, wg, cache)
		}(u)
	}
}

func main() {
	wg := sync.WaitGroup{}
	cache := urlCache{m: map[string]bool{"https://golang.org/": true}} // mark the start URL as seen so it is not fetched twice
	wg.Add(1)
	go func() {
		defer wg.Done()
		Crawl("https://golang.org/", 4, fetcher, &wg, &cache)
	}()
	wg.Wait()
}

// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

type fakeResult struct {
	body string
	urls []string
}

func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	if res, ok := f[url]; ok {
		return res.body, res.urls, nil
	}
	return "", nil, fmt.Errorf("not found: %s", url)
}

// fetcher is a populated fakeFetcher.
var fetcher = fakeFetcher{
	"https://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"https://golang.org/pkg/",
			"https://golang.org/cmd/",
		},
	},
	"https://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"https://golang.org/",
			"https://golang.org/cmd/",
			"https://golang.org/pkg/fmt/",
			"https://golang.org/pkg/os/",
		},
	},
	"https://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
	"https://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"https://golang.org/",
			"https://golang.org/pkg/",
		},
	},
}
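
A note on the cache in the last snippet: the membership check and the write have to happen inside one critical section (as written above), otherwise two goroutines can both conclude a URL is new. An alternative is sync.Map, whose LoadOrStore performs the check-and-set atomically. A small, self-contained sketch, with an invented URL list purely for illustration:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var seen sync.Map // url -> struct{}{}
	urls := []string{"https://golang.org/", "https://golang.org/pkg/", "https://golang.org/"}
	var wg sync.WaitGroup
	for _, u := range urls {
		// LoadOrStore reports loaded == true if the key was already present,
		// so only the first claim of a URL proceeds to fetch it.
		if _, loaded := seen.LoadOrStore(u, struct{}{}); loaded {
			continue
		}
		wg.Add(1)
		go func(u string) {
			defer wg.Done()
			fmt.Println("would fetch:", u)
		}(u)
	}
	wg.Wait()
}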
