Last active
August 29, 2015 14:02
-
-
Save mrnugget/243ff880dbeb13f9cb79 to your computer and use it in GitHub Desktop.
Go snippet: make http requests concurrently but timeout after a specified interval
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
package main | |
import "log" | |
import "net/http" | |
import "time" | |
// Response records the outcome of one HTTP GET: the status code the
// server returned and the URL that was requested.
type Response struct {
	StatusCode int    // HTTP status code from the server (e.g. 200)
	Url        string // the URL that was fetched
}
func fetch(url string, done chan *Response, quit chan bool) { | |
resp, err := http.Get(url) | |
if err != nil { | |
log.Fatal(err) | |
} | |
for { | |
select { | |
case done <- &Response{resp.StatusCode, url}: | |
return | |
case <-quit: | |
return | |
} | |
} | |
} | |
func waitForAll(done chan *Response, quit chan bool, duration time.Duration) []*Response { | |
responses := []*Response{} | |
timeout := time.After(duration) | |
for { | |
select { | |
case resp := <-done: | |
responses = append(responses, resp) | |
case <-timeout: | |
close(quit) | |
return responses | |
} | |
} | |
} | |
func main() { | |
urls := []string{"http://flinc.org", "http://google.de", "http://amazon.de"} | |
quit := make(chan bool) | |
done := make(chan *Response, 0) | |
log.Printf("Fetching %d URLs concurrently", len(urls)) | |
for _, url := range urls { | |
go fetch(url, done, quit) | |
} | |
responses := waitForAll(done, quit, 1000*time.Millisecond) | |
log.Printf("%d responded in the timeframe", len(responses)) | |
for _, resp := range responses { | |
log.Printf("%d %s", resp.StatusCode, resp.Url) | |
} | |
} |
Fixed the time.After bug.
Introduced a quit channel, which should stop the goroutine leaking.
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Probably doesn't matter for this, but this has a goroutine leak in line 17 for any responses that come in after the timeout. The send to the
done
channel will block forever since nobody is consuming from it anymore. Additionally, it seems like the timeout resets after every response, so waitForAll may run longer than 300ms.