Limit parallel goroutines with a buffered channel.
package main

import (
	"log"
	"sync"
	"time"
)

// Limit caps how many goroutines may run at once.
const Limit = 5

func main() {
	log.SetFlags(log.Ltime) // format log output hh:mm:ss

	var wg sync.WaitGroup
	// Buffered channel used as a counting semaphore: each running
	// worker holds one slot, so at most Limit workers run at a time.
	workers := make(chan struct{}, Limit)

	doWork := func(i int, j string) {
		defer wg.Done()
		time.Sleep(2 * time.Second)
		log.Printf("Worker %d working on %s", i, j)
		<-workers // release the slot acquired by the main loop
	}

	for j := 0; j < 15; j++ {
		work := string(rune('a' + j))
		log.Printf("Work %s enqueued", work)
		workers <- struct{}{} // blocks while Limit workers are running
		wg.Add(1)
		go doWork(j, work)
	}

	wg.Wait()
}
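Note the design choice: the slot is acquired in the main loop (workers <- struct{}{}) before the goroutine is spawned, so at most Limit goroutines exist at any moment, and each worker releases its slot (<-workers) when it finishes. If the acquire happened inside doWork instead, the loop would spawn every goroutine immediately and the channel would only bound how many run concurrently, not how many exist.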
That is true. I just got interested in how one could achieve the same thing using a buffered channel instead of a worker pool, and drifted a little from the original use case. =)
eduncan911 commented on May 25, 2016 (edited)
With this pattern, if you set your Limit to 1 million, you'd get 1 million goroutines, each using 4 KB of RAM, which ends up using 4 GB of RAM. More urgently, if you are accessing 1 million file descriptors, you'll get the "too many open files" issue the author described.
https://rhardih.io/2016/05/dead-simple-concurrency-limitation-in-go/
The point of the author's blog post is to instead use a pool of workers that you limit to a controlled number: not 1 million, but 50 or 200 or so.
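For contrast, here is a minimal sketch of the worker-pool pattern the comment describes: a fixed number of goroutines pulling jobs from a shared channel, so memory use is bounded by the pool size no matter how many jobs are queued. The pool size, channel names, and work items below are illustrative, not taken from the gist or the blog post.

package main

import (
	"log"
	"sync"
	"time"
)

const numWorkers = 5 // fixed pool size; only this many goroutines ever exist

func main() {
	log.SetFlags(log.Ltime)

	jobs := make(chan string)
	var wg sync.WaitGroup

	// Start a fixed pool of workers up front. Unlike the semaphore
	// version, no goroutines are created per job.
	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for job := range jobs {
				time.Sleep(2 * time.Second)
				log.Printf("Worker %d working on %s", id, job)
			}
		}(i)
	}

	// Enqueue 15 jobs; sends block until a worker is free to receive.
	for j := 0; j < 15; j++ {
		jobs <- string(rune('a' + j))
	}
	close(jobs) // no more work; workers exit their range loops

	wg.Wait()
}

Both versions cap concurrency at 5; the difference is that the pool caps the number of goroutines that ever exist, while the semaphore version caps only how many are admitted at a time.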