Created January 30, 2023 19:42
--- b/vendor/github.com/ipfs/go-bitswap/bitswap.go 2023-01-30 20:34:50.525475644 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/bitswap.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,181 +0,0 @@ | |
-package bitswap | |
- | |
-import ( | |
- "context" | |
- "fmt" | |
- | |
- "github.com/ipfs/go-bitswap/client" | |
- "github.com/ipfs/go-bitswap/internal/defaults" | |
- "github.com/ipfs/go-bitswap/message" | |
- "github.com/ipfs/go-bitswap/network" | |
- "github.com/ipfs/go-bitswap/server" | |
- "github.com/ipfs/go-bitswap/tracer" | |
- "github.com/ipfs/go-metrics-interface" | |
- | |
- blocks "github.com/ipfs/go-block-format" | |
- "github.com/ipfs/go-cid" | |
- blockstore "github.com/ipfs/go-ipfs-blockstore" | |
- exchange "github.com/ipfs/go-ipfs-exchange-interface" | |
- logging "github.com/ipfs/go-log" | |
- "github.com/libp2p/go-libp2p/core/peer" | |
- | |
- "go.uber.org/multierr" | |
-) | |
- | |
-var log = logging.Logger("bitswap") | |
- | |
-// old interface we are targeting | |
-type bitswap interface { | |
- Close() error | |
- GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) | |
- GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) | |
- GetWantBlocks() []cid.Cid | |
- GetWantHaves() []cid.Cid | |
- GetWantlist() []cid.Cid | |
- IsOnline() bool | |
- LedgerForPeer(p peer.ID) *server.Receipt | |
- NewSession(ctx context.Context) exchange.Fetcher | |
- NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error | |
- PeerConnected(p peer.ID) | |
- PeerDisconnected(p peer.ID) | |
- ReceiveError(err error) | |
- ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) | |
- Stat() (*Stat, error) | |
- WantlistForPeer(p peer.ID) []cid.Cid | |
-} | |
- | |
-var _ exchange.SessionExchange = (*Bitswap)(nil) | |
-var _ bitswap = (*Bitswap)(nil) | |
-var HasBlockBufferSize = defaults.HasBlockBufferSize | |
- | |
-type Bitswap struct { | |
- *client.Client | |
- *server.Server | |
- | |
- tracer tracer.Tracer | |
- net network.BitSwapNetwork | |
-} | |
- | |
-func New(ctx context.Context, net network.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Bitswap { | |
- bs := &Bitswap{ | |
- net: net, | |
- } | |
- | |
- var serverOptions []server.Option | |
- var clientOptions []client.Option | |
- | |
- for _, o := range options { | |
- switch typedOption := o.v.(type) { | |
- case server.Option: | |
- serverOptions = append(serverOptions, typedOption) | |
- case client.Option: | |
- clientOptions = append(clientOptions, typedOption) | |
- case option: | |
- typedOption(bs) | |
- default: | |
- panic(fmt.Errorf("unknown option type passed to bitswap.New, got: %T, %v; expected: %T, %T or %T", typedOption, typedOption, server.Option(nil), client.Option(nil), option(nil))) | |
- } | |
- } | |
- | |
- if bs.tracer != nil { | |
- var tracer tracer.Tracer = nopReceiveTracer{bs.tracer} | |
- clientOptions = append(clientOptions, client.WithTracer(tracer)) | |
- serverOptions = append(serverOptions, server.WithTracer(tracer)) | |
- } | |
- | |
- if HasBlockBufferSize != defaults.HasBlockBufferSize { | |
- serverOptions = append(serverOptions, server.HasBlockBufferSize(HasBlockBufferSize)) | |
- } | |
- | |
- ctx = metrics.CtxSubScope(ctx, "bitswap") | |
- | |
- bs.Server = server.New(ctx, net, bstore, serverOptions...) | |
- bs.Client = client.New(ctx, net, bstore, append(clientOptions, client.WithBlockReceivedNotifier(bs.Server))...) | |
- net.Start(bs) // use the polyfill receiver to log received errors and trace messages only once | |
- | |
- return bs | |
-} | |
- | |
-func (bs *Bitswap) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { | |
- return multierr.Combine( | |
- bs.Client.NotifyNewBlocks(ctx, blks...), | |
- bs.Server.NotifyNewBlocks(ctx, blks...), | |
- ) | |
-} | |
- | |
-type Stat struct { | |
- Wantlist []cid.Cid | |
- Peers []string | |
- BlocksReceived uint64 | |
- DataReceived uint64 | |
- DupBlksReceived uint64 | |
- DupDataReceived uint64 | |
- MessagesReceived uint64 | |
- BlocksSent uint64 | |
- DataSent uint64 | |
- ProvideBufLen int | |
-} | |
- | |
-func (bs *Bitswap) Stat() (*Stat, error) { | |
- cs, err := bs.Client.Stat() | |
- if err != nil { | |
- return nil, err | |
- } | |
- ss, err := bs.Server.Stat() | |
- if err != nil { | |
- return nil, err | |
- } | |
- | |
- return &Stat{ | |
- Wantlist: cs.Wantlist, | |
- BlocksReceived: cs.BlocksReceived, | |
- DataReceived: cs.DataReceived, | |
- DupBlksReceived: cs.DupBlksReceived, | |
- DupDataReceived: cs.DupDataReceived, | |
- MessagesReceived: cs.MessagesReceived, | |
- Peers: ss.Peers, | |
- BlocksSent: ss.BlocksSent, | |
- DataSent: ss.DataSent, | |
- ProvideBufLen: ss.ProvideBufLen, | |
- }, nil | |
-} | |
- | |
-func (bs *Bitswap) Close() error { | |
- bs.net.Stop() | |
- return multierr.Combine( | |
- bs.Client.Close(), | |
- bs.Server.Close(), | |
- ) | |
-} | |
- | |
-func (bs *Bitswap) WantlistForPeer(p peer.ID) []cid.Cid { | |
- if p == bs.net.Self() { | |
- return bs.Client.GetWantlist() | |
- } | |
- return bs.Server.WantlistForPeer(p) | |
-} | |
- | |
-func (bs *Bitswap) PeerConnected(p peer.ID) { | |
- bs.Client.PeerConnected(p) | |
- bs.Server.PeerConnected(p) | |
-} | |
- | |
-func (bs *Bitswap) PeerDisconnected(p peer.ID) { | |
- bs.Client.PeerDisconnected(p) | |
- bs.Server.PeerDisconnected(p) | |
-} | |
- | |
-func (bs *Bitswap) ReceiveError(err error) { | |
- log.Infof("Bitswap Client ReceiveError: %s", err) | |
- // TODO log the network error | |
- // TODO bubble the network error up to the parent context/error logger | |
-} | |
- | |
-func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) { | |
- if bs.tracer != nil { | |
- bs.tracer.MessageReceived(p, incoming) | |
- } | |
- | |
- bs.Client.ReceiveMessage(ctx, p, incoming) | |
- bs.Server.ReceiveMessage(ctx, p, incoming) | |
-} | |
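Aside (illustrative, not part of the patch): bitswap.go above is the compatibility polyfill that embeds a client and a server in one value, fans options out by their dynamic type, and registers itself as the network receiver via net.Start. A minimal usage sketch follows; constructing the BitSwapNetwork and Blockstore is assumed to happen elsewhere, and only bitswap.New, GetBlock, and Close are taken from the code above.

package example

import (
	"context"

	bitswap "github.com/ipfs/go-bitswap"
	bsnet "github.com/ipfs/go-bitswap/network"
	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
)

// fetchOne is a sketch: net and bstore are assumed to be set up by the caller.
func fetchOne(ctx context.Context, net bsnet.BitSwapNetwork, bstore blockstore.Blockstore, c cid.Cid) (blocks.Block, error) {
	bs := bitswap.New(ctx, net, bstore) // wires client+server and calls net.Start(bs)
	defer bs.Close()                    // stops the network and closes both halves

	// GetBlock is served by the embedded client; the embedded server answers
	// remote peers from bstore once NotifyNewBlocks has been called for stored blocks.
	return bs.GetBlock(ctx, c)
}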
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/client.go a/vendor/github.com/ipfs/go-bitswap/client/client.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/client.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/client.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,479 +0,0 @@ | |
-// Package client implements the IPFS exchange interface with the BitSwap | |
-// bilateral exchange protocol. | |
-package client | |
- | |
-import ( | |
- "context" | |
- "errors" | |
- | |
- "sync" | |
- "time" | |
- | |
- delay "github.com/ipfs/go-ipfs-delay" | |
- "go.opentelemetry.io/otel/attribute" | |
- "go.opentelemetry.io/otel/trace" | |
- | |
- bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" | |
- bsgetter "github.com/ipfs/go-bitswap/client/internal/getter" | |
- bsmq "github.com/ipfs/go-bitswap/client/internal/messagequeue" | |
- "github.com/ipfs/go-bitswap/client/internal/notifications" | |
- bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" | |
- bspqm "github.com/ipfs/go-bitswap/client/internal/providerquerymanager" | |
- bssession "github.com/ipfs/go-bitswap/client/internal/session" | |
- bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" | |
- bssm "github.com/ipfs/go-bitswap/client/internal/sessionmanager" | |
- bsspm "github.com/ipfs/go-bitswap/client/internal/sessionpeermanager" | |
- "github.com/ipfs/go-bitswap/internal" | |
- "github.com/ipfs/go-bitswap/internal/defaults" | |
- bsmsg "github.com/ipfs/go-bitswap/message" | |
- bmetrics "github.com/ipfs/go-bitswap/metrics" | |
- bsnet "github.com/ipfs/go-bitswap/network" | |
- "github.com/ipfs/go-bitswap/tracer" | |
- blocks "github.com/ipfs/go-block-format" | |
- "github.com/ipfs/go-cid" | |
- blockstore "github.com/ipfs/go-ipfs-blockstore" | |
- exchange "github.com/ipfs/go-ipfs-exchange-interface" | |
- logging "github.com/ipfs/go-log" | |
- "github.com/ipfs/go-metrics-interface" | |
- process "github.com/jbenet/goprocess" | |
- procctx "github.com/jbenet/goprocess/context" | |
- "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-var log = logging.Logger("bitswap-client") | |
- | |
-// Option defines the functional option type that can be used to configure | |
-// bitswap instances | |
-type Option func(*Client) | |
- | |
-// ProviderSearchDelay overwrites the global provider search delay | |
-func ProviderSearchDelay(newProvSearchDelay time.Duration) Option { | |
- return func(bs *Client) { | |
- bs.provSearchDelay = newProvSearchDelay | |
- } | |
-} | |
- | |
-// RebroadcastDelay overwrites the global provider rebroadcast delay | |
-func RebroadcastDelay(newRebroadcastDelay delay.D) Option { | |
- return func(bs *Client) { | |
- bs.rebroadcastDelay = newRebroadcastDelay | |
- } | |
-} | |
- | |
-func SetSimulateDontHavesOnTimeout(send bool) Option { | |
- return func(bs *Client) { | |
- bs.simulateDontHavesOnTimeout = send | |
- } | |
-} | |
- | |
-// WithTracer configures the Client to use the given tracer. | |
-// This provides methods to access all messages sent and received by the Client. | |
-// This interface can be used to implement various statistics (this is the original intent). | |
-func WithTracer(tap tracer.Tracer) Option { | |
- return func(bs *Client) { | |
- bs.tracer = tap | |
- } | |
-} | |
- | |
-func WithBlockReceivedNotifier(brn BlockReceivedNotifier) Option { | |
- return func(bs *Client) { | |
- bs.blockReceivedNotifier = brn | |
- } | |
-} | |
- | |
-type BlockReceivedNotifier interface { | |
- // ReceivedBlocks notifies the decision engine that a peer is well-behaved | |
- // and gave us useful data, potentially increasing its score and making us | |
- // send them more data in exchange. | |
- ReceivedBlocks(peer.ID, []blocks.Block) | |
-} | |
- | |
-// New initializes a Bitswap client that runs until client.Close is called. | |
-func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Client { | |
- // important to use provided parent context (since it may include important | |
- // loggable data). It's probably not a good idea to allow bitswap to be | |
- // coupled to the concerns of the ipfs daemon in this way. | |
- // | |
- // FIXME(btc) Now that bitswap manages itself using a process, it probably | |
- // shouldn't accept a context anymore. Clients should probably use Close() | |
- // exclusively. We should probably find another way to share logging data | |
- ctx, cancelFunc := context.WithCancel(parent) | |
- | |
- px := process.WithTeardown(func() error { | |
- return nil | |
- }) | |
- | |
- // onDontHaveTimeout is called when a want-block is sent to a peer that | |
- // has an old version of Bitswap that doesn't support DONT_HAVE messages, | |
- // or when no response is received within a timeout. | |
- var sm *bssm.SessionManager | |
- var bs *Client | |
- onDontHaveTimeout := func(p peer.ID, dontHaves []cid.Cid) { | |
- // Simulate a message arriving with DONT_HAVEs | |
- if bs.simulateDontHavesOnTimeout { | |
- sm.ReceiveFrom(ctx, p, nil, nil, dontHaves) | |
- } | |
- } | |
- peerQueueFactory := func(ctx context.Context, p peer.ID) bspm.PeerQueue { | |
- return bsmq.New(ctx, p, network, onDontHaveTimeout) | |
- } | |
- | |
- sim := bssim.New() | |
- bpm := bsbpm.New() | |
- pm := bspm.New(ctx, peerQueueFactory, network.Self()) | |
- pqm := bspqm.New(ctx, network) | |
- | |
- sessionFactory := func( | |
- sessctx context.Context, | |
- sessmgr bssession.SessionManager, | |
- id uint64, | |
- spm bssession.SessionPeerManager, | |
- sim *bssim.SessionInterestManager, | |
- pm bssession.PeerManager, | |
- bpm *bsbpm.BlockPresenceManager, | |
- notif notifications.PubSub, | |
- provSearchDelay time.Duration, | |
- rebroadcastDelay delay.D, | |
- self peer.ID) bssm.Session { | |
- return bssession.New(sessctx, sessmgr, id, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) | |
- } | |
- sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.SessionPeerManager { | |
- return bsspm.New(id, network.ConnectionManager()) | |
- } | |
- notif := notifications.New() | |
- sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) | |
- | |
- bs = &Client{ | |
- blockstore: bstore, | |
- network: network, | |
- process: px, | |
- pm: pm, | |
- pqm: pqm, | |
- sm: sm, | |
- sim: sim, | |
- notif: notif, | |
- counters: new(counters), | |
- dupMetric: bmetrics.DupHist(ctx), | |
- allMetric: bmetrics.AllHist(ctx), | |
- provSearchDelay: defaults.ProvSearchDelay, | |
- rebroadcastDelay: delay.Fixed(time.Minute), | |
- simulateDontHavesOnTimeout: true, | |
- } | |
- | |
- // apply functional options before starting and running bitswap | |
- for _, option := range options { | |
- option(bs) | |
- } | |
- | |
- bs.pqm.Startup() | |
- | |
- // bind the context and process. | |
- // do it over here to avoid closing before all setup is done. | |
- go func() { | |
- <-px.Closing() // process closes first | |
- sm.Shutdown() | |
- cancelFunc() | |
- notif.Shutdown() | |
- }() | |
- procctx.CloseAfterContext(px, ctx) // parent cancelled first | |
- | |
- return bs | |
-} | |
- | |
-// Client instances implement the bitswap protocol. | |
-type Client struct { | |
- pm *bspm.PeerManager | |
- | |
- // the provider query manager manages requests to find providers | |
- pqm *bspqm.ProviderQueryManager | |
- | |
- // network delivers messages on behalf of the session | |
- network bsnet.BitSwapNetwork | |
- | |
- // blockstore is the local database | |
- // NB: ensure threadsafety | |
- blockstore blockstore.Blockstore | |
- | |
- // manages channels of outgoing blocks for sessions | |
- notif notifications.PubSub | |
- | |
- process process.Process | |
- | |
- // Counters for various statistics | |
- counterLk sync.Mutex | |
- counters *counters | |
- | |
- // Metrics interface metrics | |
- dupMetric metrics.Histogram | |
- allMetric metrics.Histogram | |
- | |
- // External statistics interface | |
- tracer tracer.Tracer | |
- | |
- // the SessionManager routes requests to interested sessions | |
- sm *bssm.SessionManager | |
- | |
- // the SessionInterestManager keeps track of which sessions are interested | |
- // in which CIDs | |
- sim *bssim.SessionInterestManager | |
- | |
- // how long to wait before looking for providers in a session | |
- provSearchDelay time.Duration | |
- | |
- // how often to rebroadcast providing requests to find more optimized providers | |
- rebroadcastDelay delay.D | |
- | |
- blockReceivedNotifier BlockReceivedNotifier | |
- | |
- // whether we should actually simulate dont haves on request timeout | |
- simulateDontHavesOnTimeout bool | |
-} | |
- | |
-type counters struct { | |
- blocksRecvd uint64 | |
- dupBlocksRecvd uint64 | |
- dupDataRecvd uint64 | |
- dataRecvd uint64 | |
- messagesRecvd uint64 | |
-} | |
- | |
-// GetBlock attempts to retrieve a particular block from peers within the | |
-// deadline enforced by the context. | |
-func (bs *Client) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) { | |
- ctx, span := internal.StartSpan(ctx, "GetBlock", trace.WithAttributes(attribute.String("Key", k.String()))) | |
- defer span.End() | |
- return bsgetter.SyncGetBlock(ctx, k, bs.GetBlocks) | |
-} | |
- | |
-// GetBlocks returns a channel where the caller may receive blocks that | |
-// correspond to the provided |keys|. Returns an error if BitSwap is unable to | |
-// begin this request within the deadline enforced by the context. | |
-// | |
-// NB: Your request remains open until the context expires. To conserve | |
-// resources, provide a context with a reasonably short deadline (ie. not one | |
-// that lasts throughout the lifetime of the server) | |
-func (bs *Client) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { | |
- ctx, span := internal.StartSpan(ctx, "GetBlocks", trace.WithAttributes(attribute.Int("NumKeys", len(keys)))) | |
- defer span.End() | |
- session := bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) | |
- return session.GetBlocks(ctx, keys) | |
-} | |
- | |
-// NotifyNewBlocks announces the existence of blocks to this bitswap service. | |
-// Bitswap itself doesn't store new blocks. It's the caller's responsibility to ensure | |
-// that those blocks are available in the blockstore before calling this function. | |
-func (bs *Client) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { | |
- ctx, span := internal.StartSpan(ctx, "NotifyNewBlocks") | |
- defer span.End() | |
- | |
- select { | |
- case <-bs.process.Closing(): | |
- return errors.New("bitswap is closed") | |
- default: | |
- } | |
- | |
- blkCids := make([]cid.Cid, len(blks)) | |
- for i, blk := range blks { | |
- blkCids[i] = blk.Cid() | |
- } | |
- | |
- // Send all block keys (including duplicates) to any sessions that want them. | |
- // (The duplicates are needed by sessions for accounting purposes) | |
- bs.sm.ReceiveFrom(ctx, "", blkCids, nil, nil) | |
- | |
- // Publish the block to any Bitswap clients that had requested blocks. | |
- // (the sessions use this pubsub mechanism to inform clients of incoming | |
- // blocks) | |
- bs.notif.Publish(blks...) | |
- | |
- return nil | |
-} | |
- | |
-// receiveBlocksFrom processes blocks received from the network | |
-func (bs *Client) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { | |
- select { | |
- case <-bs.process.Closing(): | |
- return errors.New("bitswap is closed") | |
- default: | |
- } | |
- | |
- wanted, notWanted := bs.sim.SplitWantedUnwanted(blks) | |
- for _, b := range notWanted { | |
- log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from) | |
- } | |
- | |
- allKs := make([]cid.Cid, 0, len(blks)) | |
- for _, b := range blks { | |
- allKs = append(allKs, b.Cid()) | |
- } | |
- | |
- // Inform the PeerManager so that we can calculate per-peer latency | |
- combined := make([]cid.Cid, 0, len(allKs)+len(haves)+len(dontHaves)) | |
- combined = append(combined, allKs...) | |
- combined = append(combined, haves...) | |
- combined = append(combined, dontHaves...) | |
- bs.pm.ResponseReceived(from, combined) | |
- | |
- // Send all block keys (including duplicates) to any sessions that want them for accounting purpose. | |
- bs.sm.ReceiveFrom(ctx, from, allKs, haves, dontHaves) | |
- | |
- if bs.blockReceivedNotifier != nil { | |
- bs.blockReceivedNotifier.ReceivedBlocks(from, wanted) | |
- } | |
- | |
- // Publish the block to any Bitswap clients that had requested blocks. | |
- // (the sessions use this pubsub mechanism to inform clients of incoming | |
- // blocks) | |
- for _, b := range wanted { | |
- bs.notif.Publish(b) | |
- } | |
- | |
- for _, b := range wanted { | |
- log.Debugw("Bitswap.GetBlockRequest.End", "cid", b.Cid()) | |
- } | |
- | |
- return nil | |
-} | |
- | |
-// ReceiveMessage is called by the network interface when a new message is | |
-// received. | |
-func (bs *Client) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { | |
- bs.counterLk.Lock() | |
- bs.counters.messagesRecvd++ | |
- bs.counterLk.Unlock() | |
- | |
- if bs.tracer != nil { | |
- bs.tracer.MessageReceived(p, incoming) | |
- } | |
- | |
- iblocks := incoming.Blocks() | |
- | |
- if len(iblocks) > 0 { | |
- bs.updateReceiveCounters(iblocks) | |
- for _, b := range iblocks { | |
- log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) | |
- } | |
- } | |
- | |
- haves := incoming.Haves() | |
- dontHaves := incoming.DontHaves() | |
- if len(iblocks) > 0 || len(haves) > 0 || len(dontHaves) > 0 { | |
- // Process blocks | |
- err := bs.receiveBlocksFrom(ctx, p, iblocks, haves, dontHaves) | |
- if err != nil { | |
- log.Warnf("ReceiveMessage recvBlockFrom error: %s", err) | |
- return | |
- } | |
- } | |
-} | |
- | |
-func (bs *Client) updateReceiveCounters(blocks []blocks.Block) { | |
- // Check which blocks are in the datastore | |
- // (Note: any errors from the blockstore are simply logged out in | |
- // blockstoreHas()) | |
- blocksHas := bs.blockstoreHas(blocks) | |
- | |
- bs.counterLk.Lock() | |
- defer bs.counterLk.Unlock() | |
- | |
- // Do some accounting for each block | |
- for i, b := range blocks { | |
- has := blocksHas[i] | |
- | |
- blkLen := len(b.RawData()) | |
- bs.allMetric.Observe(float64(blkLen)) | |
- if has { | |
- bs.dupMetric.Observe(float64(blkLen)) | |
- } | |
- | |
- c := bs.counters | |
- | |
- c.blocksRecvd++ | |
- c.dataRecvd += uint64(blkLen) | |
- if has { | |
- c.dupBlocksRecvd++ | |
- c.dupDataRecvd += uint64(blkLen) | |
- } | |
- } | |
-} | |
- | |
-func (bs *Client) blockstoreHas(blks []blocks.Block) []bool { | |
- res := make([]bool, len(blks)) | |
- | |
- wg := sync.WaitGroup{} | |
- for i, block := range blks { | |
- wg.Add(1) | |
- go func(i int, b blocks.Block) { | |
- defer wg.Done() | |
- | |
- has, err := bs.blockstore.Has(context.TODO(), b.Cid()) | |
- if err != nil { | |
- log.Infof("blockstore.Has error: %s", err) | |
- has = false | |
- } | |
- | |
- res[i] = has | |
- }(i, block) | |
- } | |
- wg.Wait() | |
- | |
- return res | |
-} | |
- | |
-// PeerConnected is called by the network interface | |
-// when a peer initiates a new connection to bitswap. | |
-func (bs *Client) PeerConnected(p peer.ID) { | |
- bs.pm.Connected(p) | |
-} | |
- | |
-// PeerDisconnected is called by the network interface when a peer | |
-// closes a connection | |
-func (bs *Client) PeerDisconnected(p peer.ID) { | |
- bs.pm.Disconnected(p) | |
-} | |
- | |
-// ReceiveError is called by the network interface when an error happens | |
-// at the network layer. Currently just logs error. | |
-func (bs *Client) ReceiveError(err error) { | |
- log.Infof("Bitswap Client ReceiveError: %s", err) | |
- // TODO log the network error | |
- // TODO bubble the network error up to the parent context/error logger | |
-} | |
- | |
-// Close is called to shutdown the Client | |
-func (bs *Client) Close() error { | |
- return bs.process.Close() | |
-} | |
- | |
-// GetWantlist returns the current local wantlist (both want-blocks and | |
-// want-haves). | |
-func (bs *Client) GetWantlist() []cid.Cid { | |
- return bs.pm.CurrentWants() | |
-} | |
- | |
-// GetWantBlocks returns the current list of want-blocks. | |
-func (bs *Client) GetWantBlocks() []cid.Cid { | |
- return bs.pm.CurrentWantBlocks() | |
-} | |
- | |
-// GetWantHaves returns the current list of want-haves. | |
-func (bs *Client) GetWantHaves() []cid.Cid { | |
- return bs.pm.CurrentWantHaves() | |
-} | |
- | |
-// IsOnline is needed to match go-ipfs-exchange-interface | |
-func (bs *Client) IsOnline() bool { | |
- return true | |
-} | |
- | |
-// NewSession generates a new Bitswap session. You should use this, rather | |
-// than calling Client.GetBlocks, any time you intend to do several related | |
-// block requests in a row. The session returned will have its own GetBlocks | |
-// method, but the session will use the fact that the requests are related to | |
-// be more efficient in its requests to peers. If you are using a session | |
-// from go-blockservice, it will create a bitswap session automatically. | |
-func (bs *Client) NewSession(ctx context.Context) exchange.Fetcher { | |
- ctx, span := internal.StartSpan(ctx, "NewSession") | |
- defer span.End() | |
- return bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) | |
-} | |
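Aside (illustrative, not part of the patch): the NewSession/GetBlocks comments above recommend one session plus a bounded context for a batch of related requests. A sketch of that pattern, assuming a *client.Client has already been constructed and wired to a network elsewhere:

package example

import (
	"context"
	"time"

	"github.com/ipfs/go-bitswap/client"
	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
)

// fetchMany drains one session for a batch of related keys. The deadline keeps
// the request from staying open for the lifetime of the process, as the
// GetBlocks comment above advises.
func fetchMany(ctx context.Context, bs *client.Client, keys []cid.Cid) ([]blocks.Block, error) {
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()

	sess := bs.NewSession(ctx) // related requests share discovered peers
	ch, err := sess.GetBlocks(ctx, keys)
	if err != nil {
		return nil, err
	}

	out := make([]blocks.Block, 0, len(keys))
	for b := range ch { // closed once every key arrived or the context expired
		out = append(out, b)
	}
	return out, nil
}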
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/blockpresencemanager/blockpresencemanager.go a/vendor/github.com/ipfs/go-bitswap/client/internal/blockpresencemanager/blockpresencemanager.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/blockpresencemanager/blockpresencemanager.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/blockpresencemanager/blockpresencemanager.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,121 +0,0 @@ | |
-package blockpresencemanager | |
- | |
-import ( | |
- "sync" | |
- | |
- cid "github.com/ipfs/go-cid" | |
- peer "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-// BlockPresenceManager keeps track of which peers have indicated that they | |
-// have or explicitly don't have a block | |
-type BlockPresenceManager struct { | |
- sync.RWMutex | |
- presence map[cid.Cid]map[peer.ID]bool | |
-} | |
- | |
-func New() *BlockPresenceManager { | |
- return &BlockPresenceManager{ | |
- presence: make(map[cid.Cid]map[peer.ID]bool), | |
- } | |
-} | |
- | |
-// ReceiveFrom is called when a peer sends us information about which blocks | |
-// it has and does not have | |
-func (bpm *BlockPresenceManager) ReceiveFrom(p peer.ID, haves []cid.Cid, dontHaves []cid.Cid) { | |
- bpm.Lock() | |
- defer bpm.Unlock() | |
- | |
- for _, c := range haves { | |
- bpm.updateBlockPresence(p, c, true) | |
- } | |
- for _, c := range dontHaves { | |
- bpm.updateBlockPresence(p, c, false) | |
- } | |
-} | |
- | |
-func (bpm *BlockPresenceManager) updateBlockPresence(p peer.ID, c cid.Cid, present bool) { | |
- _, ok := bpm.presence[c] | |
- if !ok { | |
- bpm.presence[c] = make(map[peer.ID]bool) | |
- } | |
- | |
- // Make sure not to change HAVE to DONT_HAVE | |
- has, pok := bpm.presence[c][p] | |
- if pok && has { | |
- return | |
- } | |
- bpm.presence[c][p] = present | |
-} | |
- | |
-// PeerHasBlock indicates whether the given peer has sent a HAVE for the given | |
-// cid | |
-func (bpm *BlockPresenceManager) PeerHasBlock(p peer.ID, c cid.Cid) bool { | |
- bpm.RLock() | |
- defer bpm.RUnlock() | |
- | |
- return bpm.presence[c][p] | |
-} | |
- | |
-// PeerDoesNotHaveBlock indicates whether the given peer has sent a DONT_HAVE | |
-// for the given cid | |
-func (bpm *BlockPresenceManager) PeerDoesNotHaveBlock(p peer.ID, c cid.Cid) bool { | |
- bpm.RLock() | |
- defer bpm.RUnlock() | |
- | |
- have, known := bpm.presence[c][p] | |
- return known && !have | |
-} | |
- | |
-// AllPeersDoNotHaveBlock filters the keys down to those for which every | |
-// given peer has sent us a DONT_HAVE. | |
-// This tells us when we've exhausted all possibilities of finding the key | |
-// with the peers we know about. | |
-func (bpm *BlockPresenceManager) AllPeersDoNotHaveBlock(peers []peer.ID, ks []cid.Cid) []cid.Cid { | |
- bpm.RLock() | |
- defer bpm.RUnlock() | |
- | |
- var res []cid.Cid | |
- for _, c := range ks { | |
- if bpm.allDontHave(peers, c) { | |
- res = append(res, c) | |
- } | |
- } | |
- return res | |
-} | |
- | |
-func (bpm *BlockPresenceManager) allDontHave(peers []peer.ID, c cid.Cid) bool { | |
- // Check if we know anything about the cid's block presence | |
- ps, cok := bpm.presence[c] | |
- if !cok { | |
- return false | |
- } | |
- | |
- // Check if we explicitly know that all the given peers do not have the cid | |
- for _, p := range peers { | |
- if has, pok := ps[p]; !pok || has { | |
- return false | |
- } | |
- } | |
- return true | |
-} | |
- | |
-// RemoveKeys cleans up the given keys from the block presence map | |
-func (bpm *BlockPresenceManager) RemoveKeys(ks []cid.Cid) { | |
- bpm.Lock() | |
- defer bpm.Unlock() | |
- | |
- for _, c := range ks { | |
- delete(bpm.presence, c) | |
- } | |
-} | |
- | |
-// HasKey indicates whether the BlockPresenceManager is tracking the given key | |
-// (used by the tests) | |
-func (bpm *BlockPresenceManager) HasKey(c cid.Cid) bool { | |
- bpm.Lock() | |
- defer bpm.Unlock() | |
- | |
- _, ok := bpm.presence[c] | |
- return ok | |
-} | |
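Aside (illustrative, not part of the patch): two rules above are worth calling out: a recorded HAVE is never downgraded by a later DONT_HAVE, and AllPeersDoNotHaveBlock only returns keys for which every listed peer has explicitly answered DONT_HAVE. The package is internal to go-bitswap, so the sketch below is written as if it lived in the same package; the peer IDs and cid are placeholders.

package blockpresencemanager

import (
	"fmt"

	"github.com/ipfs/go-cid"
	peer "github.com/libp2p/go-libp2p/core/peer"
)

// presenceDemo walks the rules above with two peers and one key.
func presenceDemo(p1, p2 peer.ID, c cid.Cid) {
	m := New()
	m.ReceiveFrom(p1, []cid.Cid{c}, nil) // p1 says HAVE
	m.ReceiveFrom(p1, nil, []cid.Cid{c}) // later DONT_HAVE from p1 is ignored
	m.ReceiveFrom(p2, nil, []cid.Cid{c}) // p2 says DONT_HAVE

	fmt.Println(m.PeerHasBlock(p1, c))         // true
	fmt.Println(m.PeerDoesNotHaveBlock(p2, c)) // true
	// Empty: p1 has not answered DONT_HAVE, so the key is not yet exhausted.
	fmt.Println(m.AllPeersDoNotHaveBlock([]peer.ID{p1, p2}, []cid.Cid{c}))
}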
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/getter/getter.go a/vendor/github.com/ipfs/go-bitswap/client/internal/getter/getter.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/getter/getter.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/getter/getter.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,138 +0,0 @@ | |
-package getter | |
- | |
-import ( | |
- "context" | |
- "errors" | |
- | |
- "github.com/ipfs/go-bitswap/client/internal" | |
- notifications "github.com/ipfs/go-bitswap/client/internal/notifications" | |
- logging "github.com/ipfs/go-log" | |
- | |
- blocks "github.com/ipfs/go-block-format" | |
- cid "github.com/ipfs/go-cid" | |
- ipld "github.com/ipfs/go-ipld-format" | |
-) | |
- | |
-var log = logging.Logger("bitswap") | |
- | |
-// GetBlocksFunc is any function that can take an array of CIDs and return a | |
-// channel of incoming blocks. | |
-type GetBlocksFunc func(context.Context, []cid.Cid) (<-chan blocks.Block, error) | |
- | |
-// SyncGetBlock takes a block cid and an async function for getting several | |
-// blocks that returns a channel, and uses that function to return the | |
-// block synchronously. | |
-func SyncGetBlock(p context.Context, k cid.Cid, gb GetBlocksFunc) (blocks.Block, error) { | |
- p, span := internal.StartSpan(p, "Getter.SyncGetBlock") | |
- defer span.End() | |
- | |
- if !k.Defined() { | |
- log.Error("undefined cid in GetBlock") | |
- return nil, ipld.ErrNotFound{Cid: k} | |
- } | |
- | |
- // Any async work initiated by this function must end when this function | |
- // returns. To ensure this, derive a new context. Note that it is okay to | |
- // listen on parent in this scope, but NOT okay to pass |parent| to | |
- // functions called by this one. Otherwise those functions won't return | |
- // when this context's cancel func is executed. This is difficult to | |
- // enforce. May this comment keep you safe. | |
- ctx, cancel := context.WithCancel(p) | |
- defer cancel() | |
- | |
- promise, err := gb(ctx, []cid.Cid{k}) | |
- if err != nil { | |
- return nil, err | |
- } | |
- | |
- select { | |
- case block, ok := <-promise: | |
- if !ok { | |
- select { | |
- case <-ctx.Done(): | |
- return nil, ctx.Err() | |
- default: | |
- return nil, errors.New("promise channel was closed") | |
- } | |
- } | |
- return block, nil | |
- case <-p.Done(): | |
- return nil, p.Err() | |
- } | |
-} | |
- | |
-// WantFunc is any function that can express a want for a set of blocks. | |
-type WantFunc func(context.Context, []cid.Cid) | |
- | |
-// AsyncGetBlocks take a set of block cids, a pubsub channel for incoming | |
-// blocks, a want function, and a close function, and returns a channel of | |
-// incoming blocks. | |
-func AsyncGetBlocks(ctx context.Context, sessctx context.Context, keys []cid.Cid, notif notifications.PubSub, | |
- want WantFunc, cwants func([]cid.Cid)) (<-chan blocks.Block, error) { | |
- ctx, span := internal.StartSpan(ctx, "Getter.AsyncGetBlocks") | |
- defer span.End() | |
- | |
- // If there are no keys supplied, just return a closed channel | |
- if len(keys) == 0 { | |
- out := make(chan blocks.Block) | |
- close(out) | |
- return out, nil | |
- } | |
- | |
- // Use a PubSub notifier to listen for incoming blocks for each key | |
- remaining := cid.NewSet() | |
- promise := notif.Subscribe(ctx, keys...) | |
- for _, k := range keys { | |
- log.Debugw("Bitswap.GetBlockRequest.Start", "cid", k) | |
- remaining.Add(k) | |
- } | |
- | |
- // Send the want request for the keys to the network | |
- want(ctx, keys) | |
- | |
- out := make(chan blocks.Block) | |
- go handleIncoming(ctx, sessctx, remaining, promise, out, cwants) | |
- return out, nil | |
-} | |
- | |
-// Listens for incoming blocks, passing them to the out channel. | |
-// If the context is cancelled or the incoming channel closes, calls cfun with | |
-// any keys corresponding to blocks that were never received. | |
-func handleIncoming(ctx context.Context, sessctx context.Context, remaining *cid.Set, | |
- in <-chan blocks.Block, out chan blocks.Block, cfun func([]cid.Cid)) { | |
- | |
- ctx, cancel := context.WithCancel(ctx) | |
- | |
- // Clean up before exiting this function, and call the cancel function on | |
- // any remaining keys | |
- defer func() { | |
- cancel() | |
- close(out) | |
- // can't just defer this call on its own, arguments are resolved *when* the defer is created | |
- cfun(remaining.Keys()) | |
- }() | |
- | |
- for { | |
- select { | |
- case blk, ok := <-in: | |
- // If the channel is closed, we're done (note that PubSub closes | |
- // the channel once all the keys have been received) | |
- if !ok { | |
- return | |
- } | |
- | |
- remaining.Remove(blk.Cid()) | |
- select { | |
- case out <- blk: | |
- case <-ctx.Done(): | |
- return | |
- case <-sessctx.Done(): | |
- return | |
- } | |
- case <-ctx.Done(): | |
- return | |
- case <-sessctx.Done(): | |
- return | |
- } | |
- } | |
-} | |
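Aside (illustrative, not part of the patch): SyncGetBlock above turns an asynchronous batch fetcher into a blocking single-block call by deriving a child context and selecting on the result channel. A simplified stand-alone version of the same pattern, not the library code itself:

package example

import (
	"context"
	"errors"

	blocks "github.com/ipfs/go-block-format"
	"github.com/ipfs/go-cid"
)

type getBlocksFunc func(context.Context, []cid.Cid) (<-chan blocks.Block, error)

// syncGet blocks until one block arrives, the channel closes, or ctx expires.
func syncGet(ctx context.Context, k cid.Cid, gb getBlocksFunc) (blocks.Block, error) {
	// Derive a child context so any work started by gb ends when we return.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	ch, err := gb(ctx, []cid.Cid{k})
	if err != nil {
		return nil, err
	}
	select {
	case blk, ok := <-ch:
		if !ok {
			return nil, errors.New("fetch channel closed before a block arrived")
		}
		return blk, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}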
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/messagequeue/donthavetimeoutmgr.go a/vendor/github.com/ipfs/go-bitswap/client/internal/messagequeue/donthavetimeoutmgr.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/messagequeue/donthavetimeoutmgr.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/messagequeue/donthavetimeoutmgr.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,398 +0,0 @@ | |
-package messagequeue | |
- | |
-import ( | |
- "context" | |
- "sync" | |
- "time" | |
- | |
- "github.com/benbjohnson/clock" | |
- cid "github.com/ipfs/go-cid" | |
- "github.com/libp2p/go-libp2p/p2p/protocol/ping" | |
-) | |
- | |
-const ( | |
- // dontHaveTimeout is used to simulate a DONT_HAVE when communicating with | |
- // a peer whose Bitswap client doesn't support the DONT_HAVE response, | |
- // or when the peer takes too long to respond. | |
- // If the peer doesn't respond to a want-block within the timeout, the | |
- // local node assumes that the peer doesn't have the block. | |
- dontHaveTimeout = 5 * time.Second | |
- | |
- // maxExpectedWantProcessTime is the maximum amount of time we expect a | |
- // peer takes to process a want and initiate sending a response to us | |
- maxExpectedWantProcessTime = 2 * time.Second | |
- | |
- // maxTimeout is the maximum allowed timeout, regardless of latency | |
- maxTimeout = dontHaveTimeout + maxExpectedWantProcessTime | |
- | |
- // pingLatencyMultiplier is multiplied by the average ping time to | |
- // get an upper bound on how long we expect to wait for a peer's response | |
- // to arrive | |
- pingLatencyMultiplier = 3 | |
- | |
- // messageLatencyAlpha is the alpha supplied to the message latency EWMA | |
- messageLatencyAlpha = 0.5 | |
- | |
- // To give a margin for error, the timeout is calculated as | |
- // messageLatencyMultiplier * message latency | |
- messageLatencyMultiplier = 2 | |
-) | |
- | |
-// PeerConnection is a connection to a peer that can be pinged, and the | |
-// average latency measured | |
-type PeerConnection interface { | |
- // Ping the peer | |
- Ping(context.Context) ping.Result | |
- // The average latency of all pings | |
- Latency() time.Duration | |
-} | |
- | |
-// pendingWant keeps track of a want that has been sent and we're waiting | |
-// for a response or for a timeout to expire | |
-type pendingWant struct { | |
- c cid.Cid | |
- active bool | |
- sent time.Time | |
-} | |
- | |
-// dontHaveTimeoutMgr simulates a DONT_HAVE message if the peer takes too long | |
-// to respond to a message. | |
-// The timeout is based on latency - we start with a default latency, while | |
-// we ping the peer to estimate latency. If we receive a response from the | |
-// peer we use the response latency. | |
-type dontHaveTimeoutMgr struct { | |
- clock clock.Clock | |
- ctx context.Context | |
- shutdown func() | |
- peerConn PeerConnection | |
- onDontHaveTimeout func([]cid.Cid) | |
- defaultTimeout time.Duration | |
- maxTimeout time.Duration | |
- pingLatencyMultiplier int | |
- messageLatencyMultiplier int | |
- maxExpectedWantProcessTime time.Duration | |
- | |
- // All variables below here must be protected by the lock | |
- lk sync.RWMutex | |
- // has the timeout manager started | |
- started bool | |
- // wants that are active (waiting for a response or timeout) | |
- activeWants map[cid.Cid]*pendingWant | |
- // queue of wants, from oldest to newest | |
- wantQueue []*pendingWant | |
- // time to wait for a response (depends on latency) | |
- timeout time.Duration | |
- // ewma of message latency (time from message sent to response received) | |
- messageLatency *latencyEwma | |
- // timer used to wait until want at front of queue expires | |
- checkForTimeoutsTimer *clock.Timer | |
- // used for testing -- timeoutsTriggered is signalled when scheduled DONT_HAVE timeouts fire | |
- timeoutsTriggered chan struct{} | |
-} | |
- | |
-// newDontHaveTimeoutMgr creates a new dontHaveTimeoutMgr | |
-// onDontHaveTimeout is called when pending keys expire (not cancelled before timeout) | |
-func newDontHaveTimeoutMgr(pc PeerConnection, onDontHaveTimeout func([]cid.Cid), clock clock.Clock) *dontHaveTimeoutMgr { | |
- return newDontHaveTimeoutMgrWithParams(pc, onDontHaveTimeout, dontHaveTimeout, maxTimeout, | |
- pingLatencyMultiplier, messageLatencyMultiplier, maxExpectedWantProcessTime, clock, nil) | |
-} | |
- | |
-// newDontHaveTimeoutMgrWithParams is used by the tests | |
-func newDontHaveTimeoutMgrWithParams( | |
- pc PeerConnection, | |
- onDontHaveTimeout func([]cid.Cid), | |
- defaultTimeout time.Duration, | |
- maxTimeout time.Duration, | |
- pingLatencyMultiplier int, | |
- messageLatencyMultiplier int, | |
- maxExpectedWantProcessTime time.Duration, | |
- clock clock.Clock, | |
- timeoutsTriggered chan struct{}) *dontHaveTimeoutMgr { | |
- | |
- ctx, shutdown := context.WithCancel(context.Background()) | |
- mqp := &dontHaveTimeoutMgr{ | |
- clock: clock, | |
- ctx: ctx, | |
- shutdown: shutdown, | |
- peerConn: pc, | |
- activeWants: make(map[cid.Cid]*pendingWant), | |
- timeout: defaultTimeout, | |
- messageLatency: &latencyEwma{alpha: messageLatencyAlpha}, | |
- defaultTimeout: defaultTimeout, | |
- maxTimeout: maxTimeout, | |
- pingLatencyMultiplier: pingLatencyMultiplier, | |
- messageLatencyMultiplier: messageLatencyMultiplier, | |
- maxExpectedWantProcessTime: maxExpectedWantProcessTime, | |
- onDontHaveTimeout: onDontHaveTimeout, | |
- timeoutsTriggered: timeoutsTriggered, | |
- } | |
- | |
- return mqp | |
-} | |
- | |
-// Shutdown the dontHaveTimeoutMgr. Any subsequent call to Start() will be ignored | |
-func (dhtm *dontHaveTimeoutMgr) Shutdown() { | |
- dhtm.shutdown() | |
- | |
- dhtm.lk.Lock() | |
- defer dhtm.lk.Unlock() | |
- | |
- // Clear any pending check for timeouts | |
- if dhtm.checkForTimeoutsTimer != nil { | |
- dhtm.checkForTimeoutsTimer.Stop() | |
- } | |
-} | |
- | |
-// Start the dontHaveTimeoutMgr. This method is idempotent | |
-func (dhtm *dontHaveTimeoutMgr) Start() { | |
- dhtm.lk.Lock() | |
- defer dhtm.lk.Unlock() | |
- | |
- // Make sure the dont have timeout manager hasn't already been started | |
- if dhtm.started { | |
- return | |
- } | |
- dhtm.started = true | |
- | |
- // If we already have a measure of latency to the peer, use it to | |
- // calculate a reasonable timeout | |
- latency := dhtm.peerConn.Latency() | |
- if latency.Nanoseconds() > 0 { | |
- dhtm.timeout = dhtm.calculateTimeoutFromPingLatency(latency) | |
- return | |
- } | |
- | |
- // Otherwise measure latency by pinging the peer | |
- go dhtm.measurePingLatency() | |
-} | |
- | |
-// UpdateMessageLatency is called when we receive a response from the peer. | |
-// It is the time between sending a request and receiving the corresponding | |
-// response. | |
-func (dhtm *dontHaveTimeoutMgr) UpdateMessageLatency(elapsed time.Duration) { | |
- dhtm.lk.Lock() | |
- defer dhtm.lk.Unlock() | |
- | |
- // Update the message latency and the timeout | |
- dhtm.messageLatency.update(elapsed) | |
- oldTimeout := dhtm.timeout | |
- dhtm.timeout = dhtm.calculateTimeoutFromMessageLatency() | |
- | |
- // If the timeout has decreased | |
- if dhtm.timeout < oldTimeout { | |
- // Check if after changing the timeout there are any pending wants that | |
- // are now over the timeout | |
- dhtm.checkForTimeouts() | |
- } | |
-} | |
- | |
-// measurePingLatency measures the latency to the peer by pinging it | |
-func (dhtm *dontHaveTimeoutMgr) measurePingLatency() { | |
- // Wait up to defaultTimeout for a response to the ping | |
- ctx, cancel := context.WithTimeout(dhtm.ctx, dhtm.defaultTimeout) | |
- defer cancel() | |
- | |
- // Ping the peer | |
- res := dhtm.peerConn.Ping(ctx) | |
- if res.Error != nil { | |
- // If there was an error, we'll just leave the timeout as | |
- // defaultTimeout | |
- return | |
- } | |
- | |
- // Get the average latency to the peer | |
- latency := dhtm.peerConn.Latency() | |
- | |
- dhtm.lk.Lock() | |
- defer dhtm.lk.Unlock() | |
- | |
- // A message has arrived so we already set the timeout based on message latency | |
- if dhtm.messageLatency.samples > 0 { | |
- return | |
- } | |
- | |
- // Calculate a reasonable timeout based on latency | |
- dhtm.timeout = dhtm.calculateTimeoutFromPingLatency(latency) | |
- | |
- // Check if after changing the timeout there are any pending wants that are | |
- // now over the timeout | |
- dhtm.checkForTimeouts() | |
-} | |
- | |
-// checkForTimeouts checks pending wants to see if any are over the timeout. | |
-// Note: this function should only be called within the lock. | |
-func (dhtm *dontHaveTimeoutMgr) checkForTimeouts() { | |
- | |
- if len(dhtm.wantQueue) == 0 { | |
- return | |
- } | |
- | |
- // Figure out which of the blocks that were wanted were not received | |
- // within the timeout | |
- expired := make([]cid.Cid, 0, len(dhtm.activeWants)) | |
- for len(dhtm.wantQueue) > 0 { | |
- pw := dhtm.wantQueue[0] | |
- | |
- // If the want is still active | |
- if pw.active { | |
- // The queue is in order from earliest to latest, so if we | |
- // didn't find an expired entry we can stop iterating | |
- if dhtm.clock.Since(pw.sent) < dhtm.timeout { | |
- break | |
- } | |
- | |
- // Add the want to the expired list | |
- expired = append(expired, pw.c) | |
- // Remove the want from the activeWants map | |
- delete(dhtm.activeWants, pw.c) | |
- } | |
- | |
- // Remove expired or cancelled wants from the want queue | |
- dhtm.wantQueue = dhtm.wantQueue[1:] | |
- } | |
- | |
- // Fire the timeout event for the expired wants | |
- if len(expired) > 0 { | |
- go dhtm.fireTimeout(expired) | |
- } | |
- | |
- if len(dhtm.wantQueue) == 0 { | |
- return | |
- } | |
- | |
- // Make sure the timeout manager is still running | |
- if dhtm.ctx.Err() != nil { | |
- return | |
- } | |
- | |
- // Schedule the next check for the moment when the oldest pending want will | |
- // timeout | |
- oldestStart := dhtm.wantQueue[0].sent | |
- until := oldestStart.Add(dhtm.timeout).Sub(dhtm.clock.Now()) | |
- if dhtm.checkForTimeoutsTimer == nil { | |
- dhtm.checkForTimeoutsTimer = dhtm.clock.Timer(until) | |
- go dhtm.consumeTimeouts() | |
- } else { | |
- dhtm.checkForTimeoutsTimer.Stop() | |
- dhtm.checkForTimeoutsTimer.Reset(until) | |
- } | |
-} | |
- | |
-func (dhtm *dontHaveTimeoutMgr) consumeTimeouts() { | |
- for { | |
- select { | |
- case <-dhtm.ctx.Done(): | |
- return | |
- case <-dhtm.checkForTimeoutsTimer.C: | |
- dhtm.lk.Lock() | |
- dhtm.checkForTimeouts() | |
- dhtm.lk.Unlock() | |
- } | |
- } | |
-} | |
- | |
-// AddPending adds the given keys that will expire if not cancelled before | |
-// the timeout | |
-func (dhtm *dontHaveTimeoutMgr) AddPending(ks []cid.Cid) { | |
- if len(ks) == 0 { | |
- return | |
- } | |
- | |
- start := dhtm.clock.Now() | |
- | |
- dhtm.lk.Lock() | |
- defer dhtm.lk.Unlock() | |
- | |
- queueWasEmpty := len(dhtm.activeWants) == 0 | |
- | |
- // Record the start time for each key | |
- for _, c := range ks { | |
- if _, ok := dhtm.activeWants[c]; !ok { | |
- pw := pendingWant{ | |
- c: c, | |
- sent: start, | |
- active: true, | |
- } | |
- dhtm.activeWants[c] = &pw | |
- dhtm.wantQueue = append(dhtm.wantQueue, &pw) | |
- } | |
- } | |
- | |
- // If there was already an earlier pending item in the queue, then there | |
- // must already be a timeout check scheduled. If there is nothing in the | |
- // queue then we should make sure to schedule a check. | |
- if queueWasEmpty { | |
- dhtm.checkForTimeouts() | |
- } | |
-} | |
- | |
-// CancelPending is called when we receive a response for a key | |
-func (dhtm *dontHaveTimeoutMgr) CancelPending(ks []cid.Cid) { | |
- dhtm.lk.Lock() | |
- defer dhtm.lk.Unlock() | |
- | |
- // Mark the wants as cancelled | |
- for _, c := range ks { | |
- if pw, ok := dhtm.activeWants[c]; ok { | |
- pw.active = false | |
- delete(dhtm.activeWants, c) | |
- } | |
- } | |
-} | |
- | |
-// fireTimeout fires the onDontHaveTimeout method with the timed out keys | |
-func (dhtm *dontHaveTimeoutMgr) fireTimeout(pending []cid.Cid) { | |
- // Make sure the timeout manager has not been shut down | |
- if dhtm.ctx.Err() != nil { | |
- return | |
- } | |
- | |
- // Fire the timeout | |
- dhtm.onDontHaveTimeout(pending) | |
- | |
- // signal a timeout fired | |
- if dhtm.timeoutsTriggered != nil { | |
- dhtm.timeoutsTriggered <- struct{}{} | |
- } | |
-} | |
- | |
-// calculateTimeoutFromPingLatency calculates a reasonable timeout derived from latency | |
-func (dhtm *dontHaveTimeoutMgr) calculateTimeoutFromPingLatency(latency time.Duration) time.Duration { | |
- // The maximum expected time for a response is | |
- // the expected time to process the want + (latency * multiplier) | |
- // The multiplier is to provide some padding for variable latency. | |
- timeout := dhtm.maxExpectedWantProcessTime + time.Duration(dhtm.pingLatencyMultiplier)*latency | |
- if timeout > dhtm.maxTimeout { | |
- timeout = dhtm.maxTimeout | |
- } | |
- return timeout | |
-} | |
- | |
-// calculateTimeoutFromMessageLatency calculates a timeout derived from message latency | |
-func (dhtm *dontHaveTimeoutMgr) calculateTimeoutFromMessageLatency() time.Duration { | |
- timeout := dhtm.messageLatency.latency * time.Duration(dhtm.messageLatencyMultiplier) | |
- if timeout > dhtm.maxTimeout { | |
- timeout = dhtm.maxTimeout | |
- } | |
- return timeout | |
-} | |
- | |
-// latencyEwma is an EWMA of message latency | |
-type latencyEwma struct { | |
- alpha float64 | |
- samples uint64 | |
- latency time.Duration | |
-} | |
- | |
-// update the EWMA with the given sample | |
-func (le *latencyEwma) update(elapsed time.Duration) { | |
- le.samples++ | |
- | |
- // Initially set alpha to be 1.0 / <the number of samples> | |
- alpha := 1.0 / float64(le.samples) | |
- if alpha < le.alpha { | |
- // Once we have enough samples, clamp alpha | |
- alpha = le.alpha | |
- } | |
- le.latency = time.Duration(float64(elapsed)*alpha + (1-alpha)*float64(le.latency)) | |
-} | |
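Aside (illustrative, not part of the patch): with the defaults above (dontHaveTimeout 5s, maxExpectedWantProcessTime 2s, hence maxTimeout 7s), the two timeout formulas work out as in the sketch below. The latency numbers plugged in are made up for the example.

package example

import (
	"fmt"
	"time"
)

func timeoutDemo() {
	const (
		defaultTimeout             = 5 * time.Second // dontHaveTimeout
		maxExpectedWantProcessTime = 2 * time.Second
		maxTimeout                 = defaultTimeout + maxExpectedWantProcessTime // 7s
		pingLatencyMultiplier      = 3
		messageLatencyMultiplier   = 2
	)

	// Ping-based: processing allowance + 3x the measured ping latency, capped.
	ping := 200 * time.Millisecond
	t1 := maxExpectedWantProcessTime + time.Duration(pingLatencyMultiplier)*ping // 2.6s
	if t1 > maxTimeout {
		t1 = maxTimeout
	}

	// Message-latency based: 2x the EWMA of request-to-response time, capped.
	ewma := 4 * time.Second
	t2 := ewma * time.Duration(messageLatencyMultiplier) // 8s, capped to 7s
	if t2 > maxTimeout {
		t2 = maxTimeout
	}

	fmt.Println(t1, t2) // 2.6s 7s
}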
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/messagequeue/messagequeue.go a/vendor/github.com/ipfs/go-bitswap/client/internal/messagequeue/messagequeue.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/messagequeue/messagequeue.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/messagequeue/messagequeue.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,843 +0,0 @@ | |
-package messagequeue | |
- | |
-import ( | |
- "context" | |
- "math" | |
- "sync" | |
- "time" | |
- | |
- "github.com/benbjohnson/clock" | |
- bswl "github.com/ipfs/go-bitswap/client/wantlist" | |
- bsmsg "github.com/ipfs/go-bitswap/message" | |
- pb "github.com/ipfs/go-bitswap/message/pb" | |
- bsnet "github.com/ipfs/go-bitswap/network" | |
- cid "github.com/ipfs/go-cid" | |
- logging "github.com/ipfs/go-log" | |
- peer "github.com/libp2p/go-libp2p/core/peer" | |
- "github.com/libp2p/go-libp2p/p2p/protocol/ping" | |
- "go.uber.org/zap" | |
-) | |
- | |
-var log = logging.Logger("bitswap") | |
-var sflog = log.Desugar() | |
- | |
-const ( | |
- defaultRebroadcastInterval = 30 * time.Second | |
- // maxRetries is the number of times to attempt to send a message before | |
- // giving up | |
- maxRetries = 3 | |
- sendTimeout = 30 * time.Second | |
- // maxMessageSize is the maximum message size in bytes | |
- maxMessageSize = 1024 * 1024 * 2 | |
- // sendErrorBackoff is the time to wait before retrying to connect after | |
- // an error when trying to send a message | |
- sendErrorBackoff = 100 * time.Millisecond | |
- // maxPriority is the max priority as defined by the bitswap protocol | |
- maxPriority = math.MaxInt32 | |
- // sendMessageDebounce is the debounce duration when calling sendMessage() | |
- sendMessageDebounce = time.Millisecond | |
- // when we reach sendMessageCutoff wants/cancels, we'll send the message immediately. | |
- sendMessageCutoff = 256 | |
- // when we debounce for more than sendMessageMaxDelay, we'll send the | |
- // message immediately. | |
- sendMessageMaxDelay = 20 * time.Millisecond | |
- // The maximum amount of time in which to accept a response as being valid | |
- // for latency calculation (as opposed to discarding it as an outlier) | |
- maxValidLatency = 30 * time.Second | |
-) | |
- | |
-// MessageNetwork is any network that can connect peers and generate a message | |
-// sender. | |
-type MessageNetwork interface { | |
- ConnectTo(context.Context, peer.ID) error | |
- NewMessageSender(context.Context, peer.ID, *bsnet.MessageSenderOpts) (bsnet.MessageSender, error) | |
- Latency(peer.ID) time.Duration | |
- Ping(context.Context, peer.ID) ping.Result | |
- Self() peer.ID | |
-} | |
- | |
-// MessageQueue implements queue of want messages to send to peers. | |
-type MessageQueue struct { | |
- ctx context.Context | |
- shutdown func() | |
- p peer.ID | |
- network MessageNetwork | |
- dhTimeoutMgr DontHaveTimeoutManager | |
- | |
- // The maximum size of a message in bytes. Any overflow is put into the | |
- // next message | |
- maxMessageSize int | |
- | |
- // The amount of time to wait when there's an error sending to a peer | |
- // before retrying | |
- sendErrorBackoff time.Duration | |
- | |
- // The maximum amount of time in which to accept a response as being valid | |
- // for latency calculation | |
- maxValidLatency time.Duration | |
- | |
- // Signals that there are outgoing wants / cancels ready to be processed | |
- outgoingWork chan time.Time | |
- | |
- // Channel of CIDs of blocks / HAVEs / DONT_HAVEs received from the peer | |
- responses chan []cid.Cid | |
- | |
- // Take lock whenever any of these variables are modified | |
- wllock sync.Mutex | |
- bcstWants recallWantlist | |
- peerWants recallWantlist | |
- cancels *cid.Set | |
- priority int32 | |
- | |
- // Dont touch any of these variables outside of run loop | |
- sender bsnet.MessageSender | |
- rebroadcastIntervalLk sync.RWMutex | |
- rebroadcastInterval time.Duration | |
- rebroadcastTimer *clock.Timer | |
- // For performance reasons we just clear out the fields of the message | |
- // instead of creating a new one every time. | |
- msg bsmsg.BitSwapMessage | |
- | |
- // For simulating time -- uses mock in test | |
- clock clock.Clock | |
- | |
- // Used to track things that happen asynchronously -- used only in test | |
- events chan messageEvent | |
-} | |
- | |
-// recallWantlist keeps a list of pending wants and a list of sent wants | |
-type recallWantlist struct { | |
- // The list of wants that have not yet been sent | |
- pending *bswl.Wantlist | |
- // The list of wants that have been sent | |
- sent *bswl.Wantlist | |
- // The time at which each want was sent | |
- sentAt map[cid.Cid]time.Time | |
-} | |
- | |
-func newRecallWantList() recallWantlist { | |
- return recallWantlist{ | |
- pending: bswl.New(), | |
- sent: bswl.New(), | |
- sentAt: make(map[cid.Cid]time.Time), | |
- } | |
-} | |
- | |
-// Add want to the pending list | |
-func (r *recallWantlist) Add(c cid.Cid, priority int32, wtype pb.Message_Wantlist_WantType) { | |
- r.pending.Add(c, priority, wtype) | |
-} | |
- | |
-// Remove wants from both the pending list and the list of sent wants | |
-func (r *recallWantlist) Remove(c cid.Cid) { | |
- r.pending.Remove(c) | |
- r.sent.Remove(c) | |
- delete(r.sentAt, c) | |
-} | |
- | |
-// Remove wants by type from both the pending list and the list of sent wants | |
-func (r *recallWantlist) RemoveType(c cid.Cid, wtype pb.Message_Wantlist_WantType) { | |
- r.pending.RemoveType(c, wtype) | |
- r.sent.RemoveType(c, wtype) | |
- if _, ok := r.sent.Contains(c); !ok { | |
- delete(r.sentAt, c) | |
- } | |
-} | |
- | |
-// MarkSent moves the want from the pending to the sent list | |
-// | |
-// Returns true if the want was marked as sent. Returns false if the want wasn't | |
-// pending. | |
-func (r *recallWantlist) MarkSent(e bswl.Entry) bool { | |
- if !r.pending.RemoveType(e.Cid, e.WantType) { | |
- return false | |
- } | |
- r.sent.Add(e.Cid, e.Priority, e.WantType) | |
- return true | |
-} | |
- | |
-// SentAt records the time at which a want was sent | |
-func (r *recallWantlist) SentAt(c cid.Cid, at time.Time) { | |
- // The want may have been cancelled in the interim | |
- if _, ok := r.sent.Contains(c); ok { | |
- if _, ok := r.sentAt[c]; !ok { | |
- r.sentAt[c] = at | |
- } | |
- } | |
-} | |
- | |
-// ClearSentAt clears out the record of the time a want was sent. | |
-// We clear the sent at time when we receive a response for a key as we | |
-// only need the first response for latency measurement. | |
-func (r *recallWantlist) ClearSentAt(c cid.Cid) { | |
- delete(r.sentAt, c) | |
-} | |
- | |
-type peerConn struct { | |
- p peer.ID | |
- network MessageNetwork | |
-} | |
- | |
-func newPeerConnection(p peer.ID, network MessageNetwork) *peerConn { | |
- return &peerConn{p, network} | |
-} | |
- | |
-func (pc *peerConn) Ping(ctx context.Context) ping.Result { | |
- return pc.network.Ping(ctx, pc.p) | |
-} | |
- | |
-func (pc *peerConn) Latency() time.Duration { | |
- return pc.network.Latency(pc.p) | |
-} | |
- | |
-// Fires when a timeout occurs waiting for a response from a peer running an | |
-// older version of Bitswap that doesn't support DONT_HAVE messages. | |
-type OnDontHaveTimeout func(peer.ID, []cid.Cid) | |
- | |
-// DontHaveTimeoutManager pings a peer to estimate latency so it can set a reasonable | |
-// upper bound on when to consider a DONT_HAVE request as timed out (when connected to | |
-// a peer that doesn't support DONT_HAVE messages) | |
-type DontHaveTimeoutManager interface { | |
- // Start the manager (idempotent) | |
- Start() | |
- // Shutdown the manager (Shutdown is final, manager cannot be restarted) | |
- Shutdown() | |
- // AddPending adds the wants as pending a response. If they are not | |
- // cancelled before the timeout, the OnDontHaveTimeout method will be called. | |
- AddPending([]cid.Cid) | |
- // CancelPending removes the wants | |
- CancelPending([]cid.Cid) | |
- // UpdateMessageLatency informs the manager of a new latency measurement | |
- UpdateMessageLatency(time.Duration) | |
-} | |
- | |
-// New creates a new MessageQueue. | |
-func New(ctx context.Context, p peer.ID, network MessageNetwork, onDontHaveTimeout OnDontHaveTimeout) *MessageQueue { | |
- onTimeout := func(ks []cid.Cid) { | |
- log.Infow("Bitswap: timeout waiting for blocks", "cids", ks, "peer", p) | |
- onDontHaveTimeout(p, ks) | |
- } | |
- clock := clock.New() | |
- dhTimeoutMgr := newDontHaveTimeoutMgr(newPeerConnection(p, network), onTimeout, clock) | |
- return newMessageQueue(ctx, p, network, maxMessageSize, sendErrorBackoff, maxValidLatency, dhTimeoutMgr, clock, nil) | |
-} | |
- | |
-type messageEvent int | |
- | |
-const ( | |
- messageQueued messageEvent = iota | |
- messageFinishedSending | |
- latenciesRecorded | |
-) | |
- | |
-// This constructor is used by the tests | |
-func newMessageQueue( | |
- ctx context.Context, | |
- p peer.ID, | |
- network MessageNetwork, | |
- maxMsgSize int, | |
- sendErrorBackoff time.Duration, | |
- maxValidLatency time.Duration, | |
- dhTimeoutMgr DontHaveTimeoutManager, | |
- clock clock.Clock, | |
- events chan messageEvent) *MessageQueue { | |
- | |
- ctx, cancel := context.WithCancel(ctx) | |
- return &MessageQueue{ | |
- ctx: ctx, | |
- shutdown: cancel, | |
- p: p, | |
- network: network, | |
- dhTimeoutMgr: dhTimeoutMgr, | |
- maxMessageSize: maxMsgSize, | |
- bcstWants: newRecallWantList(), | |
- peerWants: newRecallWantList(), | |
- cancels: cid.NewSet(), | |
- outgoingWork: make(chan time.Time, 1), | |
- responses: make(chan []cid.Cid, 8), | |
- rebroadcastInterval: defaultRebroadcastInterval, | |
- sendErrorBackoff: sendErrorBackoff, | |
- maxValidLatency: maxValidLatency, | |
- priority: maxPriority, | |
- // For performance reasons we just clear out the fields of the message | |
- // after using it, instead of creating a new one every time. | |
- msg: bsmsg.New(false), | |
- clock: clock, | |
- events: events, | |
- } | |
-} | |
- | |
-// Add want-haves that are part of a broadcast to all connected peers | |
-func (mq *MessageQueue) AddBroadcastWantHaves(wantHaves []cid.Cid) { | |
- if len(wantHaves) == 0 { | |
- return | |
- } | |
- | |
- mq.wllock.Lock() | |
- defer mq.wllock.Unlock() | |
- | |
- for _, c := range wantHaves { | |
- mq.bcstWants.Add(c, mq.priority, pb.Message_Wantlist_Have) | |
- mq.priority-- | |
- | |
- // We're adding a want-have for the cid, so clear any pending cancel | |
- // for the cid | |
- mq.cancels.Remove(c) | |
- } | |
- | |
- // Schedule a message send | |
- mq.signalWorkReady() | |
-} | |
- | |
-// Add want-haves and want-blocks for the peer for this message queue. | |
-func (mq *MessageQueue) AddWants(wantBlocks []cid.Cid, wantHaves []cid.Cid) { | |
- if len(wantBlocks) == 0 && len(wantHaves) == 0 { | |
- return | |
- } | |
- | |
- mq.wllock.Lock() | |
- defer mq.wllock.Unlock() | |
- | |
- for _, c := range wantHaves { | |
- mq.peerWants.Add(c, mq.priority, pb.Message_Wantlist_Have) | |
- mq.priority-- | |
- | |
- // We're adding a want-have for the cid, so clear any pending cancel | |
- // for the cid | |
- mq.cancels.Remove(c) | |
- } | |
- for _, c := range wantBlocks { | |
- mq.peerWants.Add(c, mq.priority, pb.Message_Wantlist_Block) | |
- mq.priority-- | |
- | |
- // We're adding a want-block for the cid, so clear any pending cancel | |
- // for the cid | |
- mq.cancels.Remove(c) | |
- } | |
- | |
- // Schedule a message send | |
- mq.signalWorkReady() | |
-} | |
- | |
-// Add cancel messages for the given keys. | |
-func (mq *MessageQueue) AddCancels(cancelKs []cid.Cid) { | |
- if len(cancelKs) == 0 { | |
- return | |
- } | |
- | |
- // Cancel any outstanding DONT_HAVE timers | |
- mq.dhTimeoutMgr.CancelPending(cancelKs) | |
- | |
- mq.wllock.Lock() | |
- | |
- workReady := false | |
- | |
- // Remove keys from broadcast and peer wants, and add to cancels | |
- for _, c := range cancelKs { | |
- // Check if a want for the key was sent | |
- _, wasSentBcst := mq.bcstWants.sent.Contains(c) | |
- _, wasSentPeer := mq.peerWants.sent.Contains(c) | |
- | |
- // Remove the want from tracking wantlists | |
- mq.bcstWants.Remove(c) | |
- mq.peerWants.Remove(c) | |
- | |
- // Only send a cancel if a want was sent | |
- if wasSentBcst || wasSentPeer { | |
- mq.cancels.Add(c) | |
- workReady = true | |
- } | |
- } | |
- | |
- mq.wllock.Unlock() | |
- | |
- // Unlock first to be nice to the scheduler. | |
- | |
- // Schedule a message send | |
- if workReady { | |
- mq.signalWorkReady() | |
- } | |
-} | |
- | |
-// ResponseReceived is called when a message is received from the network. | |
-// ks is the set of blocks, HAVEs and DONT_HAVEs in the message | |
-// Note that this is just used to calculate latency. | |
-func (mq *MessageQueue) ResponseReceived(ks []cid.Cid) { | |
- if len(ks) == 0 { | |
- return | |
- } | |
- | |
- // These messages are just used to approximate latency, so if we get so | |
- // many responses that they get backed up, just ignore the overflow. | |
- select { | |
- case mq.responses <- ks: | |
- default: | |
- } | |
-} | |
- | |
-// SetRebroadcastInterval sets a new interval on which to rebroadcast the full wantlist | |
-func (mq *MessageQueue) SetRebroadcastInterval(delay time.Duration) { | |
- mq.rebroadcastIntervalLk.Lock() | |
- mq.rebroadcastInterval = delay | |
- if mq.rebroadcastTimer != nil { | |
- mq.rebroadcastTimer.Reset(delay) | |
- } | |
- mq.rebroadcastIntervalLk.Unlock() | |
-} | |
- | |
-// Startup starts the processing of messages and rebroadcasting. | |
-func (mq *MessageQueue) Startup() { | |
- mq.rebroadcastIntervalLk.RLock() | |
- mq.rebroadcastTimer = mq.clock.Timer(mq.rebroadcastInterval) | |
- mq.rebroadcastIntervalLk.RUnlock() | |
- go mq.runQueue() | |
-} | |
- | |
-// Shutdown stops the processing of messages for a message queue. | |
-func (mq *MessageQueue) Shutdown() { | |
- mq.shutdown() | |
-} | |
- | |
-func (mq *MessageQueue) onShutdown() { | |
- // Shut down the DONT_HAVE timeout manager | |
- mq.dhTimeoutMgr.Shutdown() | |
- | |
- // Reset the streamMessageSender | |
- if mq.sender != nil { | |
- _ = mq.sender.Reset() | |
- } | |
-} | |
- | |
-func (mq *MessageQueue) runQueue() { | |
- defer mq.onShutdown() | |
- | |
- // Create a timer for debouncing scheduled work. | |
- scheduleWork := mq.clock.Timer(0) | |
- if !scheduleWork.Stop() { | |
- // Need to drain the timer if Stop() returns false | |
- // See: https://golang.org/pkg/time/#Timer.Stop | |
- <-scheduleWork.C | |
- } | |
- | |
- var workScheduled time.Time | |
- for mq.ctx.Err() == nil { | |
- select { | |
- case <-mq.rebroadcastTimer.C: | |
- mq.rebroadcastWantlist() | |
- | |
- case when := <-mq.outgoingWork: | |
- // If we have work scheduled, cancel the timer. If we | |
- // don't, record when the work was scheduled. | |
- // We send the time on the channel so we accurately | |
- // track delay. | |
- if workScheduled.IsZero() { | |
- workScheduled = when | |
- } else if !scheduleWork.Stop() { | |
- // Need to drain the timer if Stop() returns false | |
- <-scheduleWork.C | |
- } | |
- | |
- // If we have too many updates and/or we've waited too | |
- // long, send immediately. | |
- if mq.pendingWorkCount() > sendMessageCutoff || | |
- mq.clock.Since(workScheduled) >= sendMessageMaxDelay { | |
- mq.sendIfReady() | |
- workScheduled = time.Time{} | |
- } else { | |
- // Otherwise, extend the timer. | |
- scheduleWork.Reset(sendMessageDebounce) | |
- if mq.events != nil { | |
- mq.events <- messageQueued | |
- } | |
- } | |
- | |
- case <-scheduleWork.C: | |
- // We have work scheduled and haven't seen any updates | |
- // in sendMessageDebounce. Send immediately. | |
- workScheduled = time.Time{} | |
- mq.sendIfReady() | |
- | |
- case res := <-mq.responses: | |
- // We received a response from the peer, calculate latency | |
- mq.handleResponse(res) | |
- | |
- case <-mq.ctx.Done(): | |
- return | |
- } | |
- } | |
-} | |
- | |
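// Editor's note: runQueue above implements "debounce with a cap": bursts of
// outgoing work are coalesced for sendMessageDebounce, but flushed immediately
// once more than sendMessageCutoff items are pending or sendMessageMaxDelay has
// elapsed. A self-contained sketch of the same timer pattern, assuming only the
// time-based triggers matter (debounceLoop, work and flush are illustrative
// names, not part of this package):
func debounceLoop(ctx context.Context, work <-chan time.Time, debounce, maxDelay time.Duration, flush func()) {
	timer := time.NewTimer(0)
	if !timer.Stop() {
		<-timer.C // drain, as required by the time.Timer.Stop contract
	}
	var first time.Time // zero means no work is pending
	for {
		select {
		case t := <-work:
			if first.IsZero() {
				first = t
			} else if !timer.Stop() {
				<-timer.C
			}
			if time.Since(first) >= maxDelay {
				flush()
				first = time.Time{}
			} else {
				timer.Reset(debounce)
			}
		case <-timer.C:
			flush()
			first = time.Time{}
		case <-ctx.Done():
			return
		}
	}
}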
-// Periodically resend the list of wants to the peer | |
-func (mq *MessageQueue) rebroadcastWantlist() { | |
- mq.rebroadcastIntervalLk.RLock() | |
- mq.rebroadcastTimer.Reset(mq.rebroadcastInterval) | |
- mq.rebroadcastIntervalLk.RUnlock() | |
- | |
- // If some wants were transferred from the rebroadcast list | |
- if mq.transferRebroadcastWants() { | |
- // Send them out | |
- mq.sendMessage() | |
- } | |
-} | |
- | |
-// Transfer wants from the rebroadcast lists into the pending lists. | |
-func (mq *MessageQueue) transferRebroadcastWants() bool { | |
- mq.wllock.Lock() | |
- defer mq.wllock.Unlock() | |
- | |
- // Check if there are any wants to rebroadcast | |
- if mq.bcstWants.sent.Len() == 0 && mq.peerWants.sent.Len() == 0 { | |
- return false | |
- } | |
- | |
- // Copy sent wants into pending wants lists | |
- mq.bcstWants.pending.Absorb(mq.bcstWants.sent) | |
- mq.peerWants.pending.Absorb(mq.peerWants.sent) | |
- | |
- return true | |
-} | |
- | |
-func (mq *MessageQueue) signalWorkReady() { | |
- select { | |
- case mq.outgoingWork <- mq.clock.Now(): | |
- default: | |
- } | |
-} | |
- | |
-func (mq *MessageQueue) sendIfReady() { | |
- if mq.hasPendingWork() { | |
- mq.sendMessage() | |
- } | |
-} | |
- | |
-func (mq *MessageQueue) sendMessage() { | |
- sender, err := mq.initializeSender() | |
- if err != nil { | |
- // If we fail to initialize the sender, the networking layer will | |
- // emit a Disconnect event and the MessageQueue will get cleaned up | |
- log.Infof("Could not open message sender to peer %s: %s", mq.p, err) | |
- mq.Shutdown() | |
- return | |
- } | |
- | |
- // Make sure the DONT_HAVE timeout manager has started | |
- // Note: Start is idempotent | |
- mq.dhTimeoutMgr.Start() | |
- | |
- // Convert want lists to a Bitswap Message | |
- message, onSent := mq.extractOutgoingMessage(mq.sender.SupportsHave()) | |
- | |
- // After processing the message, clear out its fields to save memory | |
- defer mq.msg.Reset(false) | |
- | |
- if message.Empty() { | |
- return | |
- } | |
- | |
- wantlist := message.Wantlist() | |
- mq.logOutgoingMessage(wantlist) | |
- | |
- if err := sender.SendMsg(mq.ctx, message); err != nil { | |
- // If the message couldn't be sent, the networking layer will | |
- // emit a Disconnect event and the MessageQueue will get cleaned up | |
- log.Infof("Could not send message to peer %s: %s", mq.p, err) | |
- mq.Shutdown() | |
- return | |
- } | |
- | |
- // Record sent time so as to calculate message latency | |
- onSent() | |
- | |
- // Set a timer to wait for responses | |
- mq.simulateDontHaveWithTimeout(wantlist) | |
- | |
- // If the message was too big and only a subset of wants could be | |
- // sent, schedule sending the rest of the wants in the next | |
- // iteration of the event loop. | |
- if mq.hasPendingWork() { | |
- mq.signalWorkReady() | |
- } | |
-} | |
- | |
-// If a want-block times out, simulate a DONT_HAVE response. | |
-// This is necessary when making requests to peers running an older version of | |
-// Bitswap that doesn't support the DONT_HAVE response, and is also useful to | |
-// mitigate getting blocked by a peer that takes a long time to respond. | |
-func (mq *MessageQueue) simulateDontHaveWithTimeout(wantlist []bsmsg.Entry) { | |
- // Get the CID of each want-block that expects a DONT_HAVE response | |
- wants := make([]cid.Cid, 0, len(wantlist)) | |
- | |
- mq.wllock.Lock() | |
- | |
- for _, entry := range wantlist { | |
- if entry.WantType == pb.Message_Wantlist_Block && entry.SendDontHave { | |
- // Unlikely, but just in case check that the block hasn't been | |
- // received in the interim | |
- c := entry.Cid | |
- if _, ok := mq.peerWants.sent.Contains(c); ok { | |
- wants = append(wants, c) | |
- } | |
- } | |
- } | |
- | |
- mq.wllock.Unlock() | |
- | |
- // Add wants to DONT_HAVE timeout manager | |
- mq.dhTimeoutMgr.AddPending(wants) | |
-} | |
- | |
-// handleResponse is called when a response is received from the peer, | |
-// with the CIDs of received blocks / HAVEs / DONT_HAVEs | |
-func (mq *MessageQueue) handleResponse(ks []cid.Cid) { | |
- now := mq.clock.Now() | |
- earliest := time.Time{} | |
- | |
- mq.wllock.Lock() | |
- | |
- // Check if the keys in the response correspond to any request that was | |
- // sent to the peer. | |
- // | |
- // - Find the earliest request so as to calculate the longest latency as | |
- // we want to be conservative when setting the timeout | |
- // - Ignore latencies that are very long, as these are likely to be outliers | |
- // caused when | |
- // - we send a want to peer A | |
- // - peer A does not have the block | |
- // - peer A later receives the block from peer B | |
- // - peer A sends us HAVE / block | |
- for _, c := range ks { | |
- if at, ok := mq.bcstWants.sentAt[c]; ok { | |
- if (earliest.IsZero() || at.Before(earliest)) && now.Sub(at) < mq.maxValidLatency { | |
- earliest = at | |
- } | |
- mq.bcstWants.ClearSentAt(c) | |
- } | |
- if at, ok := mq.peerWants.sentAt[c]; ok { | |
- if (earliest.IsZero() || at.Before(earliest)) && now.Sub(at) < mq.maxValidLatency { | |
- earliest = at | |
- } | |
- // Clear out the sent time for the CID because we only want to | |
- // record the latency between the request and the first response | |
- // for that CID (not subsequent responses) | |
- mq.peerWants.ClearSentAt(c) | |
- } | |
- } | |
- | |
- mq.wllock.Unlock() | |
- | |
- if !earliest.IsZero() { | |
- // Inform the timeout manager of the calculated latency | |
- mq.dhTimeoutMgr.UpdateMessageLatency(now.Sub(earliest)) | |
- } | |
- if mq.events != nil { | |
- mq.events <- latenciesRecorded | |
- } | |
-} | |
- | |
-func (mq *MessageQueue) logOutgoingMessage(wantlist []bsmsg.Entry) { | |
- // Save some CPU cycles and allocations if log level is higher than debug | |
- if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil { | |
- return | |
- } | |
- | |
- self := mq.network.Self() | |
- for _, e := range wantlist { | |
- if e.Cancel { | |
- if e.WantType == pb.Message_Wantlist_Have { | |
- log.Debugw("sent message", | |
- "type", "CANCEL_WANT_HAVE", | |
- "cid", e.Cid, | |
- "local", self, | |
- "to", mq.p, | |
- ) | |
- } else { | |
- log.Debugw("sent message", | |
- "type", "CANCEL_WANT_BLOCK", | |
- "cid", e.Cid, | |
- "local", self, | |
- "to", mq.p, | |
- ) | |
- } | |
- } else { | |
- if e.WantType == pb.Message_Wantlist_Have { | |
- log.Debugw("sent message", | |
- "type", "WANT_HAVE", | |
- "cid", e.Cid, | |
- "local", self, | |
- "to", mq.p, | |
- ) | |
- } else { | |
- log.Debugw("sent message", | |
- "type", "WANT_BLOCK", | |
- "cid", e.Cid, | |
- "local", self, | |
- "to", mq.p, | |
- ) | |
- } | |
- } | |
- } | |
-} | |
- | |
-// Whether there is work to be processed | |
-func (mq *MessageQueue) hasPendingWork() bool { | |
- return mq.pendingWorkCount() > 0 | |
-} | |
- | |
-// The amount of work that is waiting to be processed | |
-func (mq *MessageQueue) pendingWorkCount() int { | |
- mq.wllock.Lock() | |
- defer mq.wllock.Unlock() | |
- | |
- return mq.bcstWants.pending.Len() + mq.peerWants.pending.Len() + mq.cancels.Len() | |
-} | |
- | |
-// Convert the lists of wants into a Bitswap message | |
-func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func()) { | |
- // Get broadcast and regular wantlist entries. | |
- mq.wllock.Lock() | |
- peerEntries := mq.peerWants.pending.Entries() | |
- bcstEntries := mq.bcstWants.pending.Entries() | |
- cancels := mq.cancels.Keys() | |
- if !supportsHave { | |
- filteredPeerEntries := peerEntries[:0] | |
- // If the remote peer doesn't support HAVE / DONT_HAVE messages, | |
- // don't send want-haves (only send want-blocks) | |
- // | |
- // Doing this here under the lock makes everything else in this | |
- // function simpler. | |
- // | |
- // TODO: We should _try_ to avoid recording these in the first | |
- // place if possible. | |
- for _, e := range peerEntries { | |
- if e.WantType == pb.Message_Wantlist_Have { | |
- mq.peerWants.RemoveType(e.Cid, pb.Message_Wantlist_Have) | |
- } else { | |
- filteredPeerEntries = append(filteredPeerEntries, e) | |
- } | |
- } | |
- peerEntries = filteredPeerEntries | |
- } | |
- mq.wllock.Unlock() | |
- | |
- // We prioritize cancels, then regular wants, then broadcast wants. | |
- | |
- var ( | |
- msgSize = 0 // size of message so far | |
- sentCancels = 0 // number of cancels in message | |
- sentPeerEntries = 0 // number of peer entries in message | |
- sentBcstEntries = 0 // number of broadcast entries in message | |
- ) | |
- | |
- // Add each cancel to the message | |
- for _, c := range cancels { | |
- msgSize += mq.msg.Cancel(c) | |
- sentCancels++ | |
- | |
- if msgSize >= mq.maxMessageSize { | |
- goto FINISH | |
- } | |
- } | |
- | |
- // Next, add the wants. If we have too many entries to fit into a single | |
- // message, sort by priority and include the high priority ones first. | |
- | |
- for _, e := range peerEntries { | |
- msgSize += mq.msg.AddEntry(e.Cid, e.Priority, e.WantType, true) | |
- sentPeerEntries++ | |
- | |
- if msgSize >= mq.maxMessageSize { | |
- goto FINISH | |
- } | |
- } | |
- | |
- // Add each broadcast want-have to the message | |
- for _, e := range bcstEntries { | |
- // Broadcast wants are sent as want-have | |
- wantType := pb.Message_Wantlist_Have | |
- | |
- // If the remote peer doesn't support HAVE / DONT_HAVE messages, | |
- // send a want-block instead | |
- if !supportsHave { | |
- wantType = pb.Message_Wantlist_Block | |
- } | |
- | |
- msgSize += mq.msg.AddEntry(e.Cid, e.Priority, wantType, false) | |
- sentBcstEntries++ | |
- | |
- if msgSize >= mq.maxMessageSize { | |
- goto FINISH | |
- } | |
- } | |
- | |
-FINISH: | |
- | |
- // Finally, re-take the lock, mark sent and remove any entries from our | |
- // message that we've decided to cancel at the last minute. | |
- mq.wllock.Lock() | |
- for i, e := range peerEntries[:sentPeerEntries] { | |
- if !mq.peerWants.MarkSent(e) { | |
- // It changed. | |
- mq.msg.Remove(e.Cid) | |
- peerEntries[i].Cid = cid.Undef | |
- } | |
- } | |
- | |
- for i, e := range bcstEntries[:sentBcstEntries] { | |
- if !mq.bcstWants.MarkSent(e) { | |
- mq.msg.Remove(e.Cid) | |
- bcstEntries[i].Cid = cid.Undef | |
- } | |
- } | |
- | |
- for _, c := range cancels[:sentCancels] { | |
- if !mq.cancels.Has(c) { | |
- mq.msg.Remove(c) | |
- } else { | |
- mq.cancels.Remove(c) | |
- } | |
- } | |
- mq.wllock.Unlock() | |
- | |
- // When the message has been sent, record the time at which each want was | |
- // sent so we can calculate message latency | |
- onSent := func() { | |
- now := mq.clock.Now() | |
- | |
- mq.wllock.Lock() | |
- defer mq.wllock.Unlock() | |
- | |
- for _, e := range peerEntries[:sentPeerEntries] { | |
- if e.Cid.Defined() { // Check if want was cancelled in the interim | |
- mq.peerWants.SentAt(e.Cid, now) | |
- } | |
- } | |
- | |
- for _, e := range bcstEntries[:sentBcstEntries] { | |
- if e.Cid.Defined() { // Check if want was cancelled in the interim | |
- mq.bcstWants.SentAt(e.Cid, now) | |
- } | |
- } | |
- if mq.events != nil { | |
- mq.events <- messageFinishedSending | |
- } | |
- } | |
- | |
- return mq.msg, onSent | |
-} | |
- | |
-func (mq *MessageQueue) initializeSender() (bsnet.MessageSender, error) { | |
- if mq.sender == nil { | |
- opts := &bsnet.MessageSenderOpts{ | |
- MaxRetries: maxRetries, | |
- SendTimeout: sendTimeout, | |
- SendErrorBackoff: sendErrorBackoff, | |
- } | |
- nsender, err := mq.network.NewMessageSender(mq.ctx, mq.p, opts) | |
- if err != nil { | |
- return nil, err | |
- } | |
- | |
- mq.sender = nsender | |
- } | |
- return mq.sender, nil | |
-} | |
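// Editor's note: a hedged usage sketch of the MessageQueue API removed above;
// exampleMessageQueueUsage and the parameters c1/c2 are illustrative only, and
// network stands in for a real MessageNetwork implementation.
func exampleMessageQueueUsage(ctx context.Context, p peer.ID, network MessageNetwork, c1, c2 cid.Cid) {
	mq := New(ctx, p, network, func(p peer.ID, ks []cid.Cid) {
		// Called when the peer never answers and DONT_HAVEs are simulated.
		log.Debugw("simulated DONT_HAVE timeout", "peer", p, "cids", ks)
	})
	mq.Startup()
	mq.AddWants([]cid.Cid{c1}, []cid.Cid{c2}) // one want-block, one want-have
	mq.ResponseReceived([]cid.Cid{c1})        // feeds the latency estimate
	mq.AddCancels([]cid.Cid{c2})
	mq.Shutdown()
}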
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/notifications/notifications.go a/vendor/github.com/ipfs/go-bitswap/client/internal/notifications/notifications.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/notifications/notifications.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/notifications/notifications.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,139 +0,0 @@ | |
-package notifications | |
- | |
-import ( | |
- "context" | |
- "sync" | |
- | |
- pubsub "github.com/cskr/pubsub" | |
- blocks "github.com/ipfs/go-block-format" | |
- cid "github.com/ipfs/go-cid" | |
-) | |
- | |
-const bufferSize = 16 | |
- | |
-// PubSub is a simple interface for publishing blocks and being able to subscribe | |
-// for cids. It's used internally by bitswap to decouple receiving blocks | |
-// and actually providing them back to the GetBlocks caller. | |
-type PubSub interface { | |
- Publish(blocks ...blocks.Block) | |
- Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block | |
- Shutdown() | |
-} | |
- | |
-// New generates a new PubSub interface. | |
-func New() PubSub { | |
- return &impl{ | |
- wrapped: *pubsub.New(bufferSize), | |
- closed: make(chan struct{}), | |
- } | |
-} | |
- | |
-type impl struct { | |
- lk sync.RWMutex | |
- wrapped pubsub.PubSub | |
- | |
- closed chan struct{} | |
-} | |
- | |
-func (ps *impl) Publish(blocks ...blocks.Block) { | |
- ps.lk.RLock() | |
- defer ps.lk.RUnlock() | |
- select { | |
- case <-ps.closed: | |
- return | |
- default: | |
- } | |
- | |
- for _, block := range blocks { | |
- ps.wrapped.Pub(block, block.Cid().KeyString()) | |
- } | |
-} | |
- | |
-func (ps *impl) Shutdown() { | |
- ps.lk.Lock() | |
- defer ps.lk.Unlock() | |
- select { | |
- case <-ps.closed: | |
- return | |
- default: | |
- } | |
- close(ps.closed) | |
- ps.wrapped.Shutdown() | |
-} | |
- | |
-// Subscribe returns a channel of blocks for the given |keys|. The returned | |
-// channel is closed if the |ctx| times out or is cancelled, or once the blocks | |
-// corresponding to |keys| have all been received. | |
-func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block { | |
- | |
- blocksCh := make(chan blocks.Block, len(keys)) | |
- valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking | |
- if len(keys) == 0 { | |
- close(blocksCh) | |
- return blocksCh | |
- } | |
- | |
- // prevent shutdown | |
- ps.lk.RLock() | |
- defer ps.lk.RUnlock() | |
- | |
- select { | |
- case <-ps.closed: | |
- close(blocksCh) | |
- return blocksCh | |
- default: | |
- } | |
- | |
- // AddSubOnceEach listens for each key in the list, and closes the channel | |
- // once all keys have been received | |
- ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...) | |
- go func() { | |
- defer func() { | |
- close(blocksCh) | |
- | |
- ps.lk.RLock() | |
- defer ps.lk.RUnlock() | |
- // Don't touch the pubsub instance if we're | |
- // already closed. | |
- select { | |
- case <-ps.closed: | |
- return | |
- default: | |
- } | |
- | |
- ps.wrapped.Unsub(valuesCh) | |
- }() | |
- | |
- for { | |
- select { | |
- case <-ctx.Done(): | |
- return | |
- case <-ps.closed: | |
- case val, ok := <-valuesCh: | |
- if !ok { | |
- return | |
- } | |
- block, ok := val.(blocks.Block) | |
- if !ok { | |
- return | |
- } | |
- select { | |
- case <-ctx.Done(): | |
- return | |
- case blocksCh <- block: // continue | |
- case <-ps.closed: | |
- } | |
- } | |
- } | |
- }() | |
- | |
- return blocksCh | |
-} | |
- | |
-func toStrings(keys []cid.Cid) []string { | |
- strs := make([]string, 0, len(keys)) | |
- for _, key := range keys { | |
- strs = append(strs, key.KeyString()) | |
- } | |
- return strs | |
-} | |
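// Editor's note: a small, hedged usage sketch for the PubSub interface removed
// above; examplePubSub and the block payload are illustrative only.
func examplePubSub(ctx context.Context) {
	ps := New()
	defer ps.Shutdown()

	blk := blocks.NewBlock([]byte("example data"))
	ch := ps.Subscribe(ctx, blk.Cid()) // closed once the block arrives or ctx ends

	go ps.Publish(blk)

	for b := range ch {
		_ = b // the blocks.Block published for the subscribed CID
	}
}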
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/peermanager/peermanager.go a/vendor/github.com/ipfs/go-bitswap/client/internal/peermanager/peermanager.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/peermanager/peermanager.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/peermanager/peermanager.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,246 +0,0 @@ | |
-package peermanager | |
- | |
-import ( | |
- "context" | |
- "sync" | |
- | |
- logging "github.com/ipfs/go-log" | |
- "github.com/ipfs/go-metrics-interface" | |
- | |
- cid "github.com/ipfs/go-cid" | |
- peer "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-var log = logging.Logger("bs:peermgr") | |
- | |
-// PeerQueue provides a queue of messages to be sent for a single peer. | |
-type PeerQueue interface { | |
- AddBroadcastWantHaves([]cid.Cid) | |
- AddWants([]cid.Cid, []cid.Cid) | |
- AddCancels([]cid.Cid) | |
- ResponseReceived(ks []cid.Cid) | |
- Startup() | |
- Shutdown() | |
-} | |
- | |
-type Session interface { | |
- ID() uint64 | |
- SignalAvailability(peer.ID, bool) | |
-} | |
- | |
-// PeerQueueFactory provides a function that will create a PeerQueue. | |
-type PeerQueueFactory func(ctx context.Context, p peer.ID) PeerQueue | |
- | |
-// PeerManager manages a pool of peers and sends messages to peers in the pool. | |
-type PeerManager struct { | |
- // sync access to peerQueues and peerWantManager | |
- pqLk sync.RWMutex | |
- // peerQueues -- interact through internal utility functions get/set/remove/iterate | |
- peerQueues map[peer.ID]PeerQueue | |
- pwm *peerWantManager | |
- | |
- createPeerQueue PeerQueueFactory | |
- ctx context.Context | |
- | |
- psLk sync.RWMutex | |
- sessions map[uint64]Session | |
- peerSessions map[peer.ID]map[uint64]struct{} | |
- | |
- self peer.ID | |
-} | |
- | |
-// New creates a new PeerManager, given a context and a peerQueueFactory. | |
-func New(ctx context.Context, createPeerQueue PeerQueueFactory, self peer.ID) *PeerManager { | |
- wantGauge := metrics.NewCtx(ctx, "wantlist_total", "Number of items in wantlist.").Gauge() | |
- wantBlockGauge := metrics.NewCtx(ctx, "want_blocks_total", "Number of want-blocks in wantlist.").Gauge() | |
- return &PeerManager{ | |
- peerQueues: make(map[peer.ID]PeerQueue), | |
- pwm: newPeerWantManager(wantGauge, wantBlockGauge), | |
- createPeerQueue: createPeerQueue, | |
- ctx: ctx, | |
- self: self, | |
- | |
- sessions: make(map[uint64]Session), | |
- peerSessions: make(map[peer.ID]map[uint64]struct{}), | |
- } | |
-} | |
- | |
-func (pm *PeerManager) AvailablePeers() []peer.ID { | |
- // TODO: Rate-limit peers | |
- return pm.ConnectedPeers() | |
-} | |
- | |
-// ConnectedPeers returns a list of peers this PeerManager is managing. | |
-func (pm *PeerManager) ConnectedPeers() []peer.ID { | |
- pm.pqLk.RLock() | |
- defer pm.pqLk.RUnlock() | |
- | |
- peers := make([]peer.ID, 0, len(pm.peerQueues)) | |
- for p := range pm.peerQueues { | |
- peers = append(peers, p) | |
- } | |
- return peers | |
-} | |
- | |
-// Connected is called to add a new peer to the pool, and send it an initial set | |
-// of wants. | |
-func (pm *PeerManager) Connected(p peer.ID) { | |
- pm.pqLk.Lock() | |
- defer pm.pqLk.Unlock() | |
- | |
- pq := pm.getOrCreate(p) | |
- | |
- // Inform the peer want manager that there's a new peer | |
- pm.pwm.addPeer(pq, p) | |
- | |
- // Inform the sessions that the peer has connected | |
- pm.signalAvailability(p, true) | |
-} | |
- | |
-// Disconnected is called to remove a peer from the pool. | |
-func (pm *PeerManager) Disconnected(p peer.ID) { | |
- pm.pqLk.Lock() | |
- defer pm.pqLk.Unlock() | |
- | |
- pq, ok := pm.peerQueues[p] | |
- | |
- if !ok { | |
- return | |
- } | |
- | |
- // Inform the sessions that the peer has disconnected | |
- pm.signalAvailability(p, false) | |
- | |
- // Clean up the peer | |
- delete(pm.peerQueues, p) | |
- pq.Shutdown() | |
- pm.pwm.removePeer(p) | |
-} | |
- | |
-// ResponseReceived is called when a message is received from the network. | |
-// ks is the set of blocks, HAVEs and DONT_HAVEs in the message | |
-// Note that this is just used to calculate latency. | |
-func (pm *PeerManager) ResponseReceived(p peer.ID, ks []cid.Cid) { | |
- pm.pqLk.Lock() | |
- pq, ok := pm.peerQueues[p] | |
- pm.pqLk.Unlock() | |
- | |
- if ok { | |
- pq.ResponseReceived(ks) | |
- } | |
-} | |
- | |
-// BroadcastWantHaves broadcasts want-haves to all peers (used by the session | |
-// to discover seeds). | |
-// For each peer it filters out want-haves that have previously been sent to | |
-// the peer. | |
-func (pm *PeerManager) BroadcastWantHaves(ctx context.Context, wantHaves []cid.Cid) { | |
- pm.pqLk.Lock() | |
- defer pm.pqLk.Unlock() | |
- | |
- pm.pwm.broadcastWantHaves(wantHaves) | |
-} | |
- | |
-// SendWants sends the given want-blocks and want-haves to the given peer. | |
-// It filters out wants that have previously been sent to the peer. | |
-func (pm *PeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { | |
- pm.pqLk.Lock() | |
- defer pm.pqLk.Unlock() | |
- | |
- if _, ok := pm.peerQueues[p]; ok { | |
- pm.pwm.sendWants(p, wantBlocks, wantHaves) | |
- } | |
-} | |
- | |
-// SendCancels sends cancels for the given keys to all peers who had previously | |
-// received a want for those keys. | |
-func (pm *PeerManager) SendCancels(ctx context.Context, cancelKs []cid.Cid) { | |
- pm.pqLk.Lock() | |
- defer pm.pqLk.Unlock() | |
- | |
- // Send a CANCEL to each peer that has been sent a want-block or want-have | |
- pm.pwm.sendCancels(cancelKs) | |
-} | |
- | |
-// CurrentWants returns the list of pending wants (both want-haves and want-blocks). | |
-func (pm *PeerManager) CurrentWants() []cid.Cid { | |
- pm.pqLk.RLock() | |
- defer pm.pqLk.RUnlock() | |
- | |
- return pm.pwm.getWants() | |
-} | |
- | |
-// CurrentWantBlocks returns the list of pending want-blocks | |
-func (pm *PeerManager) CurrentWantBlocks() []cid.Cid { | |
- pm.pqLk.RLock() | |
- defer pm.pqLk.RUnlock() | |
- | |
- return pm.pwm.getWantBlocks() | |
-} | |
- | |
-// CurrentWantHaves returns the list of pending want-haves | |
-func (pm *PeerManager) CurrentWantHaves() []cid.Cid { | |
- pm.pqLk.RLock() | |
- defer pm.pqLk.RUnlock() | |
- | |
- return pm.pwm.getWantHaves() | |
-} | |
- | |
-func (pm *PeerManager) getOrCreate(p peer.ID) PeerQueue { | |
- pq, ok := pm.peerQueues[p] | |
- if !ok { | |
- pq = pm.createPeerQueue(pm.ctx, p) | |
- pq.Startup() | |
- pm.peerQueues[p] = pq | |
- } | |
- return pq | |
-} | |
- | |
-// RegisterSession tells the PeerManager that the given session is interested | |
-// in events about the given peer. | |
-func (pm *PeerManager) RegisterSession(p peer.ID, s Session) { | |
- pm.psLk.Lock() | |
- defer pm.psLk.Unlock() | |
- | |
- if _, ok := pm.sessions[s.ID()]; !ok { | |
- pm.sessions[s.ID()] = s | |
- } | |
- | |
- if _, ok := pm.peerSessions[p]; !ok { | |
- pm.peerSessions[p] = make(map[uint64]struct{}) | |
- } | |
- pm.peerSessions[p][s.ID()] = struct{}{} | |
-} | |
- | |
-// UnregisterSession tells the PeerManager that the given session is no longer | |
-// interested in PeerManager events. | |
-func (pm *PeerManager) UnregisterSession(ses uint64) { | |
- pm.psLk.Lock() | |
- defer pm.psLk.Unlock() | |
- | |
- for p := range pm.peerSessions { | |
- delete(pm.peerSessions[p], ses) | |
- if len(pm.peerSessions[p]) == 0 { | |
- delete(pm.peerSessions, p) | |
- } | |
- } | |
- | |
- delete(pm.sessions, ses) | |
-} | |
- | |
-// signalAvailability is called when a peer's connectivity changes. | |
-// It informs interested sessions. | |
-func (pm *PeerManager) signalAvailability(p peer.ID, isConnected bool) { | |
- pm.psLk.Lock() | |
- defer pm.psLk.Unlock() | |
- | |
- sesIds, ok := pm.peerSessions[p] | |
- if !ok { | |
- return | |
- } | |
- for sesId := range sesIds { | |
- if s, ok := pm.sessions[sesId]; ok { | |
- s.SignalAvailability(p, isConnected) | |
- } | |
- } | |
-} | |
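// Editor's note: a hedged sketch of how the PeerManager above is wired and
// driven. queueStub and examplePeerManager are hypothetical; in bitswap the
// PeerQueueFactory normally returns a real per-peer message queue.
type queueStub struct{ p peer.ID }

func (q *queueStub) AddBroadcastWantHaves(ks []cid.Cid) {} // a real queue would enqueue want-haves
func (q *queueStub) AddWants(wbs, whs []cid.Cid)        {}
func (q *queueStub) AddCancels(ks []cid.Cid)            {}
func (q *queueStub) ResponseReceived(ks []cid.Cid)      {}
func (q *queueStub) Startup()                           {}
func (q *queueStub) Shutdown()                          {}

func examplePeerManager(ctx context.Context, self, remote peer.ID, c cid.Cid) {
	factory := func(ctx context.Context, p peer.ID) PeerQueue { return &queueStub{p: p} }
	pm := New(ctx, factory, self)

	pm.Connected(remote)                         // creates and starts the peer's queue
	pm.BroadcastWantHaves(ctx, []cid.Cid{c})     // broadcast want-have goes to every connected peer
	pm.SendWants(ctx, remote, []cid.Cid{c}, nil) // upgrade to a want-block for this peer
	pm.SendCancels(ctx, []cid.Cid{c})            // cancels go to every peer that was sent a want
	pm.Disconnected(remote)
}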
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/peermanager/peerwantmanager.go a/vendor/github.com/ipfs/go-bitswap/client/internal/peermanager/peerwantmanager.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/peermanager/peerwantmanager.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/peermanager/peerwantmanager.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,464 +0,0 @@ | |
-package peermanager | |
- | |
-import ( | |
- "bytes" | |
- "fmt" | |
- | |
- cid "github.com/ipfs/go-cid" | |
- peer "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-// Gauge can be used to keep track of a metric that increases and decreases | |
-// incrementally. It is used by the peerWantManager to track the number of | |
-// want-blocks that are active (ie sent but no response received) | |
-type Gauge interface { | |
- Inc() | |
- Dec() | |
-} | |
- | |
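// Editor's note: a minimal sketch (not from the original file) of a Gauge
// backed by an atomic counter, e.g. for tests; it assumes "sync/atomic" is
// imported. In bitswap the gauges are normally created from
// go-metrics-interface in the PeerManager constructor.
type atomicGauge struct{ n int64 }

func (g *atomicGauge) Inc() { atomic.AddInt64(&g.n, 1) }
func (g *atomicGauge) Dec() { atomic.AddInt64(&g.n, -1) }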
-// peerWantManager keeps track of which want-haves and want-blocks have been | |
-// sent to each peer, so that the PeerManager doesn't send duplicates. | |
-type peerWantManager struct { | |
- // peerWants maps peers to outstanding wants. | |
- // A peer's wants is the _union_ of the broadcast wants and the wants in | |
- // this list. | |
- peerWants map[peer.ID]*peerWant | |
- | |
- // Reverse index of all wants in peerWants. | |
- wantPeers map[cid.Cid]map[peer.ID]struct{} | |
- | |
- // broadcastWants tracks all the current broadcast wants. | |
- broadcastWants *cid.Set | |
- | |
- // Keeps track of the number of active want-haves & want-blocks | |
- wantGauge Gauge | |
- // Keeps track of the number of active want-blocks | |
- wantBlockGauge Gauge | |
-} | |
- | |
-type peerWant struct { | |
- wantBlocks *cid.Set | |
- wantHaves *cid.Set | |
- peerQueue PeerQueue | |
-} | |
- | |
-// newPeerWantManager creates a new peerWantManager with gauges that keep track | |
-// of the number of active wants and want-blocks (i.e. sent but no response received). | |
-func newPeerWantManager(wantGauge Gauge, wantBlockGauge Gauge) *peerWantManager { | |
- return &peerWantManager{ | |
- broadcastWants: cid.NewSet(), | |
- peerWants: make(map[peer.ID]*peerWant), | |
- wantPeers: make(map[cid.Cid]map[peer.ID]struct{}), | |
- wantGauge: wantGauge, | |
- wantBlockGauge: wantBlockGauge, | |
- } | |
-} | |
- | |
-// addPeer adds a peer whose wants we need to keep track of. It sends the | |
-// current list of broadcast wants to the peer. | |
-func (pwm *peerWantManager) addPeer(peerQueue PeerQueue, p peer.ID) { | |
- if _, ok := pwm.peerWants[p]; ok { | |
- return | |
- } | |
- | |
- pwm.peerWants[p] = &peerWant{ | |
- wantBlocks: cid.NewSet(), | |
- wantHaves: cid.NewSet(), | |
- peerQueue: peerQueue, | |
- } | |
- | |
- // Broadcast any live want-haves to the newly connected peer | |
- if pwm.broadcastWants.Len() > 0 { | |
- wants := pwm.broadcastWants.Keys() | |
- peerQueue.AddBroadcastWantHaves(wants) | |
- } | |
-} | |
- | |
-// removePeer removes a peer and its associated wants from tracking. | |
-func (pwm *peerWantManager) removePeer(p peer.ID) { | |
- pws, ok := pwm.peerWants[p] | |
- if !ok { | |
- return | |
- } | |
- | |
- // Clean up want-blocks | |
- _ = pws.wantBlocks.ForEach(func(c cid.Cid) error { | |
- // Clean up want-blocks from the reverse index | |
- pwm.reverseIndexRemove(c, p) | |
- | |
- // Decrement the gauges by the number of pending want-blocks to the peer | |
- peerCounts := pwm.wantPeerCounts(c) | |
- if peerCounts.wantBlock == 0 { | |
- pwm.wantBlockGauge.Dec() | |
- } | |
- if !peerCounts.wanted() { | |
- pwm.wantGauge.Dec() | |
- } | |
- | |
- return nil | |
- }) | |
- | |
- // Clean up want-haves | |
- _ = pws.wantHaves.ForEach(func(c cid.Cid) error { | |
- // Clean up want-haves from the reverse index | |
- pwm.reverseIndexRemove(c, p) | |
- | |
- // Decrement the gauge by the number of pending want-haves to the peer | |
- peerCounts := pwm.wantPeerCounts(c) | |
- if !peerCounts.wanted() { | |
- pwm.wantGauge.Dec() | |
- } | |
- return nil | |
- }) | |
- | |
- delete(pwm.peerWants, p) | |
-} | |
- | |
-// broadcastWantHaves sends want-haves to any peers that have not yet been sent them. | |
-func (pwm *peerWantManager) broadcastWantHaves(wantHaves []cid.Cid) { | |
- unsent := make([]cid.Cid, 0, len(wantHaves)) | |
- for _, c := range wantHaves { | |
- if pwm.broadcastWants.Has(c) { | |
- // Already a broadcast want, skip it. | |
- continue | |
- } | |
- pwm.broadcastWants.Add(c) | |
- unsent = append(unsent, c) | |
- | |
- // If no peer has a pending want for the key | |
- if _, ok := pwm.wantPeers[c]; !ok { | |
- // Increment the total wants gauge | |
- pwm.wantGauge.Inc() | |
- } | |
- } | |
- | |
- if len(unsent) == 0 { | |
- return | |
- } | |
- | |
- // Allocate a single buffer to filter broadcast wants for each peer | |
- bcstWantsBuffer := make([]cid.Cid, 0, len(unsent)) | |
- | |
- // Send broadcast wants to each peer | |
- for _, pws := range pwm.peerWants { | |
- peerUnsent := bcstWantsBuffer[:0] | |
- for _, c := range unsent { | |
- // If we've already sent a want to this peer, skip them. | |
- if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { | |
- peerUnsent = append(peerUnsent, c) | |
- } | |
- } | |
- | |
- if len(peerUnsent) > 0 { | |
- pws.peerQueue.AddBroadcastWantHaves(peerUnsent) | |
- } | |
- } | |
-} | |
- | |
-// sendWants only sends the peer the want-blocks and want-haves that have not | |
-// already been sent to it. | |
-func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { | |
- fltWantBlks := make([]cid.Cid, 0, len(wantBlocks)) | |
- fltWantHvs := make([]cid.Cid, 0, len(wantHaves)) | |
- | |
- // Get the existing want-blocks and want-haves for the peer | |
- pws, ok := pwm.peerWants[p] | |
- if !ok { | |
- // In practice this should never happen | |
- log.Errorf("sendWants() called with peer %s but peer not found in peerWantManager", string(p)) | |
- return | |
- } | |
- | |
- // Iterate over the requested want-blocks | |
- for _, c := range wantBlocks { | |
- // If the want-block hasn't been sent to the peer | |
- if pws.wantBlocks.Has(c) { | |
- continue | |
- } | |
- | |
- // Increment the want gauges | |
- peerCounts := pwm.wantPeerCounts(c) | |
- if peerCounts.wantBlock == 0 { | |
- pwm.wantBlockGauge.Inc() | |
- } | |
- if !peerCounts.wanted() { | |
- pwm.wantGauge.Inc() | |
- } | |
- | |
- // Make sure the CID is no longer recorded as a want-have | |
- pws.wantHaves.Remove(c) | |
- | |
- // Record that the CID was sent as a want-block | |
- pws.wantBlocks.Add(c) | |
- | |
- // Add the CID to the results | |
- fltWantBlks = append(fltWantBlks, c) | |
- | |
- // Update the reverse index | |
- pwm.reverseIndexAdd(c, p) | |
- } | |
- | |
- // Iterate over the requested want-haves | |
- for _, c := range wantHaves { | |
- // If we've already broadcasted this want, don't bother with a | |
- // want-have. | |
- if pwm.broadcastWants.Has(c) { | |
- continue | |
- } | |
- | |
- // If the CID has not been sent as a want-block or want-have | |
- if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { | |
- // Increment the total wants gauge | |
- peerCounts := pwm.wantPeerCounts(c) | |
- if !peerCounts.wanted() { | |
- pwm.wantGauge.Inc() | |
- } | |
- | |
- // Record that the CID was sent as a want-have | |
- pws.wantHaves.Add(c) | |
- | |
- // Add the CID to the results | |
- fltWantHvs = append(fltWantHvs, c) | |
- | |
- // Update the reverse index | |
- pwm.reverseIndexAdd(c, p) | |
- } | |
- } | |
- | |
- // Send the want-blocks and want-haves to the peer | |
- pws.peerQueue.AddWants(fltWantBlks, fltWantHvs) | |
-} | |
- | |
-// sendCancels sends a cancel to each peer to which a corresponding want was | |
-// sent | |
-func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { | |
- if len(cancelKs) == 0 { | |
- return | |
- } | |
- | |
- // Record how many peers have a pending want-block and want-have for each | |
- // key to be cancelled | |
- peerCounts := make(map[cid.Cid]wantPeerCnts, len(cancelKs)) | |
- for _, c := range cancelKs { | |
- peerCounts[c] = pwm.wantPeerCounts(c) | |
- } | |
- | |
- // Create a buffer to use for filtering cancels per peer, with the | |
- // broadcast wants at the front of the buffer (broadcast wants are sent to | |
- // all peers) | |
- broadcastCancels := make([]cid.Cid, 0, len(cancelKs)) | |
- for _, c := range cancelKs { | |
- if pwm.broadcastWants.Has(c) { | |
- broadcastCancels = append(broadcastCancels, c) | |
- } | |
- } | |
- | |
- // Send cancels to a particular peer | |
- send := func(p peer.ID, pws *peerWant) { | |
- // Start from the broadcast cancels | |
- toCancel := broadcastCancels | |
- | |
- // For each key to be cancelled | |
- for _, c := range cancelKs { | |
- // Check if a want was sent for the key | |
- if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { | |
- continue | |
- } | |
- | |
- // Unconditionally remove from the want lists. | |
- pws.wantBlocks.Remove(c) | |
- pws.wantHaves.Remove(c) | |
- | |
- // If it's a broadcast want, we've already added it to | |
- // the peer cancels. | |
- if !pwm.broadcastWants.Has(c) { | |
- toCancel = append(toCancel, c) | |
- } | |
- } | |
- | |
- // Send cancels to the peer | |
- if len(toCancel) > 0 { | |
- pws.peerQueue.AddCancels(toCancel) | |
- } | |
- } | |
- | |
- if len(broadcastCancels) > 0 { | |
- // If a broadcast want is being cancelled, send the cancel to all | |
- // peers | |
- for p, pws := range pwm.peerWants { | |
- send(p, pws) | |
- } | |
- } else { | |
- // Only send cancels to peers that received a corresponding want | |
- cancelPeers := make(map[peer.ID]struct{}, len(pwm.wantPeers[cancelKs[0]])) | |
- for _, c := range cancelKs { | |
- for p := range pwm.wantPeers[c] { | |
- cancelPeers[p] = struct{}{} | |
- } | |
- } | |
- for p := range cancelPeers { | |
- pws, ok := pwm.peerWants[p] | |
- if !ok { | |
- // Should never happen but check just in case | |
- log.Errorf("sendCancels - peerWantManager index missing peer %s", p) | |
- continue | |
- } | |
- | |
- send(p, pws) | |
- } | |
- } | |
- | |
- // Decrement the wants gauges | |
- for _, c := range cancelKs { | |
- peerCnts := peerCounts[c] | |
- | |
- // If there were any peers that had a pending want-block for the key | |
- if peerCnts.wantBlock > 0 { | |
- // Decrement the want-block gauge | |
- pwm.wantBlockGauge.Dec() | |
- } | |
- | |
- // If there was a peer that had a pending want or it was a broadcast want | |
- if peerCnts.wanted() { | |
- // Decrement the total wants gauge | |
- pwm.wantGauge.Dec() | |
- } | |
- } | |
- | |
- // Remove cancelled broadcast wants | |
- for _, c := range broadcastCancels { | |
- pwm.broadcastWants.Remove(c) | |
- } | |
- | |
- // Batch-remove the reverse-index. There's no need to clear this index | |
- // peer-by-peer. | |
- for _, c := range cancelKs { | |
- delete(pwm.wantPeers, c) | |
- } | |
-} | |
- | |
-// wantPeerCnts stores the number of peers that have pending wants for a CID | |
-type wantPeerCnts struct { | |
- // number of peers that have a pending want-block for the CID | |
- wantBlock int | |
- // number of peers that have a pending want-have for the CID | |
- wantHave int | |
- // whether the CID is a broadcast want | |
- isBroadcast bool | |
-} | |
- | |
-// wanted returns true if any peer wants the CID or it's a broadcast want | |
-func (pwm *wantPeerCnts) wanted() bool { | |
- return pwm.wantBlock > 0 || pwm.wantHave > 0 || pwm.isBroadcast | |
-} | |
- | |
-// wantPeerCounts counts how many peers have a pending want-block and want-have | |
-// for the given CID | |
-func (pwm *peerWantManager) wantPeerCounts(c cid.Cid) wantPeerCnts { | |
- blockCount := 0 | |
- haveCount := 0 | |
- for p := range pwm.wantPeers[c] { | |
- pws, ok := pwm.peerWants[p] | |
- if !ok { | |
- log.Errorf("reverse index has extra peer %s for key %s in peerWantManager", string(p), c) | |
- continue | |
- } | |
- | |
- if pws.wantBlocks.Has(c) { | |
- blockCount++ | |
- } else if pws.wantHaves.Has(c) { | |
- haveCount++ | |
- } | |
- } | |
- | |
- return wantPeerCnts{blockCount, haveCount, pwm.broadcastWants.Has(c)} | |
-} | |
- | |
-// reverseIndexAdd records that a want for the cid has been sent to the peer. | |
-func (pwm *peerWantManager) reverseIndexAdd(c cid.Cid, p peer.ID) bool { | |
- peers, ok := pwm.wantPeers[c] | |
- if !ok { | |
- peers = make(map[peer.ID]struct{}, 10) | |
- pwm.wantPeers[c] = peers | |
- } | |
- peers[p] = struct{}{} | |
- return !ok | |
-} | |
- | |
-// reverseIndexRemove removes the peer from the set of peers that were sent a want for the cid. | |
-func (pwm *peerWantManager) reverseIndexRemove(c cid.Cid, p peer.ID) { | |
- if peers, ok := pwm.wantPeers[c]; ok { | |
- delete(peers, p) | |
- if len(peers) == 0 { | |
- delete(pwm.wantPeers, c) | |
- } | |
- } | |
-} | |
- | |
-// getWantBlocks returns the set of all want-blocks sent to all peers. | |
-func (pwm *peerWantManager) getWantBlocks() []cid.Cid { | |
- res := cid.NewSet() | |
- | |
- // Iterate over all known peers | |
- for _, pws := range pwm.peerWants { | |
- // Iterate over all want-blocks | |
- _ = pws.wantBlocks.ForEach(func(c cid.Cid) error { | |
- // Add the CID to the results | |
- res.Add(c) | |
- return nil | |
- }) | |
- } | |
- | |
- return res.Keys() | |
-} | |
- | |
-// getWantHaves returns the set of all want-haves sent to all peers, plus all broadcast want-haves. | |
-func (pwm *peerWantManager) getWantHaves() []cid.Cid { | |
- res := cid.NewSet() | |
- | |
- // Iterate over all peers with active wants. | |
- for _, pws := range pwm.peerWants { | |
- // Iterate over all want-haves | |
- _ = pws.wantHaves.ForEach(func(c cid.Cid) error { | |
- // Add the CID to the results | |
- res.Add(c) | |
- return nil | |
- }) | |
- } | |
- _ = pwm.broadcastWants.ForEach(func(c cid.Cid) error { | |
- res.Add(c) | |
- return nil | |
- }) | |
- | |
- return res.Keys() | |
-} | |
- | |
-// getWants returns the set of all wants (both want-blocks and want-haves). | |
-func (pwm *peerWantManager) getWants() []cid.Cid { | |
- res := pwm.broadcastWants.Keys() | |
- | |
- // Iterate over all targeted wants, removing ones that are also in the | |
- // broadcast list. | |
- for c := range pwm.wantPeers { | |
- if pwm.broadcastWants.Has(c) { | |
- continue | |
- } | |
- res = append(res, c) | |
- } | |
- | |
- return res | |
-} | |
- | |
-func (pwm *peerWantManager) String() string { | |
- var b bytes.Buffer | |
- for p, ws := range pwm.peerWants { | |
- b.WriteString(fmt.Sprintf("Peer %s: %d want-have / %d want-block:\n", p, ws.wantHaves.Len(), ws.wantBlocks.Len())) | |
- for _, c := range ws.wantHaves.Keys() { | |
- b.WriteString(fmt.Sprintf(" want-have %s\n", c)) | |
- } | |
- for _, c := range ws.wantBlocks.Keys() { | |
- b.WriteString(fmt.Sprintf(" want-block %s\n", c)) | |
- } | |
- } | |
- return b.String() | |
-} | |
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/providerquerymanager/providerquerymanager.go a/vendor/github.com/ipfs/go-bitswap/client/internal/providerquerymanager/providerquerymanager.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/providerquerymanager/providerquerymanager.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/providerquerymanager/providerquerymanager.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,430 +0,0 @@ | |
-package providerquerymanager | |
- | |
-import ( | |
- "context" | |
- "fmt" | |
- "sync" | |
- "time" | |
- | |
- "github.com/ipfs/go-cid" | |
- logging "github.com/ipfs/go-log" | |
- peer "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-var log = logging.Logger("bitswap") | |
- | |
-const ( | |
- maxProviders = 10 | |
- maxInProcessRequests = 6 | |
- defaultTimeout = 10 * time.Second | |
-) | |
- | |
-type inProgressRequestStatus struct { | |
- ctx context.Context | |
- cancelFn func() | |
- providersSoFar []peer.ID | |
- listeners map[chan peer.ID]struct{} | |
-} | |
- | |
-type findProviderRequest struct { | |
- k cid.Cid | |
- ctx context.Context | |
-} | |
- | |
-// ProviderQueryNetwork is an interface for finding providers and connecting to | |
-// peers. | |
-type ProviderQueryNetwork interface { | |
- ConnectTo(context.Context, peer.ID) error | |
- FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID | |
-} | |
- | |
-type providerQueryMessage interface { | |
- debugMessage() string | |
- handle(pqm *ProviderQueryManager) | |
-} | |
- | |
-type receivedProviderMessage struct { | |
- ctx context.Context | |
- k cid.Cid | |
- p peer.ID | |
-} | |
- | |
-type finishedProviderQueryMessage struct { | |
- ctx context.Context | |
- k cid.Cid | |
-} | |
- | |
-type newProvideQueryMessage struct { | |
- ctx context.Context | |
- k cid.Cid | |
- inProgressRequestChan chan<- inProgressRequest | |
-} | |
- | |
-type cancelRequestMessage struct { | |
- incomingProviders chan peer.ID | |
- k cid.Cid | |
-} | |
- | |
-// ProviderQueryManager manages requests to find more providers for blocks | |
-// for bitswap sessions. It's main goals are to: | |
-// - rate limit requests -- don't have too many find provider calls running | |
-// simultaneously | |
-// - connect to found peers and filter them if it can't connect | |
-//   - connect to found peers, filtering out those it cannot connect to | |
-// - manage timeouts | |
-type ProviderQueryManager struct { | |
- ctx context.Context | |
- network ProviderQueryNetwork | |
- providerQueryMessages chan providerQueryMessage | |
- providerRequestsProcessing chan *findProviderRequest | |
- incomingFindProviderRequests chan *findProviderRequest | |
- | |
- findProviderTimeout time.Duration | |
- timeoutMutex sync.RWMutex | |
- | |
- // do not touch outside the run loop | |
- inProgressRequestStatuses map[cid.Cid]*inProgressRequestStatus | |
-} | |
- | |
-// New initializes a new ProviderQueryManager for a given context and a given | |
-// network provider. | |
-func New(ctx context.Context, network ProviderQueryNetwork) *ProviderQueryManager { | |
- return &ProviderQueryManager{ | |
- ctx: ctx, | |
- network: network, | |
- providerQueryMessages: make(chan providerQueryMessage, 16), | |
- providerRequestsProcessing: make(chan *findProviderRequest), | |
- incomingFindProviderRequests: make(chan *findProviderRequest), | |
- inProgressRequestStatuses: make(map[cid.Cid]*inProgressRequestStatus), | |
- findProviderTimeout: defaultTimeout, | |
- } | |
-} | |
- | |
-// Startup starts processing for the ProviderQueryManager. | |
-func (pqm *ProviderQueryManager) Startup() { | |
- go pqm.run() | |
-} | |
- | |
-type inProgressRequest struct { | |
- providersSoFar []peer.ID | |
- incoming chan peer.ID | |
-} | |
- | |
-// SetFindProviderTimeout changes the timeout for finding providers | |
-func (pqm *ProviderQueryManager) SetFindProviderTimeout(findProviderTimeout time.Duration) { | |
- pqm.timeoutMutex.Lock() | |
- pqm.findProviderTimeout = findProviderTimeout | |
- pqm.timeoutMutex.Unlock() | |
-} | |
- | |
-// FindProvidersAsync finds providers for the given block. | |
-func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, k cid.Cid) <-chan peer.ID { | |
- inProgressRequestChan := make(chan inProgressRequest) | |
- | |
- select { | |
- case pqm.providerQueryMessages <- &newProvideQueryMessage{ | |
- ctx: sessionCtx, | |
- k: k, | |
- inProgressRequestChan: inProgressRequestChan, | |
- }: | |
- case <-pqm.ctx.Done(): | |
- ch := make(chan peer.ID) | |
- close(ch) | |
- return ch | |
- case <-sessionCtx.Done(): | |
- ch := make(chan peer.ID) | |
- close(ch) | |
- return ch | |
- } | |
- | |
- // DO NOT select on sessionCtx. We only want to abort here if we're | |
- // shutting down because we can't actually _cancel_ the request till we | |
- // get to receiveProviders. | |
- var receivedInProgressRequest inProgressRequest | |
- select { | |
- case <-pqm.ctx.Done(): | |
- ch := make(chan peer.ID) | |
- close(ch) | |
- return ch | |
- case receivedInProgressRequest = <-inProgressRequestChan: | |
- } | |
- | |
- return pqm.receiveProviders(sessionCtx, k, receivedInProgressRequest) | |
-} | |
- | |
-func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k cid.Cid, receivedInProgressRequest inProgressRequest) <-chan peer.ID { | |
-	// Maintains an unbounded buffer of incoming providers for a given request in a given session. | |
-	// Essentially, as a provider comes in for a given CID, we want to immediately broadcast it to | |
-	// all sessions that queried that CID, without worrying about whether the client code is | |
-	// actually reading from the returned channel -- so that the broadcast never blocks. | |
- // based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd | |
- returnedProviders := make(chan peer.ID) | |
- receivedProviders := append([]peer.ID(nil), receivedInProgressRequest.providersSoFar[0:]...) | |
- incomingProviders := receivedInProgressRequest.incoming | |
- | |
- go func() { | |
- defer close(returnedProviders) | |
- outgoingProviders := func() chan<- peer.ID { | |
- if len(receivedProviders) == 0 { | |
- return nil | |
- } | |
- return returnedProviders | |
- } | |
- nextProvider := func() peer.ID { | |
- if len(receivedProviders) == 0 { | |
- return "" | |
- } | |
- return receivedProviders[0] | |
- } | |
- for len(receivedProviders) > 0 || incomingProviders != nil { | |
- select { | |
- case <-pqm.ctx.Done(): | |
- return | |
- case <-sessionCtx.Done(): | |
- if incomingProviders != nil { | |
- pqm.cancelProviderRequest(k, incomingProviders) | |
- } | |
- return | |
- case provider, ok := <-incomingProviders: | |
- if !ok { | |
- incomingProviders = nil | |
- } else { | |
- receivedProviders = append(receivedProviders, provider) | |
- } | |
- case outgoingProviders() <- nextProvider(): | |
- receivedProviders = receivedProviders[1:] | |
- } | |
- } | |
- }() | |
- return returnedProviders | |
-} | |
- | |
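// Editor's note: receiveProviders above and providerRequestBufferWorker below
// both use the "unbounded channel" idiom the linked article describes: buffer
// items in a slice and return a nil channel to disable the send case while the
// buffer is empty (sends on nil channels block forever, so that select branch
// is simply skipped). A generic sketch of the idiom with string items
// (unboundedForward is an illustrative name, not part of this package):
func unboundedForward(ctx context.Context, in <-chan string, out chan<- string) {
	var buf []string
	outCh := func() chan<- string {
		if len(buf) == 0 {
			return nil // disables the send case below
		}
		return out
	}
	next := func() string {
		if len(buf) == 0 {
			return ""
		}
		return buf[0]
	}
	for len(buf) > 0 || in != nil {
		select {
		case v, ok := <-in:
			if !ok {
				in = nil // stop receiving, but keep draining buf
				continue
			}
			buf = append(buf, v)
		case outCh() <- next():
			buf = buf[1:]
		case <-ctx.Done():
			return
		}
	}
}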
-func (pqm *ProviderQueryManager) cancelProviderRequest(k cid.Cid, incomingProviders chan peer.ID) { | |
- cancelMessageChannel := pqm.providerQueryMessages | |
- for { | |
- select { | |
- case cancelMessageChannel <- &cancelRequestMessage{ | |
- incomingProviders: incomingProviders, | |
- k: k, | |
- }: | |
- cancelMessageChannel = nil | |
-		// clear out any remaining providers, in case any "incoming provider" | |
- // messages get processed before our cancel message | |
- case _, ok := <-incomingProviders: | |
- if !ok { | |
- return | |
- } | |
- case <-pqm.ctx.Done(): | |
- return | |
- } | |
- } | |
-} | |
- | |
-func (pqm *ProviderQueryManager) findProviderWorker() { | |
- // findProviderWorker just cycles through incoming provider queries one | |
- // at a time. We have six of these workers running at once | |
- // to let requests go in parallel but keep them rate limited | |
- for { | |
- select { | |
- case fpr, ok := <-pqm.providerRequestsProcessing: | |
- if !ok { | |
- return | |
- } | |
- k := fpr.k | |
- log.Debugf("Beginning Find Provider Request for cid: %s", k.String()) | |
- pqm.timeoutMutex.RLock() | |
- findProviderCtx, cancel := context.WithTimeout(fpr.ctx, pqm.findProviderTimeout) | |
- pqm.timeoutMutex.RUnlock() | |
- providers := pqm.network.FindProvidersAsync(findProviderCtx, k, maxProviders) | |
- wg := &sync.WaitGroup{} | |
- for p := range providers { | |
- wg.Add(1) | |
- go func(p peer.ID) { | |
- defer wg.Done() | |
- err := pqm.network.ConnectTo(findProviderCtx, p) | |
- if err != nil { | |
- log.Debugf("failed to connect to provider %s: %s", p, err) | |
- return | |
- } | |
- select { | |
- case pqm.providerQueryMessages <- &receivedProviderMessage{ | |
- ctx: findProviderCtx, | |
- k: k, | |
- p: p, | |
- }: | |
- case <-pqm.ctx.Done(): | |
- return | |
- } | |
- }(p) | |
- } | |
- wg.Wait() | |
- cancel() | |
- select { | |
- case pqm.providerQueryMessages <- &finishedProviderQueryMessage{ | |
- ctx: findProviderCtx, | |
- k: k, | |
- }: | |
- case <-pqm.ctx.Done(): | |
- } | |
- case <-pqm.ctx.Done(): | |
- return | |
- } | |
- } | |
-} | |
- | |
-func (pqm *ProviderQueryManager) providerRequestBufferWorker() { | |
- // the provider request buffer worker just maintains an unbounded | |
- // buffer for incoming provider queries and dispatches to the find | |
- // provider workers as they become available | |
- // based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd | |
- var providerQueryRequestBuffer []*findProviderRequest | |
- nextProviderQuery := func() *findProviderRequest { | |
- if len(providerQueryRequestBuffer) == 0 { | |
- return nil | |
- } | |
- return providerQueryRequestBuffer[0] | |
- } | |
- outgoingRequests := func() chan<- *findProviderRequest { | |
- if len(providerQueryRequestBuffer) == 0 { | |
- return nil | |
- } | |
- return pqm.providerRequestsProcessing | |
- } | |
- | |
- for { | |
- select { | |
- case incomingRequest, ok := <-pqm.incomingFindProviderRequests: | |
- if !ok { | |
- return | |
- } | |
- providerQueryRequestBuffer = append(providerQueryRequestBuffer, incomingRequest) | |
- case outgoingRequests() <- nextProviderQuery(): | |
- providerQueryRequestBuffer = providerQueryRequestBuffer[1:] | |
- case <-pqm.ctx.Done(): | |
- return | |
- } | |
- } | |
-} | |
- | |
-func (pqm *ProviderQueryManager) cleanupInProcessRequests() { | |
- for _, requestStatus := range pqm.inProgressRequestStatuses { | |
- for listener := range requestStatus.listeners { | |
- close(listener) | |
- } | |
- requestStatus.cancelFn() | |
- } | |
-} | |
- | |
-func (pqm *ProviderQueryManager) run() { | |
- defer pqm.cleanupInProcessRequests() | |
- | |
- go pqm.providerRequestBufferWorker() | |
- for i := 0; i < maxInProcessRequests; i++ { | |
- go pqm.findProviderWorker() | |
- } | |
- | |
- for { | |
- select { | |
- case nextMessage := <-pqm.providerQueryMessages: | |
- log.Debug(nextMessage.debugMessage()) | |
- nextMessage.handle(pqm) | |
- case <-pqm.ctx.Done(): | |
- return | |
- } | |
- } | |
-} | |
- | |
-func (rpm *receivedProviderMessage) debugMessage() string { | |
- return fmt.Sprintf("Received provider (%s) for cid (%s)", rpm.p.String(), rpm.k.String()) | |
-} | |
- | |
-func (rpm *receivedProviderMessage) handle(pqm *ProviderQueryManager) { | |
- requestStatus, ok := pqm.inProgressRequestStatuses[rpm.k] | |
- if !ok { | |
- log.Errorf("Received provider (%s) for cid (%s) not requested", rpm.p.String(), rpm.k.String()) | |
- return | |
- } | |
- requestStatus.providersSoFar = append(requestStatus.providersSoFar, rpm.p) | |
- for listener := range requestStatus.listeners { | |
- select { | |
- case listener <- rpm.p: | |
- case <-pqm.ctx.Done(): | |
- return | |
- } | |
- } | |
-} | |
- | |
-func (fpqm *finishedProviderQueryMessage) debugMessage() string { | |
- return fmt.Sprintf("Finished Provider Query on cid: %s", fpqm.k.String()) | |
-} | |
- | |
-func (fpqm *finishedProviderQueryMessage) handle(pqm *ProviderQueryManager) { | |
- requestStatus, ok := pqm.inProgressRequestStatuses[fpqm.k] | |
- if !ok { | |
- // we canceled the request as it finished. | |
- return | |
- } | |
- for listener := range requestStatus.listeners { | |
- close(listener) | |
- } | |
- delete(pqm.inProgressRequestStatuses, fpqm.k) | |
- requestStatus.cancelFn() | |
-} | |
- | |
-func (npqm *newProvideQueryMessage) debugMessage() string { | |
- return fmt.Sprintf("New Provider Query on cid: %s", npqm.k.String()) | |
-} | |
- | |
-func (npqm *newProvideQueryMessage) handle(pqm *ProviderQueryManager) { | |
- requestStatus, ok := pqm.inProgressRequestStatuses[npqm.k] | |
- if !ok { | |
- | |
- ctx, cancelFn := context.WithCancel(pqm.ctx) | |
- requestStatus = &inProgressRequestStatus{ | |
- listeners: make(map[chan peer.ID]struct{}), | |
- ctx: ctx, | |
- cancelFn: cancelFn, | |
- } | |
- pqm.inProgressRequestStatuses[npqm.k] = requestStatus | |
- select { | |
- case pqm.incomingFindProviderRequests <- &findProviderRequest{ | |
- k: npqm.k, | |
- ctx: ctx, | |
- }: | |
- case <-pqm.ctx.Done(): | |
- return | |
- } | |
- } | |
- inProgressChan := make(chan peer.ID) | |
- requestStatus.listeners[inProgressChan] = struct{}{} | |
- select { | |
- case npqm.inProgressRequestChan <- inProgressRequest{ | |
- providersSoFar: requestStatus.providersSoFar, | |
- incoming: inProgressChan, | |
- }: | |
- case <-pqm.ctx.Done(): | |
- } | |
-} | |
- | |
-func (crm *cancelRequestMessage) debugMessage() string { | |
- return fmt.Sprintf("Cancel provider query on cid: %s", crm.k.String()) | |
-} | |
- | |
-func (crm *cancelRequestMessage) handle(pqm *ProviderQueryManager) { | |
- requestStatus, ok := pqm.inProgressRequestStatuses[crm.k] | |
- if !ok { | |
- // Request finished while queued. | |
- return | |
- } | |
- _, ok = requestStatus.listeners[crm.incomingProviders] | |
- if !ok { | |
- // Request finished and _restarted_ while queued. | |
- return | |
- } | |
- delete(requestStatus.listeners, crm.incomingProviders) | |
- close(crm.incomingProviders) | |
- if len(requestStatus.listeners) == 0 { | |
- delete(pqm.inProgressRequestStatuses, crm.k) | |
- requestStatus.cancelFn() | |
- } | |
-} | |
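For reference, the providerRequestBufferWorker removed above uses the standard "unbounded channel" trick: buffer incoming requests in a slice and only enable the outgoing select case when the buffer is non-empty (a send on a nil channel never fires). Below is a minimal, standalone sketch of that pattern; it is illustration only, not part of the patch, and the request type and channel names are simplified placeholders.

package main

import "fmt"

// request stands in for *findProviderRequest; any payload type would do.
type request struct{ id int }

// bufferWorker forwards requests from in to out, holding them in an
// unbounded slice so that senders on in are never blocked by slow readers.
func bufferWorker(in <-chan request, out chan<- request, done <-chan struct{}) {
	var buf []request
	for {
		// Only offer the head of the buffer when there is one; sending on a
		// nil channel blocks forever, so that select case is disabled.
		var next request
		var outCh chan<- request
		if len(buf) > 0 {
			next = buf[0]
			outCh = out
		}
		select {
		case r, ok := <-in:
			if !ok {
				return
			}
			buf = append(buf, r)
		case outCh <- next:
			buf = buf[1:]
		case <-done:
			return
		}
	}
}

func main() {
	in := make(chan request)
	out := make(chan request)
	done := make(chan struct{})
	go bufferWorker(in, out, done)

	for i := 0; i < 3; i++ {
		in <- request{id: i} // accepted immediately, even with no reader on out
	}
	for i := 0; i < 3; i++ {
		fmt.Println((<-out).id) // 0, 1, 2 in FIFO order
	}
	close(done)
}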
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/session/cidqueue.go a/vendor/github.com/ipfs/go-bitswap/client/internal/session/cidqueue.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/session/cidqueue.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/session/cidqueue.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,63 +0,0 @@ | |
-package session | |
- | |
-import cid "github.com/ipfs/go-cid" | |
- | |
-type cidQueue struct { | |
- elems []cid.Cid | |
- eset *cid.Set | |
-} | |
- | |
-func newCidQueue() *cidQueue { | |
- return &cidQueue{eset: cid.NewSet()} | |
-} | |
- | |
-func (cq *cidQueue) Pop() cid.Cid { | |
- for { | |
- if len(cq.elems) == 0 { | |
- return cid.Cid{} | |
- } | |
- | |
- out := cq.elems[0] | |
- cq.elems = cq.elems[1:] | |
- | |
- if cq.eset.Has(out) { | |
- cq.eset.Remove(out) | |
- return out | |
- } | |
- } | |
-} | |
- | |
-func (cq *cidQueue) Cids() []cid.Cid { | |
- // Lazily delete from the list any cids that were removed from the set | |
- if len(cq.elems) > cq.eset.Len() { | |
- i := 0 | |
- for _, c := range cq.elems { | |
- if cq.eset.Has(c) { | |
- cq.elems[i] = c | |
- i++ | |
- } | |
- } | |
- cq.elems = cq.elems[:i] | |
- } | |
- | |
- // Make a copy of the cids | |
- return append([]cid.Cid{}, cq.elems...) | |
-} | |
- | |
-func (cq *cidQueue) Push(c cid.Cid) { | |
- if cq.eset.Visit(c) { | |
- cq.elems = append(cq.elems, c) | |
- } | |
-} | |
- | |
-func (cq *cidQueue) Remove(c cid.Cid) { | |
- cq.eset.Remove(c) | |
-} | |
- | |
-func (cq *cidQueue) Has(c cid.Cid) bool { | |
- return cq.eset.Has(c) | |
-} | |
- | |
-func (cq *cidQueue) Len() int { | |
- return cq.eset.Len() | |
-} | |
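The cidQueue above pairs a FIFO slice with a cid.Set: Remove only touches the set, and Pop/Cids lazily drop entries that are no longer in it. The sketch below shows that behaviour as a hypothetical test placed in the same package; the demoCid helper and the test itself are illustrative additions, not part of the patch.

package session

import (
	"testing"

	cid "github.com/ipfs/go-cid"
	mh "github.com/multiformats/go-multihash"
)

// demoCid builds a CIDv1 (raw codec) from a string; helper for this sketch only.
func demoCid(s string) cid.Cid {
	h, err := mh.Sum([]byte(s), mh.SHA2_256, -1)
	if err != nil {
		panic(err)
	}
	return cid.NewCidV1(cid.Raw, h)
}

func TestCidQueueLazyRemoval(t *testing.T) {
	q := newCidQueue()
	a, b, c := demoCid("a"), demoCid("b"), demoCid("c")

	q.Push(a)
	q.Push(b)
	q.Push(a) // duplicate: eset.Visit returns false, so it is not queued twice
	q.Push(c)

	// Remove only updates the set; b stays in the slice until Pop or Cids
	// lazily drops it.
	q.Remove(b)

	if !q.Pop().Equals(a) || !q.Pop().Equals(c) {
		t.Fatal("expected Pop to return a then c, skipping the removed b")
	}
	if q.Len() != 0 {
		t.Fatalf("expected empty queue, got Len() == %d", q.Len())
	}
}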
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/session/peerresponsetracker.go a/vendor/github.com/ipfs/go-bitswap/client/internal/session/peerresponsetracker.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/session/peerresponsetracker.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/session/peerresponsetracker.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,70 +0,0 @@ | |
-package session | |
- | |
-import ( | |
- "math/rand" | |
- | |
- peer "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-// peerResponseTracker keeps track of how many times each peer was the first | |
-// to send us a block for a given CID (used to rank peers) | |
-type peerResponseTracker struct { | |
- firstResponder map[peer.ID]int | |
-} | |
- | |
-func newPeerResponseTracker() *peerResponseTracker { | |
- return &peerResponseTracker{ | |
- firstResponder: make(map[peer.ID]int), | |
- } | |
-} | |
- | |
-// receivedBlockFrom is called when a block is received from a peer | |
-// (only called the first time a block is received) | |
-func (prt *peerResponseTracker) receivedBlockFrom(from peer.ID) { | |
- prt.firstResponder[from]++ | |
-} | |
- | |
-// choose picks a peer from the list of candidate peers, favouring those peers | |
-// that were first to send us previous blocks | |
-func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID { | |
- if len(peers) == 0 { | |
- return "" | |
- } | |
- | |
- rnd := rand.Float64() | |
- | |
- // Find the total received blocks for all candidate peers | |
- total := 0 | |
- for _, p := range peers { | |
- total += prt.getPeerCount(p) | |
- } | |
- | |
- // Choose one of the peers with a chance proportional to the number | |
- // of blocks received from that peer | |
- counted := 0.0 | |
- for _, p := range peers { | |
- counted += float64(prt.getPeerCount(p)) / float64(total) | |
- if counted > rnd { | |
- return p | |
- } | |
- } | |
- | |
- // We shouldn't get here unless some floating point weirdness means the | |
- // loop above doesn't quite cover the whole range of peers, | |
- // so just choose the last peer. | |
- index := len(peers) - 1 | |
- return peers[index] | |
-} | |
- | |
-// getPeerCount returns the number of times the peer was first to send us a | |
-// block | |
-func (prt *peerResponseTracker) getPeerCount(p peer.ID) int { | |
- count, ok := prt.firstResponder[p] | |
- if ok { | |
- return count | |
- } | |
- | |
- // Make sure there is always at least a small chance a new peer | |
- // will be chosen | |
- return 1 | |
-} | |
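choose above is a weighted random selection: each candidate's weight is the number of times it was first to respond (with a floor of 1 so new peers stay eligible), and a peer is picked with probability proportional to its weight. A self-contained sketch of the same technique, using string keys instead of peer.IDs and made-up names, might look like this:

package main

import (
	"fmt"
	"math/rand"
)

// weightOf returns the recorded weight for key, defaulting to 1 so that
// unknown candidates still have a small chance of being chosen.
func weightOf(weights map[string]int, key string) int {
	if w, ok := weights[key]; ok {
		return w
	}
	return 1
}

// weightedChoice picks one candidate with probability proportional to its
// weight, mirroring the cumulative-probability walk in choose above.
func weightedChoice(candidates []string, weights map[string]int) string {
	if len(candidates) == 0 {
		return ""
	}
	total := 0
	for _, c := range candidates {
		total += weightOf(weights, c)
	}
	rnd := rand.Float64()
	counted := 0.0
	for _, c := range candidates {
		counted += float64(weightOf(weights, c)) / float64(total)
		if counted > rnd {
			return c
		}
	}
	// Floating point rounding can leave a tiny uncovered tail; fall back to
	// the last candidate, as the original code does.
	return candidates[len(candidates)-1]
}

func main() {
	weights := map[string]int{"peerA": 8, "peerB": 1} // peerC is unknown -> weight 1
	picks := map[string]int{}
	for i := 0; i < 1000; i++ {
		picks[weightedChoice([]string{"peerA", "peerB", "peerC"}, weights)]++
	}
	fmt.Println(picks) // peerA dominates (~80%), but peerB and peerC still appear
}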
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/session/sentwantblockstracker.go a/vendor/github.com/ipfs/go-bitswap/client/internal/session/sentwantblockstracker.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/session/sentwantblockstracker.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/session/sentwantblockstracker.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,33 +0,0 @@ | |
-package session | |
- | |
-import ( | |
- cid "github.com/ipfs/go-cid" | |
- peer "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-// sentWantBlocksTracker keeps track of which peers we've sent a want-block to | |
-type sentWantBlocksTracker struct { | |
- sentWantBlocks map[peer.ID]map[cid.Cid]struct{} | |
-} | |
- | |
-func newSentWantBlocksTracker() *sentWantBlocksTracker { | |
- return &sentWantBlocksTracker{ | |
- sentWantBlocks: make(map[peer.ID]map[cid.Cid]struct{}), | |
- } | |
-} | |
- | |
-func (s *sentWantBlocksTracker) addSentWantBlocksTo(p peer.ID, ks []cid.Cid) { | |
- cids, ok := s.sentWantBlocks[p] | |
- if !ok { | |
- cids = make(map[cid.Cid]struct{}, len(ks)) | |
- s.sentWantBlocks[p] = cids | |
- } | |
- for _, c := range ks { | |
- cids[c] = struct{}{} | |
- } | |
-} | |
- | |
-func (s *sentWantBlocksTracker) haveSentWantBlockTo(p peer.ID, c cid.Cid) bool { | |
- _, ok := s.sentWantBlocks[p][c] | |
- return ok | |
-} | |
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/session/session.go a/vendor/github.com/ipfs/go-bitswap/client/internal/session/session.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/session/session.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/session/session.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,508 +0,0 @@ | |
-package session | |
- | |
-import ( | |
- "context" | |
- "time" | |
- | |
- "github.com/ipfs/go-bitswap/client/internal" | |
- bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" | |
- bsgetter "github.com/ipfs/go-bitswap/client/internal/getter" | |
- notifications "github.com/ipfs/go-bitswap/client/internal/notifications" | |
- bspm "github.com/ipfs/go-bitswap/client/internal/peermanager" | |
- bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" | |
- blocks "github.com/ipfs/go-block-format" | |
- cid "github.com/ipfs/go-cid" | |
- delay "github.com/ipfs/go-ipfs-delay" | |
- logging "github.com/ipfs/go-log" | |
- peer "github.com/libp2p/go-libp2p/core/peer" | |
- "go.uber.org/zap" | |
-) | |
- | |
-var log = logging.Logger("bs:sess") | |
-var sflog = log.Desugar() | |
- | |
-const ( | |
- broadcastLiveWantsLimit = 64 | |
-) | |
- | |
-// PeerManager keeps track of which sessions are interested in which peers | |
-// and takes care of sending wants for the sessions | |
-type PeerManager interface { | |
- // RegisterSession tells the PeerManager that the session is interested | |
- // in a peer's connection state | |
- RegisterSession(peer.ID, bspm.Session) | |
- // UnregisterSession tells the PeerManager that the session is no longer | |
- // interested in a peer's connection state | |
- UnregisterSession(uint64) | |
- // SendWants tells the PeerManager to send wants to the given peer | |
- SendWants(ctx context.Context, peerId peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) | |
- // BroadcastWantHaves sends want-haves to all connected peers (used for | |
- // session discovery) | |
- BroadcastWantHaves(context.Context, []cid.Cid) | |
- // SendCancels tells the PeerManager to send cancels to all peers | |
- SendCancels(context.Context, []cid.Cid) | |
-} | |
- | |
-// SessionManager manages all the sessions | |
-type SessionManager interface { | |
- // Remove a session (called when the session shuts down) | |
- RemoveSession(sesid uint64) | |
- // Cancel wants (called when a call to GetBlocks() is cancelled) | |
- CancelSessionWants(sid uint64, wants []cid.Cid) | |
-} | |
- | |
-// SessionPeerManager keeps track of peers in the session | |
-type SessionPeerManager interface { | |
- // PeersDiscovered indicates if any peers have been discovered yet | |
- PeersDiscovered() bool | |
- // Shutdown the SessionPeerManager | |
- Shutdown() | |
- // Adds a peer to the session, returning true if the peer is new | |
- AddPeer(peer.ID) bool | |
- // Removes a peer from the session, returning true if the peer existed | |
- RemovePeer(peer.ID) bool | |
- // All peers in the session | |
- Peers() []peer.ID | |
- // Whether there are any peers in the session | |
- HasPeers() bool | |
- // Protect connection from being pruned by the connection manager | |
- ProtectConnection(peer.ID) | |
-} | |
- | |
-// ProviderFinder is used to find providers for a given key | |
-type ProviderFinder interface { | |
- // FindProvidersAsync searches for peers that provide the given CID | |
- FindProvidersAsync(ctx context.Context, k cid.Cid) <-chan peer.ID | |
-} | |
- | |
-// opType is the kind of operation that is being processed by the event loop | |
-type opType int | |
- | |
-const ( | |
- // Receive blocks | |
- opReceive opType = iota | |
- // Want blocks | |
- opWant | |
- // Cancel wants | |
- opCancel | |
- // Broadcast want-haves | |
- opBroadcast | |
- // Wants sent to peers | |
- opWantsSent | |
-) | |
- | |
-type op struct { | |
- op opType | |
- keys []cid.Cid | |
-} | |
- | |
-// Session holds state for an individual bitswap transfer operation. | |
-// This allows bitswap to make smarter decisions about who to send wantlist | |
-// info to, and who to request blocks from. | |
-type Session struct { | |
- // dependencies | |
- ctx context.Context | |
- shutdown func() | |
- sm SessionManager | |
- pm PeerManager | |
- sprm SessionPeerManager | |
- providerFinder ProviderFinder | |
- sim *bssim.SessionInterestManager | |
- | |
- sw sessionWants | |
- sws sessionWantSender | |
- | |
- latencyTrkr latencyTracker | |
- | |
- // channels | |
- incoming chan op | |
- tickDelayReqs chan time.Duration | |
- | |
- // do not touch outside run loop | |
- idleTick *time.Timer | |
- periodicSearchTimer *time.Timer | |
- baseTickDelay time.Duration | |
- consecutiveTicks int | |
- initialSearchDelay time.Duration | |
- periodicSearchDelay delay.D | |
- // identifiers | |
- notif notifications.PubSub | |
- id uint64 | |
- | |
- self peer.ID | |
-} | |
- | |
-// New creates a new bitswap session whose lifetime is bounded by the | |
-// given context. | |
-func New( | |
- ctx context.Context, | |
- sm SessionManager, | |
- id uint64, | |
- sprm SessionPeerManager, | |
- providerFinder ProviderFinder, | |
- sim *bssim.SessionInterestManager, | |
- pm PeerManager, | |
- bpm *bsbpm.BlockPresenceManager, | |
- notif notifications.PubSub, | |
- initialSearchDelay time.Duration, | |
- periodicSearchDelay delay.D, | |
- self peer.ID) *Session { | |
- | |
- ctx, cancel := context.WithCancel(ctx) | |
- s := &Session{ | |
- sw: newSessionWants(broadcastLiveWantsLimit), | |
- tickDelayReqs: make(chan time.Duration), | |
- ctx: ctx, | |
- shutdown: cancel, | |
- sm: sm, | |
- pm: pm, | |
- sprm: sprm, | |
- providerFinder: providerFinder, | |
- sim: sim, | |
- incoming: make(chan op, 128), | |
- latencyTrkr: latencyTracker{}, | |
- notif: notif, | |
- baseTickDelay: time.Millisecond * 500, | |
- id: id, | |
- initialSearchDelay: initialSearchDelay, | |
- periodicSearchDelay: periodicSearchDelay, | |
- self: self, | |
- } | |
- s.sws = newSessionWantSender(id, pm, sprm, sm, bpm, s.onWantsSent, s.onPeersExhausted) | |
- | |
- go s.run(ctx) | |
- | |
- return s | |
-} | |
- | |
-func (s *Session) ID() uint64 { | |
- return s.id | |
-} | |
- | |
-func (s *Session) Shutdown() { | |
- s.shutdown() | |
-} | |
- | |
-// ReceiveFrom receives incoming blocks, HAVEs and DONT_HAVEs from the given peer. | |
-func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { | |
- // The SessionManager tells each Session about all keys that it may be | |
- // interested in. Here the Session filters the keys to the ones that this | |
- // particular Session is interested in. | |
- interestedRes := s.sim.FilterSessionInterested(s.id, ks, haves, dontHaves) | |
- ks = interestedRes[0] | |
- haves = interestedRes[1] | |
- dontHaves = interestedRes[2] | |
- s.logReceiveFrom(from, ks, haves, dontHaves) | |
- | |
- // Inform the session want sender that a message has been received | |
- s.sws.Update(from, ks, haves, dontHaves) | |
- | |
- if len(ks) == 0 { | |
- return | |
- } | |
- | |
- // Inform the session that blocks have been received | |
- select { | |
- case s.incoming <- op{op: opReceive, keys: ks}: | |
- case <-s.ctx.Done(): | |
- } | |
-} | |
- | |
-func (s *Session) logReceiveFrom(from peer.ID, interestedKs []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { | |
- // Save some CPU cycles if log level is higher than debug | |
- if ce := sflog.Check(zap.DebugLevel, "Bitswap <- rcv message"); ce == nil { | |
- return | |
- } | |
- | |
- for _, c := range interestedKs { | |
- log.Debugw("Bitswap <- block", "local", s.self, "from", from, "cid", c, "session", s.id) | |
- } | |
- for _, c := range haves { | |
- log.Debugw("Bitswap <- HAVE", "local", s.self, "from", from, "cid", c, "session", s.id) | |
- } | |
- for _, c := range dontHaves { | |
- log.Debugw("Bitswap <- DONT_HAVE", "local", s.self, "from", from, "cid", c, "session", s.id) | |
- } | |
-} | |
- | |
-// GetBlock fetches a single block. | |
-func (s *Session) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) { | |
- ctx, span := internal.StartSpan(ctx, "Session.GetBlock") | |
- defer span.End() | |
- return bsgetter.SyncGetBlock(ctx, k, s.GetBlocks) | |
-} | |
- | |
-// GetBlocks fetches a set of blocks within the context of this session and | |
-// returns a channel that found blocks will be returned on. No order is | |
-// guaranteed on the returned blocks. | |
-func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { | |
- ctx, span := internal.StartSpan(ctx, "Session.GetBlocks") | |
- defer span.End() | |
- | |
- return bsgetter.AsyncGetBlocks(ctx, s.ctx, keys, s.notif, | |
- func(ctx context.Context, keys []cid.Cid) { | |
- select { | |
- case s.incoming <- op{op: opWant, keys: keys}: | |
- case <-ctx.Done(): | |
- case <-s.ctx.Done(): | |
- } | |
- }, | |
- func(keys []cid.Cid) { | |
- select { | |
- case s.incoming <- op{op: opCancel, keys: keys}: | |
- case <-s.ctx.Done(): | |
- } | |
- }, | |
- ) | |
-} | |
- | |
-// SetBaseTickDelay changes the rate at which ticks happen. | |
-func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { | |
- select { | |
- case s.tickDelayReqs <- baseTickDelay: | |
- case <-s.ctx.Done(): | |
- } | |
-} | |
- | |
-// onWantsSent is called when wants are sent to a peer by the session wants sender | |
-func (s *Session) onWantsSent(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { | |
- allBlks := append(wantBlocks[:len(wantBlocks):len(wantBlocks)], wantHaves...) | |
- s.nonBlockingEnqueue(op{op: opWantsSent, keys: allBlks}) | |
-} | |
- | |
-// onPeersExhausted is called when all available peers have sent DONT_HAVE for | |
-// a set of cids (or all peers become unavailable) | |
-func (s *Session) onPeersExhausted(ks []cid.Cid) { | |
- s.nonBlockingEnqueue(op{op: opBroadcast, keys: ks}) | |
-} | |
- | |
-// We don't want to block the sessionWantSender if the incoming channel | |
-// is full. So if we can't immediately send on the incoming channel, spin | |
-// the send off into a goroutine. | |
-func (s *Session) nonBlockingEnqueue(o op) { | |
- select { | |
- case s.incoming <- o: | |
- default: | |
- go func() { | |
- select { | |
- case s.incoming <- o: | |
- case <-s.ctx.Done(): | |
- } | |
- }() | |
- } | |
-} | |
- | |
-// Session run loop -- nothing in this function should be called | |
-// outside of this loop | |
-func (s *Session) run(ctx context.Context) { | |
- go s.sws.Run() | |
- | |
- s.idleTick = time.NewTimer(s.initialSearchDelay) | |
- s.periodicSearchTimer = time.NewTimer(s.periodicSearchDelay.NextWaitTime()) | |
- for { | |
- select { | |
- case oper := <-s.incoming: | |
- switch oper.op { | |
- case opReceive: | |
- // Received blocks | |
- s.handleReceive(oper.keys) | |
- case opWant: | |
- // Client wants blocks | |
- s.wantBlocks(ctx, oper.keys) | |
- case opCancel: | |
- // Wants were cancelled | |
- s.sw.CancelPending(oper.keys) | |
- s.sws.Cancel(oper.keys) | |
- case opWantsSent: | |
- // Wants were sent to a peer | |
- s.sw.WantsSent(oper.keys) | |
- case opBroadcast: | |
- // Broadcast want-haves to all peers | |
- s.broadcast(ctx, oper.keys) | |
- default: | |
- panic("unhandled operation") | |
- } | |
- case <-s.idleTick.C: | |
- // The session hasn't received blocks for a while, broadcast | |
- s.broadcast(ctx, nil) | |
- case <-s.periodicSearchTimer.C: | |
- // Periodically search for a random live want | |
- s.handlePeriodicSearch(ctx) | |
- case baseTickDelay := <-s.tickDelayReqs: | |
- // Set the base tick delay | |
- s.baseTickDelay = baseTickDelay | |
- case <-ctx.Done(): | |
- // Shutdown | |
- s.handleShutdown() | |
- return | |
- } | |
- } | |
-} | |
- | |
-// Called when the session hasn't received any blocks for some time, or when | |
-// all peers in the session have sent DONT_HAVE for a particular set of CIDs. | |
-// Send want-haves to all connected peers, and search for new peers with the CID. | |
-func (s *Session) broadcast(ctx context.Context, wants []cid.Cid) { | |
- // If this broadcast is because of an idle timeout (we haven't received | |
- // any blocks for a while) then broadcast all pending wants | |
- if wants == nil { | |
- wants = s.sw.PrepareBroadcast() | |
- } | |
- | |
- // Broadcast a want-have for the live wants to everyone we're connected to | |
- s.broadcastWantHaves(ctx, wants) | |
- | |
- // do not find providers on consecutive ticks | |
- // -- just rely on periodic search widening | |
- if len(wants) > 0 && (s.consecutiveTicks == 0) { | |
- // Search for providers who have the first want in the list. | |
- // Typically if the provider has the first block they will have | |
- // the rest of the blocks also. | |
- log.Debugw("FindMorePeers", "session", s.id, "cid", wants[0], "pending", len(wants)) | |
- s.findMorePeers(ctx, wants[0]) | |
- } | |
- s.resetIdleTick() | |
- | |
- // If we have live wants record a consecutive tick | |
- if s.sw.HasLiveWants() { | |
- s.consecutiveTicks++ | |
- } | |
-} | |
- | |
-// handlePeriodicSearch is called periodically to search for providers of a | |
-// randomly chosen CID in the session. | |
-func (s *Session) handlePeriodicSearch(ctx context.Context) { | |
- randomWant := s.sw.RandomLiveWant() | |
- if !randomWant.Defined() { | |
- return | |
- } | |
- | |
- // TODO: come up with a better strategy for determining when to search | |
- // for new providers for blocks. | |
- s.findMorePeers(ctx, randomWant) | |
- | |
- s.broadcastWantHaves(ctx, []cid.Cid{randomWant}) | |
- | |
- s.periodicSearchTimer.Reset(s.periodicSearchDelay.NextWaitTime()) | |
-} | |
- | |
-// findMorePeers attempts to find more peers for a session by searching for | |
-// providers for the given Cid | |
-func (s *Session) findMorePeers(ctx context.Context, c cid.Cid) { | |
- go func(k cid.Cid) { | |
- for p := range s.providerFinder.FindProvidersAsync(ctx, k) { | |
- // When a provider indicates that it has a cid, it's equivalent to | |
- // the providing peer sending a HAVE | |
- s.sws.Update(p, nil, []cid.Cid{c}, nil) | |
- } | |
- }(c) | |
-} | |
- | |
-// handleShutdown is called when the session shuts down | |
-func (s *Session) handleShutdown() { | |
- // Stop the idle timer | |
- s.idleTick.Stop() | |
- // Shut down the session peer manager | |
- s.sprm.Shutdown() | |
- // Shut down the sessionWantSender (blocks until sessionWantSender stops | |
- // sending) | |
- s.sws.Shutdown() | |
- // Signal to the SessionManager that the session has been shutdown | |
- // and can be cleaned up | |
- s.sm.RemoveSession(s.id) | |
-} | |
- | |
-// handleReceive is called when the session receives blocks from a peer | |
-func (s *Session) handleReceive(ks []cid.Cid) { | |
- // Record which blocks have been received and figure out the total latency | |
- // for fetching the blocks | |
- wanted, totalLatency := s.sw.BlocksReceived(ks) | |
- if len(wanted) == 0 { | |
- return | |
- } | |
- | |
- // Record latency | |
- s.latencyTrkr.receiveUpdate(len(wanted), totalLatency) | |
- | |
- // Inform the SessionInterestManager that this session is no longer | |
- // expecting to receive the wanted keys | |
- s.sim.RemoveSessionWants(s.id, wanted) | |
- | |
- s.idleTick.Stop() | |
- | |
- // We've received new wanted blocks, so reset the number of ticks | |
- // that have occurred since the last new block | |
- s.consecutiveTicks = 0 | |
- | |
- s.resetIdleTick() | |
-} | |
- | |
-// wantBlocks is called when blocks are requested by the client | |
-func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) { | |
- if len(newks) > 0 { | |
- // Inform the SessionInterestManager that this session is interested in the keys | |
- s.sim.RecordSessionInterest(s.id, newks) | |
- // Tell the sessionWants tracker that the wants have been requested | |
- s.sw.BlocksRequested(newks) | |
- // Tell the sessionWantSender that the blocks have been requested | |
- s.sws.Add(newks) | |
- } | |
- | |
- // If we have discovered peers already, the sessionWantSender will | |
- // send wants to them | |
- if s.sprm.PeersDiscovered() { | |
- return | |
- } | |
- | |
- // No peers discovered yet, broadcast some want-haves | |
- ks := s.sw.GetNextWants() | |
- if len(ks) > 0 { | |
- log.Infow("No peers - broadcasting", "session", s.id, "want-count", len(ks)) | |
- s.broadcastWantHaves(ctx, ks) | |
- } | |
-} | |
- | |
-// Send want-haves to all connected peers | |
-func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { | |
- log.Debugw("broadcastWantHaves", "session", s.id, "cids", wants) | |
- s.pm.BroadcastWantHaves(ctx, wants) | |
-} | |
- | |
-// The session will broadcast if it has outstanding wants and doesn't receive | |
-// any blocks for some time. | |
-// The length of time is calculated | |
-// - initially | |
-// as a fixed delay | |
-// - once some blocks are received | |
-// from a base delay and average latency, with a backoff | |
-func (s *Session) resetIdleTick() { | |
- var tickDelay time.Duration | |
- if !s.latencyTrkr.hasLatency() { | |
- tickDelay = s.initialSearchDelay | |
- } else { | |
- avLat := s.latencyTrkr.averageLatency() | |
- tickDelay = s.baseTickDelay + (3 * avLat) | |
- } | |
- tickDelay = tickDelay * time.Duration(1+s.consecutiveTicks) | |
- s.idleTick.Reset(tickDelay) | |
-} | |
- | |
-// latencyTracker keeps track of the average latency between sending a want | |
-// and receiving the corresponding block | |
-type latencyTracker struct { | |
- totalLatency time.Duration | |
- count int | |
-} | |
- | |
-func (lt *latencyTracker) hasLatency() bool { | |
- return lt.totalLatency > 0 && lt.count > 0 | |
-} | |
- | |
-func (lt *latencyTracker) averageLatency() time.Duration { | |
- return lt.totalLatency / time.Duration(lt.count) | |
-} | |
- | |
-func (lt *latencyTracker) receiveUpdate(count int, totalLatency time.Duration) { | |
- lt.totalLatency += totalLatency | |
- lt.count += count | |
-} | |
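The resetIdleTick logic above determines how long the session waits before its next idle broadcast: a fixed initial delay until latency samples exist, then the base tick delay plus three times the measured average latency, stretched by the number of consecutive unproductive ticks. A small standalone sketch of that calculation (the function name and sample values are made up for illustration):

package main

import (
	"fmt"
	"time"
)

// nextIdleDelay reproduces the backoff used by resetIdleTick above:
// before any latency samples exist, use the fixed initial search delay;
// afterwards use the base delay plus three times the average latency,
// and stretch the result for every consecutive idle tick.
func nextIdleDelay(initial, base, avgLatency time.Duration, hasLatency bool, consecutiveTicks int) time.Duration {
	var d time.Duration
	if !hasLatency {
		d = initial
	} else {
		d = base + 3*avgLatency
	}
	return d * time.Duration(1+consecutiveTicks)
}

func main() {
	base := 500 * time.Millisecond
	avg := 200 * time.Millisecond
	for ticks := 0; ticks < 4; ticks++ {
		fmt.Println(nextIdleDelay(time.Second, base, avg, true, ticks))
	}
	// Prints 1.1s, 2.2s, 3.3s, 4.4s: the idle broadcast backs off linearly
	// with each tick that produces no new blocks.
}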
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/session/sessionwantsender.go a/vendor/github.com/ipfs/go-bitswap/client/internal/session/sessionwantsender.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/session/sessionwantsender.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/session/sessionwantsender.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,766 +0,0 @@ | |
-package session | |
- | |
-import ( | |
- "context" | |
- | |
- bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" | |
- | |
- cid "github.com/ipfs/go-cid" | |
- peer "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-const ( | |
- // Maximum number of changes to accept before blocking | |
- changesBufferSize = 128 | |
- // If the session receives this many DONT_HAVEs in a row from a peer, | |
- // it prunes the peer from the session | |
- peerDontHaveLimit = 16 | |
-) | |
- | |
-// BlockPresence indicates whether a peer has a block. | |
-// Note that the order is important: we decide which peer to send a want to | |
-// based on knowing whether the peer has the block. E.g. we're more likely to | |
-// send a want to a peer that has the block than to a peer that doesn't have | |
-// it, so BPHave > BPDontHave. | |
-type BlockPresence int | |
- | |
-const ( | |
- BPDontHave BlockPresence = iota | |
- BPUnknown | |
- BPHave | |
-) | |
- | |
-// SessionWantsCanceller provides a method to cancel wants | |
-type SessionWantsCanceller interface { | |
- // Cancel wants for this session | |
- CancelSessionWants(sid uint64, wants []cid.Cid) | |
-} | |
- | |
-// update encapsulates a message received by the session | |
-type update struct { | |
- // Which peer sent the update | |
- from peer.ID | |
- // cids of blocks received | |
- ks []cid.Cid | |
- // HAVE message | |
- haves []cid.Cid | |
- // DONT_HAVE message | |
- dontHaves []cid.Cid | |
-} | |
- | |
-// peerAvailability indicates a peer's connection state | |
-type peerAvailability struct { | |
- target peer.ID | |
- available bool | |
-} | |
- | |
-// change can be new wants, a new message received by the session, | |
-// or a change in the connect status of a peer | |
-type change struct { | |
- // new wants requested | |
- add []cid.Cid | |
- // wants cancelled | |
- cancel []cid.Cid | |
- // new message received by session (blocks / HAVEs / DONT_HAVEs) | |
- update update | |
- // peer has connected / disconnected | |
- availability peerAvailability | |
-} | |
- | |
-type onSendFn func(to peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) | |
-type onPeersExhaustedFn func([]cid.Cid) | |
- | |
-// sessionWantSender is responsible for sending want-have and want-block to | |
-// peers. For each want, it sends a single optimistic want-block request to | |
-// one peer and want-have requests to all other peers in the session. | |
-// To choose the best peer for the optimistic want-block it maintains a list | |
-// of how peers have responded to each want (HAVE / DONT_HAVE / Unknown) and | |
-// consults the peer response tracker (records which peers sent us blocks). | |
-type sessionWantSender struct { | |
- // The context is used when sending wants | |
- ctx context.Context | |
- // Called to shutdown the sessionWantSender | |
- shutdown func() | |
- // The sessionWantSender uses the closed channel to signal when it's | |
- // finished shutting down | |
- closed chan struct{} | |
- // The session ID | |
- sessionID uint64 | |
- // A channel that collects incoming changes (events) | |
- changes chan change | |
- // Information about each want indexed by CID | |
- wants map[cid.Cid]*wantInfo | |
- // Keeps track of how many consecutive DONT_HAVEs a peer has sent | |
- peerConsecutiveDontHaves map[peer.ID]int | |
- // Tracks which peers we have sent a want-block to | |
- swbt *sentWantBlocksTracker | |
- // Tracks the number of blocks each peer sent us | |
- peerRspTrkr *peerResponseTracker | |
- // Sends wants to peers | |
- pm PeerManager | |
- // Keeps track of peers in the session | |
- spm SessionPeerManager | |
- // Cancels wants | |
- canceller SessionWantsCanceller | |
- // Keeps track of which peer has / doesn't have a block | |
- bpm *bsbpm.BlockPresenceManager | |
- // Called when wants are sent | |
- onSend onSendFn | |
- // Called when all peers explicitly don't have a block | |
- onPeersExhausted onPeersExhaustedFn | |
-} | |
- | |
-func newSessionWantSender(sid uint64, pm PeerManager, spm SessionPeerManager, canceller SessionWantsCanceller, | |
- bpm *bsbpm.BlockPresenceManager, onSend onSendFn, onPeersExhausted onPeersExhaustedFn) sessionWantSender { | |
- | |
- ctx, cancel := context.WithCancel(context.Background()) | |
- sws := sessionWantSender{ | |
- ctx: ctx, | |
- shutdown: cancel, | |
- closed: make(chan struct{}), | |
- sessionID: sid, | |
- changes: make(chan change, changesBufferSize), | |
- wants: make(map[cid.Cid]*wantInfo), | |
- peerConsecutiveDontHaves: make(map[peer.ID]int), | |
- swbt: newSentWantBlocksTracker(), | |
- peerRspTrkr: newPeerResponseTracker(), | |
- | |
- pm: pm, | |
- spm: spm, | |
- canceller: canceller, | |
- bpm: bpm, | |
- onSend: onSend, | |
- onPeersExhausted: onPeersExhausted, | |
- } | |
- | |
- return sws | |
-} | |
- | |
-func (sws *sessionWantSender) ID() uint64 { | |
- return sws.sessionID | |
-} | |
- | |
-// Add is called when new wants are added to the session | |
-func (sws *sessionWantSender) Add(ks []cid.Cid) { | |
- if len(ks) == 0 { | |
- return | |
- } | |
- sws.addChange(change{add: ks}) | |
-} | |
- | |
-// Cancel is called when a request is cancelled | |
-func (sws *sessionWantSender) Cancel(ks []cid.Cid) { | |
- if len(ks) == 0 { | |
- return | |
- } | |
- sws.addChange(change{cancel: ks}) | |
-} | |
- | |
-// Update is called when the session receives a message with incoming blocks | |
-// or HAVE / DONT_HAVE | |
-func (sws *sessionWantSender) Update(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { | |
- hasUpdate := len(ks) > 0 || len(haves) > 0 || len(dontHaves) > 0 | |
- if !hasUpdate { | |
- return | |
- } | |
- | |
- sws.addChange(change{ | |
- update: update{from, ks, haves, dontHaves}, | |
- }) | |
-} | |
- | |
-// SignalAvailability is called by the PeerManager to signal that a peer has | |
-// connected / disconnected | |
-func (sws *sessionWantSender) SignalAvailability(p peer.ID, isAvailable bool) { | |
- availability := peerAvailability{p, isAvailable} | |
- // Add the change in a non-blocking manner to avoid the possibility of a | |
- // deadlock | |
- sws.addChangeNonBlocking(change{availability: availability}) | |
-} | |
- | |
-// Run is the main loop for processing incoming changes | |
-func (sws *sessionWantSender) Run() { | |
- for { | |
- select { | |
- case ch := <-sws.changes: | |
- sws.onChange([]change{ch}) | |
- case <-sws.ctx.Done(): | |
- // Unregister the session with the PeerManager | |
- sws.pm.UnregisterSession(sws.sessionID) | |
- | |
- // Close the 'closed' channel to signal to Shutdown() that the run | |
- // loop has exited | |
- close(sws.closed) | |
- return | |
- } | |
- } | |
-} | |
- | |
-// Shutdown the sessionWantSender | |
-func (sws *sessionWantSender) Shutdown() { | |
- // Signal to the run loop to stop processing | |
- sws.shutdown() | |
- // Wait for run loop to complete | |
- <-sws.closed | |
-} | |
- | |
-// addChange adds a new change to the queue | |
-func (sws *sessionWantSender) addChange(c change) { | |
- select { | |
- case sws.changes <- c: | |
- case <-sws.ctx.Done(): | |
- } | |
-} | |
- | |
-// addChangeNonBlocking adds a new change to the queue, using a goroutine | |
-// if the change blocks, so as to avoid potential deadlocks | |
-func (sws *sessionWantSender) addChangeNonBlocking(c change) { | |
- select { | |
- case sws.changes <- c: | |
- default: | |
- // changes channel is full, so add the change in a goroutine instead | |
- go func() { | |
- select { | |
- case sws.changes <- c: | |
- case <-sws.ctx.Done(): | |
- } | |
- }() | |
- } | |
-} | |
- | |
-// collectChanges collects all the changes that have occurred since the last | |
-// invocation of onChange | |
-func (sws *sessionWantSender) collectChanges(changes []change) []change { | |
- for len(changes) < changesBufferSize { | |
- select { | |
- case next := <-sws.changes: | |
- changes = append(changes, next) | |
- default: | |
- return changes | |
- } | |
- } | |
- return changes | |
-} | |
- | |
-// onChange processes the next set of changes | |
-func (sws *sessionWantSender) onChange(changes []change) { | |
- // Several changes may have been recorded since the last time we checked, | |
- // so pop all outstanding changes from the channel | |
- changes = sws.collectChanges(changes) | |
- | |
- // Apply each change | |
- availability := make(map[peer.ID]bool, len(changes)) | |
- cancels := make([]cid.Cid, 0) | |
- var updates []update | |
- for _, chng := range changes { | |
- // Initialize info for new wants | |
- for _, c := range chng.add { | |
- sws.trackWant(c) | |
- } | |
- | |
- // Remove cancelled wants | |
- for _, c := range chng.cancel { | |
- sws.untrackWant(c) | |
- cancels = append(cancels, c) | |
- } | |
- | |
- // Consolidate updates and changes to availability | |
- if chng.update.from != "" { | |
- // If the update includes blocks or haves, treat it as signaling that | |
- // the peer is available | |
- if len(chng.update.ks) > 0 || len(chng.update.haves) > 0 { | |
- p := chng.update.from | |
- availability[p] = true | |
- | |
- // Register with the PeerManager | |
- sws.pm.RegisterSession(p, sws) | |
- } | |
- | |
- updates = append(updates, chng.update) | |
- } | |
- if chng.availability.target != "" { | |
- availability[chng.availability.target] = chng.availability.available | |
- } | |
- } | |
- | |
- // Update peer availability | |
- newlyAvailable, newlyUnavailable := sws.processAvailability(availability) | |
- | |
- // Update wants | |
- dontHaves := sws.processUpdates(updates) | |
- | |
- // Check if there are any wants for which all peers have indicated they | |
- // don't have the want | |
- sws.checkForExhaustedWants(dontHaves, newlyUnavailable) | |
- | |
- // If there are any cancels, send them | |
- if len(cancels) > 0 { | |
- sws.canceller.CancelSessionWants(sws.sessionID, cancels) | |
- } | |
- | |
- // If there are some connected peers, send any pending wants | |
- if sws.spm.HasPeers() { | |
- sws.sendNextWants(newlyAvailable) | |
- } | |
-} | |
- | |
-// processAvailability updates the want queue with any changes in | |
-// peer availability | |
-// It returns the peers that have become | |
-// - newly available | |
-// - newly unavailable | |
-func (sws *sessionWantSender) processAvailability(availability map[peer.ID]bool) (avail []peer.ID, unavail []peer.ID) { | |
- var newlyAvailable []peer.ID | |
- var newlyUnavailable []peer.ID | |
- for p, isNowAvailable := range availability { | |
- stateChange := false | |
- if isNowAvailable { | |
- isNewPeer := sws.spm.AddPeer(p) | |
- if isNewPeer { | |
- stateChange = true | |
- newlyAvailable = append(newlyAvailable, p) | |
- } | |
- } else { | |
- wasAvailable := sws.spm.RemovePeer(p) | |
- if wasAvailable { | |
- stateChange = true | |
- newlyUnavailable = append(newlyUnavailable, p) | |
- } | |
- } | |
- | |
- // If the state has changed | |
- if stateChange { | |
- sws.updateWantsPeerAvailability(p, isNowAvailable) | |
- // Reset the count of consecutive DONT_HAVEs received from the | |
- // peer | |
- delete(sws.peerConsecutiveDontHaves, p) | |
- } | |
- } | |
- | |
- return newlyAvailable, newlyUnavailable | |
-} | |
- | |
-// trackWant creates a new entry in the map of CID -> want info | |
-func (sws *sessionWantSender) trackWant(c cid.Cid) { | |
- if _, ok := sws.wants[c]; ok { | |
- return | |
- } | |
- | |
- // Create the want info | |
- wi := newWantInfo(sws.peerRspTrkr) | |
- sws.wants[c] = wi | |
- | |
- // For each available peer, register any information we know about | |
- // whether the peer has the block | |
- for _, p := range sws.spm.Peers() { | |
- sws.updateWantBlockPresence(c, p) | |
- } | |
-} | |
- | |
-// untrackWant removes an entry from the map of CID -> want info | |
-func (sws *sessionWantSender) untrackWant(c cid.Cid) { | |
- delete(sws.wants, c) | |
-} | |
- | |
-// processUpdates processes incoming blocks and HAVE / DONT_HAVEs. | |
-// It returns all DONT_HAVEs. | |
-func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { | |
- // Process received blocks keys | |
- blkCids := cid.NewSet() | |
- for _, upd := range updates { | |
- for _, c := range upd.ks { | |
- blkCids.Add(c) | |
- | |
- // Remove the want | |
- removed := sws.removeWant(c) | |
- if removed != nil { | |
- // Inform the peer tracker that this peer was the first to send | |
- // us the block | |
- sws.peerRspTrkr.receivedBlockFrom(upd.from) | |
- | |
- // Protect the connection to this peer so that we can ensure | |
- // that the connection doesn't get pruned by the connection | |
- // manager | |
- sws.spm.ProtectConnection(upd.from) | |
- } | |
- delete(sws.peerConsecutiveDontHaves, upd.from) | |
- } | |
- } | |
- | |
- // Process received DONT_HAVEs | |
- dontHaves := cid.NewSet() | |
- prunePeers := make(map[peer.ID]struct{}) | |
- for _, upd := range updates { | |
- for _, c := range upd.dontHaves { | |
- // Track the number of consecutive DONT_HAVEs received from each peer | |
- if sws.peerConsecutiveDontHaves[upd.from] == peerDontHaveLimit { | |
- prunePeers[upd.from] = struct{}{} | |
- } else { | |
- sws.peerConsecutiveDontHaves[upd.from]++ | |
- } | |
- | |
- // If we already received a block for the want, there's no need to | |
- // update block presence etc | |
- if blkCids.Has(c) { | |
- continue | |
- } | |
- | |
- dontHaves.Add(c) | |
- | |
- // Update the block presence for the peer | |
- sws.updateWantBlockPresence(c, upd.from) | |
- | |
- // Check if the DONT_HAVE is in response to a want-block | |
- // (could also be in response to want-have) | |
- if sws.swbt.haveSentWantBlockTo(upd.from, c) { | |
- // If we were waiting for a response from this peer, clear | |
- // sentTo so that we can send the want to another peer | |
- if sentTo, ok := sws.getWantSentTo(c); ok && sentTo == upd.from { | |
- sws.setWantSentTo(c, "") | |
- } | |
- } | |
- } | |
- } | |
- | |
- // Process received HAVEs | |
- for _, upd := range updates { | |
- for _, c := range upd.haves { | |
- // If we haven't already received a block for the want | |
- if !blkCids.Has(c) { | |
- // Update the block presence for the peer | |
- sws.updateWantBlockPresence(c, upd.from) | |
- } | |
- | |
- // Clear the consecutive DONT_HAVE count for the peer | |
- delete(sws.peerConsecutiveDontHaves, upd.from) | |
- delete(prunePeers, upd.from) | |
- } | |
- } | |
- | |
- // If any peers have sent us too many consecutive DONT_HAVEs, remove them | |
- // from the session | |
- for p := range prunePeers { | |
- // Before removing the peer from the session, check if the peer | |
- // sent us a HAVE for a block that we want | |
- for c := range sws.wants { | |
- if sws.bpm.PeerHasBlock(p, c) { | |
- delete(prunePeers, p) | |
- break | |
- } | |
- } | |
- } | |
- if len(prunePeers) > 0 { | |
- go func() { | |
- for p := range prunePeers { | |
- // Peer doesn't have anything we want, so remove it | |
- log.Infof("peer %s sent too many dont haves, removing from session %d", p, sws.ID()) | |
- sws.SignalAvailability(p, false) | |
- } | |
- }() | |
- } | |
- | |
- return dontHaves.Keys() | |
-} | |
- | |
-// checkForExhaustedWants checks if there are any wants for which all peers | |
-// have sent a DONT_HAVE. We call these "exhausted" wants. | |
-func (sws *sessionWantSender) checkForExhaustedWants(dontHaves []cid.Cid, newlyUnavailable []peer.ID) { | |
- // If there are no new DONT_HAVEs, and no peers became unavailable, then | |
- // we don't need to check for exhausted wants | |
- if len(dontHaves) == 0 && len(newlyUnavailable) == 0 { | |
- return | |
- } | |
- | |
- // We need to check each want for which we just received a DONT_HAVE | |
- wants := dontHaves | |
- | |
- // If a peer just became unavailable, then we need to check all wants | |
- // (because it may be the last peer who hadn't sent a DONT_HAVE for a CID) | |
- if len(newlyUnavailable) > 0 { | |
- // Collect all pending wants | |
- wants = make([]cid.Cid, 0, len(sws.wants)) | |
- for c := range sws.wants { | |
- wants = append(wants, c) | |
- } | |
- | |
- // If the last available peer in the session has become unavailable | |
- // then we need to broadcast all pending wants | |
- if !sws.spm.HasPeers() { | |
- sws.processExhaustedWants(wants) | |
- return | |
- } | |
- } | |
- | |
- // If all available peers for a cid sent a DONT_HAVE, signal to the session | |
- // that we've exhausted available peers | |
- if len(wants) > 0 { | |
- exhausted := sws.bpm.AllPeersDoNotHaveBlock(sws.spm.Peers(), wants) | |
- sws.processExhaustedWants(exhausted) | |
- } | |
-} | |
- | |
-// processExhaustedWants filters the list so that only those wants that haven't | |
-// already been marked as exhausted are passed to onPeersExhausted() | |
-func (sws *sessionWantSender) processExhaustedWants(exhausted []cid.Cid) { | |
- newlyExhausted := sws.newlyExhausted(exhausted) | |
- if len(newlyExhausted) > 0 { | |
- sws.onPeersExhausted(newlyExhausted) | |
- } | |
-} | |
- | |
-// convenience structs for passing around want-blocks and want-haves for a peer | |
-type wantSets struct { | |
- wantBlocks *cid.Set | |
- wantHaves *cid.Set | |
-} | |
- | |
-type allWants map[peer.ID]*wantSets | |
- | |
-func (aw allWants) forPeer(p peer.ID) *wantSets { | |
- if _, ok := aw[p]; !ok { | |
- aw[p] = &wantSets{ | |
- wantBlocks: cid.NewSet(), | |
- wantHaves: cid.NewSet(), | |
- } | |
- } | |
- return aw[p] | |
-} | |
- | |
-// sendNextWants sends wants to peers according to the latest information | |
-// about which peers have / dont have blocks | |
-func (sws *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) { | |
- toSend := make(allWants) | |
- | |
- for c, wi := range sws.wants { | |
- // Ensure we send want-haves to any newly available peers | |
- for _, p := range newlyAvailable { | |
- toSend.forPeer(p).wantHaves.Add(c) | |
- } | |
- | |
- // We already sent a want-block to a peer and haven't yet received a | |
- // response | |
- if wi.sentTo != "" { | |
- continue | |
- } | |
- | |
- // All the peers have indicated that they don't have the block | |
- // corresponding to this want, so we must wait to discover more peers | |
- if wi.bestPeer == "" { | |
- // TODO: work this out in real time instead of using bestP? | |
- continue | |
- } | |
- | |
- // Record that we are sending a want-block for this want to the peer | |
- sws.setWantSentTo(c, wi.bestPeer) | |
- | |
- // Send a want-block to the chosen peer | |
- toSend.forPeer(wi.bestPeer).wantBlocks.Add(c) | |
- | |
- // Send a want-have to each other peer | |
- for _, op := range sws.spm.Peers() { | |
- if op != wi.bestPeer { | |
- toSend.forPeer(op).wantHaves.Add(c) | |
- } | |
- } | |
- } | |
- | |
- // Send any wants we've collected | |
- sws.sendWants(toSend) | |
-} | |
- | |
-// sendWants sends want-haves and want-blocks to the appropriate peers | |
-func (sws *sessionWantSender) sendWants(sends allWants) { | |
- // For each peer we're sending a request to | |
- for p, snd := range sends { | |
- // Piggyback some other want-haves onto the request to the peer | |
- for _, c := range sws.getPiggybackWantHaves(p, snd.wantBlocks) { | |
- snd.wantHaves.Add(c) | |
- } | |
- | |
- // Send the wants to the peer. | |
- // Note that the PeerManager ensures that we don't send duplicate | |
- // want-haves / want-blocks to a peer, and that want-blocks take | |
- // precedence over want-haves. | |
- wblks := snd.wantBlocks.Keys() | |
- whaves := snd.wantHaves.Keys() | |
- sws.pm.SendWants(sws.ctx, p, wblks, whaves) | |
- | |
- // Inform the session that we've sent the wants | |
- sws.onSend(p, wblks, whaves) | |
- | |
- // Record which peers we send want-block to | |
- sws.swbt.addSentWantBlocksTo(p, wblks) | |
- } | |
-} | |
- | |
-// getPiggybackWantHaves gets the want-haves that should be piggybacked onto | |
-// a request that we are making to send want-blocks to a peer | |
-func (sws *sessionWantSender) getPiggybackWantHaves(p peer.ID, wantBlocks *cid.Set) []cid.Cid { | |
- var whs []cid.Cid | |
- for c := range sws.wants { | |
- // Don't send want-have if we're already sending a want-block | |
- // (or have previously) | |
- if !wantBlocks.Has(c) && !sws.swbt.haveSentWantBlockTo(p, c) { | |
- whs = append(whs, c) | |
- } | |
- } | |
- return whs | |
-} | |
- | |
-// newlyExhausted filters the list of keys for wants that have not already | |
-// been marked as exhausted (all peers indicated they don't have the block) | |
-func (sws *sessionWantSender) newlyExhausted(ks []cid.Cid) []cid.Cid { | |
- var res []cid.Cid | |
- for _, c := range ks { | |
- if wi, ok := sws.wants[c]; ok { | |
- if !wi.exhausted { | |
- res = append(res, c) | |
- wi.exhausted = true | |
- } | |
- } | |
- } | |
- return res | |
-} | |
- | |
-// removeWant is called when the corresponding block is received | |
-func (sws *sessionWantSender) removeWant(c cid.Cid) *wantInfo { | |
- if wi, ok := sws.wants[c]; ok { | |
- delete(sws.wants, c) | |
- return wi | |
- } | |
- return nil | |
-} | |
- | |
-// updateWantsPeerAvailability is called when the availability changes for a | |
-// peer. It updates all the wants accordingly. | |
-func (sws *sessionWantSender) updateWantsPeerAvailability(p peer.ID, isNowAvailable bool) { | |
- for c, wi := range sws.wants { | |
- if isNowAvailable { | |
- sws.updateWantBlockPresence(c, p) | |
- } else { | |
- wi.removePeer(p) | |
- } | |
- } | |
-} | |
- | |
-// updateWantBlockPresence is called when a HAVE / DONT_HAVE is received for the given | |
-// want / peer | |
-func (sws *sessionWantSender) updateWantBlockPresence(c cid.Cid, p peer.ID) { | |
- wi, ok := sws.wants[c] | |
- if !ok { | |
- return | |
- } | |
- | |
- // If the peer sent us a HAVE or DONT_HAVE for the cid, adjust the | |
- // block presence for the peer / cid combination | |
- if sws.bpm.PeerHasBlock(p, c) { | |
- wi.setPeerBlockPresence(p, BPHave) | |
- } else if sws.bpm.PeerDoesNotHaveBlock(p, c) { | |
- wi.setPeerBlockPresence(p, BPDontHave) | |
- } else { | |
- wi.setPeerBlockPresence(p, BPUnknown) | |
- } | |
-} | |
- | |
-// Which peer was the want sent to | |
-func (sws *sessionWantSender) getWantSentTo(c cid.Cid) (peer.ID, bool) { | |
- if wi, ok := sws.wants[c]; ok { | |
- return wi.sentTo, true | |
- } | |
- return "", false | |
-} | |
- | |
-// Record which peer the want was sent to | |
-func (sws *sessionWantSender) setWantSentTo(c cid.Cid, p peer.ID) { | |
- if wi, ok := sws.wants[c]; ok { | |
- wi.sentTo = p | |
- } | |
-} | |
- | |
-// wantInfo keeps track of the information for a want | |
-type wantInfo struct { | |
- // Tracks HAVE / DONT_HAVE sent to us for the want by each peer | |
- blockPresence map[peer.ID]BlockPresence | |
- // The peer that we've sent a want-block to (cleared when we get a response) | |
- sentTo peer.ID | |
- // The "best" peer to send the want to next | |
- bestPeer peer.ID | |
- // Keeps track of how many hits / misses each peer has sent us for wants | |
- // in the session | |
- peerRspTrkr *peerResponseTracker | |
- // true if all known peers have sent a DONT_HAVE for this want | |
- exhausted bool | |
-} | |
- | |
-// func newWantInfo(prt *peerResponseTracker, c cid.Cid, startIndex int) *wantInfo { | |
-func newWantInfo(prt *peerResponseTracker) *wantInfo { | |
- return &wantInfo{ | |
- blockPresence: make(map[peer.ID]BlockPresence), | |
- peerRspTrkr: prt, | |
- exhausted: false, | |
- } | |
-} | |
- | |
-// setPeerBlockPresence sets the block presence for the given peer | |
-func (wi *wantInfo) setPeerBlockPresence(p peer.ID, bp BlockPresence) { | |
- wi.blockPresence[p] = bp | |
- wi.calculateBestPeer() | |
- | |
- // If a peer informed us that it has a block then make sure the want is no | |
- // longer flagged as exhausted (exhausted means no peers have the block) | |
- if bp == BPHave { | |
- wi.exhausted = false | |
- } | |
-} | |
- | |
-// removePeer deletes the given peer from the want info | |
-func (wi *wantInfo) removePeer(p peer.ID) { | |
- // If we were waiting to hear back from the peer that is being removed, | |
- // clear the sentTo field so we no longer wait | |
- if p == wi.sentTo { | |
- wi.sentTo = "" | |
- } | |
- delete(wi.blockPresence, p) | |
- wi.calculateBestPeer() | |
-} | |
- | |
-// calculateBestPeer finds the best peer to send the want to next | |
-func (wi *wantInfo) calculateBestPeer() { | |
- // Recalculate the best peer | |
- bestBP := BPDontHave | |
- bestPeer := peer.ID("") | |
- | |
- // Find the peer with the best block presence, recording how many peers | |
- // share the block presence | |
- countWithBest := 0 | |
- for p, bp := range wi.blockPresence { | |
- if bp > bestBP { | |
- bestBP = bp | |
- bestPeer = p | |
- countWithBest = 1 | |
- } else if bp == bestBP { | |
- countWithBest++ | |
- } | |
- } | |
- wi.bestPeer = bestPeer | |
- | |
- // If no peer has a block presence better than DONT_HAVE, bail out | |
- if bestPeer == "" { | |
- return | |
- } | |
- | |
- // If there was only one peer with the best block presence, we're done | |
- if countWithBest <= 1 { | |
- return | |
- } | |
- | |
- // There were multiple peers with the best block presence, so choose one of | |
- // them to be the best | |
- var peersWithBest []peer.ID | |
- for p, bp := range wi.blockPresence { | |
- if bp == bestBP { | |
- peersWithBest = append(peersWithBest, p) | |
- } | |
- } | |
- wi.bestPeer = wi.peerRspTrkr.choose(peersWithBest) | |
-} | |
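Two patterns in the sessionWantSender above are worth calling out: changes arrive one at a time on a buffered channel, and onChange drains whatever else is already queued (up to changesBufferSize) so a single wake-up of the run loop processes a whole batch. The standalone sketch below shows that non-blocking drain with plain ints; the function and variable names are illustrative only, not part of the patch.

package main

import "fmt"

// drainUpTo collects whatever is immediately available on ch, up to max
// items, without blocking -- the same batching trick collectChanges uses
// so that one pass of the run loop handles many queued changes.
func drainUpTo(ch <-chan int, first int, max int) []int {
	batch := []int{first}
	for len(batch) < max {
		select {
		case v := <-ch:
			batch = append(batch, v)
		default:
			return batch
		}
	}
	return batch
}

func main() {
	ch := make(chan int, 8)
	for i := 1; i <= 5; i++ {
		ch <- i
	}
	first := <-ch
	fmt.Println(drainUpTo(ch, first, 4)) // [1 2 3 4]; 5 stays queued for the next pass
}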
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/session/sessionwants.go a/vendor/github.com/ipfs/go-bitswap/client/internal/session/sessionwants.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/session/sessionwants.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/session/sessionwants.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,193 +0,0 @@ | |
-package session | |
- | |
-import ( | |
- "fmt" | |
- "math/rand" | |
- "time" | |
- | |
- cid "github.com/ipfs/go-cid" | |
-) | |
- | |
-// liveWantsOrder and liveWants will get out of sync as blocks are received. | |
-// This constant is the maximum amount to allow them to be out of sync before | |
-// cleaning up the ordering array. | |
-const liveWantsOrderGCLimit = 32 | |
- | |
-// sessionWants keeps track of which cids are waiting to be sent out, and which | |
-// peers are "live" - ie, we've sent a request but haven't received a block yet | |
-type sessionWants struct { | |
- // The wants that have not yet been sent out | |
- toFetch *cidQueue | |
- // Wants that have been sent but have not received a response | |
- liveWants map[cid.Cid]time.Time | |
- // The order in which wants were requested | |
- liveWantsOrder []cid.Cid | |
- // The maximum number of want-haves to send in a broadcast | |
- broadcastLimit int | |
-} | |
- | |
-func newSessionWants(broadcastLimit int) sessionWants { | |
- return sessionWants{ | |
- toFetch: newCidQueue(), | |
- liveWants: make(map[cid.Cid]time.Time), | |
- broadcastLimit: broadcastLimit, | |
- } | |
-} | |
- | |
-func (sw *sessionWants) String() string { | |
- return fmt.Sprintf("%d pending / %d live", sw.toFetch.Len(), len(sw.liveWants)) | |
-} | |
- | |
-// BlocksRequested is called when the client makes a request for blocks | |
-func (sw *sessionWants) BlocksRequested(newWants []cid.Cid) { | |
- for _, k := range newWants { | |
- sw.toFetch.Push(k) | |
- } | |
-} | |
- | |
-// GetNextWants is called when the session has not yet discovered peers with | |
-// the blocks that it wants. It moves as many CIDs from the fetch queue to | |
-// the live wants queue as possible (given the broadcast limit). | |
-// Returns the newly live wants. | |
-func (sw *sessionWants) GetNextWants() []cid.Cid { | |
- now := time.Now() | |
- | |
- // Move CIDs from fetch queue to the live wants queue (up to the broadcast | |
- // limit) | |
- currentLiveCount := len(sw.liveWants) | |
- toAdd := sw.broadcastLimit - currentLiveCount | |
- | |
- var live []cid.Cid | |
- for ; toAdd > 0 && sw.toFetch.Len() > 0; toAdd-- { | |
- c := sw.toFetch.Pop() | |
- live = append(live, c) | |
- sw.liveWantsOrder = append(sw.liveWantsOrder, c) | |
- sw.liveWants[c] = now | |
- } | |
- | |
- return live | |
-} | |
- | |
-// WantsSent is called when wants are sent to a peer | |
-func (sw *sessionWants) WantsSent(ks []cid.Cid) { | |
- now := time.Now() | |
- for _, c := range ks { | |
- if _, ok := sw.liveWants[c]; !ok && sw.toFetch.Has(c) { | |
- sw.toFetch.Remove(c) | |
- sw.liveWantsOrder = append(sw.liveWantsOrder, c) | |
- sw.liveWants[c] = now | |
- } | |
- } | |
-} | |
- | |
-// BlocksReceived removes received block CIDs from the live wants list and | |
-// measures latency. It returns the CIDs of blocks that were actually | |
-// wanted (as opposed to duplicates) and the total latency for all incoming blocks. | |
-func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) { | |
- wanted := make([]cid.Cid, 0, len(ks)) | |
- totalLatency := time.Duration(0) | |
- if len(ks) == 0 { | |
- return wanted, totalLatency | |
- } | |
- | |
- // Filter for blocks that were actually wanted (as opposed to duplicates) | |
- now := time.Now() | |
- for _, c := range ks { | |
- if sw.isWanted(c) { | |
- wanted = append(wanted, c) | |
- | |
- // Measure latency | |
- sentAt, ok := sw.liveWants[c] | |
- if ok && !sentAt.IsZero() { | |
- totalLatency += now.Sub(sentAt) | |
- } | |
- | |
- // Remove the CID from the live wants / toFetch queue | |
- delete(sw.liveWants, c) | |
- sw.toFetch.Remove(c) | |
- } | |
- } | |
- | |
- // If the live wants ordering array is a long way out of sync with the | |
- // live wants map, clean up the ordering array | |
- if len(sw.liveWantsOrder)-len(sw.liveWants) > liveWantsOrderGCLimit { | |
- cleaned := sw.liveWantsOrder[:0] | |
- for _, c := range sw.liveWantsOrder { | |
- if _, ok := sw.liveWants[c]; ok { | |
- cleaned = append(cleaned, c) | |
- } | |
- } | |
- sw.liveWantsOrder = cleaned | |
- } | |
- | |
- return wanted, totalLatency | |
-} | |
- | |
-// PrepareBroadcast saves the current time for each live want and returns the | |
-// live want CIDs up to the broadcast limit. | |
-func (sw *sessionWants) PrepareBroadcast() []cid.Cid { | |
- now := time.Now() | |
- live := make([]cid.Cid, 0, len(sw.liveWants)) | |
- for _, c := range sw.liveWantsOrder { | |
- if _, ok := sw.liveWants[c]; ok { | |
- // No response was received for the want, so reset the sent time | |
- // to now as we're about to broadcast | |
- sw.liveWants[c] = now | |
- | |
- live = append(live, c) | |
- if len(live) == sw.broadcastLimit { | |
- break | |
- } | |
- } | |
- } | |
- | |
- return live | |
-} | |
- | |
-// CancelPending removes the given CIDs from the fetch queue. | |
-func (sw *sessionWants) CancelPending(keys []cid.Cid) { | |
- for _, k := range keys { | |
- sw.toFetch.Remove(k) | |
- } | |
-} | |
- | |
-// LiveWants returns a list of live wants | |
-func (sw *sessionWants) LiveWants() []cid.Cid { | |
- live := make([]cid.Cid, 0, len(sw.liveWants)) | |
- for c := range sw.liveWants { | |
- live = append(live, c) | |
- } | |
- | |
- return live | |
-} | |
- | |
-// RandomLiveWant returns a randomly selected live want | |
-func (sw *sessionWants) RandomLiveWant() cid.Cid { | |
- if len(sw.liveWants) == 0 { | |
- return cid.Cid{} | |
- } | |
- | |
- // picking a random live want | |
- i := rand.Intn(len(sw.liveWants)) | |
- for k := range sw.liveWants { | |
- if i == 0 { | |
- return k | |
- } | |
- i-- | |
- } | |
- return cid.Cid{} | |
-} | |
- | |
-// HasLiveWants indicates whether there are any live wants | |
-func (sw *sessionWants) HasLiveWants() bool { | |
- return len(sw.liveWants) > 0 | |
-} | |
- | |
-// Indicates whether the want is in either of the fetch or live queues | |
-func (sw *sessionWants) isWanted(c cid.Cid) bool { | |
- _, ok := sw.liveWants[c] | |
- if !ok { | |
- ok = sw.toFetch.Has(c) | |
- } | |
- return ok | |
-} | |
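
The sessionWants bookkeeping removed above (a fetch queue feeding a bounded set of live wants, with latency measured when a block arrives) can be illustrated with a small standalone sketch. This is illustrative only: the miniWants type and its string keys are hypothetical stand-ins for the internal sessionWants struct and its cid.Cid queue.

package main

import (
	"fmt"
	"time"
)

// miniWants is a hypothetical, simplified stand-in for sessionWants:
// a FIFO fetch queue plus a live-wants map keyed by send time.
type miniWants struct {
	toFetch        []string             // queued want keys (stand-in for cid.Cid)
	liveWants      map[string]time.Time // key -> time the want went live
	broadcastLimit int
}

// next moves keys from the fetch queue into the live set, up to the
// broadcast limit, mirroring GetNextWants above.
func (w *miniWants) next() []string {
	now := time.Now()
	toAdd := w.broadcastLimit - len(w.liveWants)
	var live []string
	for ; toAdd > 0 && len(w.toFetch) > 0; toAdd-- {
		k := w.toFetch[0]
		w.toFetch = w.toFetch[1:]
		w.liveWants[k] = now
		live = append(live, k)
	}
	return live
}

// received drops a key from the live set and reports how long the want
// was outstanding, mirroring the latency measurement in BlocksReceived.
func (w *miniWants) received(k string) (time.Duration, bool) {
	sentAt, ok := w.liveWants[k]
	if !ok {
		return 0, false
	}
	delete(w.liveWants, k)
	return time.Since(sentAt), true
}

func main() {
	w := &miniWants{
		toFetch:        []string{"cid-a", "cid-b", "cid-c"},
		liveWants:      map[string]time.Time{},
		broadcastLimit: 2,
	}
	fmt.Println("went live:", w.next()) // cid-a and cid-b (limit is 2)
	lat, _ := w.received("cid-a")
	fmt.Println("cid-a latency:", lat)
	fmt.Println("went live:", w.next()) // cid-c fills the freed slot
}
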
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go a/vendor/github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,201 +0,0 @@ | |
-package sessioninterestmanager | |
- | |
-import ( | |
- "sync" | |
- | |
- blocks "github.com/ipfs/go-block-format" | |
- | |
- cid "github.com/ipfs/go-cid" | |
-) | |
- | |
-// SessionInterestManager records the CIDs that each session is interested in. | |
-type SessionInterestManager struct { | |
- lk sync.RWMutex | |
- wants map[cid.Cid]map[uint64]bool | |
-} | |
- | |
-// New initializes a new SessionInterestManager. | |
-func New() *SessionInterestManager { | |
- return &SessionInterestManager{ | |
- // Map of cids -> sessions -> bool | |
- // | |
- // The boolean indicates whether the session still wants the block | |
- // or is just interested in receiving messages about it. | |
- // | |
- // Note that once the block is received the session no longer wants | |
- // the block, but still wants to receive messages from peers who have | |
- // the block as they may have other blocks the session is interested in. | |
- wants: make(map[cid.Cid]map[uint64]bool), | |
- } | |
-} | |
- | |
-// When the client asks the session for blocks, the session calls | |
-// RecordSessionInterest() with those cids. | |
-func (sim *SessionInterestManager) RecordSessionInterest(ses uint64, ks []cid.Cid) { | |
- sim.lk.Lock() | |
- defer sim.lk.Unlock() | |
- | |
- // For each key | |
- for _, c := range ks { | |
- // Record that the session wants the blocks | |
- if want, ok := sim.wants[c]; ok { | |
- want[ses] = true | |
- } else { | |
- sim.wants[c] = map[uint64]bool{ses: true} | |
- } | |
- } | |
-} | |
- | |
-// RemoveSession is called when the session shuts down. It returns the | |
-// keys that no session is interested in any more. | |
-func (sim *SessionInterestManager) RemoveSession(ses uint64) []cid.Cid { | |
- sim.lk.Lock() | |
- defer sim.lk.Unlock() | |
- | |
- // The keys that no session is interested in | |
- deletedKs := make([]cid.Cid, 0) | |
- | |
- // For each known key | |
- for c := range sim.wants { | |
- // Remove the session from the list of sessions that want the key | |
- delete(sim.wants[c], ses) | |
- | |
- // If there are no more sessions that want the key | |
- if len(sim.wants[c]) == 0 { | |
- // Clean up the list memory | |
- delete(sim.wants, c) | |
- // Add the key to the list of keys that no session is interested in | |
- deletedKs = append(deletedKs, c) | |
- } | |
- } | |
- | |
- return deletedKs | |
-} | |
- | |
-// When the session receives blocks, it calls RemoveSessionWants(). | |
-func (sim *SessionInterestManager) RemoveSessionWants(ses uint64, ks []cid.Cid) { | |
- sim.lk.Lock() | |
- defer sim.lk.Unlock() | |
- | |
- // For each key | |
- for _, c := range ks { | |
- // If the session wanted the block | |
- if wanted, ok := sim.wants[c][ses]; ok && wanted { | |
- // Mark the block as unwanted | |
- sim.wants[c][ses] = false | |
- } | |
- } | |
-} | |
- | |
-// When a request is cancelled, the session calls RemoveSessionInterested(). | |
-// Returns the keys that no session is interested in any more. | |
-func (sim *SessionInterestManager) RemoveSessionInterested(ses uint64, ks []cid.Cid) []cid.Cid { | |
- sim.lk.Lock() | |
- defer sim.lk.Unlock() | |
- | |
- // The keys that no session is interested in | |
- deletedKs := make([]cid.Cid, 0, len(ks)) | |
- | |
- // For each key | |
- for _, c := range ks { | |
- // If there is a list of sessions that want the key | |
- if _, ok := sim.wants[c]; ok { | |
- // Remove the session from the list of sessions that want the key | |
- delete(sim.wants[c], ses) | |
- | |
- // If there are no more sessions that want the key | |
- if len(sim.wants[c]) == 0 { | |
- // Clean up the list memory | |
- delete(sim.wants, c) | |
- // Add the key to the list of keys that no session is interested in | |
- deletedKs = append(deletedKs, c) | |
- } | |
- } | |
- } | |
- | |
- return deletedKs | |
-} | |
- | |
-// The session calls FilterSessionInterested() to filter the sets of keys for | |
-// those that the session is interested in | |
-func (sim *SessionInterestManager) FilterSessionInterested(ses uint64, ksets ...[]cid.Cid) [][]cid.Cid { | |
- sim.lk.RLock() | |
- defer sim.lk.RUnlock() | |
- | |
- // For each set of keys | |
- kres := make([][]cid.Cid, len(ksets)) | |
- for i, ks := range ksets { | |
-		// The set of keys in this list that the session is interested in | |
- has := make([]cid.Cid, 0, len(ks)) | |
- | |
- // For each key in the list | |
- for _, c := range ks { | |
-			// If this session is interested in the key, add it to the set | |
- if _, ok := sim.wants[c][ses]; ok { | |
- has = append(has, c) | |
- } | |
- } | |
- kres[i] = has | |
- } | |
- return kres | |
-} | |
- | |
-// When bitswap receives blocks it calls SplitWantedUnwanted() to discard | |
-// unwanted blocks | |
-func (sim *SessionInterestManager) SplitWantedUnwanted(blks []blocks.Block) ([]blocks.Block, []blocks.Block) { | |
- sim.lk.RLock() | |
- defer sim.lk.RUnlock() | |
- | |
- // Get the wanted block keys as a set | |
- wantedKs := cid.NewSet() | |
- for _, b := range blks { | |
- c := b.Cid() | |
- // For each session that is interested in the key | |
- for ses := range sim.wants[c] { | |
- // If the session wants the key (rather than just being interested) | |
- if wanted, ok := sim.wants[c][ses]; ok && wanted { | |
- // Add the key to the set | |
- wantedKs.Add(c) | |
- } | |
- } | |
- } | |
- | |
- // Separate the blocks into wanted and unwanted | |
- wantedBlks := make([]blocks.Block, 0, len(blks)) | |
- notWantedBlks := make([]blocks.Block, 0) | |
- for _, b := range blks { | |
- if wantedKs.Has(b.Cid()) { | |
- wantedBlks = append(wantedBlks, b) | |
- } else { | |
- notWantedBlks = append(notWantedBlks, b) | |
- } | |
- } | |
- return wantedBlks, notWantedBlks | |
-} | |
- | |
-// When the SessionManager receives a message it calls InterestedSessions() to | |
-// find out which sessions are interested in the message. | |
-func (sim *SessionInterestManager) InterestedSessions(blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []uint64 { | |
- sim.lk.RLock() | |
- defer sim.lk.RUnlock() | |
- | |
- ks := make([]cid.Cid, 0, len(blks)+len(haves)+len(dontHaves)) | |
- ks = append(ks, blks...) | |
- ks = append(ks, haves...) | |
- ks = append(ks, dontHaves...) | |
- | |
- // Create a set of sessions that are interested in the keys | |
- sesSet := make(map[uint64]struct{}) | |
- for _, c := range ks { | |
- for s := range sim.wants[c] { | |
- sesSet[s] = struct{}{} | |
- } | |
- } | |
- | |
- // Convert the set into a list | |
- ses := make([]uint64, 0, len(sesSet)) | |
- for s := range sesSet { | |
- ses = append(ses, s) | |
- } | |
- return ses | |
-} | |
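
The cids -> sessions -> bool map documented in New() above is the core of the interest tracking: true means the session still wants the block, false means it only wants to hear HAVE / DONT_HAVE messages about it. Below is a minimal sketch of that bookkeeping, using a hypothetical interest type with string keys in place of cid.Cid and without the real package's locking.

package main

import "fmt"

// interest is a hypothetical, simplified version of the
// SessionInterestManager bookkeeping above: key -> session -> wanted.
// true  = the session still wants the block
// false = the session only wants to hear about it (HAVE / DONT_HAVE)
type interest map[string]map[uint64]bool

func (in interest) record(ses uint64, keys ...string) {
	for _, k := range keys {
		if in[k] == nil {
			in[k] = map[uint64]bool{}
		}
		in[k][ses] = true
	}
}

// blockReceived mirrors RemoveSessionWants: the session no longer wants
// the block, but stays interested in messages about it.
func (in interest) blockReceived(ses uint64, k string) {
	if _, ok := in[k][ses]; ok {
		in[k][ses] = false
	}
}

// dropSession mirrors RemoveSession: it returns the keys that no
// session cares about any more, so the caller can send CANCELs.
func (in interest) dropSession(ses uint64) []string {
	var orphaned []string
	for k, sessions := range in {
		delete(sessions, ses)
		if len(sessions) == 0 {
			delete(in, k)
			orphaned = append(orphaned, k)
		}
	}
	return orphaned
}

func main() {
	in := interest{}
	in.record(1, "cid-a", "cid-b")
	in.record(2, "cid-b")
	in.blockReceived(1, "cid-a")
	fmt.Println(in.dropSession(1)) // [cid-a]: only session 1 cared about it
}
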
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/sessionmanager/sessionmanager.go a/vendor/github.com/ipfs/go-bitswap/client/internal/sessionmanager/sessionmanager.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/sessionmanager/sessionmanager.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/sessionmanager/sessionmanager.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,196 +0,0 @@ | |
-package sessionmanager | |
- | |
-import ( | |
- "context" | |
- "strconv" | |
- "sync" | |
- "time" | |
- | |
- cid "github.com/ipfs/go-cid" | |
- delay "github.com/ipfs/go-ipfs-delay" | |
- "go.opentelemetry.io/otel/attribute" | |
- "go.opentelemetry.io/otel/trace" | |
- | |
- "github.com/ipfs/go-bitswap/client/internal" | |
- bsbpm "github.com/ipfs/go-bitswap/client/internal/blockpresencemanager" | |
- notifications "github.com/ipfs/go-bitswap/client/internal/notifications" | |
- bssession "github.com/ipfs/go-bitswap/client/internal/session" | |
- bssim "github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager" | |
- exchange "github.com/ipfs/go-ipfs-exchange-interface" | |
- peer "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-// Session is a session that is managed by the session manager | |
-type Session interface { | |
- exchange.Fetcher | |
- ID() uint64 | |
- ReceiveFrom(peer.ID, []cid.Cid, []cid.Cid, []cid.Cid) | |
- Shutdown() | |
-} | |
- | |
-// SessionFactory generates a new session for the SessionManager to track. | |
-type SessionFactory func( | |
- ctx context.Context, | |
- sm bssession.SessionManager, | |
- id uint64, | |
- sprm bssession.SessionPeerManager, | |
- sim *bssim.SessionInterestManager, | |
- pm bssession.PeerManager, | |
- bpm *bsbpm.BlockPresenceManager, | |
- notif notifications.PubSub, | |
- provSearchDelay time.Duration, | |
- rebroadcastDelay delay.D, | |
- self peer.ID) Session | |
- | |
-// PeerManagerFactory generates a new peer manager for a session. | |
-type PeerManagerFactory func(ctx context.Context, id uint64) bssession.SessionPeerManager | |
- | |
-// SessionManager is responsible for creating, managing, and dispatching to | |
-// sessions. | |
-type SessionManager struct { | |
- ctx context.Context | |
- sessionFactory SessionFactory | |
- sessionInterestManager *bssim.SessionInterestManager | |
- peerManagerFactory PeerManagerFactory | |
- blockPresenceManager *bsbpm.BlockPresenceManager | |
- peerManager bssession.PeerManager | |
- notif notifications.PubSub | |
- | |
- // Sessions | |
- sessLk sync.RWMutex | |
- sessions map[uint64]Session | |
- | |
- // Session Index | |
- sessIDLk sync.Mutex | |
- sessID uint64 | |
- | |
- self peer.ID | |
-} | |
- | |
-// New creates a new SessionManager. | |
-func New(ctx context.Context, sessionFactory SessionFactory, sessionInterestManager *bssim.SessionInterestManager, peerManagerFactory PeerManagerFactory, | |
- blockPresenceManager *bsbpm.BlockPresenceManager, peerManager bssession.PeerManager, notif notifications.PubSub, self peer.ID) *SessionManager { | |
- | |
- return &SessionManager{ | |
- ctx: ctx, | |
- sessionFactory: sessionFactory, | |
- sessionInterestManager: sessionInterestManager, | |
- peerManagerFactory: peerManagerFactory, | |
- blockPresenceManager: blockPresenceManager, | |
- peerManager: peerManager, | |
- notif: notif, | |
- sessions: make(map[uint64]Session), | |
- self: self, | |
- } | |
-} | |
- | |
-// NewSession initializes a session with the given context and adds it | |
-// to the session manager. | |
-func (sm *SessionManager) NewSession(ctx context.Context, | |
- provSearchDelay time.Duration, | |
- rebroadcastDelay delay.D) exchange.Fetcher { | |
- id := sm.GetNextSessionID() | |
- | |
- ctx, span := internal.StartSpan(ctx, "SessionManager.NewSession", trace.WithAttributes(attribute.String("ID", strconv.FormatUint(id, 10)))) | |
- defer span.End() | |
- | |
- pm := sm.peerManagerFactory(ctx, id) | |
- session := sm.sessionFactory(ctx, sm, id, pm, sm.sessionInterestManager, sm.peerManager, sm.blockPresenceManager, sm.notif, provSearchDelay, rebroadcastDelay, sm.self) | |
- | |
- sm.sessLk.Lock() | |
- if sm.sessions != nil { // check if SessionManager was shutdown | |
- sm.sessions[id] = session | |
- } | |
- sm.sessLk.Unlock() | |
- | |
- return session | |
-} | |
- | |
-func (sm *SessionManager) Shutdown() { | |
- sm.sessLk.Lock() | |
- | |
- sessions := make([]Session, 0, len(sm.sessions)) | |
- for _, ses := range sm.sessions { | |
- sessions = append(sessions, ses) | |
- } | |
- | |
- // Ensure that if Shutdown() is called twice we only shut down | |
- // the sessions once | |
- sm.sessions = nil | |
- | |
- sm.sessLk.Unlock() | |
- | |
- for _, ses := range sessions { | |
- ses.Shutdown() | |
- } | |
-} | |
- | |
-func (sm *SessionManager) RemoveSession(sesid uint64) { | |
- // Remove session from SessionInterestManager - returns the keys that no | |
- // session is interested in anymore. | |
- cancelKs := sm.sessionInterestManager.RemoveSession(sesid) | |
- | |
- // Cancel keys that no session is interested in anymore | |
- sm.cancelWants(cancelKs) | |
- | |
- sm.sessLk.Lock() | |
- defer sm.sessLk.Unlock() | |
- | |
- // Clean up session | |
- if sm.sessions != nil { // check if SessionManager was shutdown | |
- delete(sm.sessions, sesid) | |
- } | |
-} | |
- | |
-// GetNextSessionID returns the next sequential identifier for a session. | |
-func (sm *SessionManager) GetNextSessionID() uint64 { | |
- sm.sessIDLk.Lock() | |
- defer sm.sessIDLk.Unlock() | |
- | |
- sm.sessID++ | |
- return sm.sessID | |
-} | |
- | |
-// ReceiveFrom is called when a new message is received | |
-func (sm *SessionManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { | |
- // Record block presence for HAVE / DONT_HAVE | |
- sm.blockPresenceManager.ReceiveFrom(p, haves, dontHaves) | |
- | |
- // Notify each session that is interested in the blocks / HAVEs / DONT_HAVEs | |
- for _, id := range sm.sessionInterestManager.InterestedSessions(blks, haves, dontHaves) { | |
- sm.sessLk.RLock() | |
- if sm.sessions == nil { // check if SessionManager was shutdown | |
- sm.sessLk.RUnlock() | |
- return | |
- } | |
- sess, ok := sm.sessions[id] | |
- sm.sessLk.RUnlock() | |
- | |
- if ok { | |
- sess.ReceiveFrom(p, blks, haves, dontHaves) | |
- } | |
- } | |
- | |
- // Send CANCEL to all peers with want-have / want-block | |
- sm.peerManager.SendCancels(ctx, blks) | |
-} | |
- | |
-// CancelSessionWants is called when a session cancels wants because a call to | |
-// GetBlocks() is cancelled | |
-func (sm *SessionManager) CancelSessionWants(sesid uint64, wants []cid.Cid) { | |
- // Remove session's interest in the given blocks - returns the keys that no | |
- // session is interested in anymore. | |
- cancelKs := sm.sessionInterestManager.RemoveSessionInterested(sesid, wants) | |
- sm.cancelWants(cancelKs) | |
-} | |
- | |
-func (sm *SessionManager) cancelWants(wants []cid.Cid) { | |
- // Free up block presence tracking for keys that no session is interested | |
- // in anymore | |
- sm.blockPresenceManager.RemoveKeys(wants) | |
- | |
- // Send CANCEL to all peers for blocks that no session is interested in | |
- // anymore. | |
- // Note: use bitswap context because session context may already be Done. | |
- sm.peerManager.SendCancels(sm.ctx, wants) | |
-} | |
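
Shutdown above relies on a small idiom: the sessions map is swapped to nil under the lock, so a second Shutdown, or a NewSession / ReceiveFrom racing with it, sees nil and backs off. Here is a standalone sketch of that idiom, with a hypothetical registry type standing in for SessionManager.

package main

import (
	"fmt"
	"sync"
)

// registry is a hypothetical cut-down version of the SessionManager's
// session table, showing the "nil map means shut down" idiom used above.
type registry struct {
	mu       sync.RWMutex
	sessions map[uint64]string
	nextID   uint64
}

func (r *registry) add(name string) (uint64, bool) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.sessions == nil { // already shut down: refuse to track
		return 0, false
	}
	r.nextID++
	r.sessions[r.nextID] = name
	return r.nextID, true
}

// shutdown drains the table exactly once; later calls see nil and return nothing.
func (r *registry) shutdown() []string {
	r.mu.Lock()
	drained := make([]string, 0, len(r.sessions))
	for _, s := range r.sessions {
		drained = append(drained, s)
	}
	r.sessions = nil
	r.mu.Unlock()
	return drained
}

func main() {
	r := &registry{sessions: map[uint64]string{}}
	r.add("ses-1")
	fmt.Println(r.shutdown()) // [ses-1]
	fmt.Println(r.shutdown()) // []   (second call is a no-op)
	_, ok := r.add("ses-2")
	fmt.Println(ok) // false: nothing is tracked after shutdown
}
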
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/sessionpeermanager/sessionpeermanager.go a/vendor/github.com/ipfs/go-bitswap/client/internal/sessionpeermanager/sessionpeermanager.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/sessionpeermanager/sessionpeermanager.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/sessionpeermanager/sessionpeermanager.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,150 +0,0 @@ | |
-package sessionpeermanager | |
- | |
-import ( | |
- "fmt" | |
- "sync" | |
- | |
- logging "github.com/ipfs/go-log" | |
- | |
- peer "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-var log = logging.Logger("bs:sprmgr") | |
- | |
-const ( | |
- // Connection Manager tag value for session peers. Indicates to connection | |
- // manager that it should keep the connection to the peer. | |
- sessionPeerTagValue = 5 | |
-) | |
- | |
-// PeerTagger is an interface for tagging peers with metadata | |
-type PeerTagger interface { | |
- TagPeer(peer.ID, string, int) | |
- UntagPeer(p peer.ID, tag string) | |
- Protect(peer.ID, string) | |
- Unprotect(peer.ID, string) bool | |
-} | |
- | |
-// SessionPeerManager keeps track of peers for a session, and takes care of | |
-// ConnectionManager tagging. | |
-type SessionPeerManager struct { | |
- tagger PeerTagger | |
- tag string | |
- | |
- id uint64 | |
- plk sync.RWMutex | |
- peers map[peer.ID]struct{} | |
- peersDiscovered bool | |
-} | |
- | |
-// New creates a new SessionPeerManager | |
-func New(id uint64, tagger PeerTagger) *SessionPeerManager { | |
- return &SessionPeerManager{ | |
- id: id, | |
- tag: fmt.Sprint("bs-ses-", id), | |
- tagger: tagger, | |
- peers: make(map[peer.ID]struct{}), | |
- } | |
-} | |
- | |
-// AddPeer adds the peer to the SessionPeerManager. | |
-// Returns true if the peer is a new peer, false if it already existed. | |
-func (spm *SessionPeerManager) AddPeer(p peer.ID) bool { | |
- spm.plk.Lock() | |
- defer spm.plk.Unlock() | |
- | |
- // Check if the peer is a new peer | |
- if _, ok := spm.peers[p]; ok { | |
- return false | |
- } | |
- | |
- spm.peers[p] = struct{}{} | |
- spm.peersDiscovered = true | |
- | |
- // Tag the peer with the ConnectionManager so it doesn't discard the | |
- // connection | |
- spm.tagger.TagPeer(p, spm.tag, sessionPeerTagValue) | |
- | |
- log.Debugw("Bitswap: Added peer to session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) | |
- return true | |
-} | |
- | |
-// Protect connection to this peer from being pruned by the connection manager | |
-func (spm *SessionPeerManager) ProtectConnection(p peer.ID) { | |
- spm.plk.Lock() | |
- defer spm.plk.Unlock() | |
- | |
- if _, ok := spm.peers[p]; !ok { | |
- return | |
- } | |
- | |
- spm.tagger.Protect(p, spm.tag) | |
-} | |
- | |
-// RemovePeer removes the peer from the SessionPeerManager. | |
-// Returns true if the peer was removed, false if it did not exist. | |
-func (spm *SessionPeerManager) RemovePeer(p peer.ID) bool { | |
- spm.plk.Lock() | |
- defer spm.plk.Unlock() | |
- | |
- if _, ok := spm.peers[p]; !ok { | |
- return false | |
- } | |
- | |
- delete(spm.peers, p) | |
- spm.tagger.UntagPeer(p, spm.tag) | |
- spm.tagger.Unprotect(p, spm.tag) | |
- | |
- log.Debugw("Bitswap: removed peer from session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) | |
- return true | |
-} | |
- | |
-// PeersDiscovered indicates whether peers have been discovered yet. | |
-// Returns true once a peer has been discovered by the session (even if all | |
-// peers are later removed from the session). | |
-func (spm *SessionPeerManager) PeersDiscovered() bool { | |
- spm.plk.RLock() | |
- defer spm.plk.RUnlock() | |
- | |
- return spm.peersDiscovered | |
-} | |
- | |
-func (spm *SessionPeerManager) Peers() []peer.ID { | |
- spm.plk.RLock() | |
- defer spm.plk.RUnlock() | |
- | |
- peers := make([]peer.ID, 0, len(spm.peers)) | |
- for p := range spm.peers { | |
- peers = append(peers, p) | |
- } | |
- | |
- return peers | |
-} | |
- | |
-func (spm *SessionPeerManager) HasPeers() bool { | |
- spm.plk.RLock() | |
- defer spm.plk.RUnlock() | |
- | |
- return len(spm.peers) > 0 | |
-} | |
- | |
-func (spm *SessionPeerManager) HasPeer(p peer.ID) bool { | |
- spm.plk.RLock() | |
- defer spm.plk.RUnlock() | |
- | |
- _, ok := spm.peers[p] | |
- return ok | |
-} | |
- | |
-// Shutdown untags all the peers | |
-func (spm *SessionPeerManager) Shutdown() { | |
- spm.plk.Lock() | |
- defer spm.plk.Unlock() | |
- | |
- // Untag the peers with the ConnectionManager so that it can release | |
- // connections to those peers | |
- for p := range spm.peers { | |
- spm.tagger.UntagPeer(p, spm.tag) | |
- spm.tagger.Unprotect(p, spm.tag) | |
- } | |
-} | |
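
The session peer manager touches the connection manager only through the narrow PeerTagger interface above, which makes the tag / protect / untag lifecycle easy to exercise with a stub. The sketch below redeclares the interface locally (the real one lives in an internal package) and uses a hypothetical logTagger and a placeholder peer ID; it assumes go-libp2p is available for the peer.ID type.

package main

import (
	"fmt"

	peer "github.com/libp2p/go-libp2p/core/peer"
)

// PeerTagger mirrors the interface expected by the session peer manager
// above (copied here because the real one is in an internal package).
type PeerTagger interface {
	TagPeer(peer.ID, string, int)
	UntagPeer(p peer.ID, tag string)
	Protect(peer.ID, string)
	Unprotect(peer.ID, string) bool
}

// logTagger is a hypothetical stub that records what the session peer
// manager would ask the connection manager to do.
type logTagger struct{ calls []string }

func (l *logTagger) TagPeer(p peer.ID, tag string, val int) {
	l.calls = append(l.calls, fmt.Sprintf("tag %s %s=%d", p, tag, val))
}
func (l *logTagger) UntagPeer(p peer.ID, tag string) {
	l.calls = append(l.calls, fmt.Sprintf("untag %s %s", p, tag))
}
func (l *logTagger) Protect(p peer.ID, tag string) {
	l.calls = append(l.calls, fmt.Sprintf("protect %s %s", p, tag))
}
func (l *logTagger) Unprotect(p peer.ID, tag string) bool {
	l.calls = append(l.calls, fmt.Sprintf("unprotect %s %s", p, tag))
	return false
}

func main() {
	var t PeerTagger = &logTagger{}
	p := peer.ID("QmExamplePeer") // placeholder ID, not a real peer
	t.TagPeer(p, "bs-ses-1", 5)   // what AddPeer does
	t.Protect(p, "bs-ses-1")      // what ProtectConnection does
	t.UntagPeer(p, "bs-ses-1")    // what RemovePeer / Shutdown do
	fmt.Println(t.(*logTagger).calls)
}
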
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/internal/tracing.go a/vendor/github.com/ipfs/go-bitswap/client/internal/tracing.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/internal/tracing.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/internal/tracing.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,13 +0,0 @@ | |
-package internal | |
- | |
-import ( | |
- "context" | |
- "fmt" | |
- | |
- "go.opentelemetry.io/otel" | |
- "go.opentelemetry.io/otel/trace" | |
-) | |
- | |
-func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { | |
- return otel.Tracer("go-bitswap").Start(ctx, fmt.Sprintf("Bitswap.%s", name), opts...) | |
-} | |
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/stat.go a/vendor/github.com/ipfs/go-bitswap/client/stat.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/stat.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/stat.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,30 +0,0 @@ | |
-package client | |
- | |
-import ( | |
- cid "github.com/ipfs/go-cid" | |
-) | |
- | |
-// Stat is a struct that provides various statistics on bitswap operations | |
-type Stat struct { | |
- Wantlist []cid.Cid | |
- BlocksReceived uint64 | |
- DataReceived uint64 | |
- DupBlksReceived uint64 | |
- DupDataReceived uint64 | |
- MessagesReceived uint64 | |
-} | |
- | |
-// Stat returns aggregated statistics about bitswap operations | |
-func (bs *Client) Stat() (st Stat, err error) { | |
- bs.counterLk.Lock() | |
- c := bs.counters | |
- st.BlocksReceived = c.blocksRecvd | |
- st.DupBlksReceived = c.dupBlocksRecvd | |
- st.DupDataReceived = c.dupDataRecvd | |
- st.DataReceived = c.dataRecvd | |
- st.MessagesReceived = c.messagesRecvd | |
- bs.counterLk.Unlock() | |
- st.Wantlist = bs.GetWantlist() | |
- | |
- return st, nil | |
-} | |
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/client/wantlist/wantlist.go a/vendor/github.com/ipfs/go-bitswap/client/wantlist/wantlist.go | |
--- b/vendor/github.com/ipfs/go-bitswap/client/wantlist/wantlist.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/client/wantlist/wantlist.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,142 +0,0 @@ | |
-// Package wantlist implements an object for bitswap that contains the keys | |
-// that a given peer wants. | |
-package wantlist | |
- | |
-import ( | |
- "sort" | |
- | |
- pb "github.com/ipfs/go-bitswap/message/pb" | |
- | |
- cid "github.com/ipfs/go-cid" | |
-) | |
- | |
-// Wantlist is a raw list of wanted blocks and their priorities | |
-type Wantlist struct { | |
- set map[cid.Cid]Entry | |
- | |
- // Re-computing this can get expensive so we memoize it. | |
- cached []Entry | |
-} | |
- | |
-// Entry is an entry in a want list, consisting of a cid and its priority | |
-type Entry struct { | |
- Cid cid.Cid | |
- Priority int32 | |
- WantType pb.Message_Wantlist_WantType | |
-} | |
- | |
-// NewRefEntry creates a new reference tracked wantlist entry. | |
-func NewRefEntry(c cid.Cid, p int32) Entry { | |
- return Entry{ | |
- Cid: c, | |
- Priority: p, | |
- WantType: pb.Message_Wantlist_Block, | |
- } | |
-} | |
- | |
-type entrySlice []Entry | |
- | |
-func (es entrySlice) Len() int { return len(es) } | |
-func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } | |
-func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priority } | |
- | |
-// New generates a new raw Wantlist | |
-func New() *Wantlist { | |
- return &Wantlist{ | |
- set: make(map[cid.Cid]Entry), | |
- } | |
-} | |
- | |
-// Len returns the number of entries in a wantlist. | |
-func (w *Wantlist) Len() int { | |
- return len(w.set) | |
-} | |
- | |
-// Add adds an entry to the wantlist for the given CID and priority, if not already present. | |
-func (w *Wantlist) Add(c cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType) bool { | |
- e, ok := w.set[c] | |
- | |
- // Adding want-have should not override want-block | |
- if ok && (e.WantType == pb.Message_Wantlist_Block || wantType == pb.Message_Wantlist_Have) { | |
- return false | |
- } | |
- | |
- w.put(c, Entry{ | |
- Cid: c, | |
- Priority: priority, | |
- WantType: wantType, | |
- }) | |
- | |
- return true | |
-} | |
- | |
-// Remove removes the given cid from the wantlist. | |
-func (w *Wantlist) Remove(c cid.Cid) bool { | |
- _, ok := w.set[c] | |
- if !ok { | |
- return false | |
- } | |
- | |
- w.delete(c) | |
- return true | |
-} | |
- | |
-// RemoveType removes the given cid from the wantlist, respecting the want type: | |
-// a remove with want-have will not remove an existing want-block. | |
-func (w *Wantlist) RemoveType(c cid.Cid, wantType pb.Message_Wantlist_WantType) bool { | |
- e, ok := w.set[c] | |
- if !ok { | |
- return false | |
- } | |
- | |
- // Removing want-have should not remove want-block | |
- if e.WantType == pb.Message_Wantlist_Block && wantType == pb.Message_Wantlist_Have { | |
- return false | |
- } | |
- | |
- w.delete(c) | |
- return true | |
-} | |
- | |
-func (w *Wantlist) delete(c cid.Cid) { | |
- delete(w.set, c) | |
- w.cached = nil | |
-} | |
- | |
-func (w *Wantlist) put(c cid.Cid, e Entry) { | |
- w.cached = nil | |
- w.set[c] = e | |
-} | |
- | |
-// Contains returns the entry, if present, for the given CID, plus whether it | |
-// was present. | |
-func (w *Wantlist) Contains(c cid.Cid) (Entry, bool) { | |
- e, ok := w.set[c] | |
- return e, ok | |
-} | |
- | |
-// Entries returns all wantlist entries for a want list, sorted by priority. | |
-// | |
-// DO NOT MODIFY. The returned list is cached. | |
-func (w *Wantlist) Entries() []Entry { | |
- if w.cached != nil { | |
- return w.cached | |
- } | |
- es := make([]Entry, 0, len(w.set)) | |
- for _, e := range w.set { | |
- es = append(es, e) | |
- } | |
- sort.Sort(entrySlice(es)) | |
- w.cached = es | |
- return es[0:len(es):len(es)] | |
-} | |
- | |
-// Absorb all the entries in other into this want list | |
-func (w *Wantlist) Absorb(other *Wantlist) { | |
- // Invalidate the cache up-front to avoid doing any work trying to keep it up-to-date. | |
- w.cached = nil | |
- | |
- for _, e := range other.Entries() { | |
- w.Add(e.Cid, e.Priority, e.WantType) | |
- } | |
-} | |
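
The want-block / want-have precedence described in Add and RemoveType above can be exercised directly, since the wantlist package is exported. A minimal sketch, assuming the go-bitswap, go-cid and go-ipfs-util module paths that appear in this patch:

package main

import (
	"fmt"

	"github.com/ipfs/go-bitswap/client/wantlist"
	pb "github.com/ipfs/go-bitswap/message/pb"

	cid "github.com/ipfs/go-cid"
	u "github.com/ipfs/go-ipfs-util"
)

func main() {
	c := cid.NewCidV0(u.Hash([]byte("example-block"))) // any CID works here

	wl := wantlist.New()
	fmt.Println(wl.Add(c, 10, pb.Message_Wantlist_Block))    // true: new want-block
	fmt.Println(wl.Add(c, 20, pb.Message_Wantlist_Have))     // false: want-have never downgrades want-block
	fmt.Println(wl.RemoveType(c, pb.Message_Wantlist_Have))  // false: same rule applies on remove
	fmt.Println(wl.RemoveType(c, pb.Message_Wantlist_Block)) // true: entry removed

	_, stillThere := wl.Contains(c)
	fmt.Println(stillThere) // false
}
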
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/forward.go a/vendor/github.com/ipfs/go-bitswap/forward.go | |
--- b/vendor/github.com/ipfs/go-bitswap/forward.go 2023-01-30 20:34:50.525475644 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/forward.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,17 +0,0 @@ | |
-package bitswap | |
- | |
-import ( | |
- "github.com/ipfs/go-bitswap/server" | |
- "github.com/ipfs/go-bitswap/tracer" | |
-) | |
- | |
-type ( | |
- // DEPRECATED | |
- PeerBlockRequestFilter = server.PeerBlockRequestFilter | |
- // DEPRECATED | |
- TaskComparator = server.TaskComparator | |
- // DEPRECATED | |
- TaskInfo = server.TaskInfo | |
- // DEPRECATED | |
- Tracer = tracer.Tracer | |
-) | |
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/.gitignore a/vendor/github.com/ipfs/go-bitswap/.gitignore | |
--- b/vendor/github.com/ipfs/go-bitswap/.gitignore 2023-01-30 20:34:50.525475644 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/.gitignore 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1 +0,0 @@ | |
-tmp | |
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/internal/defaults/defaults.go a/vendor/github.com/ipfs/go-bitswap/internal/defaults/defaults.go | |
--- b/vendor/github.com/ipfs/go-bitswap/internal/defaults/defaults.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/internal/defaults/defaults.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,27 +0,0 @@ | |
-package defaults | |
- | |
-import ( | |
- "time" | |
-) | |
- | |
-const ( | |
- // these requests take at _least_ two minutes at the moment. | |
- ProvideTimeout = time.Minute * 3 | |
- ProvSearchDelay = time.Second | |
- | |
- // Number of concurrent workers in decision engine that process requests to the blockstore | |
- BitswapEngineBlockstoreWorkerCount = 128 | |
- // the total number of simultaneous threads sending outgoing messages | |
- BitswapTaskWorkerCount = 8 | |
- // how many worker threads to start for decision engine task worker | |
- BitswapEngineTaskWorkerCount = 8 | |
-	// the maximum number of bytes a peer can have outstanding; used by the decision engine | |
- BitswapMaxOutstandingBytesPerPeer = 1 << 20 | |
- // the number of bytes we attempt to make each outgoing bitswap message | |
- BitswapEngineTargetMessageSize = 16 * 1024 | |
- // HasBlockBufferSize is the buffer size of the channel for new blocks | |
- // that need to be provided. They should get pulled over by the | |
- // provideCollector even before they are actually provided. | |
-	// TODO: Does this need to be this large given that? | |
- HasBlockBufferSize = 256 | |
-) | |
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/internal/tracing.go a/vendor/github.com/ipfs/go-bitswap/internal/tracing.go | |
--- b/vendor/github.com/ipfs/go-bitswap/internal/tracing.go 2023-01-30 20:34:50.528809007 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/internal/tracing.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,13 +0,0 @@ | |
-package internal | |
- | |
-import ( | |
- "context" | |
- "fmt" | |
- | |
- "go.opentelemetry.io/otel" | |
- "go.opentelemetry.io/otel/trace" | |
-) | |
- | |
-func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { | |
- return otel.Tracer("go-bitswap").Start(ctx, fmt.Sprintf("Bitswap.%s", name), opts...) | |
-} | |
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/LICENSE a/vendor/github.com/ipfs/go-bitswap/LICENSE | |
--- b/vendor/github.com/ipfs/go-bitswap/LICENSE 2023-01-30 20:34:50.525475644 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/LICENSE 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,21 +0,0 @@ | |
-The MIT License (MIT) | |
- | |
-Copyright (c) 2014-2018 Juan Batiz-Benet | |
- | |
-Permission is hereby granted, free of charge, to any person obtaining a copy | |
-of this software and associated documentation files (the "Software"), to deal | |
-in the Software without restriction, including without limitation the rights | |
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
-copies of the Software, and to permit persons to whom the Software is | |
-furnished to do so, subject to the following conditions: | |
- | |
-The above copyright notice and this permission notice shall be included in | |
-all copies or substantial portions of the Software. | |
- | |
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
-THE SOFTWARE. | |
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/message/message.go a/vendor/github.com/ipfs/go-bitswap/message/message.go | |
--- b/vendor/github.com/ipfs/go-bitswap/message/message.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/message/message.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,500 +0,0 @@ | |
-package message | |
- | |
-import ( | |
- "encoding/binary" | |
- "errors" | |
- "io" | |
- | |
- "github.com/ipfs/go-bitswap/client/wantlist" | |
- pb "github.com/ipfs/go-bitswap/message/pb" | |
- | |
- blocks "github.com/ipfs/go-block-format" | |
- cid "github.com/ipfs/go-cid" | |
- pool "github.com/libp2p/go-buffer-pool" | |
- msgio "github.com/libp2p/go-msgio" | |
- | |
- u "github.com/ipfs/go-ipfs-util" | |
- "github.com/libp2p/go-libp2p/core/network" | |
-) | |
- | |
-// BitSwapMessage is the basic interface for building, encoding, and | |
-// decoding messages sent on the BitSwap protocol. | |
-type BitSwapMessage interface { | |
- // Wantlist returns a slice of unique keys that represent data wanted by | |
- // the sender. | |
- Wantlist() []Entry | |
- | |
- // Blocks returns a slice of unique blocks. | |
- Blocks() []blocks.Block | |
- // BlockPresences returns the list of HAVE / DONT_HAVE in the message | |
- BlockPresences() []BlockPresence | |
- // Haves returns the Cids for each HAVE | |
- Haves() []cid.Cid | |
- // DontHaves returns the Cids for each DONT_HAVE | |
- DontHaves() []cid.Cid | |
- // PendingBytes returns the number of outstanding bytes of data that the | |
- // engine has yet to send to the client (because they didn't fit in this | |
- // message) | |
- PendingBytes() int32 | |
- | |
- // AddEntry adds an entry to the Wantlist. | |
- AddEntry(key cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int | |
- | |
- // Cancel adds a CANCEL for the given CID to the message | |
- // Returns the size of the CANCEL entry in the protobuf | |
- Cancel(key cid.Cid) int | |
- | |
- // Remove removes any entries for the given CID. Useful when the want | |
- // status for the CID changes when preparing a message. | |
- Remove(key cid.Cid) | |
- | |
- // Empty indicates whether the message has any information | |
- Empty() bool | |
- // Size returns the size of the message in bytes | |
- Size() int | |
- | |
- // A full wantlist is an authoritative copy, a 'non-full' wantlist is a patch-set | |
- Full() bool | |
- | |
- // AddBlock adds a block to the message | |
- AddBlock(blocks.Block) | |
- // AddBlockPresence adds a HAVE / DONT_HAVE for the given Cid to the message | |
- AddBlockPresence(cid.Cid, pb.Message_BlockPresenceType) | |
- // AddHave adds a HAVE for the given Cid to the message | |
- AddHave(cid.Cid) | |
- // AddDontHave adds a DONT_HAVE for the given Cid to the message | |
- AddDontHave(cid.Cid) | |
- // SetPendingBytes sets the number of bytes of data that are yet to be sent | |
- // to the client (because they didn't fit in this message) | |
- SetPendingBytes(int32) | |
- Exportable | |
- | |
- Loggable() map[string]interface{} | |
- | |
- // Reset the values in the message back to defaults, so it can be reused | |
- Reset(bool) | |
- | |
- // Clone the message fields | |
- Clone() BitSwapMessage | |
-} | |
- | |
-// Exportable is an interface for structures that can be | |
-// encoded in a bitswap protobuf. | |
-type Exportable interface { | |
- // Note that older Bitswap versions use a different wire format, so we need | |
- // to convert the message to the appropriate format depending on which | |
- // version of the protocol the remote peer supports. | |
- ToProtoV0() *pb.Message | |
- ToProtoV1() *pb.Message | |
- ToNetV0(w io.Writer) error | |
- ToNetV1(w io.Writer) error | |
-} | |
- | |
-// BlockPresence represents a HAVE / DONT_HAVE for a given Cid | |
-type BlockPresence struct { | |
- Cid cid.Cid | |
- Type pb.Message_BlockPresenceType | |
-} | |
- | |
-// Entry is a wantlist entry in a Bitswap message, with flags indicating | |
-// - whether the entry is a cancel | |
-// - whether the requester wants a DONT_HAVE message | |
-// - whether the requester wants a HAVE message (instead of the block) | |
-type Entry struct { | |
- wantlist.Entry | |
- Cancel bool | |
- SendDontHave bool | |
-} | |
- | |
-// Get the size of the entry on the wire | |
-func (e *Entry) Size() int { | |
- epb := e.ToPB() | |
- return epb.Size() | |
-} | |
- | |
-// Get the entry in protobuf form | |
-func (e *Entry) ToPB() pb.Message_Wantlist_Entry { | |
- return pb.Message_Wantlist_Entry{ | |
- Block: pb.Cid{Cid: e.Cid}, | |
- Priority: int32(e.Priority), | |
- Cancel: e.Cancel, | |
- WantType: e.WantType, | |
- SendDontHave: e.SendDontHave, | |
- } | |
-} | |
- | |
-var MaxEntrySize = maxEntrySize() | |
- | |
-func maxEntrySize() int { | |
- var maxInt32 int32 = (1 << 31) - 1 | |
- | |
- c := cid.NewCidV0(u.Hash([]byte("cid"))) | |
- e := Entry{ | |
- Entry: wantlist.Entry{ | |
- Cid: c, | |
- Priority: maxInt32, | |
- WantType: pb.Message_Wantlist_Have, | |
- }, | |
- SendDontHave: true, // true takes up more space than false | |
- Cancel: true, | |
- } | |
- return e.Size() | |
-} | |
- | |
-type impl struct { | |
- full bool | |
- wantlist map[cid.Cid]*Entry | |
- blocks map[cid.Cid]blocks.Block | |
- blockPresences map[cid.Cid]pb.Message_BlockPresenceType | |
- pendingBytes int32 | |
-} | |
- | |
-// New returns a new, empty bitswap message | |
-func New(full bool) BitSwapMessage { | |
- return newMsg(full) | |
-} | |
- | |
-func newMsg(full bool) *impl { | |
- return &impl{ | |
- full: full, | |
- wantlist: make(map[cid.Cid]*Entry), | |
- blocks: make(map[cid.Cid]blocks.Block), | |
- blockPresences: make(map[cid.Cid]pb.Message_BlockPresenceType), | |
- } | |
-} | |
- | |
-// Clone the message fields | |
-func (m *impl) Clone() BitSwapMessage { | |
- msg := newMsg(m.full) | |
- for k := range m.wantlist { | |
- msg.wantlist[k] = m.wantlist[k] | |
- } | |
- for k := range m.blocks { | |
- msg.blocks[k] = m.blocks[k] | |
- } | |
- for k := range m.blockPresences { | |
- msg.blockPresences[k] = m.blockPresences[k] | |
- } | |
- msg.pendingBytes = m.pendingBytes | |
- return msg | |
-} | |
- | |
-// Reset the values in the message back to defaults, so it can be reused | |
-func (m *impl) Reset(full bool) { | |
- m.full = full | |
- for k := range m.wantlist { | |
- delete(m.wantlist, k) | |
- } | |
- for k := range m.blocks { | |
- delete(m.blocks, k) | |
- } | |
- for k := range m.blockPresences { | |
- delete(m.blockPresences, k) | |
- } | |
- m.pendingBytes = 0 | |
-} | |
- | |
-var errCidMissing = errors.New("missing cid") | |
- | |
-func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { | |
- m := newMsg(pbm.Wantlist.Full) | |
- for _, e := range pbm.Wantlist.Entries { | |
- if !e.Block.Cid.Defined() { | |
- return nil, errCidMissing | |
- } | |
- m.addEntry(e.Block.Cid, e.Priority, e.Cancel, e.WantType, e.SendDontHave) | |
- } | |
- | |
- // deprecated | |
- for _, d := range pbm.Blocks { | |
- // CIDv0, sha256, protobuf only | |
- b := blocks.NewBlock(d) | |
- m.AddBlock(b) | |
- } | |
- // | |
- | |
- for _, b := range pbm.GetPayload() { | |
- pref, err := cid.PrefixFromBytes(b.GetPrefix()) | |
- if err != nil { | |
- return nil, err | |
- } | |
- | |
- c, err := pref.Sum(b.GetData()) | |
- if err != nil { | |
- return nil, err | |
- } | |
- | |
- blk, err := blocks.NewBlockWithCid(b.GetData(), c) | |
- if err != nil { | |
- return nil, err | |
- } | |
- | |
- m.AddBlock(blk) | |
- } | |
- | |
- for _, bi := range pbm.GetBlockPresences() { | |
- if !bi.Cid.Cid.Defined() { | |
- return nil, errCidMissing | |
- } | |
- m.AddBlockPresence(bi.Cid.Cid, bi.Type) | |
- } | |
- | |
- m.pendingBytes = pbm.PendingBytes | |
- | |
- return m, nil | |
-} | |
- | |
-func (m *impl) Full() bool { | |
- return m.full | |
-} | |
- | |
-func (m *impl) Empty() bool { | |
- return len(m.blocks) == 0 && len(m.wantlist) == 0 && len(m.blockPresences) == 0 | |
-} | |
- | |
-func (m *impl) Wantlist() []Entry { | |
- out := make([]Entry, 0, len(m.wantlist)) | |
- for _, e := range m.wantlist { | |
- out = append(out, *e) | |
- } | |
- return out | |
-} | |
- | |
-func (m *impl) Blocks() []blocks.Block { | |
- bs := make([]blocks.Block, 0, len(m.blocks)) | |
- for _, block := range m.blocks { | |
- bs = append(bs, block) | |
- } | |
- return bs | |
-} | |
- | |
-func (m *impl) BlockPresences() []BlockPresence { | |
- bps := make([]BlockPresence, 0, len(m.blockPresences)) | |
- for c, t := range m.blockPresences { | |
- bps = append(bps, BlockPresence{c, t}) | |
- } | |
- return bps | |
-} | |
- | |
-func (m *impl) Haves() []cid.Cid { | |
- return m.getBlockPresenceByType(pb.Message_Have) | |
-} | |
- | |
-func (m *impl) DontHaves() []cid.Cid { | |
- return m.getBlockPresenceByType(pb.Message_DontHave) | |
-} | |
- | |
-func (m *impl) getBlockPresenceByType(t pb.Message_BlockPresenceType) []cid.Cid { | |
- cids := make([]cid.Cid, 0, len(m.blockPresences)) | |
- for c, bpt := range m.blockPresences { | |
- if bpt == t { | |
- cids = append(cids, c) | |
- } | |
- } | |
- return cids | |
-} | |
- | |
-func (m *impl) PendingBytes() int32 { | |
- return m.pendingBytes | |
-} | |
- | |
-func (m *impl) SetPendingBytes(pendingBytes int32) { | |
- m.pendingBytes = pendingBytes | |
-} | |
- | |
-func (m *impl) Remove(k cid.Cid) { | |
- delete(m.wantlist, k) | |
-} | |
- | |
-func (m *impl) Cancel(k cid.Cid) int { | |
- return m.addEntry(k, 0, true, pb.Message_Wantlist_Block, false) | |
-} | |
- | |
-func (m *impl) AddEntry(k cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { | |
- return m.addEntry(k, priority, false, wantType, sendDontHave) | |
-} | |
- | |
-func (m *impl) addEntry(c cid.Cid, priority int32, cancel bool, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { | |
- e, exists := m.wantlist[c] | |
- if exists { | |
- // Only change priority if want is of the same type | |
- if e.WantType == wantType { | |
- e.Priority = priority | |
- } | |
- // Only change from "dont cancel" to "do cancel" | |
- if cancel { | |
- e.Cancel = cancel | |
- } | |
- // Only change from "dont send" to "do send" DONT_HAVE | |
- if sendDontHave { | |
- e.SendDontHave = sendDontHave | |
- } | |
- // want-block overrides existing want-have | |
- if wantType == pb.Message_Wantlist_Block && e.WantType == pb.Message_Wantlist_Have { | |
- e.WantType = wantType | |
- } | |
- m.wantlist[c] = e | |
- return 0 | |
- } | |
- | |
- e = &Entry{ | |
- Entry: wantlist.Entry{ | |
- Cid: c, | |
- Priority: priority, | |
- WantType: wantType, | |
- }, | |
- SendDontHave: sendDontHave, | |
- Cancel: cancel, | |
- } | |
- m.wantlist[c] = e | |
- | |
- return e.Size() | |
-} | |
- | |
-func (m *impl) AddBlock(b blocks.Block) { | |
- delete(m.blockPresences, b.Cid()) | |
- m.blocks[b.Cid()] = b | |
-} | |
- | |
-func (m *impl) AddBlockPresence(c cid.Cid, t pb.Message_BlockPresenceType) { | |
- if _, ok := m.blocks[c]; ok { | |
- return | |
- } | |
- m.blockPresences[c] = t | |
-} | |
- | |
-func (m *impl) AddHave(c cid.Cid) { | |
- m.AddBlockPresence(c, pb.Message_Have) | |
-} | |
- | |
-func (m *impl) AddDontHave(c cid.Cid) { | |
- m.AddBlockPresence(c, pb.Message_DontHave) | |
-} | |
- | |
-func (m *impl) Size() int { | |
- size := 0 | |
- for _, block := range m.blocks { | |
- size += len(block.RawData()) | |
- } | |
- for c := range m.blockPresences { | |
- size += BlockPresenceSize(c) | |
- } | |
- for _, e := range m.wantlist { | |
- size += e.Size() | |
- } | |
- | |
- return size | |
-} | |
- | |
-func BlockPresenceSize(c cid.Cid) int { | |
- return (&pb.Message_BlockPresence{ | |
- Cid: pb.Cid{Cid: c}, | |
- Type: pb.Message_Have, | |
- }).Size() | |
-} | |
- | |
-// FromNet generates a new BitswapMessage from incoming data on an io.Reader. | |
-func FromNet(r io.Reader) (BitSwapMessage, error) { | |
- reader := msgio.NewVarintReaderSize(r, network.MessageSizeMax) | |
- return FromMsgReader(reader) | |
-} | |
- | |
-// FromMsgReader generates a new Bitswap message from a gogo-protobuf msgio reader | |
-func FromMsgReader(r msgio.Reader) (BitSwapMessage, error) { | |
- msg, err := r.ReadMsg() | |
- if err != nil { | |
- return nil, err | |
- } | |
- | |
- var pb pb.Message | |
- err = pb.Unmarshal(msg) | |
- r.ReleaseMsg(msg) | |
- if err != nil { | |
- return nil, err | |
- } | |
- | |
- return newMessageFromProto(pb) | |
-} | |
- | |
-func (m *impl) ToProtoV0() *pb.Message { | |
- pbm := new(pb.Message) | |
- pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) | |
- for _, e := range m.wantlist { | |
- pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, e.ToPB()) | |
- } | |
- pbm.Wantlist.Full = m.full | |
- | |
- blocks := m.Blocks() | |
- pbm.Blocks = make([][]byte, 0, len(blocks)) | |
- for _, b := range blocks { | |
- pbm.Blocks = append(pbm.Blocks, b.RawData()) | |
- } | |
- return pbm | |
-} | |
- | |
-func (m *impl) ToProtoV1() *pb.Message { | |
- pbm := new(pb.Message) | |
- pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) | |
- for _, e := range m.wantlist { | |
- pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, e.ToPB()) | |
- } | |
- pbm.Wantlist.Full = m.full | |
- | |
- blocks := m.Blocks() | |
- pbm.Payload = make([]pb.Message_Block, 0, len(blocks)) | |
- for _, b := range blocks { | |
- pbm.Payload = append(pbm.Payload, pb.Message_Block{ | |
- Data: b.RawData(), | |
- Prefix: b.Cid().Prefix().Bytes(), | |
- }) | |
- } | |
- | |
- pbm.BlockPresences = make([]pb.Message_BlockPresence, 0, len(m.blockPresences)) | |
- for c, t := range m.blockPresences { | |
- pbm.BlockPresences = append(pbm.BlockPresences, pb.Message_BlockPresence{ | |
- Cid: pb.Cid{Cid: c}, | |
- Type: t, | |
- }) | |
- } | |
- | |
- pbm.PendingBytes = m.PendingBytes() | |
- | |
- return pbm | |
-} | |
- | |
-func (m *impl) ToNetV0(w io.Writer) error { | |
- return write(w, m.ToProtoV0()) | |
-} | |
- | |
-func (m *impl) ToNetV1(w io.Writer) error { | |
- return write(w, m.ToProtoV1()) | |
-} | |
- | |
-func write(w io.Writer, m *pb.Message) error { | |
- size := m.Size() | |
- | |
- buf := pool.Get(size + binary.MaxVarintLen64) | |
- defer pool.Put(buf) | |
- | |
- n := binary.PutUvarint(buf, uint64(size)) | |
- | |
- written, err := m.MarshalTo(buf[n:]) | |
- if err != nil { | |
- return err | |
- } | |
- n += written | |
- | |
- _, err = w.Write(buf[:n]) | |
- return err | |
-} | |
- | |
-func (m *impl) Loggable() map[string]interface{} { | |
- blocks := make([]string, 0, len(m.blocks)) | |
- for _, v := range m.blocks { | |
- blocks = append(blocks, v.Cid().String()) | |
- } | |
- return map[string]interface{}{ | |
- "blocks": blocks, | |
- "wants": m.Wantlist(), | |
- } | |
-} | |
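
Putting the message API above together: build a message with one want entry and one block, encode it with the v1 (payload-based) wire format, then decode it again with FromNet. A minimal round-trip sketch, assuming the module paths that appear in this patch:

package main

import (
	"bytes"
	"fmt"

	"github.com/ipfs/go-bitswap/message"
	pb "github.com/ipfs/go-bitswap/message/pb"

	blocks "github.com/ipfs/go-block-format"
	cid "github.com/ipfs/go-cid"
	u "github.com/ipfs/go-ipfs-util"
)

func main() {
	// Build an outgoing message: one want-block entry and one block.
	out := message.New(false) // false: this wantlist is a patch, not a full copy
	wanted := cid.NewCidV0(u.Hash([]byte("something-we-want")))
	out.AddEntry(wanted, 1, pb.Message_Wantlist_Block, true) // sendDontHave=true
	out.AddBlock(blocks.NewBlock([]byte("a block we are sending")))

	// Encode with the v1 wire format (length-prefixed protobuf)...
	var buf bytes.Buffer
	if err := out.ToNetV1(&buf); err != nil {
		panic(err)
	}

	// ...and decode it again as a receiver would.
	in, err := message.FromNet(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(in.Wantlist()), len(in.Blocks())) // 1 1
}
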
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/message/pb/cid.go a/vendor/github.com/ipfs/go-bitswap/message/pb/cid.go | |
--- b/vendor/github.com/ipfs/go-bitswap/message/pb/cid.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/message/pb/cid.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,44 +0,0 @@ | |
-package bitswap_message_pb | |
- | |
-import ( | |
- "github.com/ipfs/go-cid" | |
-) | |
- | |
-// NOTE: Don't "embed" the cid, wrap it like we're doing here. Otherwise, gogo | |
-// will try to use the Bytes() function. | |
- | |
-// Cid is a custom type for CIDs in protobufs, that allows us to avoid | |
-// reallocating. | |
-type Cid struct { | |
- Cid cid.Cid | |
-} | |
- | |
-func (c Cid) Marshal() ([]byte, error) { | |
- return c.Cid.Bytes(), nil | |
-} | |
- | |
-func (c *Cid) MarshalTo(data []byte) (int, error) { | |
- // intentionally using KeyString here to avoid allocating. | |
- return copy(data[:c.Size()], c.Cid.KeyString()), nil | |
-} | |
- | |
-func (c *Cid) Unmarshal(data []byte) (err error) { | |
- c.Cid, err = cid.Cast(data) | |
- return err | |
-} | |
- | |
-func (c *Cid) Size() int { | |
- return len(c.Cid.KeyString()) | |
-} | |
- | |
-func (c Cid) MarshalJSON() ([]byte, error) { | |
- return c.Cid.MarshalJSON() | |
-} | |
- | |
-func (c *Cid) UnmarshalJSON(data []byte) error { | |
- return c.Cid.UnmarshalJSON(data) | |
-} | |
- | |
-func (c Cid) Equal(other Cid) bool { | |
-	return c.Cid.Equals(other.Cid) | |
-} | |
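
As the NOTE above says, the Cid wrapper exists so gogo marshals the raw CID bytes directly instead of going through Bytes() and reallocating. A quick round-trip sketch, assuming the same module paths as elsewhere in this patch:

package main

import (
	"fmt"

	pb "github.com/ipfs/go-bitswap/message/pb"

	cid "github.com/ipfs/go-cid"
	u "github.com/ipfs/go-ipfs-util"
)

func main() {
	c := cid.NewCidV0(u.Hash([]byte("hello")))

	// Marshal the wrapper: the wire form is just the CID's raw bytes.
	wrapped := pb.Cid{Cid: c}
	raw, err := wrapped.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(raw) == wrapped.Size()) // true

	// Unmarshal into a fresh wrapper and confirm the round trip.
	var back pb.Cid
	if err := back.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(back.Cid.Equals(c)) // true
}
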
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/message/pb/Makefile a/vendor/github.com/ipfs/go-bitswap/message/pb/Makefile | |
--- b/vendor/github.com/ipfs/go-bitswap/message/pb/Makefile 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/message/pb/Makefile 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,11 +0,0 @@ | |
-PB = $(wildcard *.proto) | |
-GO = $(PB:.proto=.pb.go) | |
- | |
-all: $(GO) | |
- | |
-%.pb.go: %.proto | |
- protoc --proto_path=$(GOPATH)/src:. --gogofaster_out=. $< | |
- | |
-clean: | |
- rm -f *.pb.go | |
- rm -f *.go | |
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/message/pb/message.pb.go a/vendor/github.com/ipfs/go-bitswap/message/pb/message.pb.go | |
--- b/vendor/github.com/ipfs/go-bitswap/message/pb/message.pb.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/message/pb/message.pb.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,1569 +0,0 @@ | |
-// Code generated by protoc-gen-gogo. DO NOT EDIT. | |
-// source: message.proto | |
- | |
-package bitswap_message_pb | |
- | |
-import ( | |
- fmt "fmt" | |
- _ "github.com/gogo/protobuf/gogoproto" | |
- proto "github.com/gogo/protobuf/proto" | |
- io "io" | |
- math "math" | |
- math_bits "math/bits" | |
-) | |
- | |
-// Reference imports to suppress errors if they are not otherwise used. | |
-var _ = proto.Marshal | |
-var _ = fmt.Errorf | |
-var _ = math.Inf | |
- | |
-// This is a compile-time assertion to ensure that this generated file | |
-// is compatible with the proto package it is being compiled against. | |
-// A compilation error at this line likely means your copy of the | |
-// proto package needs to be updated. | |
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package | |
- | |
-type Message_BlockPresenceType int32 | |
- | |
-const ( | |
- Message_Have Message_BlockPresenceType = 0 | |
- Message_DontHave Message_BlockPresenceType = 1 | |
-) | |
- | |
-var Message_BlockPresenceType_name = map[int32]string{ | |
- 0: "Have", | |
- 1: "DontHave", | |
-} | |
- | |
-var Message_BlockPresenceType_value = map[string]int32{ | |
- "Have": 0, | |
- "DontHave": 1, | |
-} | |
- | |
-func (x Message_BlockPresenceType) String() string { | |
- return proto.EnumName(Message_BlockPresenceType_name, int32(x)) | |
-} | |
- | |
-func (Message_BlockPresenceType) EnumDescriptor() ([]byte, []int) { | |
- return fileDescriptor_33c57e4bae7b9afd, []int{0, 0} | |
-} | |
- | |
-type Message_Wantlist_WantType int32 | |
- | |
-const ( | |
- Message_Wantlist_Block Message_Wantlist_WantType = 0 | |
- Message_Wantlist_Have Message_Wantlist_WantType = 1 | |
-) | |
- | |
-var Message_Wantlist_WantType_name = map[int32]string{ | |
- 0: "Block", | |
- 1: "Have", | |
-} | |
- | |
-var Message_Wantlist_WantType_value = map[string]int32{ | |
- "Block": 0, | |
- "Have": 1, | |
-} | |
- | |
-func (x Message_Wantlist_WantType) String() string { | |
- return proto.EnumName(Message_Wantlist_WantType_name, int32(x)) | |
-} | |
- | |
-func (Message_Wantlist_WantType) EnumDescriptor() ([]byte, []int) { | |
- return fileDescriptor_33c57e4bae7b9afd, []int{0, 0, 0} | |
-} | |
- | |
-type Message struct { | |
- Wantlist Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist,proto3" json:"wantlist"` | |
- Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` | |
- Payload []Message_Block `protobuf:"bytes,3,rep,name=payload,proto3" json:"payload"` | |
- BlockPresences []Message_BlockPresence `protobuf:"bytes,4,rep,name=blockPresences,proto3" json:"blockPresences"` | |
- PendingBytes int32 `protobuf:"varint,5,opt,name=pendingBytes,proto3" json:"pendingBytes,omitempty"` | |
-} | |
- | |
-func (m *Message) Reset() { *m = Message{} } | |
-func (m *Message) String() string { return proto.CompactTextString(m) } | |
-func (*Message) ProtoMessage() {} | |
-func (*Message) Descriptor() ([]byte, []int) { | |
- return fileDescriptor_33c57e4bae7b9afd, []int{0} | |
-} | |
-func (m *Message) XXX_Unmarshal(b []byte) error { | |
- return m.Unmarshal(b) | |
-} | |
-func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
- if deterministic { | |
- return xxx_messageInfo_Message.Marshal(b, m, deterministic) | |
- } else { | |
- b = b[:cap(b)] | |
- n, err := m.MarshalToSizedBuffer(b) | |
- if err != nil { | |
- return nil, err | |
- } | |
- return b[:n], nil | |
- } | |
-} | |
-func (m *Message) XXX_Merge(src proto.Message) { | |
- xxx_messageInfo_Message.Merge(m, src) | |
-} | |
-func (m *Message) XXX_Size() int { | |
- return m.Size() | |
-} | |
-func (m *Message) XXX_DiscardUnknown() { | |
- xxx_messageInfo_Message.DiscardUnknown(m) | |
-} | |
- | |
-var xxx_messageInfo_Message proto.InternalMessageInfo | |
- | |
-func (m *Message) GetWantlist() Message_Wantlist { | |
- if m != nil { | |
- return m.Wantlist | |
- } | |
- return Message_Wantlist{} | |
-} | |
- | |
-func (m *Message) GetBlocks() [][]byte { | |
- if m != nil { | |
- return m.Blocks | |
- } | |
- return nil | |
-} | |
- | |
-func (m *Message) GetPayload() []Message_Block { | |
- if m != nil { | |
- return m.Payload | |
- } | |
- return nil | |
-} | |
- | |
-func (m *Message) GetBlockPresences() []Message_BlockPresence { | |
- if m != nil { | |
- return m.BlockPresences | |
- } | |
- return nil | |
-} | |
- | |
-func (m *Message) GetPendingBytes() int32 { | |
- if m != nil { | |
- return m.PendingBytes | |
- } | |
- return 0 | |
-} | |
- | |
-type Message_Wantlist struct { | |
- Entries []Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries"` | |
- Full bool `protobuf:"varint,2,opt,name=full,proto3" json:"full,omitempty"` | |
-} | |
- | |
-func (m *Message_Wantlist) Reset() { *m = Message_Wantlist{} } | |
-func (m *Message_Wantlist) String() string { return proto.CompactTextString(m) } | |
-func (*Message_Wantlist) ProtoMessage() {} | |
-func (*Message_Wantlist) Descriptor() ([]byte, []int) { | |
- return fileDescriptor_33c57e4bae7b9afd, []int{0, 0} | |
-} | |
-func (m *Message_Wantlist) XXX_Unmarshal(b []byte) error { | |
- return m.Unmarshal(b) | |
-} | |
-func (m *Message_Wantlist) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
- if deterministic { | |
- return xxx_messageInfo_Message_Wantlist.Marshal(b, m, deterministic) | |
- } else { | |
- b = b[:cap(b)] | |
- n, err := m.MarshalToSizedBuffer(b) | |
- if err != nil { | |
- return nil, err | |
- } | |
- return b[:n], nil | |
- } | |
-} | |
-func (m *Message_Wantlist) XXX_Merge(src proto.Message) { | |
- xxx_messageInfo_Message_Wantlist.Merge(m, src) | |
-} | |
-func (m *Message_Wantlist) XXX_Size() int { | |
- return m.Size() | |
-} | |
-func (m *Message_Wantlist) XXX_DiscardUnknown() { | |
- xxx_messageInfo_Message_Wantlist.DiscardUnknown(m) | |
-} | |
- | |
-var xxx_messageInfo_Message_Wantlist proto.InternalMessageInfo | |
- | |
-func (m *Message_Wantlist) GetEntries() []Message_Wantlist_Entry { | |
- if m != nil { | |
- return m.Entries | |
- } | |
- return nil | |
-} | |
- | |
-func (m *Message_Wantlist) GetFull() bool { | |
- if m != nil { | |
- return m.Full | |
- } | |
- return false | |
-} | |
- | |
-type Message_Wantlist_Entry struct { | |
- Block Cid `protobuf:"bytes,1,opt,name=block,proto3,customtype=Cid" json:"block"` | |
- Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` | |
- Cancel bool `protobuf:"varint,3,opt,name=cancel,proto3" json:"cancel,omitempty"` | |
- WantType Message_Wantlist_WantType `protobuf:"varint,4,opt,name=wantType,proto3,enum=bitswap.message.pb.Message_Wantlist_WantType" json:"wantType,omitempty"` | |
- SendDontHave bool `protobuf:"varint,5,opt,name=sendDontHave,proto3" json:"sendDontHave,omitempty"` | |
-} | |
- | |
-func (m *Message_Wantlist_Entry) Reset() { *m = Message_Wantlist_Entry{} } | |
-func (m *Message_Wantlist_Entry) String() string { return proto.CompactTextString(m) } | |
-func (*Message_Wantlist_Entry) ProtoMessage() {} | |
-func (*Message_Wantlist_Entry) Descriptor() ([]byte, []int) { | |
- return fileDescriptor_33c57e4bae7b9afd, []int{0, 0, 0} | |
-} | |
-func (m *Message_Wantlist_Entry) XXX_Unmarshal(b []byte) error { | |
- return m.Unmarshal(b) | |
-} | |
-func (m *Message_Wantlist_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
- if deterministic { | |
- return xxx_messageInfo_Message_Wantlist_Entry.Marshal(b, m, deterministic) | |
- } else { | |
- b = b[:cap(b)] | |
- n, err := m.MarshalToSizedBuffer(b) | |
- if err != nil { | |
- return nil, err | |
- } | |
- return b[:n], nil | |
- } | |
-} | |
-func (m *Message_Wantlist_Entry) XXX_Merge(src proto.Message) { | |
- xxx_messageInfo_Message_Wantlist_Entry.Merge(m, src) | |
-} | |
-func (m *Message_Wantlist_Entry) XXX_Size() int { | |
- return m.Size() | |
-} | |
-func (m *Message_Wantlist_Entry) XXX_DiscardUnknown() { | |
- xxx_messageInfo_Message_Wantlist_Entry.DiscardUnknown(m) | |
-} | |
- | |
-var xxx_messageInfo_Message_Wantlist_Entry proto.InternalMessageInfo | |
- | |
-func (m *Message_Wantlist_Entry) GetPriority() int32 { | |
- if m != nil { | |
- return m.Priority | |
- } | |
- return 0 | |
-} | |
- | |
-func (m *Message_Wantlist_Entry) GetCancel() bool { | |
- if m != nil { | |
- return m.Cancel | |
- } | |
- return false | |
-} | |
- | |
-func (m *Message_Wantlist_Entry) GetWantType() Message_Wantlist_WantType { | |
- if m != nil { | |
- return m.WantType | |
- } | |
- return Message_Wantlist_Block | |
-} | |
- | |
-func (m *Message_Wantlist_Entry) GetSendDontHave() bool { | |
- if m != nil { | |
- return m.SendDontHave | |
- } | |
- return false | |
-} | |
- | |
-type Message_Block struct { | |
- Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` | |
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` | |
-} | |
- | |
-func (m *Message_Block) Reset() { *m = Message_Block{} } | |
-func (m *Message_Block) String() string { return proto.CompactTextString(m) } | |
-func (*Message_Block) ProtoMessage() {} | |
-func (*Message_Block) Descriptor() ([]byte, []int) { | |
- return fileDescriptor_33c57e4bae7b9afd, []int{0, 1} | |
-} | |
-func (m *Message_Block) XXX_Unmarshal(b []byte) error { | |
- return m.Unmarshal(b) | |
-} | |
-func (m *Message_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
- if deterministic { | |
- return xxx_messageInfo_Message_Block.Marshal(b, m, deterministic) | |
- } else { | |
- b = b[:cap(b)] | |
- n, err := m.MarshalToSizedBuffer(b) | |
- if err != nil { | |
- return nil, err | |
- } | |
- return b[:n], nil | |
- } | |
-} | |
-func (m *Message_Block) XXX_Merge(src proto.Message) { | |
- xxx_messageInfo_Message_Block.Merge(m, src) | |
-} | |
-func (m *Message_Block) XXX_Size() int { | |
- return m.Size() | |
-} | |
-func (m *Message_Block) XXX_DiscardUnknown() { | |
- xxx_messageInfo_Message_Block.DiscardUnknown(m) | |
-} | |
- | |
-var xxx_messageInfo_Message_Block proto.InternalMessageInfo | |
- | |
-func (m *Message_Block) GetPrefix() []byte { | |
- if m != nil { | |
- return m.Prefix | |
- } | |
- return nil | |
-} | |
- | |
-func (m *Message_Block) GetData() []byte { | |
- if m != nil { | |
- return m.Data | |
- } | |
- return nil | |
-} | |
- | |
-type Message_BlockPresence struct { | |
- Cid Cid `protobuf:"bytes,1,opt,name=cid,proto3,customtype=Cid" json:"cid"` | |
- Type Message_BlockPresenceType `protobuf:"varint,2,opt,name=type,proto3,enum=bitswap.message.pb.Message_BlockPresenceType" json:"type,omitempty"` | |
-} | |
- | |
-func (m *Message_BlockPresence) Reset() { *m = Message_BlockPresence{} } | |
-func (m *Message_BlockPresence) String() string { return proto.CompactTextString(m) } | |
-func (*Message_BlockPresence) ProtoMessage() {} | |
-func (*Message_BlockPresence) Descriptor() ([]byte, []int) { | |
- return fileDescriptor_33c57e4bae7b9afd, []int{0, 2} | |
-} | |
-func (m *Message_BlockPresence) XXX_Unmarshal(b []byte) error { | |
- return m.Unmarshal(b) | |
-} | |
-func (m *Message_BlockPresence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
- if deterministic { | |
- return xxx_messageInfo_Message_BlockPresence.Marshal(b, m, deterministic) | |
- } else { | |
- b = b[:cap(b)] | |
- n, err := m.MarshalToSizedBuffer(b) | |
- if err != nil { | |
- return nil, err | |
- } | |
- return b[:n], nil | |
- } | |
-} | |
-func (m *Message_BlockPresence) XXX_Merge(src proto.Message) { | |
- xxx_messageInfo_Message_BlockPresence.Merge(m, src) | |
-} | |
-func (m *Message_BlockPresence) XXX_Size() int { | |
- return m.Size() | |
-} | |
-func (m *Message_BlockPresence) XXX_DiscardUnknown() { | |
- xxx_messageInfo_Message_BlockPresence.DiscardUnknown(m) | |
-} | |
- | |
-var xxx_messageInfo_Message_BlockPresence proto.InternalMessageInfo | |
- | |
-func (m *Message_BlockPresence) GetType() Message_BlockPresenceType { | |
- if m != nil { | |
- return m.Type | |
- } | |
- return Message_Have | |
-} | |
- | |
-func init() { | |
- proto.RegisterEnum("bitswap.message.pb.Message_BlockPresenceType", Message_BlockPresenceType_name, Message_BlockPresenceType_value) | |
- proto.RegisterEnum("bitswap.message.pb.Message_Wantlist_WantType", Message_Wantlist_WantType_name, Message_Wantlist_WantType_value) | |
- proto.RegisterType((*Message)(nil), "bitswap.message.pb.Message") | |
- proto.RegisterType((*Message_Wantlist)(nil), "bitswap.message.pb.Message.Wantlist") | |
- proto.RegisterType((*Message_Wantlist_Entry)(nil), "bitswap.message.pb.Message.Wantlist.Entry") | |
- proto.RegisterType((*Message_Block)(nil), "bitswap.message.pb.Message.Block") | |
- proto.RegisterType((*Message_BlockPresence)(nil), "bitswap.message.pb.Message.BlockPresence") | |
-} | |
- | |
-func init() { proto.RegisterFile("message.proto", fileDescriptor_33c57e4bae7b9afd) } | |
- | |
-var fileDescriptor_33c57e4bae7b9afd = []byte{ | |
- // 497 bytes of a gzipped FileDescriptorProto | |
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xdf, 0x8a, 0xd3, 0x40, | |
- 0x14, 0xc6, 0x33, 0x4d, 0xd2, 0xc6, 0xd3, 0xee, 0x52, 0xe7, 0x42, 0x42, 0xc0, 0x34, 0x5b, 0xbc, | |
- 0x88, 0xca, 0x66, 0xa1, 0xfb, 0x04, 0x5b, 0xff, 0xa0, 0x82, 0x20, 0x83, 0xd0, 0xeb, 0xfc, 0x99, | |
- 0xd6, 0xc1, 0x6c, 0x12, 0x33, 0x53, 0xd7, 0xbe, 0x85, 0x8f, 0xb5, 0x37, 0xc2, 0x5e, 0x8a, 0xca, | |
- 0x22, 0xed, 0x8b, 0x48, 0x4e, 0xa6, 0x85, 0xba, 0xe2, 0xee, 0xdd, 0x9c, 0x33, 0xe7, 0xfb, 0x65, | |
- 0xbe, 0xef, 0x10, 0x38, 0x38, 0xe7, 0x52, 0xc6, 0x0b, 0x1e, 0x55, 0x75, 0xa9, 0x4a, 0x4a, 0x13, | |
- 0xa1, 0xe4, 0x45, 0x5c, 0x45, 0xbb, 0x76, 0xe2, 0x1d, 0x2f, 0x84, 0xfa, 0xb0, 0x4c, 0xa2, 0xb4, | |
- 0x3c, 0x3f, 0x59, 0x94, 0x8b, 0xf2, 0x04, 0x47, 0x93, 0xe5, 0x1c, 0x2b, 0x2c, 0xf0, 0xd4, 0x22, | |
- 0xc6, 0xbf, 0xba, 0xd0, 0x7b, 0xdb, 0xaa, 0xe9, 0x4b, 0x70, 0x2e, 0xe2, 0x42, 0xe5, 0x42, 0x2a, | |
- 0x97, 0x04, 0x24, 0xec, 0x4f, 0x1e, 0x45, 0x37, 0xbf, 0x10, 0xe9, 0xf1, 0x68, 0xa6, 0x67, 0xa7, | |
- 0xd6, 0xe5, 0xf5, 0xc8, 0x60, 0x3b, 0x2d, 0x7d, 0x00, 0xdd, 0x24, 0x2f, 0xd3, 0x8f, 0xd2, 0xed, | |
- 0x04, 0x66, 0x38, 0x60, 0xba, 0xa2, 0x67, 0xd0, 0xab, 0xe2, 0x55, 0x5e, 0xc6, 0x99, 0x6b, 0x06, | |
- 0x66, 0xd8, 0x9f, 0x1c, 0xfd, 0x0f, 0x3f, 0x6d, 0x44, 0x9a, 0xbd, 0xd5, 0xd1, 0x19, 0x1c, 0x22, | |
- 0xec, 0x5d, 0xcd, 0x25, 0x2f, 0x52, 0x2e, 0x5d, 0x0b, 0x49, 0x8f, 0x6f, 0x25, 0x6d, 0x15, 0x9a, | |
- 0xf8, 0x17, 0x86, 0x8e, 0x61, 0x50, 0xf1, 0x22, 0x13, 0xc5, 0x62, 0xba, 0x52, 0x5c, 0xba, 0x76, | |
- 0x40, 0x42, 0x9b, 0xed, 0xf5, 0xbc, 0x9f, 0x1d, 0x70, 0xb6, 0xa6, 0xe9, 0x1b, 0xe8, 0xf1, 0x42, | |
- 0xd5, 0x82, 0x4b, 0x97, 0xe0, 0x13, 0x9e, 0xdc, 0x25, 0xab, 0xe8, 0x45, 0xa1, 0xea, 0xd5, 0xd6, | |
- 0x95, 0x06, 0x50, 0x0a, 0xd6, 0x7c, 0x99, 0xe7, 0x6e, 0x27, 0x20, 0xa1, 0xc3, 0xf0, 0xec, 0x7d, | |
- 0x23, 0x60, 0xe3, 0x30, 0x3d, 0x02, 0x1b, 0x1f, 0x8b, 0x3b, 0x19, 0x4c, 0xfb, 0x8d, 0xf6, 0xc7, | |
- 0xf5, 0xc8, 0x7c, 0x26, 0x32, 0xd6, 0xde, 0x50, 0x0f, 0x9c, 0xaa, 0x16, 0x65, 0x2d, 0xd4, 0x0a, | |
- 0x21, 0x36, 0xdb, 0xd5, 0xcd, 0x36, 0xd2, 0xb8, 0x48, 0x79, 0xee, 0x9a, 0x88, 0xd7, 0x15, 0x7d, | |
- 0xdd, 0x6e, 0xfb, 0xfd, 0xaa, 0xe2, 0xae, 0x15, 0x90, 0xf0, 0x70, 0x72, 0x7c, 0x27, 0x07, 0x33, | |
- 0x2d, 0x62, 0x3b, 0x79, 0x13, 0x9e, 0xe4, 0x45, 0xf6, 0xbc, 0x2c, 0xd4, 0xab, 0xf8, 0x33, 0xc7, | |
- 0xf0, 0x1c, 0xb6, 0xd7, 0x1b, 0x8f, 0xda, 0xec, 0x70, 0xfe, 0x1e, 0xd8, 0xb8, 0x93, 0xa1, 0x41, | |
- 0x1d, 0xb0, 0x9a, 0xeb, 0x21, 0xf1, 0x4e, 0x75, 0xb3, 0x79, 0x70, 0x55, 0xf3, 0xb9, 0xf8, 0xd2, | |
- 0x1a, 0x66, 0xba, 0x6a, 0x52, 0xca, 0x62, 0x15, 0xa3, 0xc1, 0x01, 0xc3, 0xb3, 0xf7, 0x09, 0x0e, | |
- 0xf6, 0xb6, 0x4b, 0x1f, 0x82, 0x99, 0x8a, 0xec, 0x5f, 0x51, 0x35, 0x7d, 0x7a, 0x06, 0x96, 0x6a, | |
- 0x0c, 0x77, 0x6e, 0x37, 0xbc, 0xc7, 0x45, 0xc3, 0x28, 0x1d, 0x3f, 0x85, 0xfb, 0x37, 0xae, 0x76, | |
- 0x36, 0x0c, 0x3a, 0x00, 0x67, 0xeb, 0x79, 0x48, 0xa6, 0xee, 0xe5, 0xda, 0x27, 0x57, 0x6b, 0x9f, | |
- 0xfc, 0x5e, 0xfb, 0xe4, 0xeb, 0xc6, 0x37, 0xae, 0x36, 0xbe, 0xf1, 0x7d, 0xe3, 0x1b, 0x49, 0x17, | |
- 0xff, 0xbf, 0xd3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x8a, 0xaf, 0x83, 0xd3, 0x03, 0x00, | |
- 0x00, | |
-} | |
- | |
-func (m *Message) Marshal() (dAtA []byte, err error) { | |
- size := m.Size() | |
- dAtA = make([]byte, size) | |
- n, err := m.MarshalToSizedBuffer(dAtA[:size]) | |
- if err != nil { | |
- return nil, err | |
- } | |
- return dAtA[:n], nil | |
-} | |
- | |
-func (m *Message) MarshalTo(dAtA []byte) (int, error) { | |
- size := m.Size() | |
- return m.MarshalToSizedBuffer(dAtA[:size]) | |
-} | |
- | |
-func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { | |
- i := len(dAtA) | |
- _ = i | |
- var l int | |
- _ = l | |
- if m.PendingBytes != 0 { | |
- i = encodeVarintMessage(dAtA, i, uint64(m.PendingBytes)) | |
- i-- | |
- dAtA[i] = 0x28 | |
- } | |
- if len(m.BlockPresences) > 0 { | |
- for iNdEx := len(m.BlockPresences) - 1; iNdEx >= 0; iNdEx-- { | |
- { | |
- size, err := m.BlockPresences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) | |
- if err != nil { | |
- return 0, err | |
- } | |
- i -= size | |
- i = encodeVarintMessage(dAtA, i, uint64(size)) | |
- } | |
- i-- | |
- dAtA[i] = 0x22 | |
- } | |
- } | |
- if len(m.Payload) > 0 { | |
- for iNdEx := len(m.Payload) - 1; iNdEx >= 0; iNdEx-- { | |
- { | |
- size, err := m.Payload[iNdEx].MarshalToSizedBuffer(dAtA[:i]) | |
- if err != nil { | |
- return 0, err | |
- } | |
- i -= size | |
- i = encodeVarintMessage(dAtA, i, uint64(size)) | |
- } | |
- i-- | |
- dAtA[i] = 0x1a | |
- } | |
- } | |
- if len(m.Blocks) > 0 { | |
- for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { | |
- i -= len(m.Blocks[iNdEx]) | |
- copy(dAtA[i:], m.Blocks[iNdEx]) | |
- i = encodeVarintMessage(dAtA, i, uint64(len(m.Blocks[iNdEx]))) | |
- i-- | |
- dAtA[i] = 0x12 | |
- } | |
- } | |
- { | |
- size, err := m.Wantlist.MarshalToSizedBuffer(dAtA[:i]) | |
- if err != nil { | |
- return 0, err | |
- } | |
- i -= size | |
- i = encodeVarintMessage(dAtA, i, uint64(size)) | |
- } | |
- i-- | |
- dAtA[i] = 0xa | |
- return len(dAtA) - i, nil | |
-} | |
- | |
-func (m *Message_Wantlist) Marshal() (dAtA []byte, err error) { | |
- size := m.Size() | |
- dAtA = make([]byte, size) | |
- n, err := m.MarshalToSizedBuffer(dAtA[:size]) | |
- if err != nil { | |
- return nil, err | |
- } | |
- return dAtA[:n], nil | |
-} | |
- | |
-func (m *Message_Wantlist) MarshalTo(dAtA []byte) (int, error) { | |
- size := m.Size() | |
- return m.MarshalToSizedBuffer(dAtA[:size]) | |
-} | |
- | |
-func (m *Message_Wantlist) MarshalToSizedBuffer(dAtA []byte) (int, error) { | |
- i := len(dAtA) | |
- _ = i | |
- var l int | |
- _ = l | |
- if m.Full { | |
- i-- | |
- if m.Full { | |
- dAtA[i] = 1 | |
- } else { | |
- dAtA[i] = 0 | |
- } | |
- i-- | |
- dAtA[i] = 0x10 | |
- } | |
- if len(m.Entries) > 0 { | |
- for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { | |
- { | |
- size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) | |
- if err != nil { | |
- return 0, err | |
- } | |
- i -= size | |
- i = encodeVarintMessage(dAtA, i, uint64(size)) | |
- } | |
- i-- | |
- dAtA[i] = 0xa | |
- } | |
- } | |
- return len(dAtA) - i, nil | |
-} | |
- | |
-func (m *Message_Wantlist_Entry) Marshal() (dAtA []byte, err error) { | |
- size := m.Size() | |
- dAtA = make([]byte, size) | |
- n, err := m.MarshalToSizedBuffer(dAtA[:size]) | |
- if err != nil { | |
- return nil, err | |
- } | |
- return dAtA[:n], nil | |
-} | |
- | |
-func (m *Message_Wantlist_Entry) MarshalTo(dAtA []byte) (int, error) { | |
- size := m.Size() | |
- return m.MarshalToSizedBuffer(dAtA[:size]) | |
-} | |
- | |
-func (m *Message_Wantlist_Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) { | |
- i := len(dAtA) | |
- _ = i | |
- var l int | |
- _ = l | |
- if m.SendDontHave { | |
- i-- | |
- if m.SendDontHave { | |
- dAtA[i] = 1 | |
- } else { | |
- dAtA[i] = 0 | |
- } | |
- i-- | |
- dAtA[i] = 0x28 | |
- } | |
- if m.WantType != 0 { | |
- i = encodeVarintMessage(dAtA, i, uint64(m.WantType)) | |
- i-- | |
- dAtA[i] = 0x20 | |
- } | |
- if m.Cancel { | |
- i-- | |
- if m.Cancel { | |
- dAtA[i] = 1 | |
- } else { | |
- dAtA[i] = 0 | |
- } | |
- i-- | |
- dAtA[i] = 0x18 | |
- } | |
- if m.Priority != 0 { | |
- i = encodeVarintMessage(dAtA, i, uint64(m.Priority)) | |
- i-- | |
- dAtA[i] = 0x10 | |
- } | |
- { | |
- size := m.Block.Size() | |
- i -= size | |
- if _, err := m.Block.MarshalTo(dAtA[i:]); err != nil { | |
- return 0, err | |
- } | |
- i = encodeVarintMessage(dAtA, i, uint64(size)) | |
- } | |
- i-- | |
- dAtA[i] = 0xa | |
- return len(dAtA) - i, nil | |
-} | |
- | |
-func (m *Message_Block) Marshal() (dAtA []byte, err error) { | |
- size := m.Size() | |
- dAtA = make([]byte, size) | |
- n, err := m.MarshalToSizedBuffer(dAtA[:size]) | |
- if err != nil { | |
- return nil, err | |
- } | |
- return dAtA[:n], nil | |
-} | |
- | |
-func (m *Message_Block) MarshalTo(dAtA []byte) (int, error) { | |
- size := m.Size() | |
- return m.MarshalToSizedBuffer(dAtA[:size]) | |
-} | |
- | |
-func (m *Message_Block) MarshalToSizedBuffer(dAtA []byte) (int, error) { | |
- i := len(dAtA) | |
- _ = i | |
- var l int | |
- _ = l | |
- if len(m.Data) > 0 { | |
- i -= len(m.Data) | |
- copy(dAtA[i:], m.Data) | |
- i = encodeVarintMessage(dAtA, i, uint64(len(m.Data))) | |
- i-- | |
- dAtA[i] = 0x12 | |
- } | |
- if len(m.Prefix) > 0 { | |
- i -= len(m.Prefix) | |
- copy(dAtA[i:], m.Prefix) | |
- i = encodeVarintMessage(dAtA, i, uint64(len(m.Prefix))) | |
- i-- | |
- dAtA[i] = 0xa | |
- } | |
- return len(dAtA) - i, nil | |
-} | |
- | |
-func (m *Message_BlockPresence) Marshal() (dAtA []byte, err error) { | |
- size := m.Size() | |
- dAtA = make([]byte, size) | |
- n, err := m.MarshalToSizedBuffer(dAtA[:size]) | |
- if err != nil { | |
- return nil, err | |
- } | |
- return dAtA[:n], nil | |
-} | |
- | |
-func (m *Message_BlockPresence) MarshalTo(dAtA []byte) (int, error) { | |
- size := m.Size() | |
- return m.MarshalToSizedBuffer(dAtA[:size]) | |
-} | |
- | |
-func (m *Message_BlockPresence) MarshalToSizedBuffer(dAtA []byte) (int, error) { | |
- i := len(dAtA) | |
- _ = i | |
- var l int | |
- _ = l | |
- if m.Type != 0 { | |
- i = encodeVarintMessage(dAtA, i, uint64(m.Type)) | |
- i-- | |
- dAtA[i] = 0x10 | |
- } | |
- { | |
- size := m.Cid.Size() | |
- i -= size | |
- if _, err := m.Cid.MarshalTo(dAtA[i:]); err != nil { | |
- return 0, err | |
- } | |
- i = encodeVarintMessage(dAtA, i, uint64(size)) | |
- } | |
- i-- | |
- dAtA[i] = 0xa | |
- return len(dAtA) - i, nil | |
-} | |
- | |
-func encodeVarintMessage(dAtA []byte, offset int, v uint64) int { | |
- offset -= sovMessage(v) | |
- base := offset | |
- for v >= 1<<7 { | |
- dAtA[offset] = uint8(v&0x7f | 0x80) | |
- v >>= 7 | |
- offset++ | |
- } | |
- dAtA[offset] = uint8(v) | |
- return base | |
-} | |
-func (m *Message) Size() (n int) { | |
- if m == nil { | |
- return 0 | |
- } | |
- var l int | |
- _ = l | |
- l = m.Wantlist.Size() | |
- n += 1 + l + sovMessage(uint64(l)) | |
- if len(m.Blocks) > 0 { | |
- for _, b := range m.Blocks { | |
- l = len(b) | |
- n += 1 + l + sovMessage(uint64(l)) | |
- } | |
- } | |
- if len(m.Payload) > 0 { | |
- for _, e := range m.Payload { | |
- l = e.Size() | |
- n += 1 + l + sovMessage(uint64(l)) | |
- } | |
- } | |
- if len(m.BlockPresences) > 0 { | |
- for _, e := range m.BlockPresences { | |
- l = e.Size() | |
- n += 1 + l + sovMessage(uint64(l)) | |
- } | |
- } | |
- if m.PendingBytes != 0 { | |
- n += 1 + sovMessage(uint64(m.PendingBytes)) | |
- } | |
- return n | |
-} | |
- | |
-func (m *Message_Wantlist) Size() (n int) { | |
- if m == nil { | |
- return 0 | |
- } | |
- var l int | |
- _ = l | |
- if len(m.Entries) > 0 { | |
- for _, e := range m.Entries { | |
- l = e.Size() | |
- n += 1 + l + sovMessage(uint64(l)) | |
- } | |
- } | |
- if m.Full { | |
- n += 2 | |
- } | |
- return n | |
-} | |
- | |
-func (m *Message_Wantlist_Entry) Size() (n int) { | |
- if m == nil { | |
- return 0 | |
- } | |
- var l int | |
- _ = l | |
- l = m.Block.Size() | |
- n += 1 + l + sovMessage(uint64(l)) | |
- if m.Priority != 0 { | |
- n += 1 + sovMessage(uint64(m.Priority)) | |
- } | |
- if m.Cancel { | |
- n += 2 | |
- } | |
- if m.WantType != 0 { | |
- n += 1 + sovMessage(uint64(m.WantType)) | |
- } | |
- if m.SendDontHave { | |
- n += 2 | |
- } | |
- return n | |
-} | |
- | |
-func (m *Message_Block) Size() (n int) { | |
- if m == nil { | |
- return 0 | |
- } | |
- var l int | |
- _ = l | |
- l = len(m.Prefix) | |
- if l > 0 { | |
- n += 1 + l + sovMessage(uint64(l)) | |
- } | |
- l = len(m.Data) | |
- if l > 0 { | |
- n += 1 + l + sovMessage(uint64(l)) | |
- } | |
- return n | |
-} | |
- | |
-func (m *Message_BlockPresence) Size() (n int) { | |
- if m == nil { | |
- return 0 | |
- } | |
- var l int | |
- _ = l | |
- l = m.Cid.Size() | |
- n += 1 + l + sovMessage(uint64(l)) | |
- if m.Type != 0 { | |
- n += 1 + sovMessage(uint64(m.Type)) | |
- } | |
- return n | |
-} | |
- | |
-func sovMessage(x uint64) (n int) { | |
- return (math_bits.Len64(x|1) + 6) / 7 | |
-} | |
-func sozMessage(x uint64) (n int) { | |
- return sovMessage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) | |
-} | |
-func (m *Message) Unmarshal(dAtA []byte) error { | |
- l := len(dAtA) | |
- iNdEx := 0 | |
- for iNdEx < l { | |
- preIndex := iNdEx | |
- var wire uint64 | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- wire |= uint64(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- fieldNum := int32(wire >> 3) | |
- wireType := int(wire & 0x7) | |
- if wireType == 4 { | |
- return fmt.Errorf("proto: Message: wiretype end group for non-group") | |
- } | |
- if fieldNum <= 0 { | |
- return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) | |
- } | |
- switch fieldNum { | |
- case 1: | |
- if wireType != 2 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field Wantlist", wireType) | |
- } | |
- var msglen int | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- msglen |= int(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- if msglen < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- postIndex := iNdEx + msglen | |
- if postIndex < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- if postIndex > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- if err := m.Wantlist.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { | |
- return err | |
- } | |
- iNdEx = postIndex | |
- case 2: | |
- if wireType != 2 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) | |
- } | |
- var byteLen int | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- byteLen |= int(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- if byteLen < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- postIndex := iNdEx + byteLen | |
- if postIndex < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- if postIndex > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- m.Blocks = append(m.Blocks, make([]byte, postIndex-iNdEx)) | |
- copy(m.Blocks[len(m.Blocks)-1], dAtA[iNdEx:postIndex]) | |
- iNdEx = postIndex | |
- case 3: | |
- if wireType != 2 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) | |
- } | |
- var msglen int | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- msglen |= int(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- if msglen < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- postIndex := iNdEx + msglen | |
- if postIndex < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- if postIndex > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- m.Payload = append(m.Payload, Message_Block{}) | |
- if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { | |
- return err | |
- } | |
- iNdEx = postIndex | |
- case 4: | |
- if wireType != 2 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field BlockPresences", wireType) | |
- } | |
- var msglen int | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- msglen |= int(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- if msglen < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- postIndex := iNdEx + msglen | |
- if postIndex < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- if postIndex > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- m.BlockPresences = append(m.BlockPresences, Message_BlockPresence{}) | |
- if err := m.BlockPresences[len(m.BlockPresences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { | |
- return err | |
- } | |
- iNdEx = postIndex | |
- case 5: | |
- if wireType != 0 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field PendingBytes", wireType) | |
- } | |
- m.PendingBytes = 0 | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- m.PendingBytes |= int32(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- default: | |
- iNdEx = preIndex | |
- skippy, err := skipMessage(dAtA[iNdEx:]) | |
- if err != nil { | |
- return err | |
- } | |
- if (skippy < 0) || (iNdEx+skippy) < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- if (iNdEx + skippy) > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- iNdEx += skippy | |
- } | |
- } | |
- | |
- if iNdEx > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- return nil | |
-} | |
-func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { | |
- l := len(dAtA) | |
- iNdEx := 0 | |
- for iNdEx < l { | |
- preIndex := iNdEx | |
- var wire uint64 | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- wire |= uint64(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- fieldNum := int32(wire >> 3) | |
- wireType := int(wire & 0x7) | |
- if wireType == 4 { | |
- return fmt.Errorf("proto: Wantlist: wiretype end group for non-group") | |
- } | |
- if fieldNum <= 0 { | |
- return fmt.Errorf("proto: Wantlist: illegal tag %d (wire type %d)", fieldNum, wire) | |
- } | |
- switch fieldNum { | |
- case 1: | |
- if wireType != 2 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) | |
- } | |
- var msglen int | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- msglen |= int(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- if msglen < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- postIndex := iNdEx + msglen | |
- if postIndex < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- if postIndex > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- m.Entries = append(m.Entries, Message_Wantlist_Entry{}) | |
- if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { | |
- return err | |
- } | |
- iNdEx = postIndex | |
- case 2: | |
- if wireType != 0 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field Full", wireType) | |
- } | |
- var v int | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- v |= int(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- m.Full = bool(v != 0) | |
- default: | |
- iNdEx = preIndex | |
- skippy, err := skipMessage(dAtA[iNdEx:]) | |
- if err != nil { | |
- return err | |
- } | |
- if (skippy < 0) || (iNdEx+skippy) < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- if (iNdEx + skippy) > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- iNdEx += skippy | |
- } | |
- } | |
- | |
- if iNdEx > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- return nil | |
-} | |
-func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { | |
- l := len(dAtA) | |
- iNdEx := 0 | |
- for iNdEx < l { | |
- preIndex := iNdEx | |
- var wire uint64 | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- wire |= uint64(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- fieldNum := int32(wire >> 3) | |
- wireType := int(wire & 0x7) | |
- if wireType == 4 { | |
- return fmt.Errorf("proto: Entry: wiretype end group for non-group") | |
- } | |
- if fieldNum <= 0 { | |
- return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) | |
- } | |
- switch fieldNum { | |
- case 1: | |
- if wireType != 2 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) | |
- } | |
- var byteLen int | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- byteLen |= int(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- if byteLen < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- postIndex := iNdEx + byteLen | |
- if postIndex < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- if postIndex > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { | |
- return err | |
- } | |
- iNdEx = postIndex | |
- case 2: | |
- if wireType != 0 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) | |
- } | |
- m.Priority = 0 | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- m.Priority |= int32(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- case 3: | |
- if wireType != 0 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field Cancel", wireType) | |
- } | |
- var v int | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- v |= int(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- m.Cancel = bool(v != 0) | |
- case 4: | |
- if wireType != 0 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field WantType", wireType) | |
- } | |
- m.WantType = 0 | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- m.WantType |= Message_Wantlist_WantType(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- case 5: | |
- if wireType != 0 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field SendDontHave", wireType) | |
- } | |
- var v int | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- v |= int(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- m.SendDontHave = bool(v != 0) | |
- default: | |
- iNdEx = preIndex | |
- skippy, err := skipMessage(dAtA[iNdEx:]) | |
- if err != nil { | |
- return err | |
- } | |
- if (skippy < 0) || (iNdEx+skippy) < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- if (iNdEx + skippy) > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- iNdEx += skippy | |
- } | |
- } | |
- | |
- if iNdEx > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- return nil | |
-} | |
-func (m *Message_Block) Unmarshal(dAtA []byte) error { | |
- l := len(dAtA) | |
- iNdEx := 0 | |
- for iNdEx < l { | |
- preIndex := iNdEx | |
- var wire uint64 | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- wire |= uint64(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- fieldNum := int32(wire >> 3) | |
- wireType := int(wire & 0x7) | |
- if wireType == 4 { | |
- return fmt.Errorf("proto: Block: wiretype end group for non-group") | |
- } | |
- if fieldNum <= 0 { | |
- return fmt.Errorf("proto: Block: illegal tag %d (wire type %d)", fieldNum, wire) | |
- } | |
- switch fieldNum { | |
- case 1: | |
- if wireType != 2 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) | |
- } | |
- var byteLen int | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- byteLen |= int(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- if byteLen < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- postIndex := iNdEx + byteLen | |
- if postIndex < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- if postIndex > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- m.Prefix = append(m.Prefix[:0], dAtA[iNdEx:postIndex]...) | |
- if m.Prefix == nil { | |
- m.Prefix = []byte{} | |
- } | |
- iNdEx = postIndex | |
- case 2: | |
- if wireType != 2 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) | |
- } | |
- var byteLen int | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- byteLen |= int(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- if byteLen < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- postIndex := iNdEx + byteLen | |
- if postIndex < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- if postIndex > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) | |
- if m.Data == nil { | |
- m.Data = []byte{} | |
- } | |
- iNdEx = postIndex | |
- default: | |
- iNdEx = preIndex | |
- skippy, err := skipMessage(dAtA[iNdEx:]) | |
- if err != nil { | |
- return err | |
- } | |
- if (skippy < 0) || (iNdEx+skippy) < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- if (iNdEx + skippy) > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- iNdEx += skippy | |
- } | |
- } | |
- | |
- if iNdEx > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- return nil | |
-} | |
-func (m *Message_BlockPresence) Unmarshal(dAtA []byte) error { | |
- l := len(dAtA) | |
- iNdEx := 0 | |
- for iNdEx < l { | |
- preIndex := iNdEx | |
- var wire uint64 | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- wire |= uint64(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- fieldNum := int32(wire >> 3) | |
- wireType := int(wire & 0x7) | |
- if wireType == 4 { | |
- return fmt.Errorf("proto: BlockPresence: wiretype end group for non-group") | |
- } | |
- if fieldNum <= 0 { | |
- return fmt.Errorf("proto: BlockPresence: illegal tag %d (wire type %d)", fieldNum, wire) | |
- } | |
- switch fieldNum { | |
- case 1: | |
- if wireType != 2 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) | |
- } | |
- var byteLen int | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- byteLen |= int(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- if byteLen < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- postIndex := iNdEx + byteLen | |
- if postIndex < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- if postIndex > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- if err := m.Cid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { | |
- return err | |
- } | |
- iNdEx = postIndex | |
- case 2: | |
- if wireType != 0 { | |
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) | |
- } | |
- m.Type = 0 | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- m.Type |= Message_BlockPresenceType(b&0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- default: | |
- iNdEx = preIndex | |
- skippy, err := skipMessage(dAtA[iNdEx:]) | |
- if err != nil { | |
- return err | |
- } | |
- if (skippy < 0) || (iNdEx+skippy) < 0 { | |
- return ErrInvalidLengthMessage | |
- } | |
- if (iNdEx + skippy) > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- iNdEx += skippy | |
- } | |
- } | |
- | |
- if iNdEx > l { | |
- return io.ErrUnexpectedEOF | |
- } | |
- return nil | |
-} | |
-func skipMessage(dAtA []byte) (n int, err error) { | |
- l := len(dAtA) | |
- iNdEx := 0 | |
- depth := 0 | |
- for iNdEx < l { | |
- var wire uint64 | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return 0, ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return 0, io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- wire |= (uint64(b) & 0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- wireType := int(wire & 0x7) | |
- switch wireType { | |
- case 0: | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return 0, ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return 0, io.ErrUnexpectedEOF | |
- } | |
- iNdEx++ | |
- if dAtA[iNdEx-1] < 0x80 { | |
- break | |
- } | |
- } | |
- case 1: | |
- iNdEx += 8 | |
- case 2: | |
- var length int | |
- for shift := uint(0); ; shift += 7 { | |
- if shift >= 64 { | |
- return 0, ErrIntOverflowMessage | |
- } | |
- if iNdEx >= l { | |
- return 0, io.ErrUnexpectedEOF | |
- } | |
- b := dAtA[iNdEx] | |
- iNdEx++ | |
- length |= (int(b) & 0x7F) << shift | |
- if b < 0x80 { | |
- break | |
- } | |
- } | |
- if length < 0 { | |
- return 0, ErrInvalidLengthMessage | |
- } | |
- iNdEx += length | |
- case 3: | |
- depth++ | |
- case 4: | |
- if depth == 0 { | |
- return 0, ErrUnexpectedEndOfGroupMessage | |
- } | |
- depth-- | |
- case 5: | |
- iNdEx += 4 | |
- default: | |
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType) | |
- } | |
- if iNdEx < 0 { | |
- return 0, ErrInvalidLengthMessage | |
- } | |
- if depth == 0 { | |
- return iNdEx, nil | |
- } | |
- } | |
- return 0, io.ErrUnexpectedEOF | |
-} | |
- | |
-var ( | |
- ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") | |
- ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") | |
- ErrUnexpectedEndOfGroupMessage = fmt.Errorf("proto: unexpected end of group") | |
-) | |
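The generated encodeVarintMessage and sovMessage helpers above are standard protobuf base-128 varints, written backwards into a buffer that Size() pre-computed. The following is a minimal standalone sketch (illustrative only, standard library imports, not part of the vendored code) of the same encoding plus the 0x28 tag byte that MarshalToSizedBuffer writes for pendingBytes:

package main

import (
	"fmt"
	"math/bits"
)

// sovVarint mirrors sovMessage: the number of bytes needed to encode x
// as a base-128 varint (always at least 1).
func sovVarint(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// putVarint mirrors encodeVarintMessage, but appends forward instead of
// writing backwards into a pre-sized buffer.
func putVarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

func main() {
	// Field 5 (pendingBytes) with wire type 0 (varint) gives tag byte 0x28,
	// the constant written by Message.MarshalToSizedBuffer above.
	fmt.Printf("tag: %#x\n", byte(5<<3|0)) // tag: 0x28

	// A pendingBytes value of 300 needs two varint bytes: 0xac 0x02.
	fmt.Println("size:", sovVarint(300))
	fmt.Printf("bytes: % x\n", putVarint(nil, 300))
}

Writing from the end of a sized buffer is what lets the generated Marshal allocate exactly once; the forward-appending variant here trades that for readability.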
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/message/pb/message.proto a/vendor/github.com/ipfs/go-bitswap/message/pb/message.proto | |
--- b/vendor/github.com/ipfs/go-bitswap/message/pb/message.proto 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/message/pb/message.proto 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,46 +0,0 @@ | |
-syntax = "proto3"; | |
- | |
-package bitswap.message.pb; | |
- | |
-import "github.com/gogo/protobuf/gogoproto/gogo.proto"; | |
- | |
-message Message { | |
- | |
- message Wantlist { | |
- enum WantType { | |
- Block = 0; | |
- Have = 1; | |
- } | |
- | |
- message Entry { | |
- bytes block = 1 [(gogoproto.customtype) = "Cid", (gogoproto.nullable) = false]; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0) | |
- int32 priority = 2; // the priority (normalized). defaults to 1 | 
- bool cancel = 3; // whether this revokes an entry | |
- WantType wantType = 4; // Note: defaults to enum 0, ie Block | |
- bool sendDontHave = 5; // Note: defaults to false | |
- } | |
- | |
- repeated Entry entries = 1 [(gogoproto.nullable) = false]; // a list of wantlist entries | |
- bool full = 2; // whether this is the full wantlist. defaults to false | 
- } | |
- | |
- message Block { | |
- bytes prefix = 1; // CID prefix (cid version, multicodec and multihash prefix (type + length)) | 
- bytes data = 2; | |
- } | |
- | |
- enum BlockPresenceType { | |
- Have = 0; | |
- DontHave = 1; | |
- } | |
- message BlockPresence { | |
- bytes cid = 1 [(gogoproto.customtype) = "Cid", (gogoproto.nullable) = false]; | |
- BlockPresenceType type = 2; | |
- } | |
- | |
- Wantlist wantlist = 1 [(gogoproto.nullable) = false]; | |
- repeated bytes blocks = 2; // used to send Blocks in bitswap 1.0.0 | |
- repeated Block payload = 3 [(gogoproto.nullable) = false]; // used to send Blocks in bitswap 1.1.0 | |
- repeated BlockPresence blockPresences = 4 [(gogoproto.nullable) = false]; | |
- int32 pendingBytes = 5; | |
-} | |
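The schema above maps directly onto the generated accessors removed earlier in this patch. As a hedged sketch (illustrative only; it assumes the generated package is imported under the alias pb and leaves the custom Cid type aside), decoding raw message bytes and walking the wantlist looks roughly like this:

package main

import (
	"fmt"
	"log"

	pb "github.com/ipfs/go-bitswap/message/pb"
)

// printWantlist is a sketch only: it unmarshals raw message bytes with the
// generated Unmarshal and reads fields through the generated accessors shown
// above (GetEntries, GetPriority, GetCancel, GetWantType, GetSendDontHave).
// The block CID itself lives in each entry's Block field (the custom Cid type)
// and is skipped here to keep the example small.
func printWantlist(raw []byte) {
	var msg pb.Message
	if err := msg.Unmarshal(raw); err != nil {
		log.Fatal(err)
	}
	fmt.Println("full wantlist:", msg.Wantlist.GetFull())
	for _, e := range msg.Wantlist.GetEntries() {
		fmt.Printf("priority=%d cancel=%v wantType=%v sendDontHave=%v\n",
			e.GetPriority(), e.GetCancel(), e.GetWantType(), e.GetSendDontHave())
	}
	fmt.Println("pending bytes:", msg.GetPendingBytes())
}

func main() {
	// An empty input decodes to a message with all zero values.
	printWantlist(nil)
}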
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/metrics/metrics.go a/vendor/github.com/ipfs/go-bitswap/metrics/metrics.go | |
--- b/vendor/github.com/ipfs/go-bitswap/metrics/metrics.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/metrics/metrics.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,46 +0,0 @@ | |
-package metrics | |
- | |
-import ( | |
- "context" | |
- | |
- "github.com/ipfs/go-metrics-interface" | |
-) | |
- | |
-var ( | |
- // the 1<<18+15 is to observe old file chunks that are 1<<18 + 14 in size | |
- metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} | |
- | |
- timeMetricsBuckets = []float64{1, 10, 30, 60, 90, 120, 600} | |
-) | |
- | |
-func DupHist(ctx context.Context) metrics.Histogram { | |
- return metrics.NewCtx(ctx, "recv_dup_blocks_bytes", "Summary of duplicate data blocks recived").Histogram(metricsBuckets) | |
-} | |
- | |
-func AllHist(ctx context.Context) metrics.Histogram { | |
- return metrics.NewCtx(ctx, "recv_all_blocks_bytes", "Summary of all data blocks recived").Histogram(metricsBuckets) | |
-} | |
- | |
-func SentHist(ctx context.Context) metrics.Histogram { | |
- return metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by this bitswap").Histogram(metricsBuckets) | |
-} | |
- | |
-func SendTimeHist(ctx context.Context) metrics.Histogram { | |
- return metrics.NewCtx(ctx, "send_times", "Histogram of how long it takes to send messages in this bitswap").Histogram(timeMetricsBuckets) | |
-} | |
- | |
-func PendingEngineGauge(ctx context.Context) metrics.Gauge { | |
- return metrics.NewCtx(ctx, "pending_tasks", "Total number of pending tasks").Gauge() | |
-} | |
- | |
-func ActiveEngineGauge(ctx context.Context) metrics.Gauge { | |
- return metrics.NewCtx(ctx, "active_tasks", "Total number of active tasks").Gauge() | |
-} | |
- | |
-func PendingBlocksGauge(ctx context.Context) metrics.Gauge { | |
- return metrics.NewCtx(ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() | |
-} | |
- | |
-func ActiveBlocksGauge(ctx context.Context) metrics.Gauge { | |
- return metrics.NewCtx(ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() | |
-} | |
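These constructors only create the instruments; callers elsewhere in bitswap feed them. A hedged usage sketch (illustrative only), assuming Histogram.Observe and Gauge.Inc/Dec from go-metrics-interface:

package main

import (
	"context"

	"github.com/ipfs/go-bitswap/metrics"
)

func main() {
	ctx := context.Background() // metrics are scoped through the context, as in NewCtx above

	allHist := metrics.AllHist(ctx)
	dupHist := metrics.DupHist(ctx)
	pending := metrics.PendingEngineGauge(ctx)

	// Assumption: Histogram.Observe and Gauge.Inc/Dec are part of
	// github.com/ipfs/go-metrics-interface.
	blockSize := float64(256 * 1024)
	allHist.Observe(blockSize) // recorded for every received block
	dupHist.Observe(blockSize) // recorded only when the block was a duplicate

	pending.Inc() // an engine task was queued...
	pending.Dec() // ...and later completed
}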
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/network/connecteventmanager.go a/vendor/github.com/ipfs/go-bitswap/network/connecteventmanager.go | |
--- b/vendor/github.com/ipfs/go-bitswap/network/connecteventmanager.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/network/connecteventmanager.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,218 +0,0 @@ | |
-package network | |
- | |
-import ( | |
- "sync" | |
- | |
- "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-type ConnectionListener interface { | |
- PeerConnected(peer.ID) | |
- PeerDisconnected(peer.ID) | |
-} | |
- | |
-type state byte | |
- | |
-const ( | |
- stateDisconnected = iota | |
- stateResponsive | |
- stateUnresponsive | |
-) | |
- | |
-type connectEventManager struct { | |
- connListeners []ConnectionListener | |
- lk sync.RWMutex | |
- cond sync.Cond | |
- peers map[peer.ID]*peerState | |
- | |
- changeQueue []peer.ID | |
- stop bool | |
- done chan struct{} | |
-} | |
- | |
-type peerState struct { | |
- newState, curState state | |
- pending bool | |
-} | |
- | |
-func newConnectEventManager(connListeners ...ConnectionListener) *connectEventManager { | |
- evtManager := &connectEventManager{ | |
- connListeners: connListeners, | |
- peers: make(map[peer.ID]*peerState), | |
- done: make(chan struct{}), | |
- } | |
- evtManager.cond = sync.Cond{L: &evtManager.lk} | |
- return evtManager | |
-} | |
- | |
-func (c *connectEventManager) Start() { | |
- go c.worker() | |
-} | |
- | |
-func (c *connectEventManager) Stop() { | |
- c.lk.Lock() | |
- c.stop = true | |
- c.lk.Unlock() | |
- c.cond.Broadcast() | |
- | |
- <-c.done | |
-} | |
- | |
-func (c *connectEventManager) getState(p peer.ID) state { | |
- if state, ok := c.peers[p]; ok { | |
- return state.newState | |
- } else { | |
- return stateDisconnected | |
- } | |
-} | |
- | |
-func (c *connectEventManager) setState(p peer.ID, newState state) { | |
- state, ok := c.peers[p] | |
- if !ok { | |
- state = new(peerState) | |
- c.peers[p] = state | |
- } | |
- state.newState = newState | |
- if !state.pending && state.newState != state.curState { | |
- state.pending = true | |
- c.changeQueue = append(c.changeQueue, p) | |
- c.cond.Broadcast() | |
- } | |
-} | |
- | |
-// Waits for a change to be enqueued, or for the event manager to be stopped. Returns false if the | |
-// connect event manager has been stopped. | |
-func (c *connectEventManager) waitChange() bool { | |
- for !c.stop && len(c.changeQueue) == 0 { | |
- c.cond.Wait() | |
- } | |
- return !c.stop | |
-} | |
- | |
-func (c *connectEventManager) worker() { | |
- c.lk.Lock() | |
- defer c.lk.Unlock() | |
- defer close(c.done) | |
- | |
- for c.waitChange() { | |
- pid := c.changeQueue[0] | |
- c.changeQueue[0] = peer.ID("") // free the peer ID (slicing won't do that) | |
- c.changeQueue = c.changeQueue[1:] | |
- | |
- state, ok := c.peers[pid] | |
- // If we've disconnected and forgotten, continue. | |
- if !ok { | |
- // This shouldn't be possible because _this_ thread is responsible for | |
- // removing peers from this map, and we shouldn't get duplicate entries in | |
- // the change queue. | |
- log.Error("a change was enqueued for a peer we're not tracking") | |
- continue | |
- } | |
- | |
- // Record the fact that this "state" is no longer in the queue. | |
- state.pending = false | |
- | |
- // Then, if there's nothing to do, continue. | |
- if state.curState == state.newState { | |
- continue | |
- } | |
- | |
- // Or record the state update, then apply it. | |
- oldState := state.curState | |
- state.curState = state.newState | |
- | |
- switch state.newState { | |
- case stateDisconnected: | |
- delete(c.peers, pid) | |
- fallthrough | |
- case stateUnresponsive: | |
- // Only trigger a disconnect event if the peer was responsive. | |
- // We could be transitioning from unresponsive to disconnected. | |
- if oldState == stateResponsive { | |
- c.lk.Unlock() | |
- for _, v := range c.connListeners { | |
- v.PeerDisconnected(pid) | |
- } | |
- c.lk.Lock() | |
- } | |
- case stateResponsive: | |
- c.lk.Unlock() | |
- for _, v := range c.connListeners { | |
- v.PeerConnected(pid) | |
- } | |
- c.lk.Lock() | |
- } | |
- } | |
-} | |
- | |
-// Called whenever we receive a new connection. May be called many times. | |
-func (c *connectEventManager) Connected(p peer.ID) { | |
- c.lk.Lock() | |
- defer c.lk.Unlock() | |
- | |
- // !responsive -> responsive | |
- | |
- if c.getState(p) == stateResponsive { | |
- return | |
- } | |
- c.setState(p, stateResponsive) | |
-} | |
- | |
-// Called when we drop the final connection to a peer. | |
-func (c *connectEventManager) Disconnected(p peer.ID) { | |
- c.lk.Lock() | |
- defer c.lk.Unlock() | |
- | |
- // !disconnected -> disconnected | |
- | |
- if c.getState(p) == stateDisconnected { | |
- return | |
- } | |
- | |
- c.setState(p, stateDisconnected) | |
-} | |
- | |
-// Called whenever a peer is unresponsive. | |
-func (c *connectEventManager) MarkUnresponsive(p peer.ID) { | |
- c.lk.Lock() | |
- defer c.lk.Unlock() | |
- | |
- // responsive -> unresponsive | |
- | |
- if c.getState(p) != stateResponsive { | |
- return | |
- } | |
- | |
- c.setState(p, stateUnresponsive) | |
-} | |
- | |
-// Called whenever we receive a message from a peer. | |
-// | |
-// - When we're connected to the peer, this will mark the peer as responsive (from unresponsive). | |
-// - When not connected, we ignore this call. Unfortunately, a peer may disconnect before we | 
-//   process the "on message" event, so we can't treat this as evidence of a connection. | 
-func (c *connectEventManager) OnMessage(p peer.ID) { | |
- c.lk.RLock() | |
- unresponsive := c.getState(p) == stateUnresponsive | |
- c.lk.RUnlock() | |
- | |
- // Only continue if both connected, and unresponsive. | |
- if !unresponsive { | |
- return | |
- } | |
- | |
- // unresponsive -> responsive | |
- | |
- // We need to make a modification so now take a write lock | |
- c.lk.Lock() | |
- defer c.lk.Unlock() | |
- | |
- // Note: state may have changed in the time between when read lock | |
- // was released and write lock taken, so check again | |
- if c.getState(p) != stateUnresponsive { | |
- return | |
- } | |
- | |
- c.setState(p, stateResponsive) | |
-} | |
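OnMessage above takes a read lock for the cheap common case and re-checks the state after upgrading to a write lock, since the state can change while no lock is held. A generic standalone sketch of that discipline (illustrative only, unrelated to the bitswap types):

package main

import (
	"fmt"
	"sync"
)

// flagSet demonstrates the check-then-recheck pattern used by
// connectEventManager.OnMessage: a cheap RLock fast path, and a re-check
// under the write lock because the state may have changed while no lock
// was held.
type flagSet struct {
	mu    sync.RWMutex
	flags map[string]bool
}

func (f *flagSet) clearIfSet(key string) bool {
	f.mu.RLock()
	set := f.flags[key]
	f.mu.RUnlock()
	if !set {
		return false // fast path: nothing to do, no write lock taken
	}

	f.mu.Lock()
	defer f.mu.Unlock()
	// Re-check: another goroutine may have cleared it in the meantime.
	if !f.flags[key] {
		return false
	}
	delete(f.flags, key)
	return true
}

func main() {
	f := &flagSet{flags: map[string]bool{"unresponsive": true}}
	fmt.Println(f.clearIfSet("unresponsive")) // true
	fmt.Println(f.clearIfSet("unresponsive")) // false
}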
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/network/interface.go a/vendor/github.com/ipfs/go-bitswap/network/interface.go | |
--- b/vendor/github.com/ipfs/go-bitswap/network/interface.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/network/interface.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,111 +0,0 @@ | |
-package network | |
- | |
-import ( | |
- "context" | |
- "time" | |
- | |
- bsmsg "github.com/ipfs/go-bitswap/message" | |
- "github.com/ipfs/go-bitswap/network/internal" | |
- | |
- cid "github.com/ipfs/go-cid" | |
- | |
- "github.com/libp2p/go-libp2p/core/connmgr" | |
- "github.com/libp2p/go-libp2p/core/peer" | |
- "github.com/libp2p/go-libp2p/p2p/protocol/ping" | |
-) | |
- | |
-var ( | |
- // ProtocolBitswapNoVers is equivalent to the legacy bitswap protocol | |
- ProtocolBitswapNoVers = internal.ProtocolBitswapNoVers | |
- // ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol | |
- ProtocolBitswapOneZero = internal.ProtocolBitswapOneZero | |
-	// ProtocolBitswapOneOne is the prefix for version 1.1.0 | 
- ProtocolBitswapOneOne = internal.ProtocolBitswapOneOne | |
- // ProtocolBitswap is the current version of the bitswap protocol: 1.2.0 | |
- ProtocolBitswap = internal.ProtocolBitswap | |
-) | |
- | |
-// BitSwapNetwork provides network connectivity for BitSwap sessions. | |
-type BitSwapNetwork interface { | |
- Self() peer.ID | |
- | |
- // SendMessage sends a BitSwap message to a peer. | |
- SendMessage( | |
- context.Context, | |
- peer.ID, | |
- bsmsg.BitSwapMessage) error | |
- | |
-	// Start registers the Receiver and starts handling new messages, connectivity events, etc. | 
- Start(...Receiver) | |
- // Stop stops the network service. | |
- Stop() | |
- | |
- ConnectTo(context.Context, peer.ID) error | |
- DisconnectFrom(context.Context, peer.ID) error | |
- | |
- NewMessageSender(context.Context, peer.ID, *MessageSenderOpts) (MessageSender, error) | |
- | |
- ConnectionManager() connmgr.ConnManager | |
- | |
- Stats() Stats | |
- | |
- Routing | |
- | |
- Pinger | |
-} | |
- | |
-// MessageSender is an interface for sending a series of messages over the bitswap | |
-// network | |
-type MessageSender interface { | |
- SendMsg(context.Context, bsmsg.BitSwapMessage) error | |
- Close() error | |
- Reset() error | |
- // Indicates whether the remote peer supports HAVE / DONT_HAVE messages | |
- SupportsHave() bool | |
-} | |
- | |
-type MessageSenderOpts struct { | |
- MaxRetries int | |
- SendTimeout time.Duration | |
- SendErrorBackoff time.Duration | |
-} | |
- | |
-// Receiver is an interface that can receive messages from the BitSwapNetwork. | |
-type Receiver interface { | |
- ReceiveMessage( | |
- ctx context.Context, | |
- sender peer.ID, | |
- incoming bsmsg.BitSwapMessage) | |
- | |
- ReceiveError(error) | |
- | |
- // Connected/Disconnected warns bitswap about peer connections. | |
- PeerConnected(peer.ID) | |
- PeerDisconnected(peer.ID) | |
-} | |
- | |
-// Routing is an interface for providing and finding providers on a bitswap | 
-// network. | |
-type Routing interface { | |
- // FindProvidersAsync returns a channel of providers for the given key. | |
- FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID | |
- | |
- // Provide provides the key to the network. | |
- Provide(context.Context, cid.Cid) error | |
-} | |
- | |
-// Pinger is an interface to ping a peer and get the average latency of all pings | |
-type Pinger interface { | |
- // Ping a peer | |
- Ping(context.Context, peer.ID) ping.Result | |
- // Get the average latency of all pings | |
- Latency(peer.ID) time.Duration | |
-} | |
- | |
-// Stats is a container for statistics about the bitswap network | |
-// the numbers inside are specific to bitswap, and not any other protocols | |
-// using the same underlying network. | |
-type Stats struct { | |
- MessagesSent uint64 | |
- MessagesRecvd uint64 | |
-} | |
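Receiver is the hook that Start wires inbound traffic and connectivity events into. A minimal hypothetical implementation (illustrative only; the type name is made up) that satisfies the interface:

package main

import (
	"context"
	"fmt"

	bsmsg "github.com/ipfs/go-bitswap/message"
	bsnet "github.com/ipfs/go-bitswap/network"
	"github.com/libp2p/go-libp2p/core/peer"
)

// loggingReceiver is a hypothetical Receiver that only logs events; a real
// one would hand incoming messages to the bitswap client/server engines.
type loggingReceiver struct{}

func (loggingReceiver) ReceiveMessage(ctx context.Context, sender peer.ID, incoming bsmsg.BitSwapMessage) {
	fmt.Println("message from", sender)
}
func (loggingReceiver) ReceiveError(err error)      { fmt.Println("network error:", err) }
func (loggingReceiver) PeerConnected(p peer.ID)     { fmt.Println("connected:", p) }
func (loggingReceiver) PeerDisconnected(p peer.ID)  { fmt.Println("disconnected:", p) }

// Compile-time check against the interface defined above.
var _ bsnet.Receiver = loggingReceiver{}

func main() {
	// In real use this value is passed to BitSwapNetwork.Start(...).
	_ = loggingReceiver{}
}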
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/network/internal/default.go a/vendor/github.com/ipfs/go-bitswap/network/internal/default.go | |
--- b/vendor/github.com/ipfs/go-bitswap/network/internal/default.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/network/internal/default.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,23 +0,0 @@ | |
-package internal | |
- | |
-import ( | |
- "github.com/libp2p/go-libp2p/core/protocol" | |
-) | |
- | |
-var ( | |
- // ProtocolBitswapNoVers is equivalent to the legacy bitswap protocol | |
- ProtocolBitswapNoVers protocol.ID = "/ipfs/bitswap" | |
- // ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol | |
- ProtocolBitswapOneZero protocol.ID = "/ipfs/bitswap/1.0.0" | |
-	// ProtocolBitswapOneOne is the prefix for version 1.1.0 | 
- ProtocolBitswapOneOne protocol.ID = "/ipfs/bitswap/1.1.0" | |
- // ProtocolBitswap is the current version of the bitswap protocol: 1.2.0 | |
- ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.2.0" | |
-) | |
- | |
-var DefaultProtocols = []protocol.ID{ | |
- ProtocolBitswap, | |
- ProtocolBitswapOneOne, | |
- ProtocolBitswapOneZero, | |
- ProtocolBitswapNoVers, | |
-} | |
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/network/ipfs_impl.go a/vendor/github.com/ipfs/go-bitswap/network/ipfs_impl.go | |
--- b/vendor/github.com/ipfs/go-bitswap/network/ipfs_impl.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/network/ipfs_impl.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,472 +0,0 @@ | |
-package network | |
- | |
-import ( | |
- "context" | |
- "errors" | |
- "fmt" | |
- "io" | |
- "sync/atomic" | |
- "time" | |
- | |
- bsmsg "github.com/ipfs/go-bitswap/message" | |
- "github.com/ipfs/go-bitswap/network/internal" | |
- | |
- cid "github.com/ipfs/go-cid" | |
- logging "github.com/ipfs/go-log" | |
- "github.com/libp2p/go-libp2p/core/connmgr" | |
- "github.com/libp2p/go-libp2p/core/host" | |
- "github.com/libp2p/go-libp2p/core/network" | |
- "github.com/libp2p/go-libp2p/core/peer" | |
- peerstore "github.com/libp2p/go-libp2p/core/peerstore" | |
- "github.com/libp2p/go-libp2p/core/protocol" | |
- "github.com/libp2p/go-libp2p/core/routing" | |
- "github.com/libp2p/go-libp2p/p2p/protocol/ping" | |
- msgio "github.com/libp2p/go-msgio" | |
- ma "github.com/multiformats/go-multiaddr" | |
- "github.com/multiformats/go-multistream" | |
-) | |
- | |
-var log = logging.Logger("bitswap_network") | |
- | |
-var connectTimeout = time.Second * 5 | |
- | |
-var maxSendTimeout = 2 * time.Minute | |
-var minSendTimeout = 10 * time.Second | |
-var sendLatency = 2 * time.Second | |
-var minSendRate = (100 * 1000) / 8 // 100kbit/s | |
- | |
-// NewFromIpfsHost returns a BitSwapNetwork supported by the underlying IPFS host. | 
-func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) BitSwapNetwork { | |
- s := processSettings(opts...) | |
- | |
- bitswapNetwork := impl{ | |
- host: host, | |
- routing: r, | |
- | |
- protocolBitswapNoVers: s.ProtocolPrefix + ProtocolBitswapNoVers, | |
- protocolBitswapOneZero: s.ProtocolPrefix + ProtocolBitswapOneZero, | |
- protocolBitswapOneOne: s.ProtocolPrefix + ProtocolBitswapOneOne, | |
- protocolBitswap: s.ProtocolPrefix + ProtocolBitswap, | |
- | |
- supportedProtocols: s.SupportedProtocols, | |
- } | |
- | |
- return &bitswapNetwork | |
-} | |
- | |
-func processSettings(opts ...NetOpt) Settings { | |
- s := Settings{SupportedProtocols: append([]protocol.ID(nil), internal.DefaultProtocols...)} | |
- for _, opt := range opts { | |
- opt(&s) | |
- } | |
- for i, proto := range s.SupportedProtocols { | |
- s.SupportedProtocols[i] = s.ProtocolPrefix + proto | |
- } | |
- return s | |
-} | |
- | |
-// impl transforms the ipfs network interface, which sends and receives | |
-// NetMessage objects, into the bitswap network interface. | |
-type impl struct { | |
- // NOTE: Stats must be at the top of the heap allocation to ensure 64bit | |
- // alignment. | |
- stats Stats | |
- | |
- host host.Host | |
- routing routing.ContentRouting | |
- connectEvtMgr *connectEventManager | |
- | |
- protocolBitswapNoVers protocol.ID | |
- protocolBitswapOneZero protocol.ID | |
- protocolBitswapOneOne protocol.ID | |
- protocolBitswap protocol.ID | |
- | |
- supportedProtocols []protocol.ID | |
- | |
- // inbound messages from the network are forwarded to the receiver | |
- receivers []Receiver | |
-} | |
- | |
-type streamMessageSender struct { | |
- to peer.ID | |
- stream network.Stream | |
- connected bool | |
- bsnet *impl | |
- opts *MessageSenderOpts | |
-} | |
- | |
-// Open a stream to the remote peer | |
-func (s *streamMessageSender) Connect(ctx context.Context) (network.Stream, error) { | |
- if s.connected { | |
- return s.stream, nil | |
- } | |
- | |
- tctx, cancel := context.WithTimeout(ctx, s.opts.SendTimeout) | |
- defer cancel() | |
- | |
- if err := s.bsnet.ConnectTo(tctx, s.to); err != nil { | |
- return nil, err | |
- } | |
- | |
- stream, err := s.bsnet.newStreamToPeer(tctx, s.to) | |
- if err != nil { | |
- return nil, err | |
- } | |
- | |
- s.stream = stream | |
- s.connected = true | |
- return s.stream, nil | |
-} | |
- | |
-// Reset the stream | |
-func (s *streamMessageSender) Reset() error { | |
- if s.stream != nil { | |
- err := s.stream.Reset() | |
- s.connected = false | |
- return err | |
- } | |
- return nil | |
-} | |
- | |
-// Close the stream | |
-func (s *streamMessageSender) Close() error { | |
- return s.stream.Close() | |
-} | |
- | |
-// Indicates whether the peer supports HAVE / DONT_HAVE messages | |
-func (s *streamMessageSender) SupportsHave() bool { | |
- return s.bsnet.SupportsHave(s.stream.Protocol()) | |
-} | |
- | |
-// Send a message to the peer, attempting multiple times | |
-func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { | |
- return s.multiAttempt(ctx, func() error { | |
- return s.send(ctx, msg) | |
- }) | |
-} | |
- | |
-// Perform a function with multiple attempts, and a timeout | |
-func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func() error) error { | |
- // Try to call the function repeatedly | |
- var err error | |
- for i := 0; i < s.opts.MaxRetries; i++ { | |
- if err = fn(); err == nil { | |
- // Attempt was successful | |
- return nil | |
- } | |
- | |
- // Attempt failed | |
- | |
- // If the sender has been closed or the context cancelled, just bail out | |
- select { | |
- case <-ctx.Done(): | |
- return ctx.Err() | |
- default: | |
- } | |
- | |
- // Protocol is not supported, so no need to try multiple times | |
- if errors.Is(err, multistream.ErrNotSupported) { | |
- s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) | |
- return err | |
- } | |
- | |
- // Failed to send so reset stream and try again | |
- _ = s.Reset() | |
- | |
- // Failed too many times so mark the peer as unresponsive and return an error | |
- if i == s.opts.MaxRetries-1 { | |
- s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) | |
- return err | |
- } | |
- | |
- select { | |
- case <-ctx.Done(): | |
- return ctx.Err() | |
- case <-time.After(s.opts.SendErrorBackoff): | |
- // wait a short time in case disconnect notifications are still propagating | |
- log.Infof("send message to %s failed but context was not Done: %s", s.to, err) | |
- } | |
- } | |
- return err | |
-} | |
- | |
-// Send a message to the peer | |
-func (s *streamMessageSender) send(ctx context.Context, msg bsmsg.BitSwapMessage) error { | |
- start := time.Now() | |
- stream, err := s.Connect(ctx) | |
- if err != nil { | |
- log.Infof("failed to open stream to %s: %s", s.to, err) | |
- return err | |
- } | |
- | |
- // The send timeout includes the time required to connect | |
- // (although usually we will already have connected - we only need to | |
- // connect after a failed attempt to send) | |
- timeout := s.opts.SendTimeout - time.Since(start) | |
- if err = s.bsnet.msgToStream(ctx, stream, msg, timeout); err != nil { | |
- log.Infof("failed to send message to %s: %s", s.to, err) | |
- return err | |
- } | |
- | |
- return nil | |
-} | |
- | |
-func (bsnet *impl) Self() peer.ID { | |
- return bsnet.host.ID() | |
-} | |
- | |
-func (bsnet *impl) Ping(ctx context.Context, p peer.ID) ping.Result { | |
- ctx, cancel := context.WithCancel(ctx) | |
- defer cancel() | |
- res := <-ping.Ping(ctx, bsnet.host, p) | |
- return res | |
-} | |
- | |
-func (bsnet *impl) Latency(p peer.ID) time.Duration { | |
- return bsnet.host.Peerstore().LatencyEWMA(p) | |
-} | |
- | |
-// Indicates whether the given protocol supports HAVE / DONT_HAVE messages | |
-func (bsnet *impl) SupportsHave(proto protocol.ID) bool { | |
- switch proto { | |
- case bsnet.protocolBitswapOneOne, bsnet.protocolBitswapOneZero, bsnet.protocolBitswapNoVers: | |
- return false | |
- } | |
- return true | |
-} | |
- | |
-func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg.BitSwapMessage, timeout time.Duration) error { | |
- deadline := time.Now().Add(timeout) | |
- if dl, ok := ctx.Deadline(); ok && dl.Before(deadline) { | |
- deadline = dl | |
- } | |
- | |
- if err := s.SetWriteDeadline(deadline); err != nil { | |
- log.Warnf("error setting deadline: %s", err) | |
- } | |
- | |
- // Older Bitswap versions use a slightly different wire format so we need | |
- // to convert the message to the appropriate format depending on the remote | |
- // peer's Bitswap version. | |
- switch s.Protocol() { | |
- case bsnet.protocolBitswapOneOne, bsnet.protocolBitswap: | |
- if err := msg.ToNetV1(s); err != nil { | |
- log.Debugf("error: %s", err) | |
- return err | |
- } | |
- case bsnet.protocolBitswapOneZero, bsnet.protocolBitswapNoVers: | |
- if err := msg.ToNetV0(s); err != nil { | |
- log.Debugf("error: %s", err) | |
- return err | |
- } | |
- default: | |
- return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol()) | |
- } | |
- | |
- atomic.AddUint64(&bsnet.stats.MessagesSent, 1) | |
- | |
- if err := s.SetWriteDeadline(time.Time{}); err != nil { | |
- log.Warnf("error resetting deadline: %s", err) | |
- } | |
- return nil | |
-} | |
- | |
-func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID, opts *MessageSenderOpts) (MessageSender, error) { | |
- opts = setDefaultOpts(opts) | |
- | |
- sender := &streamMessageSender{ | |
- to: p, | |
- bsnet: bsnet, | |
- opts: opts, | |
- } | |
- | |
- err := sender.multiAttempt(ctx, func() error { | |
- _, err := sender.Connect(ctx) | |
- return err | |
- }) | |
- | |
- if err != nil { | |
- return nil, err | |
- } | |
- | |
- return sender, nil | |
-} | |
- | |
-func setDefaultOpts(opts *MessageSenderOpts) *MessageSenderOpts { | |
- copy := *opts | |
- if opts.MaxRetries == 0 { | |
- copy.MaxRetries = 3 | |
- } | |
- if opts.SendTimeout == 0 { | |
- copy.SendTimeout = maxSendTimeout | |
- } | |
- if opts.SendErrorBackoff == 0 { | |
- copy.SendErrorBackoff = 100 * time.Millisecond | |
- } | |
-	return &copy | 
-} | |
- | |
-func sendTimeout(size int) time.Duration { | |
- timeout := sendLatency | |
- timeout += time.Duration((uint64(time.Second) * uint64(size)) / uint64(minSendRate)) | |
- if timeout > maxSendTimeout { | |
- timeout = maxSendTimeout | |
- } else if timeout < minSendTimeout { | |
- timeout = minSendTimeout | |
- } | |
- return timeout | |
-} | |
- | |
-func (bsnet *impl) SendMessage( | |
- ctx context.Context, | |
- p peer.ID, | |
- outgoing bsmsg.BitSwapMessage) error { | |
- | |
- tctx, cancel := context.WithTimeout(ctx, connectTimeout) | |
- defer cancel() | |
- | |
- s, err := bsnet.newStreamToPeer(tctx, p) | |
- if err != nil { | |
- return err | |
- } | |
- | |
- timeout := sendTimeout(outgoing.Size()) | |
- if err = bsnet.msgToStream(ctx, s, outgoing, timeout); err != nil { | |
- _ = s.Reset() | |
- return err | |
- } | |
- | |
- return s.Close() | |
-} | |
- | |
-func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stream, error) { | |
- return bsnet.host.NewStream(ctx, p, bsnet.supportedProtocols...) | |
-} | |
- | |
-func (bsnet *impl) Start(r ...Receiver) { | |
- bsnet.receivers = r | |
- { | |
- connectionListeners := make([]ConnectionListener, len(r)) | |
- for i, v := range r { | |
- connectionListeners[i] = v | |
- } | |
- bsnet.connectEvtMgr = newConnectEventManager(connectionListeners...) | |
- } | |
- for _, proto := range bsnet.supportedProtocols { | |
- bsnet.host.SetStreamHandler(proto, bsnet.handleNewStream) | |
- } | |
- bsnet.host.Network().Notify((*netNotifiee)(bsnet)) | |
- bsnet.connectEvtMgr.Start() | |
- | |
-} | |
- | |
-func (bsnet *impl) Stop() { | |
- bsnet.connectEvtMgr.Stop() | |
- bsnet.host.Network().StopNotify((*netNotifiee)(bsnet)) | |
-} | |
- | |
-func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { | |
- return bsnet.host.Connect(ctx, peer.AddrInfo{ID: p}) | |
-} | |
- | |
-func (bsnet *impl) DisconnectFrom(ctx context.Context, p peer.ID) error { | |
- panic("Not implemented: DisconnectFrom() is only used by tests") | |
-} | |
- | |
-// FindProvidersAsync returns a channel of providers for the given key. | |
-func (bsnet *impl) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { | |
- out := make(chan peer.ID, max) | |
- go func() { | |
- defer close(out) | |
- providers := bsnet.routing.FindProvidersAsync(ctx, k, max) | |
- for info := range providers { | |
- if info.ID == bsnet.host.ID() { | |
- continue // ignore self as provider | |
- } | |
- bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.TempAddrTTL) | |
- select { | |
- case <-ctx.Done(): | |
- return | |
- case out <- info.ID: | |
- } | |
- } | |
- }() | |
- return out | |
-} | |
- | |
-// Provide provides the key to the network | |
-func (bsnet *impl) Provide(ctx context.Context, k cid.Cid) error { | |
- return bsnet.routing.Provide(ctx, k, true) | |
-} | |
- | |
-// handleNewStream receives a new stream from the network. | |
-func (bsnet *impl) handleNewStream(s network.Stream) { | |
- defer s.Close() | |
- | |
- if len(bsnet.receivers) == 0 { | |
- _ = s.Reset() | |
- return | |
- } | |
- | |
- reader := msgio.NewVarintReaderSize(s, network.MessageSizeMax) | |
- for { | |
- received, err := bsmsg.FromMsgReader(reader) | |
- if err != nil { | |
- if err != io.EOF { | |
- _ = s.Reset() | |
- for _, v := range bsnet.receivers { | |
- v.ReceiveError(err) | |
- } | |
- log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) | |
- } | |
- return | |
- } | |
- | |
- p := s.Conn().RemotePeer() | |
- ctx := context.Background() | |
- log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer()) | |
- bsnet.connectEvtMgr.OnMessage(s.Conn().RemotePeer()) | |
- atomic.AddUint64(&bsnet.stats.MessagesRecvd, 1) | |
- for _, v := range bsnet.receivers { | |
- v.ReceiveMessage(ctx, p, received) | |
- } | |
- } | |
-} | |
- | |
-func (bsnet *impl) ConnectionManager() connmgr.ConnManager { | |
- return bsnet.host.ConnManager() | |
-} | |
- | |
-func (bsnet *impl) Stats() Stats { | |
- return Stats{ | |
- MessagesRecvd: atomic.LoadUint64(&bsnet.stats.MessagesRecvd), | |
- MessagesSent: atomic.LoadUint64(&bsnet.stats.MessagesSent), | |
- } | |
-} | |
- | |
-type netNotifiee impl | |
- | |
-func (nn *netNotifiee) impl() *impl { | |
- return (*impl)(nn) | |
-} | |
- | |
-func (nn *netNotifiee) Connected(n network.Network, v network.Conn) { | |
- // ignore transient connections | |
- if v.Stat().Transient { | |
- return | |
- } | |
- | |
- nn.impl().connectEvtMgr.Connected(v.RemotePeer()) | |
-} | |
-func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) { | |
- // Only record a "disconnect" when we actually disconnect. | |
- if n.Connectedness(v.RemotePeer()) == network.Connected { | |
- return | |
- } | |
- | |
- nn.impl().connectEvtMgr.Disconnected(v.RemotePeer()) | |
-} | |
-func (nn *netNotifiee) OpenedStream(n network.Network, s network.Stream) {} | |
-func (nn *netNotifiee) ClosedStream(n network.Network, v network.Stream) {} | |
-func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {} | |
-func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {} | |
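The `sendTimeout` helper above sizes the write deadline from the message size: a fixed 2s latency allowance plus one second per 12500 bytes (100 kbit/s), clamped between `minSendTimeout` and `maxSendTimeout` (defined elsewhere in this file). Below is a minimal, self-contained sketch of that heuristic; the clamp bounds are passed in as parameters only because their actual values are not part of this hunk.

```golang
package main

import (
	"fmt"
	"time"
)

// sendTimeoutFor mirrors the sendTimeout heuristic above: a fixed latency
// allowance plus size/minSendRate, clamped to [min, max].
func sendTimeoutFor(size int, min, max time.Duration) time.Duration {
	const sendLatency = 2 * time.Second
	const minSendRate = (100 * 1000) / 8 // 100 kbit/s expressed in bytes/s

	timeout := sendLatency + time.Duration(uint64(time.Second)*uint64(size)/uint64(minSendRate))
	if timeout > max {
		timeout = max
	} else if timeout < min {
		timeout = min
	}
	return timeout
}

func main() {
	// A 256 KiB message: 2s + 262144/12500 s, i.e. roughly 23s before clamping.
	fmt.Println(sendTimeoutFor(256*1024, 5*time.Second, 3*time.Minute))
}
```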
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/network/options.go a/vendor/github.com/ipfs/go-bitswap/network/options.go | |
--- b/vendor/github.com/ipfs/go-bitswap/network/options.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/network/options.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,22 +0,0 @@ | |
-package network | |
- | |
-import "github.com/libp2p/go-libp2p/core/protocol" | |
- | |
-type NetOpt func(*Settings) | |
- | |
-type Settings struct { | |
- ProtocolPrefix protocol.ID | |
- SupportedProtocols []protocol.ID | |
-} | |
- | |
-func Prefix(prefix protocol.ID) NetOpt { | |
- return func(settings *Settings) { | |
- settings.ProtocolPrefix = prefix | |
- } | |
-} | |
- | |
-func SupportedProtocols(protos []protocol.ID) NetOpt { | |
- return func(settings *Settings) { | |
- settings.SupportedProtocols = protos | |
- } | |
-} | |
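The two `NetOpt` helpers above are plain functional options consumed by `processSettings`, which prefixes every supported protocol ID. A minimal sketch of scoping Bitswap to a private network this way; the `/myapp` prefix and the helper name are illustrative only.

```golang
package example

import (
	bsnet "github.com/ipfs/go-bitswap/network"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/protocol"
	"github.com/libp2p/go-libp2p/core/routing"
)

// newScopedNetwork builds a BitSwapNetwork whose protocol IDs all carry a
// /myapp prefix, so peers without the same prefix will not negotiate Bitswap.
func newScopedNetwork(h host.Host, r routing.ContentRouting) bsnet.BitSwapNetwork {
	return bsnet.NewFromIpfsHost(h, r,
		bsnet.Prefix(protocol.ID("/myapp")),
	)
}
```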
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/options.go a/vendor/github.com/ipfs/go-bitswap/options.go | |
--- b/vendor/github.com/ipfs/go-bitswap/options.go 2023-01-30 20:34:50.525475644 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/options.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,79 +0,0 @@ | |
-package bitswap | |
- | |
-import ( | |
- "time" | |
- | |
- "github.com/ipfs/go-bitswap/client" | |
- "github.com/ipfs/go-bitswap/server" | |
- "github.com/ipfs/go-bitswap/tracer" | |
- delay "github.com/ipfs/go-ipfs-delay" | |
-) | |
- | |
-type option func(*Bitswap) | |
- | |
-// Option is a server.Option, a client.Option, or a func(*Bitswap), | 
-// wrapped in a struct to gain strong type checking. | |
-type Option struct { | |
- v interface{} | |
-} | |
- | |
-func EngineBlockstoreWorkerCount(count int) Option { | |
- return Option{server.EngineBlockstoreWorkerCount(count)} | |
-} | |
- | |
-func EngineTaskWorkerCount(count int) Option { | |
- return Option{server.EngineTaskWorkerCount(count)} | |
-} | |
- | |
-func MaxOutstandingBytesPerPeer(count int) Option { | |
- return Option{server.MaxOutstandingBytesPerPeer(count)} | |
-} | |
- | |
-func TaskWorkerCount(count int) Option { | |
- return Option{server.TaskWorkerCount(count)} | |
-} | |
- | |
-func ProvideEnabled(enabled bool) Option { | |
- return Option{server.ProvideEnabled(enabled)} | |
-} | |
- | |
-func SetSendDontHaves(send bool) Option { | |
- return Option{server.SetSendDontHaves(send)} | |
-} | |
- | |
-func WithPeerBlockRequestFilter(pbrf server.PeerBlockRequestFilter) Option { | |
- return Option{server.WithPeerBlockRequestFilter(pbrf)} | |
-} | |
- | |
-func WithScoreLedger(scoreLedger server.ScoreLedger) Option { | |
- return Option{server.WithScoreLedger(scoreLedger)} | |
-} | |
- | |
-func WithTargetMessageSize(tms int) Option { | |
- return Option{server.WithTargetMessageSize(tms)} | |
-} | |
- | |
-func WithTaskComparator(comparator server.TaskComparator) Option { | |
- return Option{server.WithTaskComparator(comparator)} | |
-} | |
- | |
-func ProviderSearchDelay(newProvSearchDelay time.Duration) Option { | |
- return Option{client.ProviderSearchDelay(newProvSearchDelay)} | |
-} | |
- | |
-func RebroadcastDelay(newRebroadcastDelay delay.D) Option { | |
- return Option{client.RebroadcastDelay(newRebroadcastDelay)} | |
-} | |
- | |
-func SetSimulateDontHavesOnTimeout(send bool) Option { | |
- return Option{client.SetSimulateDontHavesOnTimeout(send)} | |
-} | |
- | |
-func WithTracer(tap tracer.Tracer) Option { | |
- // Only trace the server, both receive the same messages anyway | |
- return Option{ | |
- option(func(bs *Bitswap) { | |
- bs.tracer = tap | |
- }), | |
- } | |
-} | |
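`options.go` above wraps `server.Option`, `client.Option` and `func(*Bitswap)` values behind a single `Option` type so that `bitswap.New` can route each one to the right component. A sketch of passing a mix of them; the particular values are arbitrary examples, not recommended defaults.

```golang
package example

import (
	"context"

	bitswap "github.com/ipfs/go-bitswap"
	bsnet "github.com/ipfs/go-bitswap/network"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
)

// newTunedExchange mixes server-side and client-side options in one call;
// bitswap.New dispatches them by type.
func newTunedExchange(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore) *bitswap.Bitswap {
	return bitswap.New(ctx, network, bstore,
		bitswap.ProvideEnabled(true),                // handled by the server
		bitswap.EngineTaskWorkerCount(8),            // handled by the server
		bitswap.SetSimulateDontHavesOnTimeout(true), // handled by the client
	)
}
```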
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/README.md a/vendor/github.com/ipfs/go-bitswap/README.md | |
--- b/vendor/github.com/ipfs/go-bitswap/README.md 2023-01-30 20:34:50.525475644 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/README.md 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,148 +0,0 @@ | |
-go-bitswap | |
-================== | |
- | |
-[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) | |
-[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) | |
-[![Matrix](https://img.shields.io/badge/matrix-%23ipfs%3Amatrix.org-blue.svg?style=flat-square)](https://matrix.to/#/#ipfs:matrix.org) | |
-[![IRC](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) | |
-[![Discord](https://img.shields.io/discord/475789330380488707?color=blueviolet&label=discord&style=flat-square)](https://discord.gg/24fmuwR) | |
-[![Coverage Status](https://codecov.io/gh/ipfs/go-bitswap/branch/master/graph/badge.svg)](https://codecov.io/gh/ipfs/go-bitswap/branch/master) | |
-[![Build Status](https://circleci.com/gh/ipfs/go-bitswap.svg?style=svg)](https://circleci.com/gh/ipfs/go-bitswap) | |
- | |
-> An implementation of the bitswap protocol in go! | |
- | |
-## Lead Maintainer | |
- | |
-[Dirk McCormick](https://github.com/dirkmc) | |
- | |
-## Table of Contents | |
- | |
-- [Background](#background) | |
-- [Install](#install) | |
-- [Usage](#usage) | |
-- [Implementation](#implementation) | |
-- [Contribute](#contribute) | |
-- [License](#license) | |
- | |
- | |
-## Background | |
- | |
-Bitswap is the data trading module for ipfs. It manages requesting and sending | |
-blocks to and from other peers in the network. Bitswap has two main jobs: | |
-- to acquire blocks requested by the client from the network | |
-- to judiciously send blocks in its possession to other peers who want them | |
- | |
-Bitswap is a message-based protocol, as opposed to request-response. All messages | 
-contain wantlists or blocks. | |
- | |
-A node sends a wantlist to tell peers which blocks it wants. When a node receives | |
-a wantlist it should check which blocks it has from the wantlist, and consider | |
-sending the matching blocks to the requestor. | |
- | |
-When a node receives blocks that it asked for, the node should send out a | |
-notification called a 'Cancel' to tell its peers that the node no longer | |
-wants those blocks. | |
- | |
-`go-bitswap` provides an implementation of the Bitswap protocol in go. | |
- | |
-[Learn more about how Bitswap works](./docs/how-bitswap-works.md) | |
- | |
-## Install | |
- | |
-`go-bitswap` requires Go >= 1.11 and can be installed using Go modules | |
- | |
-## Usage | |
- | |
-### Initializing a Bitswap Exchange | |
- | |
-```golang | |
-import ( | |
- "context" | |
- bitswap "github.com/ipfs/go-bitswap" | |
- bsnet "github.com/ipfs/go-bitswap/network" | |
- blockstore "github.com/ipfs/go-ipfs-blockstore" | |
- "github.com/libp2p/go-libp2p-core/routing" | |
- "github.com/libp2p/go-libp2p-core/host" | |
-) | |
- | |
-var ctx context.Context | |
-var host host.Host | |
-var router routing.ContentRouting | |
-var bstore blockstore.Blockstore | |
- | |
-network := bsnet.NewFromIpfsHost(host, router) | |
-exchange := bitswap.New(ctx, network, bstore) | |
-``` | |
- | |
-Parameter Notes: | |
- | |
-1. `ctx` is just the parent context for all of Bitswap | |
-2. `network` is a network abstraction provided to Bitswap on top of libp2p & content routing. | |
-3. `bstore` is an IPFS blockstore | |
- | |
-### Get A Block Synchronously | |
- | |
-```golang | |
-var c cid.Cid | |
-var ctx context.Context | |
-var exchange bitswap.Bitswap | |
- | |
-block, err := exchange.GetBlock(ctx, c) | |
-``` | |
- | |
-Parameter Notes: | |
- | |
-1. `ctx` is the context for this request, which can be cancelled to cancel the request | |
-2. `c` is the content ID of the block you're requesting | |
- | |
-### Get Several Blocks Asynchronously | |
- | |
-```golang | |
-var cids []cid.Cid | |
-var ctx context.Context | |
-var exchange bitswap.Bitswap | |
- | |
-blockChannel, err := exchange.GetBlocks(ctx, cids) | |
-``` | |
- | |
-Parameter Notes: | |
- | |
-1. `ctx` is the context for this request, which can be cancelled to cancel the request | |
-2. `cids` is a slice of content IDs for the blocks you're requesting | |
- | |
-### Get Related Blocks Faster With Sessions | |
- | |
-In IPFS, content blocks are often connected to each other through a MerkleDAG. If you know ahead of time that block requests are related, Bitswap can make several optimizations internally in how it requests those blocks in order to get them faster. Bitswap provides a mechanism called a Bitswap Session to manage a series of block requests as part of a single higher level operation. You should initialize a Bitswap Session any time you intend to make a series of block requests that are related -- and whose responses are likely to come from the same peers. | |
- | |
-```golang | |
-var ctx context.Context | |
-var cids []cid.Cid | 
-var exchange bitswap.Bitswap | |
- | |
-session := exchange.NewSession(ctx) | |
-blocksChannel, err := session.GetBlocks(ctx, cids) | |
-// later | |
-var relatedCids []cid.Cid | 
-relatedBlocksChannel, err := session.GetBlocks(ctx, relatedCids) | |
-``` | |
- | |
-Note that `NewSession` returns an interface with `GetBlock` and `GetBlocks` methods that have the same signature as the overall Bitswap exchange. | |
- | |
-### Tell bitswap a new block was added to the local datastore | |
- | |
-```golang | |
-var blk blocks.Block | |
-var exchange bitswap.Bitswap | |
- | |
-err := exchange.HasBlock(blk) | |
-``` | |
- | |
-## Contribute | |
- | |
-PRs are welcome! | |
- | |
-Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. | |
- | |
-## License | |
- | |
-MIT © Juan Batiz-Benet | |
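The README snippets above each rely on pre-declared placeholder variables. Below is a single sketch that wires them together end to end; the import paths follow the vendored code in this diff (`go-libp2p/core/...`) rather than the older `go-libp2p-core` paths shown in the README, and the host, router and blockstore are still assumed to be constructed elsewhere.

```golang
package example

import (
	"context"
	"fmt"

	bitswap "github.com/ipfs/go-bitswap"
	bsnet "github.com/ipfs/go-bitswap/network"
	"github.com/ipfs/go-cid"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/routing"
)

// fetchOne builds the network abstraction, starts a Bitswap exchange and
// fetches a single block through a session.
func fetchOne(ctx context.Context, h host.Host, router routing.ContentRouting, bstore blockstore.Blockstore, c cid.Cid) error {
	network := bsnet.NewFromIpfsHost(h, router)
	exchange := bitswap.New(ctx, network, bstore)
	defer exchange.Close()

	session := exchange.NewSession(ctx)
	blk, err := session.GetBlock(ctx, c)
	if err != nil {
		return err
	}
	fmt.Printf("got %s (%d bytes)\n", blk.Cid(), len(blk.RawData()))
	return nil
}
```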
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/sendOnlyTracer.go a/vendor/github.com/ipfs/go-bitswap/sendOnlyTracer.go | |
--- b/vendor/github.com/ipfs/go-bitswap/sendOnlyTracer.go 2023-01-30 20:34:50.525475644 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/sendOnlyTracer.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,20 +0,0 @@ | |
-package bitswap | |
- | |
-import ( | |
- "github.com/ipfs/go-bitswap/message" | |
- "github.com/ipfs/go-bitswap/tracer" | |
- "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-type sendOnlyTracer interface { | |
- MessageSent(peer.ID, message.BitSwapMessage) | |
-} | |
- | |
-var _ tracer.Tracer = nopReceiveTracer{} | |
- | |
-// we only need to trace sends because receives are already traced in the polyfill object (otherwise they would be traced twice) | 
-type nopReceiveTracer struct { | |
- sendOnlyTracer | |
-} | |
- | |
-func (nopReceiveTracer) MessageReceived(peer.ID, message.BitSwapMessage) {} | |
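`tracer.Tracer` requires both a `MessageReceived` and a `MessageSent` method; the shim above simply blanks out the receive side. A sketch of a toy tracer that logs both directions, wired in through the `WithTracer` option from `options.go` earlier in this diff; the type and its output format are illustrative only.

```golang
package main

import (
	"fmt"

	bitswap "github.com/ipfs/go-bitswap"
	"github.com/ipfs/go-bitswap/message"
	"github.com/ipfs/go-bitswap/tracer"
	"github.com/libp2p/go-libp2p/core/peer"
)

// logTracer prints one line per Bitswap message in either direction.
type logTracer struct{}

func (logTracer) MessageReceived(p peer.ID, m message.BitSwapMessage) {
	fmt.Printf("<- %s: %d blocks, %d wantlist entries\n", p, len(m.Blocks()), len(m.Wantlist()))
}

func (logTracer) MessageSent(p peer.ID, m message.BitSwapMessage) {
	fmt.Printf("-> %s: %d blocks, %d wantlist entries\n", p, len(m.Blocks()), len(m.Wantlist()))
}

var _ tracer.Tracer = logTracer{}

func main() {
	opt := bitswap.WithTracer(logTracer{})
	_ = opt // pass to bitswap.New alongside the other options
}
```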
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/server/forward.go a/vendor/github.com/ipfs/go-bitswap/server/forward.go | |
--- b/vendor/github.com/ipfs/go-bitswap/server/forward.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/server/forward.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,14 +0,0 @@ | |
-package server | |
- | |
-import ( | |
- "github.com/ipfs/go-bitswap/server/internal/decision" | |
-) | |
- | |
-type ( | |
- Receipt = decision.Receipt | |
- PeerBlockRequestFilter = decision.PeerBlockRequestFilter | |
- TaskComparator = decision.TaskComparator | |
- TaskInfo = decision.TaskInfo | |
- ScoreLedger = decision.ScoreLedger | |
- ScorePeerFunc = decision.ScorePeerFunc | |
-) | |
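`forward.go` just re-exports the decision-engine types so callers can configure the server without importing internal packages. A sketch of a `PeerBlockRequestFilter` (one of the aliases above) that only serves blocks to an allow-list of peers, plugged in through `WithPeerBlockRequestFilter` from `options.go`; the allow-list policy itself is purely illustrative.

```golang
package main

import (
	bitswap "github.com/ipfs/go-bitswap"
	"github.com/ipfs/go-cid"
	"github.com/libp2p/go-libp2p/core/peer"
)

// allowListFilter returns a filter matching the server.PeerBlockRequestFilter
// signature: serve a request only when the requesting peer is on the list.
func allowListFilter(allowed map[peer.ID]bool) func(p peer.ID, c cid.Cid) bool {
	return func(p peer.ID, c cid.Cid) bool {
		return allowed[p]
	}
}

func main() {
	allowed := map[peer.ID]bool{ /* fill with trusted peer IDs */ }
	opt := bitswap.WithPeerBlockRequestFilter(allowListFilter(allowed))
	_ = opt // pass to bitswap.New alongside the other options
}
```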
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/server/internal/decision/blockstoremanager.go a/vendor/github.com/ipfs/go-bitswap/server/internal/decision/blockstoremanager.go | |
--- b/vendor/github.com/ipfs/go-bitswap/server/internal/decision/blockstoremanager.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/server/internal/decision/blockstoremanager.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,149 +0,0 @@ | |
-package decision | |
- | |
-import ( | |
- "context" | |
- "fmt" | |
- "sync" | |
- | |
- blocks "github.com/ipfs/go-block-format" | |
- cid "github.com/ipfs/go-cid" | |
- bstore "github.com/ipfs/go-ipfs-blockstore" | |
- ipld "github.com/ipfs/go-ipld-format" | |
- "github.com/ipfs/go-metrics-interface" | |
-) | |
- | |
-// blockstoreManager maintains a pool of workers that make requests to the blockstore. | |
-type blockstoreManager struct { | |
- bs bstore.Blockstore | |
- workerCount int | |
- jobs chan func() | |
- pendingGauge metrics.Gauge | |
- activeGauge metrics.Gauge | |
- | |
- workerWG sync.WaitGroup | |
- stopChan chan struct{} | |
- stopOnce sync.Once | |
-} | |
- | |
-// newBlockstoreManager creates a new blockstoreManager for the given blockstore | 
-// and number of workers | |
-func newBlockstoreManager( | |
- bs bstore.Blockstore, | |
- workerCount int, | |
- pendingGauge metrics.Gauge, | |
- activeGauge metrics.Gauge, | |
-) *blockstoreManager { | |
- return &blockstoreManager{ | |
- bs: bs, | |
- workerCount: workerCount, | |
- jobs: make(chan func()), | |
- pendingGauge: pendingGauge, | |
- activeGauge: activeGauge, | |
- stopChan: make(chan struct{}), | |
- } | |
-} | |
- | |
-func (bsm *blockstoreManager) start() { | |
- bsm.workerWG.Add(bsm.workerCount) | |
- for i := 0; i < bsm.workerCount; i++ { | |
- go bsm.worker() | |
- } | |
-} | |
- | |
-func (bsm *blockstoreManager) stop() { | |
- bsm.stopOnce.Do(func() { | |
- close(bsm.stopChan) | |
- }) | |
- bsm.workerWG.Wait() | |
-} | |
- | |
-func (bsm *blockstoreManager) worker() { | |
- defer bsm.workerWG.Done() | |
- for { | |
- select { | |
- case <-bsm.stopChan: | |
- return | |
- case job := <-bsm.jobs: | |
- bsm.pendingGauge.Dec() | |
- bsm.activeGauge.Inc() | |
- job() | |
- bsm.activeGauge.Dec() | |
- } | |
- } | |
-} | |
- | |
-func (bsm *blockstoreManager) addJob(ctx context.Context, job func()) error { | |
- select { | |
- case <-ctx.Done(): | |
- return ctx.Err() | |
- case <-bsm.stopChan: | |
- return fmt.Errorf("shutting down") | |
- case bsm.jobs <- job: | |
- bsm.pendingGauge.Inc() | |
- return nil | |
- } | |
-} | |
- | |
-func (bsm *blockstoreManager) getBlockSizes(ctx context.Context, ks []cid.Cid) (map[cid.Cid]int, error) { | |
- res := make(map[cid.Cid]int) | |
- if len(ks) == 0 { | |
- return res, nil | |
- } | |
- | |
- var lk sync.Mutex | |
- return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) { | |
- size, err := bsm.bs.GetSize(ctx, c) | |
- if err != nil { | |
- if !ipld.IsNotFound(err) { | |
- // Note: this isn't a fatal error. We shouldn't abort the request | |
- log.Errorf("blockstore.GetSize(%s) error: %s", c, err) | |
- } | |
- } else { | |
- lk.Lock() | |
- res[c] = size | |
- lk.Unlock() | |
- } | |
- }) | |
-} | |
- | |
-func (bsm *blockstoreManager) getBlocks(ctx context.Context, ks []cid.Cid) (map[cid.Cid]blocks.Block, error) { | |
- res := make(map[cid.Cid]blocks.Block, len(ks)) | |
- if len(ks) == 0 { | |
- return res, nil | |
- } | |
- | |
- var lk sync.Mutex | |
- return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) { | |
- blk, err := bsm.bs.Get(ctx, c) | |
- if err != nil { | |
- if !ipld.IsNotFound(err) { | |
- // Note: this isn't a fatal error. We shouldn't abort the request | |
- log.Errorf("blockstore.Get(%s) error: %s", c, err) | |
- } | |
- return | |
- } | |
- | |
- lk.Lock() | |
- res[c] = blk | |
- lk.Unlock() | |
- }) | |
-} | |
- | |
-func (bsm *blockstoreManager) jobPerKey(ctx context.Context, ks []cid.Cid, jobFn func(c cid.Cid)) error { | |
- var err error | |
- var wg sync.WaitGroup | |
- for _, k := range ks { | |
- c := k | |
- wg.Add(1) | |
- err = bsm.addJob(ctx, func() { | |
- jobFn(c) | |
- wg.Done() | |
- }) | |
- if err != nil { | |
- wg.Done() | |
- break | |
- } | |
- } | |
- wg.Wait() | |
- return err | |
-} | |
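`blockstoreManager` above fans blockstore lookups out over a fixed number of workers through an unbuffered `jobs` channel, and `jobPerKey` waits for the per-key jobs with a `WaitGroup`. Below is a stripped-down, generic sketch of the same pattern; the names are mine, not the package's.

```golang
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// workerPool mirrors blockstoreManager's structure: fixed workers pulling
// closures from an unbuffered channel until the pool is stopped.
type workerPool struct {
	jobs chan func()
	wg   sync.WaitGroup
	stop chan struct{}
}

func newWorkerPool(workers int) *workerPool {
	p := &workerPool{jobs: make(chan func()), stop: make(chan struct{})}
	p.wg.Add(workers)
	for i := 0; i < workers; i++ {
		go func() {
			defer p.wg.Done()
			for {
				select {
				case <-p.stop:
					return
				case job := <-p.jobs:
					job()
				}
			}
		}()
	}
	return p
}

// addJob blocks until a worker accepts the job, the context is cancelled,
// or the pool is shutting down.
func (p *workerPool) addJob(ctx context.Context, job func()) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-p.stop:
		return errors.New("shutting down")
	case p.jobs <- job:
		return nil
	}
}

func (p *workerPool) close() {
	close(p.stop)
	p.wg.Wait()
}

func main() {
	pool := newWorkerPool(4)
	defer pool.close()

	var wg sync.WaitGroup
	results := make([]int, 8)
	for i := 0; i < len(results); i++ {
		i := i
		wg.Add(1)
		if err := pool.addJob(context.Background(), func() {
			defer wg.Done()
			results[i] = i * i // stand-in for a blockstore lookup
		}); err != nil {
			wg.Done()
		}
	}
	wg.Wait()
	fmt.Println(results)
}
```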
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/server/internal/decision/engine.go a/vendor/github.com/ipfs/go-bitswap/server/internal/decision/engine.go | |
--- b/vendor/github.com/ipfs/go-bitswap/server/internal/decision/engine.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/server/internal/decision/engine.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,1026 +0,0 @@ | |
-// Package decision implements the decision engine for the bitswap service. | |
-package decision | |
- | |
-import ( | |
- "context" | |
- "fmt" | |
- "sync" | |
- "time" | |
- | |
- "github.com/google/uuid" | |
- | |
- wl "github.com/ipfs/go-bitswap/client/wantlist" | |
- "github.com/ipfs/go-bitswap/internal/defaults" | |
- bsmsg "github.com/ipfs/go-bitswap/message" | |
- pb "github.com/ipfs/go-bitswap/message/pb" | |
- bmetrics "github.com/ipfs/go-bitswap/metrics" | |
- blocks "github.com/ipfs/go-block-format" | |
- "github.com/ipfs/go-cid" | |
- bstore "github.com/ipfs/go-ipfs-blockstore" | |
- logging "github.com/ipfs/go-log" | |
- "github.com/ipfs/go-metrics-interface" | |
- "github.com/ipfs/go-peertaskqueue" | |
- "github.com/ipfs/go-peertaskqueue/peertask" | |
- "github.com/ipfs/go-peertaskqueue/peertracker" | |
- process "github.com/jbenet/goprocess" | |
- "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-// TODO consider taking responsibility for other types of requests. For | |
-// example, there could be a |cancelQueue| for all of the cancellation | |
-// messages that need to go out. There could also be a |wantlistQueue| for | |
-// the local peer's wantlists. Alternatively, these could all be bundled | |
-// into a single, intelligent global queue that efficiently | |
-// batches/combines and takes all of these into consideration. | |
-// | |
-// Right now, messages go onto the network for four reasons: | |
-// 1. an initial `sendwantlist` message to a provider of the first key in a | |
-// request | |
-// 2. a periodic full sweep of `sendwantlist` messages to all providers | |
-// 3. upon receipt of blocks, a `cancel` message to all peers | |
-// 4. draining the priority queue of `blockrequests` from peers | |
-// | |
-// Presently, only `blockrequests` are handled by the decision engine. | |
-// However, there is an opportunity to give it more responsibility! If the | |
-// decision engine is given responsibility for all of the others, it can | |
-// intelligently decide how to combine requests efficiently. | |
-// | |
-// Some examples of what would be possible: | |
-// | |
-// * when sending out the wantlists, include `cancel` requests | |
-// * when handling `blockrequests`, include `sendwantlist` and `cancel` as | |
-// appropriate | |
-// * when handling `cancel`, if we recently received a wanted block from a | |
-// peer, include a partial wantlist that contains a few other high priority | |
-// blocks | |
-// | |
-// In a sense, if we treat the decision engine as a black box, it could do | |
-// whatever it sees fit to produce desired outcomes (get wanted keys | |
-// quickly, maintain good relationships with peers, etc). | |
- | |
-var log = logging.Logger("engine") | |
- | |
-const ( | |
- // outboxChanBuffer must be 0 to prevent stale messages from being sent | |
- outboxChanBuffer = 0 | |
-	// defaultTargetMessageSize is the ideal size of the batched payload. We try to | 
- // pop this much data off the request queue, but it may be a little more | |
- // or less depending on what's in the queue. | |
- defaultTargetMessageSize = 16 * 1024 | |
-	// tagFormat is the tag given to peers associated with an engine | 
- tagFormat = "bs-engine-%s-%s" | |
- | |
- // queuedTagWeight is the default weight for peers that have work queued | |
- // on their behalf. | |
- queuedTagWeight = 10 | |
- | |
- // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in | |
- // bytes up to which we will replace a want-have with a want-block | |
- maxBlockSizeReplaceHasWithBlock = 1024 | |
-) | |
- | |
-// Envelope contains a message for a Peer. | |
-type Envelope struct { | |
- // Peer is the intended recipient. | |
- Peer peer.ID | |
- | |
- // Message is the payload. | |
- Message bsmsg.BitSwapMessage | |
- | |
- // A callback to notify the decision queue that the task is complete | |
- Sent func() | |
-} | |
- | |
-// PeerTagger covers the methods on the connection manager used by the decision | |
-// engine to tag peers | |
-type PeerTagger interface { | |
- TagPeer(peer.ID, string, int) | |
- UntagPeer(p peer.ID, tag string) | |
-} | |
- | |
-// Assigns a specific score to a peer | |
-type ScorePeerFunc func(peer.ID, int) | |
- | |
-// ScoreLedger is an external ledger dealing with peer scores. | |
-type ScoreLedger interface { | |
-	// Returns aggregated data about communication with a given peer. | 
- GetReceipt(p peer.ID) *Receipt | |
- // Increments the sent counter for the given peer. | |
- AddToSentBytes(p peer.ID, n int) | |
- // Increments the received counter for the given peer. | |
- AddToReceivedBytes(p peer.ID, n int) | |
- // PeerConnected should be called when a new peer connects, | |
- // meaning the ledger should open accounting. | |
- PeerConnected(p peer.ID) | |
- // PeerDisconnected should be called when a peer disconnects to | |
- // clean up the accounting. | |
- PeerDisconnected(p peer.ID) | |
- // Starts the ledger sampling process. | |
- Start(scorePeer ScorePeerFunc) | |
- // Stops the sampling process. | |
- Stop() | |
-} | |
- | |
-// Engine manages sending requested blocks to peers. | |
-type Engine struct { | |
- // peerRequestQueue is a priority queue of requests received from peers. | |
- // Requests are popped from the queue, packaged up, and placed in the | |
- // outbox. | |
- peerRequestQueue *peertaskqueue.PeerTaskQueue | |
- | |
- // FIXME it's a bit odd for the client and the worker to both share memory | |
- // (both modify the peerRequestQueue) and also to communicate over the | |
- // workSignal channel. consider sending requests over the channel and | |
- // allowing the worker to have exclusive access to the peerRequestQueue. In | |
- // that case, no lock would be required. | |
- workSignal chan struct{} | |
- | |
- // outbox contains outgoing messages to peers. This is owned by the | |
- // taskWorker goroutine | |
- outbox chan (<-chan *Envelope) | |
- | |
- bsm *blockstoreManager | |
- | |
- peerTagger PeerTagger | |
- | |
- tagQueued, tagUseful string | |
- | |
- lock sync.RWMutex // protects the fields immediately below | |
- | |
- // ledgerMap lists block-related Ledgers by their Partner key. | |
- ledgerMap map[peer.ID]*ledger | |
- | |
- // peerLedger saves which peers are waiting for a Cid | |
- peerLedger *peerLedger | |
- | |
- // an external ledger dealing with peer scores | |
- scoreLedger ScoreLedger | |
- | |
- ticker *time.Ticker | |
- | |
- taskWorkerLock sync.Mutex | |
- taskWorkerCount int | |
- | |
- targetMessageSize int | |
- | |
- // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in | |
- // bytes up to which we will replace a want-have with a want-block | |
- maxBlockSizeReplaceHasWithBlock int | |
- | |
- sendDontHaves bool | |
- | |
- self peer.ID | |
- | |
- // metrics gauge for total pending tasks across all workers | |
- pendingGauge metrics.Gauge | |
- | |
-	// metrics gauge for total active tasks across all workers | 
- activeGauge metrics.Gauge | |
- | |
-	// used to ensure metrics are reported every fixed number of operations | 
- metricsLock sync.Mutex | |
- metricUpdateCounter int | |
- | |
- taskComparator TaskComparator | |
- | |
- peerBlockRequestFilter PeerBlockRequestFilter | |
- | |
- bstoreWorkerCount int | |
- maxOutstandingBytesPerPeer int | |
-} | |
- | |
-// TaskInfo represents the details of a request from a peer. | |
-type TaskInfo struct { | |
- Peer peer.ID | |
- // The CID of the block | |
- Cid cid.Cid | |
- // Tasks can be want-have or want-block | |
- IsWantBlock bool | |
- // Whether to immediately send a response if the block is not found | |
- SendDontHave bool | |
- // The size of the block corresponding to the task | |
- BlockSize int | |
- // Whether the block was found | |
- HaveBlock bool | |
-} | |
- | |
-// TaskComparator is used for task prioritization. | |
-// It should return true if task 'ta' has higher priority than task 'tb' | |
-type TaskComparator func(ta, tb *TaskInfo) bool | |
- | |
-// PeerBlockRequestFilter is used to accept / deny requests for a CID coming from a PeerID | |
-// It should return true if the request should be fulfilled. | 
-type PeerBlockRequestFilter func(p peer.ID, c cid.Cid) bool | |
- | |
-type Option func(*Engine) | |
- | |
-func WithTaskComparator(comparator TaskComparator) Option { | |
- return func(e *Engine) { | |
- e.taskComparator = comparator | |
- } | |
-} | |
- | |
-func WithPeerBlockRequestFilter(pbrf PeerBlockRequestFilter) Option { | |
- return func(e *Engine) { | |
- e.peerBlockRequestFilter = pbrf | |
- } | |
-} | |
- | |
-func WithTargetMessageSize(size int) Option { | |
- return func(e *Engine) { | |
- e.targetMessageSize = size | |
- } | |
-} | |
- | |
-func WithScoreLedger(scoreledger ScoreLedger) Option { | |
- return func(e *Engine) { | |
- e.scoreLedger = scoreledger | |
- } | |
-} | |
- | |
-// WithBlockstoreWorkerCount sets the number of worker threads used for | |
-// blockstore operations in the decision engine | |
-func WithBlockstoreWorkerCount(count int) Option { | |
- if count <= 0 { | |
- panic(fmt.Sprintf("Engine blockstore worker count is %d but must be > 0", count)) | |
- } | |
- return func(e *Engine) { | |
- e.bstoreWorkerCount = count | |
- } | |
-} | |
- | |
-// WithTaskWorkerCount sets the number of worker threads used inside the engine | |
-func WithTaskWorkerCount(count int) Option { | |
- if count <= 0 { | |
- panic(fmt.Sprintf("Engine task worker count is %d but must be > 0", count)) | |
- } | |
- return func(e *Engine) { | |
- e.taskWorkerCount = count | |
- } | |
-} | |
- | |
-// WithMaxOutstandingBytesPerPeer describes approximately how much work we are willing to have outstanding to a peer at any | 
-// given time. Setting it to 0 will disable any limiting. | |
-func WithMaxOutstandingBytesPerPeer(count int) Option { | |
- if count < 0 { | |
- panic(fmt.Sprintf("max outstanding bytes per peer is %d but must be >= 0", count)) | |
- } | |
- return func(e *Engine) { | |
- e.maxOutstandingBytesPerPeer = count | |
- } | |
-} | |
- | |
-func WithSetSendDontHave(send bool) Option { | |
- return func(e *Engine) { | |
- e.sendDontHaves = send | |
- } | |
-} | |
- | |
-// wrapTaskComparator wraps a TaskComparator so it can be used as a QueueTaskComparator | |
-func wrapTaskComparator(tc TaskComparator) peertask.QueueTaskComparator { | |
- return func(a, b *peertask.QueueTask) bool { | |
- taskDataA := a.Task.Data.(*taskData) | |
- taskInfoA := &TaskInfo{ | |
- Peer: a.Target, | |
- Cid: a.Task.Topic.(cid.Cid), | |
- IsWantBlock: taskDataA.IsWantBlock, | |
- SendDontHave: taskDataA.SendDontHave, | |
- BlockSize: taskDataA.BlockSize, | |
- HaveBlock: taskDataA.HaveBlock, | |
- } | |
- taskDataB := b.Task.Data.(*taskData) | |
- taskInfoB := &TaskInfo{ | |
- Peer: b.Target, | |
- Cid: b.Task.Topic.(cid.Cid), | |
- IsWantBlock: taskDataB.IsWantBlock, | |
- SendDontHave: taskDataB.SendDontHave, | |
- BlockSize: taskDataB.BlockSize, | |
- HaveBlock: taskDataB.HaveBlock, | |
- } | |
- return tc(taskInfoA, taskInfoB) | |
- } | |
-} | |
- | |
-// NewEngine creates a new block sending engine for the given block store. | |
-// maxOutstandingBytesPerPeer hints to the peer task queue not to give a peer more tasks if it already has a | 
-// maximum amount of work outstanding. | 
-func NewEngine( | |
- ctx context.Context, | |
- bs bstore.Blockstore, | |
- peerTagger PeerTagger, | |
- self peer.ID, | |
- opts ...Option, | |
-) *Engine { | |
- return newEngine( | |
- ctx, | |
- bs, | |
- peerTagger, | |
- self, | |
- maxBlockSizeReplaceHasWithBlock, | |
- opts..., | |
- ) | |
-} | |
- | |
-func newEngine( | |
- ctx context.Context, | |
- bs bstore.Blockstore, | |
- peerTagger PeerTagger, | |
- self peer.ID, | |
- maxReplaceSize int, | |
- opts ...Option, | |
-) *Engine { | |
- e := &Engine{ | |
- ledgerMap: make(map[peer.ID]*ledger), | |
- scoreLedger: NewDefaultScoreLedger(), | |
- bstoreWorkerCount: defaults.BitswapEngineBlockstoreWorkerCount, | |
- maxOutstandingBytesPerPeer: defaults.BitswapMaxOutstandingBytesPerPeer, | |
- peerTagger: peerTagger, | |
- outbox: make(chan (<-chan *Envelope), outboxChanBuffer), | |
- workSignal: make(chan struct{}, 1), | |
- ticker: time.NewTicker(time.Millisecond * 100), | |
- maxBlockSizeReplaceHasWithBlock: maxReplaceSize, | |
- taskWorkerCount: defaults.BitswapEngineTaskWorkerCount, | |
- sendDontHaves: true, | |
- self: self, | |
- peerLedger: newPeerLedger(), | |
- pendingGauge: bmetrics.PendingEngineGauge(ctx), | |
- activeGauge: bmetrics.ActiveEngineGauge(ctx), | |
- targetMessageSize: defaultTargetMessageSize, | |
- tagQueued: fmt.Sprintf(tagFormat, "queued", uuid.New().String()), | |
- tagUseful: fmt.Sprintf(tagFormat, "useful", uuid.New().String()), | |
- } | |
- | |
- for _, opt := range opts { | |
- opt(e) | |
- } | |
- | |
- e.bsm = newBlockstoreManager(bs, e.bstoreWorkerCount, bmetrics.PendingBlocksGauge(ctx), bmetrics.ActiveBlocksGauge(ctx)) | |
- | |
- // default peer task queue options | |
- peerTaskQueueOpts := []peertaskqueue.Option{ | |
- peertaskqueue.OnPeerAddedHook(e.onPeerAdded), | |
- peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved), | |
- peertaskqueue.TaskMerger(newTaskMerger()), | |
- peertaskqueue.IgnoreFreezing(true), | |
- peertaskqueue.MaxOutstandingWorkPerPeer(e.maxOutstandingBytesPerPeer), | |
- } | |
- | |
- if e.taskComparator != nil { | |
- queueTaskComparator := wrapTaskComparator(e.taskComparator) | |
- peerTaskQueueOpts = append(peerTaskQueueOpts, peertaskqueue.PeerComparator(peertracker.TaskPriorityPeerComparator(queueTaskComparator))) | |
- peerTaskQueueOpts = append(peerTaskQueueOpts, peertaskqueue.TaskComparator(queueTaskComparator)) | |
- } | |
- | |
- e.peerRequestQueue = peertaskqueue.New(peerTaskQueueOpts...) | |
- | |
- return e | |
-} | |
- | |
-func (e *Engine) updateMetrics() { | |
- e.metricsLock.Lock() | |
- c := e.metricUpdateCounter | |
- e.metricUpdateCounter++ | |
- e.metricsLock.Unlock() | |
- | |
- if c%100 == 0 { | |
- stats := e.peerRequestQueue.Stats() | |
- e.activeGauge.Set(float64(stats.NumActive)) | |
- e.pendingGauge.Set(float64(stats.NumPending)) | |
- } | |
-} | |
- | |
-// SetSendDontHaves indicates what to do when the engine receives a want-block | |
-// for a block that is not in the blockstore. Either | |
-// - Send a DONT_HAVE message | |
-// - Simply don't respond | |
-// Older versions of Bitswap did not respond, so this allows us to simulate | |
-// those older versions for testing. | |
-func (e *Engine) SetSendDontHaves(send bool) { | |
- e.sendDontHaves = send | |
-} | |
- | |
-// Starts the score ledger. Before starting, the function checks whether the | 
-// scoreLedger is set and, if it is unset, initializes it with the default | 
-// implementation. | 
-func (e *Engine) startScoreLedger(px process.Process) { | |
- e.scoreLedger.Start(func(p peer.ID, score int) { | |
- if score == 0 { | |
- e.peerTagger.UntagPeer(p, e.tagUseful) | |
- } else { | |
- e.peerTagger.TagPeer(p, e.tagUseful, score) | |
- } | |
- }) | |
- px.Go(func(ppx process.Process) { | |
- <-ppx.Closing() | |
- e.scoreLedger.Stop() | |
- }) | |
-} | |
- | |
-func (e *Engine) startBlockstoreManager(px process.Process) { | |
- e.bsm.start() | |
- px.Go(func(ppx process.Process) { | |
- <-ppx.Closing() | |
- e.bsm.stop() | |
- }) | |
-} | |
- | |
-// Start up workers to handle requests from other nodes for the data on this node | |
-func (e *Engine) StartWorkers(ctx context.Context, px process.Process) { | |
- e.startBlockstoreManager(px) | |
- e.startScoreLedger(px) | |
- | |
- e.taskWorkerLock.Lock() | |
- defer e.taskWorkerLock.Unlock() | |
- | |
- for i := 0; i < e.taskWorkerCount; i++ { | |
- px.Go(func(_ process.Process) { | |
- e.taskWorker(ctx) | |
- }) | |
- } | |
- | |
-} | |
- | |
-func (e *Engine) onPeerAdded(p peer.ID) { | |
- e.peerTagger.TagPeer(p, e.tagQueued, queuedTagWeight) | |
-} | |
- | |
-func (e *Engine) onPeerRemoved(p peer.ID) { | |
- e.peerTagger.UntagPeer(p, e.tagQueued) | |
-} | |
- | |
-// WantlistForPeer returns the list of keys that the given peer has asked for | |
-func (e *Engine) WantlistForPeer(p peer.ID) []wl.Entry { | |
- partner := e.findOrCreate(p) | |
- | |
- partner.lk.Lock() | |
- entries := partner.wantList.Entries() | |
- partner.lk.Unlock() | |
- | |
- return entries | |
-} | |
- | |
-// LedgerForPeer returns aggregated data about communication with a given peer. | 
-func (e *Engine) LedgerForPeer(p peer.ID) *Receipt { | |
- return e.scoreLedger.GetReceipt(p) | |
-} | |
- | |
-// Each taskWorker pulls items off the request queue up to the maximum size | |
-// and adds them to an envelope that is passed off to the bitswap workers, | |
-// which send the message to the network. | |
-func (e *Engine) taskWorker(ctx context.Context) { | |
- defer e.taskWorkerExit() | |
- for { | |
- oneTimeUse := make(chan *Envelope, 1) // buffer to prevent blocking | |
- select { | |
- case <-ctx.Done(): | |
- return | |
- case e.outbox <- oneTimeUse: | |
- } | |
-		// receiver is ready for an outgoing envelope. let's prepare one. first, | 
- // we must acquire a task from the PQ... | |
- envelope, err := e.nextEnvelope(ctx) | |
- if err != nil { | |
- close(oneTimeUse) | |
- return // ctx cancelled | |
- } | |
- oneTimeUse <- envelope // buffered. won't block | |
- close(oneTimeUse) | |
- } | |
-} | |
- | |
-// taskWorkerExit handles cleanup of task workers | |
-func (e *Engine) taskWorkerExit() { | |
- e.taskWorkerLock.Lock() | |
- defer e.taskWorkerLock.Unlock() | |
- | |
- e.taskWorkerCount-- | |
- if e.taskWorkerCount == 0 { | |
- close(e.outbox) | |
- } | |
-} | |
- | |
-// nextEnvelope runs in the taskWorker goroutine. Returns an error if the | |
-// context is cancelled before the next Envelope can be created. | |
-func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { | |
- for { | |
- // Pop some tasks off the request queue | |
- p, nextTasks, pendingBytes := e.peerRequestQueue.PopTasks(e.targetMessageSize) | |
- e.updateMetrics() | |
- for len(nextTasks) == 0 { | |
- select { | |
- case <-ctx.Done(): | |
- return nil, ctx.Err() | |
- case <-e.workSignal: | |
- p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(e.targetMessageSize) | |
- e.updateMetrics() | |
- case <-e.ticker.C: | |
- // When a task is cancelled, the queue may be "frozen" for a | |
- // period of time. We periodically "thaw" the queue to make | |
- // sure it doesn't get stuck in a frozen state. | |
- e.peerRequestQueue.ThawRound() | |
- p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(e.targetMessageSize) | |
- e.updateMetrics() | |
- } | |
- } | |
- | |
- // Create a new message | |
- msg := bsmsg.New(false) | |
- | |
- log.Debugw("Bitswap process tasks", "local", e.self, "taskCount", len(nextTasks)) | |
- | |
- // Amount of data in the request queue still waiting to be popped | |
- msg.SetPendingBytes(int32(pendingBytes)) | |
- | |
- // Split out want-blocks, want-haves and DONT_HAVEs | |
- blockCids := make([]cid.Cid, 0, len(nextTasks)) | |
- blockTasks := make(map[cid.Cid]*taskData, len(nextTasks)) | |
- for _, t := range nextTasks { | |
- c := t.Topic.(cid.Cid) | |
- td := t.Data.(*taskData) | |
- if td.HaveBlock { | |
- if td.IsWantBlock { | |
- blockCids = append(blockCids, c) | |
- blockTasks[c] = td | |
- } else { | |
- // Add HAVES to the message | |
- msg.AddHave(c) | |
- } | |
- } else { | |
- // Add DONT_HAVEs to the message | |
- msg.AddDontHave(c) | |
- } | |
- } | |
- | |
- // Fetch blocks from datastore | |
- blks, err := e.bsm.getBlocks(ctx, blockCids) | |
- if err != nil { | |
- // we're dropping the envelope but that's not an issue in practice. | |
- return nil, err | |
- } | |
- | |
- for c, t := range blockTasks { | |
- blk := blks[c] | |
- // If the block was not found (it has been removed) | |
- if blk == nil { | |
- // If the client requested DONT_HAVE, add DONT_HAVE to the message | |
- if t.SendDontHave { | |
- msg.AddDontHave(c) | |
- } | |
- } else { | |
- // Add the block to the message | |
- // log.Debugf(" make evlp %s->%s block: %s (%d bytes)", e.self, p, c, len(blk.RawData())) | |
- msg.AddBlock(blk) | |
- } | |
- } | |
- | |
- // If there's nothing in the message, bail out | |
- if msg.Empty() { | |
- e.peerRequestQueue.TasksDone(p, nextTasks...) | |
- continue | |
- } | |
- | |
- log.Debugw("Bitswap engine -> msg", "local", e.self, "to", p, "blockCount", len(msg.Blocks()), "presenceCount", len(msg.BlockPresences()), "size", msg.Size()) | |
- return &Envelope{ | |
- Peer: p, | |
- Message: msg, | |
- Sent: func() { | |
- // Once the message has been sent, signal the request queue so | |
- // it can be cleared from the queue | |
- e.peerRequestQueue.TasksDone(p, nextTasks...) | |
- | |
- // Signal the worker to check for more work | |
- e.signalNewWork() | |
- }, | |
- }, nil | |
- } | |
-} | |
- | |
-// Outbox returns a channel of one-time use Envelope channels. | |
-func (e *Engine) Outbox() <-chan (<-chan *Envelope) { | |
- return e.outbox | |
-} | |
- | |
-// Peers returns a slice of Peers with whom the local node has active sessions. | |
-func (e *Engine) Peers() []peer.ID { | |
- e.lock.RLock() | |
- defer e.lock.RUnlock() | |
- | |
- response := make([]peer.ID, 0, len(e.ledgerMap)) | |
- | |
- for _, ledger := range e.ledgerMap { | |
- response = append(response, ledger.Partner) | |
- } | |
- return response | |
-} | |
- | |
-// MessageReceived is called when a message is received from a remote peer. | |
-// For each item in the wantlist, add a want-have or want-block entry to the | |
-// request queue (this is later popped off by the task workers) | 
-func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) { | |
- entries := m.Wantlist() | |
- | |
- if len(entries) > 0 { | |
- log.Debugw("Bitswap engine <- msg", "local", e.self, "from", p, "entryCount", len(entries)) | |
- for _, et := range entries { | |
- if !et.Cancel { | |
- if et.WantType == pb.Message_Wantlist_Have { | |
- log.Debugw("Bitswap engine <- want-have", "local", e.self, "from", p, "cid", et.Cid) | |
- } else { | |
- log.Debugw("Bitswap engine <- want-block", "local", e.self, "from", p, "cid", et.Cid) | |
- } | |
- } | |
- } | |
- } | |
- | |
- if m.Empty() { | |
- log.Infof("received empty message from %s", p) | |
- } | |
- | |
- newWorkExists := false | |
- defer func() { | |
- if newWorkExists { | |
- e.signalNewWork() | |
- } | |
- }() | |
- | |
- // Dispatch entries | |
- wants, cancels := e.splitWantsCancels(entries) | |
- wants, denials := e.splitWantsDenials(p, wants) | |
- | |
- // Get block sizes | |
- wantKs := cid.NewSet() | |
- for _, entry := range wants { | |
- wantKs.Add(entry.Cid) | |
- } | |
- blockSizes, err := e.bsm.getBlockSizes(ctx, wantKs.Keys()) | |
- if err != nil { | |
- log.Info("aborting message processing", err) | |
- return | |
- } | |
- | |
- e.lock.Lock() | |
- for _, entry := range wants { | |
- e.peerLedger.Wants(p, entry.Cid) | |
- } | |
- for _, entry := range cancels { | |
- e.peerLedger.CancelWant(p, entry.Cid) | |
- } | |
- e.lock.Unlock() | |
- | |
- // Get the ledger for the peer | |
- l := e.findOrCreate(p) | |
- l.lk.Lock() | |
- defer l.lk.Unlock() | |
- | |
- // If the peer sent a full wantlist, replace the ledger's wantlist | |
- if m.Full() { | |
- l.wantList = wl.New() | |
- } | |
- | |
- var activeEntries []peertask.Task | |
- | |
- // Remove cancelled blocks from the queue | |
- for _, entry := range cancels { | |
- log.Debugw("Bitswap engine <- cancel", "local", e.self, "from", p, "cid", entry.Cid) | |
- if l.CancelWant(entry.Cid) { | |
- e.peerRequestQueue.Remove(entry.Cid, p) | |
- } | |
- } | |
- | |
- // Cancel a block operation | |
- sendDontHave := func(entry bsmsg.Entry) { | |
- // Only add the task to the queue if the requester wants a DONT_HAVE | |
- if e.sendDontHaves && entry.SendDontHave { | |
- c := entry.Cid | |
- | |
- newWorkExists = true | |
- isWantBlock := false | |
- if entry.WantType == pb.Message_Wantlist_Block { | |
- isWantBlock = true | |
- } | |
- | |
- activeEntries = append(activeEntries, peertask.Task{ | |
- Topic: c, | |
- Priority: int(entry.Priority), | |
- Work: bsmsg.BlockPresenceSize(c), | |
- Data: &taskData{ | |
- BlockSize: 0, | |
- HaveBlock: false, | |
- IsWantBlock: isWantBlock, | |
- SendDontHave: entry.SendDontHave, | |
- }, | |
- }) | |
- } | |
- } | |
- | |
- // Deny access to blocks | |
- for _, entry := range denials { | |
- log.Debugw("Bitswap engine: block denied access", "local", e.self, "from", p, "cid", entry.Cid, "sendDontHave", entry.SendDontHave) | |
- sendDontHave(entry) | |
- } | |
- | |
- // For each want-have / want-block | |
- for _, entry := range wants { | |
- c := entry.Cid | |
- blockSize, found := blockSizes[entry.Cid] | |
- | |
- // Add each want-have / want-block to the ledger | |
- l.Wants(c, entry.Priority, entry.WantType) | |
- | |
- // If the block was not found | |
- if !found { | |
- log.Debugw("Bitswap engine: block not found", "local", e.self, "from", p, "cid", entry.Cid, "sendDontHave", entry.SendDontHave) | |
- sendDontHave(entry) | |
- } else { | |
- // The block was found, add it to the queue | |
- newWorkExists = true | |
- | |
- isWantBlock := e.sendAsBlock(entry.WantType, blockSize) | |
- | |
- log.Debugw("Bitswap engine: block found", "local", e.self, "from", p, "cid", entry.Cid, "isWantBlock", isWantBlock) | |
- | |
- // entrySize is the amount of space the entry takes up in the | |
- // message we send to the recipient. If we're sending a block, the | |
- // entrySize is the size of the block. Otherwise it's the size of | |
- // a block presence entry. | |
- entrySize := blockSize | |
- if !isWantBlock { | |
- entrySize = bsmsg.BlockPresenceSize(c) | |
- } | |
- activeEntries = append(activeEntries, peertask.Task{ | |
- Topic: c, | |
- Priority: int(entry.Priority), | |
- Work: entrySize, | |
- Data: &taskData{ | |
- BlockSize: blockSize, | |
- HaveBlock: true, | |
- IsWantBlock: isWantBlock, | |
- SendDontHave: entry.SendDontHave, | |
- }, | |
- }) | |
- } | |
- } | |
- | |
- // Push entries onto the request queue | |
- if len(activeEntries) > 0 { | |
- e.peerRequestQueue.PushTasks(p, activeEntries...) | |
- e.updateMetrics() | |
- } | |
-} | |
- | |
-// Split the want-have / want-block entries from the cancel entries | |
-func (e *Engine) splitWantsCancels(es []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Entry) { | |
- wants := make([]bsmsg.Entry, 0, len(es)) | |
- cancels := make([]bsmsg.Entry, 0, len(es)) | |
- for _, et := range es { | |
- if et.Cancel { | |
- cancels = append(cancels, et) | |
- } else { | |
- wants = append(wants, et) | |
- } | |
- } | |
- return wants, cancels | |
-} | |
- | |
-// Split the want-have / want-block entries from the block that will be denied access | |
-func (e *Engine) splitWantsDenials(p peer.ID, allWants []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Entry) { | |
- if e.peerBlockRequestFilter == nil { | |
- return allWants, nil | |
- } | |
- | |
- wants := make([]bsmsg.Entry, 0, len(allWants)) | |
- denied := make([]bsmsg.Entry, 0, len(allWants)) | |
- | |
- for _, et := range allWants { | |
- if e.peerBlockRequestFilter(p, et.Cid) { | |
- wants = append(wants, et) | |
- } else { | |
- denied = append(denied, et) | |
- } | |
- } | |
- | |
- return wants, denied | |
-} | |
- | |
-// ReceivedBlocks is called when new blocks are received from the network. | |
-// This function also updates the receive side of the ledger. | |
-func (e *Engine) ReceivedBlocks(from peer.ID, blks []blocks.Block) { | |
- if len(blks) == 0 { | |
- return | |
- } | |
- | |
- l := e.findOrCreate(from) | |
- | |
- // Record how many bytes were received in the ledger | |
- l.lk.Lock() | |
- defer l.lk.Unlock() | |
- for _, blk := range blks { | |
- log.Debugw("Bitswap engine <- block", "local", e.self, "from", from, "cid", blk.Cid(), "size", len(blk.RawData())) | |
- e.scoreLedger.AddToReceivedBytes(l.Partner, len(blk.RawData())) | |
- } | |
-} | |
- | |
-// NotifyNewBlocks is called when new blocks become available locally, in particular when the caller of bitswap | 
-// decides to store those blocks and make them available on the network. | 
-func (e *Engine) NotifyNewBlocks(blks []blocks.Block) { | |
- if len(blks) == 0 { | |
- return | |
- } | |
- | |
- // Get the size of each block | |
- blockSizes := make(map[cid.Cid]int, len(blks)) | |
- for _, blk := range blks { | |
- blockSizes[blk.Cid()] = len(blk.RawData()) | |
- } | |
- | |
- // Check each peer to see if it wants one of the blocks we received | |
- var work bool | |
- missingWants := make(map[peer.ID][]cid.Cid) | |
- for _, b := range blks { | |
- k := b.Cid() | |
- | |
- e.lock.RLock() | |
- peers := e.peerLedger.Peers(k) | |
- e.lock.RUnlock() | |
- | |
- for _, p := range peers { | |
- e.lock.RLock() | |
- ledger, ok := e.ledgerMap[p] | |
- e.lock.RUnlock() | |
- | |
- if !ok { | |
- // This can happen if the peer has disconnected while we're processing this list. | |
- log.Debugw("failed to find peer in ledger", "peer", p) | |
- missingWants[p] = append(missingWants[p], k) | |
- continue | |
- } | |
- ledger.lk.RLock() | |
- entry, ok := ledger.WantListContains(k) | |
- ledger.lk.RUnlock() | |
- if !ok { | |
- // This can happen if the peer has canceled their want while we're processing this message. | |
- log.Debugw("wantlist index doesn't match peer's wantlist", "peer", p) | |
- missingWants[p] = append(missingWants[p], k) | |
- continue | |
- } | |
- work = true | |
- | |
- blockSize := blockSizes[k] | |
- isWantBlock := e.sendAsBlock(entry.WantType, blockSize) | |
- | |
- entrySize := blockSize | |
- if !isWantBlock { | |
- entrySize = bsmsg.BlockPresenceSize(k) | |
- } | |
- | |
- e.peerRequestQueue.PushTasks(p, peertask.Task{ | |
- Topic: entry.Cid, | |
- Priority: int(entry.Priority), | |
- Work: entrySize, | |
- Data: &taskData{ | |
- BlockSize: blockSize, | |
- HaveBlock: true, | |
- IsWantBlock: isWantBlock, | |
- SendDontHave: false, | |
- }, | |
- }) | |
- e.updateMetrics() | |
- } | |
- } | |
- | |
-	// If we found missing wants (e.g., because the peer disconnected; there are some races here), | 
-	// remove them from the list. Unfortunately, we still have to re-check because the peer | 
-	// could have re-connected in the meantime. | 
- if len(missingWants) > 0 { | |
- e.lock.Lock() | |
- for p, wl := range missingWants { | |
- if ledger, ok := e.ledgerMap[p]; ok { | |
- ledger.lk.RLock() | |
- for _, k := range wl { | |
- if _, has := ledger.WantListContains(k); has { | |
- continue | |
- } | |
- e.peerLedger.CancelWant(p, k) | |
- } | |
- ledger.lk.RUnlock() | |
- } else { | |
- for _, k := range wl { | |
- e.peerLedger.CancelWant(p, k) | |
- } | |
- } | |
- } | |
- e.lock.Unlock() | |
- } | |
- | |
- if work { | |
- e.signalNewWork() | |
- } | |
-} | |
- | |
-// TODO add contents of m.WantList() to my local wantlist? NB: could introduce | |
-// race conditions where I send a message, but MessageSent gets handled after | |
-// MessageReceived. The information in the local wantlist could become | |
-// inconsistent. Would need to ensure that Sends and acknowledgement of the | |
-// send happen atomically | |
- | |
-// MessageSent is called when a message has successfully been sent out, to record | |
-// changes. | |
-func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) { | |
- l := e.findOrCreate(p) | |
- l.lk.Lock() | |
- defer l.lk.Unlock() | |
- | |
- // Remove sent blocks from the want list for the peer | |
- for _, block := range m.Blocks() { | |
- e.scoreLedger.AddToSentBytes(l.Partner, len(block.RawData())) | |
- l.wantList.RemoveType(block.Cid(), pb.Message_Wantlist_Block) | |
- } | |
- | |
- // Remove sent block presences from the want list for the peer | |
- for _, bp := range m.BlockPresences() { | |
- // Don't record sent data. We reserve that for data blocks. | |
- if bp.Type == pb.Message_Have { | |
- l.wantList.RemoveType(bp.Cid, pb.Message_Wantlist_Have) | |
- } | |
- } | |
-} | |
- | |
-// PeerConnected is called when a new peer connects, meaning we should start | |
-// sending blocks. | |
-func (e *Engine) PeerConnected(p peer.ID) { | |
- e.lock.Lock() | |
- defer e.lock.Unlock() | |
- | |
- _, ok := e.ledgerMap[p] | |
- if !ok { | |
- e.ledgerMap[p] = newLedger(p) | |
- } | |
- | |
- e.scoreLedger.PeerConnected(p) | |
-} | |
- | |
-// PeerDisconnected is called when a peer disconnects. | |
-func (e *Engine) PeerDisconnected(p peer.ID) { | |
- e.lock.Lock() | |
- defer e.lock.Unlock() | |
- | |
- ledger, ok := e.ledgerMap[p] | |
- if ok { | |
- ledger.lk.RLock() | |
- entries := ledger.Entries() | |
- ledger.lk.RUnlock() | |
- | |
- for _, entry := range entries { | |
- e.peerLedger.CancelWant(p, entry.Cid) | |
- } | |
- } | |
- delete(e.ledgerMap, p) | |
- | |
- e.scoreLedger.PeerDisconnected(p) | |
-} | |
- | |
-// If the want is a want-have, and it's below a certain size, send the full | |
-// block (instead of sending a HAVE) | |
-func (e *Engine) sendAsBlock(wantType pb.Message_Wantlist_WantType, blockSize int) bool { | |
- isWantBlock := wantType == pb.Message_Wantlist_Block | |
- return isWantBlock || blockSize <= e.maxBlockSizeReplaceHasWithBlock | |
-} | |
- | |
-func (e *Engine) numBytesSentTo(p peer.ID) uint64 { | |
- return e.LedgerForPeer(p).Sent | |
-} | |
- | |
-func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 { | |
- return e.LedgerForPeer(p).Recv | |
-} | |
- | |
-// findOrCreate lazily instantiates a ledger for the given peer |
-func (e *Engine) findOrCreate(p peer.ID) *ledger { | |
- // Take a read lock (as it's less expensive) to check if we have a ledger | |
- // for the peer | |
- e.lock.RLock() | |
- l, ok := e.ledgerMap[p] | |
- e.lock.RUnlock() | |
- if ok { | |
- return l | |
- } | |
- | |
- // There's no ledger, so take a write lock, then check again and create the | |
- // ledger if necessary | |
- e.lock.Lock() | |
- defer e.lock.Unlock() | |
- l, ok = e.ledgerMap[p] | |
- if !ok { | |
- l = newLedger(p) | |
- e.ledgerMap[p] = l | |
- } | |
- return l | |
-} | |
- | |
-func (e *Engine) signalNewWork() { | |
- // Signal task generation to restart (if stopped!) | |
- select { | |
- case e.workSignal <- struct{}{}: | |
- default: | |
- } | |
-} | |
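A note on the pattern used by findOrCreate above (and repeated by the score ledger further down): check under a read lock, and only if the entry is missing take the write lock and check again, since another goroutine may have created it in between. The sketch below is not part of the patch; the registry type and string keys are invented stand-ins for the engine's ledgerMap.

package main

import (
	"fmt"
	"sync"
)

// registry is a hypothetical stand-in for the engine's ledgerMap: a map
// guarded by an RWMutex and populated lazily.
type registry struct {
	mu      sync.RWMutex
	ledgers map[string]*int
}

func (r *registry) findOrCreate(key string) *int {
	// Fast path: a read lock is cheaper when the entry already exists.
	r.mu.RLock()
	l, ok := r.ledgers[key]
	r.mu.RUnlock()
	if ok {
		return l
	}

	// Slow path: take the write lock, then re-check, because another
	// goroutine may have created the entry between the two lock sections.
	r.mu.Lock()
	defer r.mu.Unlock()
	l, ok = r.ledgers[key]
	if !ok {
		l = new(int)
		r.ledgers[key] = l
	}
	return l
}

func main() {
	r := &registry{ledgers: make(map[string]*int)}
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = r.findOrCreate("peerA")
		}()
	}
	wg.Wait()
	fmt.Println(len(r.ledgers)) // 1: all goroutines share a single entry
}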
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/server/internal/decision/ewma.go a/vendor/github.com/ipfs/go-bitswap/server/internal/decision/ewma.go | |
--- b/vendor/github.com/ipfs/go-bitswap/server/internal/decision/ewma.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/server/internal/decision/ewma.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,5 +0,0 @@ | |
-package decision | |
- | |
-func ewma(old, new, alpha float64) float64 { | |
- return new*alpha + (1-alpha)*old | |
-} | |
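The deleted ewma helper is a plain exponentially weighted moving average: new observations are weighted by alpha, history by (1-alpha). As a quick standalone illustration (not part of the patch) of how the two alphas used by the score ledger behave, 0.5 for the short-term score and 0.05 for the long-term one:

package main

import "fmt"

// ewma mirrors the helper above.
func ewma(old, new, alpha float64) float64 {
	return new*alpha + (1-alpha)*old
}

func main() {
	// One active sample followed by silence. The short-term average (alpha
	// 0.5) jumps up and decays quickly; the long-term one (alpha 0.05)
	// barely moves in either direction.
	short, long := 0.0, 0.0
	for _, s := range []float64{10, 0, 0, 0} {
		short = ewma(short, s, 0.5)
		long = ewma(long, s, 0.05)
		fmt.Printf("sample=%2.0f short=%.3f long=%.3f\n", s, short, long)
	}
}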
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/server/internal/decision/ledger.go a/vendor/github.com/ipfs/go-bitswap/server/internal/decision/ledger.go | |
--- b/vendor/github.com/ipfs/go-bitswap/server/internal/decision/ledger.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/server/internal/decision/ledger.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,46 +0,0 @@ | |
-package decision | |
- | |
-import ( | |
- "sync" | |
- | |
- wl "github.com/ipfs/go-bitswap/client/wantlist" | |
- pb "github.com/ipfs/go-bitswap/message/pb" | |
- | |
- "github.com/ipfs/go-cid" | |
- "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-func newLedger(p peer.ID) *ledger { | |
- return &ledger{ | |
- wantList: wl.New(), | |
- Partner: p, | |
- } | |
-} | |
- | |
-// Keeps the wantlist for the partner. NOT threadsafe! | |
-type ledger struct { | |
- // Partner is the remote Peer. | |
- Partner peer.ID | |
- | |
- // wantList is a (bounded, small) set of keys that Partner desires. | |
- wantList *wl.Wantlist | |
- | |
- lk sync.RWMutex | |
-} | |
- | |
-func (l *ledger) Wants(k cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType) { | |
- log.Debugf("peer %s wants %s", l.Partner, k) | |
- l.wantList.Add(k, priority, wantType) | |
-} | |
- | |
-func (l *ledger) CancelWant(k cid.Cid) bool { | |
- return l.wantList.Remove(k) | |
-} | |
- | |
-func (l *ledger) WantListContains(k cid.Cid) (wl.Entry, bool) { | |
- return l.wantList.Contains(k) | |
-} | |
- | |
-func (l *ledger) Entries() []wl.Entry { | |
- return l.wantList.Entries() | |
-} | |
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/server/internal/decision/peer_ledger.go a/vendor/github.com/ipfs/go-bitswap/server/internal/decision/peer_ledger.go | |
--- b/vendor/github.com/ipfs/go-bitswap/server/internal/decision/peer_ledger.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/server/internal/decision/peer_ledger.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,46 +0,0 @@ | |
-package decision | |
- | |
-import ( | |
- "github.com/ipfs/go-cid" | |
- "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-type peerLedger struct { | |
- cids map[cid.Cid]map[peer.ID]struct{} | |
-} | |
- | |
-func newPeerLedger() *peerLedger { | |
- return &peerLedger{cids: make(map[cid.Cid]map[peer.ID]struct{})} | |
-} | |
- | |
-func (l *peerLedger) Wants(p peer.ID, k cid.Cid) { | |
- m, ok := l.cids[k] | |
- if !ok { | |
- m = make(map[peer.ID]struct{}) | |
- l.cids[k] = m | |
- } | |
- m[p] = struct{}{} | |
-} | |
- | |
-func (l *peerLedger) CancelWant(p peer.ID, k cid.Cid) { | |
- m, ok := l.cids[k] | |
- if !ok { | |
- return | |
- } | |
- delete(m, p) | |
- if len(m) == 0 { | |
- delete(l.cids, k) | |
- } | |
-} | |
- | |
-func (l *peerLedger) Peers(k cid.Cid) []peer.ID { | |
- m, ok := l.cids[k] | |
- if !ok { | |
- return nil | |
- } | |
- peers := make([]peer.ID, 0, len(m)) | |
- for p := range m { | |
- peers = append(peers, p) | |
- } | |
- return peers | |
-} | |
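peerLedger above is just an inverted index from a CID to the set of peers that currently want it, with empty sets pruned so the map does not grow without bound. A self-contained sketch of the same bookkeeping, using plain strings instead of cid.Cid and peer.ID so it runs without the IPFS dependencies (the wantIndex name is invented):

package main

import "fmt"

// wantIndex mirrors peerLedger: for each key, the set of peers wanting it.
type wantIndex struct {
	keys map[string]map[string]struct{}
}

func newWantIndex() *wantIndex {
	return &wantIndex{keys: make(map[string]map[string]struct{})}
}

func (w *wantIndex) Wants(p, key string) {
	m, ok := w.keys[key]
	if !ok {
		m = make(map[string]struct{})
		w.keys[key] = m
	}
	m[p] = struct{}{}
}

func (w *wantIndex) CancelWant(p, key string) {
	m, ok := w.keys[key]
	if !ok {
		return
	}
	delete(m, p)
	if len(m) == 0 {
		delete(w.keys, key) // prune empty sets
	}
}

func (w *wantIndex) Peers(key string) []string {
	peers := make([]string, 0, len(w.keys[key]))
	for p := range w.keys[key] {
		peers = append(peers, p)
	}
	return peers
}

func main() {
	idx := newWantIndex()
	idx.Wants("peerA", "cid1")
	idx.Wants("peerB", "cid1")
	idx.CancelWant("peerA", "cid1")
	fmt.Println(idx.Peers("cid1")) // [peerB]
}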
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/server/internal/decision/scoreledger.go a/vendor/github.com/ipfs/go-bitswap/server/internal/decision/scoreledger.go | |
--- b/vendor/github.com/ipfs/go-bitswap/server/internal/decision/scoreledger.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/server/internal/decision/scoreledger.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,353 +0,0 @@ | |
-package decision | |
- | |
-import ( | |
- "sync" | |
- "time" | |
- | |
- "github.com/benbjohnson/clock" | |
- peer "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-const ( | |
- // the alpha for the EWMA used to track short term usefulness | |
- shortTermAlpha = 0.5 | |
- | |
- // the alpha for the EWMA used to track long term usefulness | |
- longTermAlpha = 0.05 | |
- | |
- // how frequently the engine should sample usefulness. Peers that | |
- // interact every shortTerm time period are considered "active". | |
- shortTerm = 10 * time.Second | |
- | |
- // long term ratio defines what "long term" means in terms of the | |
- // shortTerm duration. Peers that interact once every longTermRatio are | |
- // considered useful over the long term. | |
- longTermRatio = 10 | |
- | |
- // long/short term scores for tagging peers | |
- longTermScore = 10 // this is a high tag but it grows _very_ slowly. | |
- shortTermScore = 10 // this is a high tag but it'll go away quickly if we aren't using the peer. | |
-) | |
- | |
-// Stores the data exchange relationship between two peers. | |
-type scoreledger struct { | |
- // Partner is the remote Peer. | |
- partner peer.ID | |
- | |
- // tracks bytes sent... | |
- bytesSent uint64 | |
- | |
- // ...and received. | |
- bytesRecv uint64 | |
- | |
- // lastExchange is the time of the last data exchange. | |
- lastExchange time.Time | |
- | |
- // These scores keep track of how useful we think this peer is. Short | |
- // tracks short-term usefulness and long tracks long-term usefulness. | |
- shortScore, longScore float64 | |
- | |
- // Score keeps track of the score used in the peer tagger. We track it | |
- // here to avoid unnecessarily updating the tags in the connection manager. | |
- score int | |
- | |
- // exchangeCount is the number of exchanges with this peer | |
- exchangeCount uint64 | |
- | |
- // the record lock | |
- lock sync.RWMutex | |
- | |
- clock clock.Clock | |
-} | |
- | |
-// Receipt is a summary of the ledger for a given peer | |
-// collecting various pieces of aggregated data for external | |
-// reporting purposes. | |
-type Receipt struct { | |
- Peer string | |
- Value float64 | |
- Sent uint64 | |
- Recv uint64 | |
- Exchanged uint64 | |
-} | |
- | |
-// Increments the sent counter. | |
-func (l *scoreledger) AddToSentBytes(n int) { | |
- l.lock.Lock() | |
- defer l.lock.Unlock() | |
- l.exchangeCount++ | |
- l.lastExchange = l.clock.Now() | |
- l.bytesSent += uint64(n) | |
-} | |
- | |
-// Increments the received counter. | |
-func (l *scoreledger) AddToReceivedBytes(n int) { | |
- l.lock.Lock() | |
- defer l.lock.Unlock() | |
- l.exchangeCount++ | |
- l.lastExchange = l.clock.Now() | |
- l.bytesRecv += uint64(n) | |
-} | |
- | |
-// Returns the Receipt for this ledger record. | |
-func (l *scoreledger) Receipt() *Receipt { | |
- l.lock.RLock() | |
- defer l.lock.RUnlock() | |
- | |
- return &Receipt{ | |
- Peer: l.partner.String(), | |
- Value: float64(l.bytesSent) / float64(l.bytesRecv+1), | |
- Sent: l.bytesSent, | |
- Recv: l.bytesRecv, | |
- Exchanged: l.exchangeCount, | |
- } | |
-} | |
- | |
-// DefaultScoreLedger is used by Engine as the default ScoreLedger. | |
-type DefaultScoreLedger struct { | |
- // the score func | |
- scorePeer ScorePeerFunc | |
- // is closed on Close | |
- closing chan struct{} | |
-	// protects the fields immediately below |
- lock sync.RWMutex | |
- // ledgerMap lists score ledgers by their partner key. | |
- ledgerMap map[peer.ID]*scoreledger | |
- // how frequently the engine should sample peer usefulness | |
- peerSampleInterval time.Duration | |
- // used by the tests to detect when a sample is taken | |
- sampleCh chan struct{} | |
- clock clock.Clock | |
-} | |
- | |
-// scoreWorker keeps track of how "useful" our peers are, updating scores in the | |
-// connection manager. | |
-// | |
-// It does this by tracking two scores: short-term usefulness and long-term | |
-// usefulness. Short-term usefulness is sampled frequently and highly weights | |
-// new observations. Long-term usefulness is sampled less frequently and highly | |
-// weights long-term trends. |
-// | |
-// In practice, we do this by keeping two EWMAs. If we see an interaction | |
-// within the sampling period, we record the score, otherwise, we record a 0. | |
-// The short-term one has a high alpha and is sampled every shortTerm period. | |
-// The long-term one has a low alpha and is sampled every | |
-// longTermRatio*shortTerm period. | |
-// | |
-// To calculate the final score, we sum the short-term and long-term scores then | |
-// adjust it ±25% based on our debt ratio. Peers that have historically been | |
-// more useful to us than we are to them get the highest score. | |
-func (dsl *DefaultScoreLedger) scoreWorker() { | |
- ticker := dsl.clock.Ticker(dsl.peerSampleInterval) | |
- defer ticker.Stop() | |
- | |
- type update struct { | |
- peer peer.ID | |
- score int | |
- } | |
- var ( | |
- lastShortUpdate, lastLongUpdate time.Time | |
- updates []update | |
- ) | |
- | |
- for i := 0; ; i = (i + 1) % longTermRatio { | |
- var now time.Time | |
- select { | |
- case now = <-ticker.C: | |
- case <-dsl.closing: | |
- return | |
- } | |
- | |
- // The long term update ticks every `longTermRatio` short | |
- // intervals. | |
- updateLong := i == 0 | |
- | |
- dsl.lock.Lock() | |
- for _, l := range dsl.ledgerMap { | |
- l.lock.Lock() | |
- | |
- // Update the short-term score. | |
- if l.lastExchange.After(lastShortUpdate) { | |
- l.shortScore = ewma(l.shortScore, shortTermScore, shortTermAlpha) | |
- } else { | |
- l.shortScore = ewma(l.shortScore, 0, shortTermAlpha) | |
- } | |
- | |
- // Update the long-term score. | |
- if updateLong { | |
- if l.lastExchange.After(lastLongUpdate) { | |
- l.longScore = ewma(l.longScore, longTermScore, longTermAlpha) | |
- } else { | |
- l.longScore = ewma(l.longScore, 0, longTermAlpha) | |
- } | |
- } | |
- | |
- // Calculate the new score. | |
- // | |
- // The accounting score adjustment prefers peers _we_ | |
- // need over peers that need us. This doesn't help with | |
- // leeching. | |
- var lscore float64 | |
- if l.bytesRecv == 0 { | |
- lscore = 0 | |
- } else { | |
- lscore = float64(l.bytesRecv) / float64(l.bytesRecv+l.bytesSent) | |
- } | |
- score := int((l.shortScore + l.longScore) * (lscore*.5 + .75)) | |
- | |
- // Avoid updating the connection manager unless there's a change. This can be expensive. | |
- if l.score != score { | |
-				// put these in a list so we can perform the updates outside the _global_ lock. |
- updates = append(updates, update{l.partner, score}) | |
- l.score = score | |
- } | |
- l.lock.Unlock() | |
- } | |
- dsl.lock.Unlock() | |
- | |
- // record the times. | |
- lastShortUpdate = now | |
- if updateLong { | |
- lastLongUpdate = now | |
- } | |
- | |
- // apply the updates | |
- for _, update := range updates { | |
- dsl.scorePeer(update.peer, update.score) | |
- } | |
- // Keep the memory. It's not much and it saves us from having to allocate. | |
- updates = updates[:0] | |
- | |
- // Used by the tests | |
- if dsl.sampleCh != nil { | |
- dsl.sampleCh <- struct{}{} | |
- } | |
- } | |
-} | |
- | |
-// Returns the score ledger for the given peer or nil if that peer | |
-// is not on the ledger. | |
-func (dsl *DefaultScoreLedger) find(p peer.ID) *scoreledger { | |
- // Take a read lock (as it's less expensive) to check if we have | |
- // a ledger for the peer. | |
- dsl.lock.RLock() | |
- l, ok := dsl.ledgerMap[p] | |
- dsl.lock.RUnlock() | |
- if ok { | |
- return l | |
- } | |
- return nil | |
-} | |
- | |
-// Returns a new scoreledger. | |
-func newScoreLedger(p peer.ID, clock clock.Clock) *scoreledger { | |
- return &scoreledger{ | |
- partner: p, | |
- clock: clock, | |
- } | |
-} | |
- | |
-// Lazily instantiates a ledger. | |
-func (dsl *DefaultScoreLedger) findOrCreate(p peer.ID) *scoreledger { | |
- l := dsl.find(p) | |
- if l != nil { | |
- return l | |
- } | |
- | |
- // There's no ledger, so take a write lock, then check again and | |
- // create the ledger if necessary. | |
- dsl.lock.Lock() | |
- defer dsl.lock.Unlock() | |
- l, ok := dsl.ledgerMap[p] | |
- if !ok { | |
- l = newScoreLedger(p, dsl.clock) | |
- dsl.ledgerMap[p] = l | |
- } | |
- return l | |
-} | |
- | |
-// GetReceipt returns aggregated data communication with a given peer. | |
-func (dsl *DefaultScoreLedger) GetReceipt(p peer.ID) *Receipt { | |
- l := dsl.find(p) | |
- if l != nil { | |
- return l.Receipt() | |
- } | |
- | |
- // Return a blank receipt otherwise. | |
- return &Receipt{ | |
- Peer: p.String(), | |
- Value: 0, | |
- Sent: 0, | |
- Recv: 0, | |
- Exchanged: 0, | |
- } | |
-} | |
- | |
-// Starts the default ledger sampling process. | |
-func (dsl *DefaultScoreLedger) Start(scorePeer ScorePeerFunc) { | |
- dsl.init(scorePeer) | |
- go dsl.scoreWorker() | |
-} | |
- | |
-// Stops the sampling process. | |
-func (dsl *DefaultScoreLedger) Stop() { | |
- close(dsl.closing) | |
-} | |
- | |
-// Initializes the score ledger. | |
-func (dsl *DefaultScoreLedger) init(scorePeer ScorePeerFunc) { | |
- dsl.lock.Lock() | |
- defer dsl.lock.Unlock() | |
- dsl.scorePeer = scorePeer | |
-} | |
- | |
-// Increments the sent counter for the given peer. | |
-func (dsl *DefaultScoreLedger) AddToSentBytes(p peer.ID, n int) { | |
- l := dsl.findOrCreate(p) | |
- l.AddToSentBytes(n) | |
-} | |
- | |
-// Increments the received counter for the given peer. | |
-func (dsl *DefaultScoreLedger) AddToReceivedBytes(p peer.ID, n int) { | |
- l := dsl.findOrCreate(p) | |
- l.AddToReceivedBytes(n) | |
-} | |
- | |
-// PeerConnected should be called when a new peer connects, meaning | |
-// we should open accounting. | |
-func (dsl *DefaultScoreLedger) PeerConnected(p peer.ID) { | |
- dsl.lock.Lock() | |
- defer dsl.lock.Unlock() | |
- _, ok := dsl.ledgerMap[p] | |
- if !ok { | |
- dsl.ledgerMap[p] = newScoreLedger(p, dsl.clock) | |
- } | |
-} | |
- | |
-// PeerDisconnected should be called when a peer disconnects to | |
-// clean up the accounting. | |
-func (dsl *DefaultScoreLedger) PeerDisconnected(p peer.ID) { | |
- dsl.lock.Lock() | |
- defer dsl.lock.Unlock() | |
- delete(dsl.ledgerMap, p) | |
-} | |
- | |
-// Creates a new instance of the default score ledger. | |
-func NewDefaultScoreLedger() *DefaultScoreLedger { | |
- return &DefaultScoreLedger{ | |
- ledgerMap: make(map[peer.ID]*scoreledger), | |
- closing: make(chan struct{}), | |
- peerSampleInterval: shortTerm, | |
- clock: clock.New(), | |
- } | |
-} | |
- | |
-// Creates a new instance of the default score ledger with testing | |
-// parameters. | |
-func NewTestScoreLedger(peerSampleInterval time.Duration, sampleCh chan struct{}, clock clock.Clock) *DefaultScoreLedger { | |
- dsl := NewDefaultScoreLedger() | |
- dsl.peerSampleInterval = peerSampleInterval | |
- dsl.sampleCh = sampleCh | |
- dsl.clock = clock | |
- return dsl | |
-} | |
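To make the scoring in scoreWorker concrete: the final tag is the sum of the two usefulness EWMAs scaled by a factor between 0.75 and 1.25, depending on how the traffic balance with the peer leans. The snippet below (not part of the patch; combinedScore is an invented name) reproduces just that arithmetic:

package main

import "fmt"

// combinedScore reproduces the arithmetic from scoreWorker above: the sum of
// the short- and long-term EWMAs, adjusted by up to ±25% depending on how
// much we have received from the peer versus sent to it.
func combinedScore(shortScore, longScore float64, bytesSent, bytesRecv uint64) int {
	var lscore float64
	if bytesRecv != 0 {
		lscore = float64(bytesRecv) / float64(bytesRecv+bytesSent)
	}
	return int((shortScore + longScore) * (lscore*0.5 + 0.75))
}

func main() {
	// Same usefulness EWMAs, different traffic balance.
	fmt.Println(combinedScore(10, 10, 0, 1<<20))     // 25: we only receive, multiplier 1.25
	fmt.Println(combinedScore(10, 10, 1<<20, 1<<20)) // 20: balanced, multiplier 1.00
	fmt.Println(combinedScore(10, 10, 1<<20, 0))     // 15: we only send, multiplier 0.75
}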
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/server/internal/decision/taskmerger.go a/vendor/github.com/ipfs/go-bitswap/server/internal/decision/taskmerger.go | |
--- b/vendor/github.com/ipfs/go-bitswap/server/internal/decision/taskmerger.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/server/internal/decision/taskmerger.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,87 +0,0 @@ | |
-package decision | |
- | |
-import ( | |
- "github.com/ipfs/go-peertaskqueue/peertask" | |
-) | |
- | |
-// taskData is extra data associated with each task in the request queue | |
-type taskData struct { | |
- // Tasks can be want-have or want-block | |
- IsWantBlock bool | |
- // Whether to immediately send a response if the block is not found | |
- SendDontHave bool | |
- // The size of the block corresponding to the task | |
- BlockSize int | |
- // Whether the block was found | |
- HaveBlock bool | |
-} | |
- | |
-type taskMerger struct{} | |
- | |
-func newTaskMerger() *taskMerger { | |
- return &taskMerger{} | |
-} | |
- | |
-// The request queue uses this method to decide if a newly pushed task has any |
-// new information beyond the tasks with the same Topic (CID) in the queue. | |
-func (*taskMerger) HasNewInfo(task peertask.Task, existing []*peertask.Task) bool { | |
- haveSize := false | |
- isWantBlock := false | |
- for _, et := range existing { | |
- etd := et.Data.(*taskData) | |
- if etd.HaveBlock { | |
- haveSize = true | |
- } | |
- | |
- if etd.IsWantBlock { | |
- isWantBlock = true | |
- } | |
- } | |
- | |
- // If there is no active want-block and the new task is a want-block, | |
- // the new task is better | |
- newTaskData := task.Data.(*taskData) | |
- if !isWantBlock && newTaskData.IsWantBlock { | |
- return true | |
- } | |
- | |
- // If there is no size information for the CID and the new task has | |
- // size information, the new task is better | |
- if !haveSize && newTaskData.HaveBlock { | |
- return true | |
- } | |
- | |
- return false | |
-} | |
- | |
-// The request queue uses Merge to merge a newly pushed task with an existing | |
-// task with the same Topic (CID) | |
-func (*taskMerger) Merge(task peertask.Task, existing *peertask.Task) { | |
- newTask := task.Data.(*taskData) | |
- existingTask := existing.Data.(*taskData) | |
- | |
- // If we now have block size information, update the task with | |
- // the new block size | |
- if !existingTask.HaveBlock && newTask.HaveBlock { | |
- existingTask.HaveBlock = newTask.HaveBlock | |
- existingTask.BlockSize = newTask.BlockSize | |
- } | |
- | |
- // If replacing a want-have with a want-block | |
- if !existingTask.IsWantBlock && newTask.IsWantBlock { | |
- // Change the type from want-have to want-block | |
- existingTask.IsWantBlock = true | |
- // If the want-have was a DONT_HAVE, or the want-block has a size | |
- if !existingTask.HaveBlock || newTask.HaveBlock { | |
- // Update the entry size | |
- existingTask.HaveBlock = newTask.HaveBlock | |
- existing.Work = task.Work | |
- } | |
- } | |
- | |
- // If the task is a want-block, make sure the entry size is equal | |
- // to the block size (because we will send the whole block) | |
- if existingTask.IsWantBlock && existingTask.HaveBlock { | |
- existing.Work = existingTask.BlockSize | |
- } | |
-} | |
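The merge rules above boil down to: learn the block size as soon as any queued entry knows it, upgrade a want-have to a want-block when one arrives, and once an entry is a want-block for a block we have, schedule it with work equal to the block size. A standalone sketch of those rules (queuedWant and merge are invented names; the real code operates on peertask.Task values):

package main

import "fmt"

// queuedWant keeps the fields taskMerger cares about: whether the queued
// entry is a want-block, whether the block size is known, and the work value
// used for scheduling.
type queuedWant struct {
	isWantBlock bool
	haveBlock   bool
	blockSize   int
	work        int
}

// merge mirrors the upgrade rules from taskMerger.Merge above.
func merge(existing *queuedWant, incoming queuedWant) {
	if !existing.haveBlock && incoming.haveBlock {
		existing.haveBlock = true
		existing.blockSize = incoming.blockSize
	}
	if !existing.isWantBlock && incoming.isWantBlock {
		existing.isWantBlock = true
		if !existing.haveBlock || incoming.haveBlock {
			existing.work = incoming.work
		}
	}
	if existing.isWantBlock && existing.haveBlock {
		// Sending the whole block, so the scheduled work is the block size.
		existing.work = existing.blockSize
	}
}

func main() {
	// A queued want-have with no size information, merged with a want-block
	// for a 4 KiB block we do have:
	q := &queuedWant{work: 32}
	merge(q, queuedWant{isWantBlock: true, haveBlock: true, blockSize: 4096, work: 4096})
	fmt.Printf("%+v\n", *q) // {isWantBlock:true haveBlock:true blockSize:4096 work:4096}
}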
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/server/server.go a/vendor/github.com/ipfs/go-bitswap/server/server.go | |
--- b/vendor/github.com/ipfs/go-bitswap/server/server.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/server/server.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,536 +0,0 @@ | |
-package server | |
- | |
-import ( | |
- "context" | |
- "errors" | |
- "fmt" | |
- "sort" | |
- "sync" | |
- "time" | |
- | |
- "github.com/ipfs/go-bitswap/internal/defaults" | |
- "github.com/ipfs/go-bitswap/message" | |
- pb "github.com/ipfs/go-bitswap/message/pb" | |
- bmetrics "github.com/ipfs/go-bitswap/metrics" | |
- bsnet "github.com/ipfs/go-bitswap/network" | |
- "github.com/ipfs/go-bitswap/server/internal/decision" | |
- "github.com/ipfs/go-bitswap/tracer" | |
- blocks "github.com/ipfs/go-block-format" | |
- "github.com/ipfs/go-cid" | |
- blockstore "github.com/ipfs/go-ipfs-blockstore" | |
- logging "github.com/ipfs/go-log" | |
- "github.com/ipfs/go-metrics-interface" | |
- process "github.com/jbenet/goprocess" | |
- procctx "github.com/jbenet/goprocess/context" | |
- "github.com/libp2p/go-libp2p/core/peer" | |
- "go.uber.org/zap" | |
-) | |
- | |
-var provideKeysBufferSize = 2048 | |
- | |
-var log = logging.Logger("bitswap-server") | |
-var sflog = log.Desugar() | |
- | |
-const provideWorkerMax = 6 | |
- | |
-type Option func(*Server) | |
- | |
-type Server struct { | |
- sentHistogram metrics.Histogram | |
- sendTimeHistogram metrics.Histogram | |
- | |
- // the engine is the bit of logic that decides who to send which blocks to | |
- engine *decision.Engine | |
- | |
- // network delivers messages on behalf of the session | |
- network bsnet.BitSwapNetwork | |
- | |
- // External statistics interface | |
- tracer tracer.Tracer | |
- | |
- // Counters for various statistics | |
- counterLk sync.Mutex | |
- counters Stat | |
- | |
- // the total number of simultaneous threads sending outgoing messages | |
- taskWorkerCount int | |
- | |
- process process.Process | |
- | |
- // newBlocks is a channel for newly added blocks to be provided to the | |
- // network. blocks pushed down this channel get buffered and fed to the | |
- // provideKeys channel later on to avoid too much network activity | |
- newBlocks chan cid.Cid | |
- // provideKeys directly feeds provide workers | |
- provideKeys chan cid.Cid | |
- | |
- // Extra options to pass to the decision manager | |
- engineOptions []decision.Option | |
- | |
- // the size of channel buffer to use | |
- hasBlockBufferSize int | |
- // whether or not to make provide announcements | |
- provideEnabled bool | |
-} | |
- | |
-func New(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Server { | |
- ctx, cancel := context.WithCancel(ctx) | |
- | |
- px := process.WithTeardown(func() error { | |
- return nil | |
- }) | |
- go func() { | |
- <-px.Closing() // process closes first | |
- cancel() | |
- }() | |
- | |
- s := &Server{ | |
- sentHistogram: bmetrics.SentHist(ctx), | |
- sendTimeHistogram: bmetrics.SendTimeHist(ctx), | |
- taskWorkerCount: defaults.BitswapTaskWorkerCount, | |
- network: network, | |
- process: px, | |
- provideEnabled: true, | |
- hasBlockBufferSize: defaults.HasBlockBufferSize, | |
- provideKeys: make(chan cid.Cid, provideKeysBufferSize), | |
- } | |
- s.newBlocks = make(chan cid.Cid, s.hasBlockBufferSize) | |
- | |
- for _, o := range options { | |
- o(s) | |
- } | |
- | |
- s.engine = decision.NewEngine( | |
- ctx, | |
- bstore, | |
- network.ConnectionManager(), | |
- network.Self(), | |
- s.engineOptions..., | |
- ) | |
- s.engineOptions = nil | |
- | |
- s.startWorkers(ctx, px) | |
- | |
- return s | |
-} | |
- | |
-func TaskWorkerCount(count int) Option { | |
- if count <= 0 { | |
- panic(fmt.Sprintf("task worker count is %d but must be > 0", count)) | |
- } | |
- return func(bs *Server) { | |
- bs.taskWorkerCount = count | |
- } | |
-} | |
- | |
-func WithTracer(tap tracer.Tracer) Option { | |
- return func(bs *Server) { | |
- bs.tracer = tap | |
- } | |
-} | |
- | |
-// ProvideEnabled is an option for enabling/disabling provide announcements | |
-func ProvideEnabled(enabled bool) Option { | |
- return func(bs *Server) { | |
- bs.provideEnabled = enabled | |
- } | |
-} | |
- | |
-func WithPeerBlockRequestFilter(pbrf decision.PeerBlockRequestFilter) Option { | |
- o := decision.WithPeerBlockRequestFilter(pbrf) | |
- return func(bs *Server) { | |
- bs.engineOptions = append(bs.engineOptions, o) | |
- } | |
-} | |
- | |
-// WithTaskComparator configures custom task prioritization logic. | |
-func WithTaskComparator(comparator decision.TaskComparator) Option { | |
- o := decision.WithTaskComparator(comparator) | |
- return func(bs *Server) { | |
- bs.engineOptions = append(bs.engineOptions, o) | |
- } | |
-} | |
- | |
-// Configures the engine to use the given score decision logic. | |
-func WithScoreLedger(scoreLedger decision.ScoreLedger) Option { | |
- o := decision.WithScoreLedger(scoreLedger) | |
- return func(bs *Server) { | |
- bs.engineOptions = append(bs.engineOptions, o) | |
- } | |
-} | |
- | |
-// LedgerForPeer returns aggregated data about blocks swapped and communication | |
-// with a given peer. | |
-func (bs *Server) LedgerForPeer(p peer.ID) *decision.Receipt { | |
- return bs.engine.LedgerForPeer(p) | |
-} | |
- | |
-// EngineTaskWorkerCount sets the number of worker threads used inside the engine | |
-func EngineTaskWorkerCount(count int) Option { | |
- o := decision.WithTaskWorkerCount(count) | |
- return func(bs *Server) { | |
- bs.engineOptions = append(bs.engineOptions, o) | |
- } | |
-} | |
- | |
-// SetSendDontHaves indicates what to do when the engine receives a want-block | |
-// for a block that is not in the blockstore. Either | |
-// - Send a DONT_HAVE message | |
-// - Simply don't respond | |
-// This option is only used for testing. | |
-func SetSendDontHaves(send bool) Option { | |
- o := decision.WithSetSendDontHave(send) | |
- return func(bs *Server) { | |
- bs.engineOptions = append(bs.engineOptions, o) | |
- } | |
-} | |
- | |
-// EngineBlockstoreWorkerCount sets the number of worker threads used for | |
-// blockstore operations in the decision engine | |
-func EngineBlockstoreWorkerCount(count int) Option { | |
- o := decision.WithBlockstoreWorkerCount(count) | |
- return func(bs *Server) { | |
- bs.engineOptions = append(bs.engineOptions, o) | |
- } | |
-} | |
- | |
-func WithTargetMessageSize(tms int) Option { | |
- o := decision.WithTargetMessageSize(tms) | |
- return func(bs *Server) { | |
- bs.engineOptions = append(bs.engineOptions, o) | |
- } | |
-} | |
- | |
-// MaxOutstandingBytesPerPeer describes approximately how much work we are willing to have outstanding to a peer at any |
-// given time. Setting it to 0 will disable any limiting. | |
-func MaxOutstandingBytesPerPeer(count int) Option { | |
- o := decision.WithMaxOutstandingBytesPerPeer(count) | |
- return func(bs *Server) { | |
- bs.engineOptions = append(bs.engineOptions, o) | |
- } | |
-} | |
- | |
-// HasBlockBufferSize configures how big the new-blocks buffer should be. |
-func HasBlockBufferSize(count int) Option { | |
- if count < 0 { | |
- panic("cannot have negative buffer size") | |
- } | |
- return func(bs *Server) { | |
- bs.hasBlockBufferSize = count | |
- } | |
-} | |
- | |
-// WantlistForPeer returns the currently understood list of blocks requested by a | |
-// given peer. | |
-func (bs *Server) WantlistForPeer(p peer.ID) []cid.Cid { | |
- var out []cid.Cid | |
- for _, e := range bs.engine.WantlistForPeer(p) { | |
- out = append(out, e.Cid) | |
- } | |
- return out | |
-} | |
- | |
-func (bs *Server) startWorkers(ctx context.Context, px process.Process) { | |
- bs.engine.StartWorkers(ctx, px) | |
- | |
- // Start up workers to handle requests from other nodes for the data on this node | |
- for i := 0; i < bs.taskWorkerCount; i++ { | |
- i := i | |
- px.Go(func(px process.Process) { | |
- bs.taskWorker(ctx, i) | |
- }) | |
- } | |
- | |
- if bs.provideEnabled { | |
- // Start up a worker to manage sending out provides messages | |
- px.Go(func(px process.Process) { | |
- bs.provideCollector(ctx) | |
- }) | |
- | |
- // Spawn up multiple workers to handle incoming blocks | |
- // consider increasing number if providing blocks bottlenecks | |
- // file transfers | |
- px.Go(bs.provideWorker) | |
- } | |
-} | |
- | |
-func (bs *Server) taskWorker(ctx context.Context, id int) { | |
- defer log.Debug("bitswap task worker shutting down...") | |
- log := log.With("ID", id) | |
- for { | |
- log.Debug("Bitswap.TaskWorker.Loop") | |
- select { | |
- case nextEnvelope := <-bs.engine.Outbox(): | |
- select { | |
- case envelope, ok := <-nextEnvelope: | |
- if !ok { | |
- continue | |
- } | |
- | |
- start := time.Now() | |
- | |
- // TODO: Only record message as sent if there was no error? | |
- // Ideally, yes. But we'd need some way to trigger a retry and/or drop | |
- // the peer. | |
- bs.engine.MessageSent(envelope.Peer, envelope.Message) | |
- if bs.tracer != nil { | |
- bs.tracer.MessageSent(envelope.Peer, envelope.Message) | |
- } | |
- bs.sendBlocks(ctx, envelope) | |
- | |
- dur := time.Since(start) | |
- bs.sendTimeHistogram.Observe(dur.Seconds()) | |
- | |
- case <-ctx.Done(): | |
- return | |
- } | |
- case <-ctx.Done(): | |
- return | |
- } | |
- } | |
-} | |
- | |
-func (bs *Server) logOutgoingBlocks(env *decision.Envelope) { | |
- if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil { | |
- return | |
- } | |
- | |
- self := bs.network.Self() | |
- | |
- for _, blockPresence := range env.Message.BlockPresences() { | |
- c := blockPresence.Cid | |
- switch blockPresence.Type { | |
- case pb.Message_Have: | |
- log.Debugw("sent message", | |
- "type", "HAVE", | |
- "cid", c, | |
- "local", self, | |
- "to", env.Peer, | |
- ) | |
- case pb.Message_DontHave: | |
- log.Debugw("sent message", | |
- "type", "DONT_HAVE", | |
- "cid", c, | |
- "local", self, | |
- "to", env.Peer, | |
- ) | |
- default: | |
- panic(fmt.Sprintf("unrecognized BlockPresence type %v", blockPresence.Type)) | |
- } | |
- | |
- } | |
- for _, block := range env.Message.Blocks() { | |
- log.Debugw("sent message", | |
- "type", "BLOCK", | |
- "cid", block.Cid(), | |
- "local", self, | |
- "to", env.Peer, | |
- ) | |
- } | |
-} | |
- | |
-func (bs *Server) sendBlocks(ctx context.Context, env *decision.Envelope) { | |
- // Blocks need to be sent synchronously to maintain proper backpressure | |
- // throughout the network stack | |
- defer env.Sent() | |
- | |
- err := bs.network.SendMessage(ctx, env.Peer, env.Message) | |
- if err != nil { | |
- log.Debugw("failed to send blocks message", | |
- "peer", env.Peer, | |
- "error", err, | |
- ) | |
- return | |
- } | |
- | |
- bs.logOutgoingBlocks(env) | |
- | |
- dataSent := 0 | |
- blocks := env.Message.Blocks() | |
- for _, b := range blocks { | |
- dataSent += len(b.RawData()) | |
- } | |
- bs.counterLk.Lock() | |
- bs.counters.BlocksSent += uint64(len(blocks)) | |
- bs.counters.DataSent += uint64(dataSent) | |
- bs.counterLk.Unlock() | |
- bs.sentHistogram.Observe(float64(env.Message.Size())) | |
- log.Debugw("sent message", "peer", env.Peer) | |
-} | |
- | |
-type Stat struct { | |
- Peers []string | |
- ProvideBufLen int | |
- BlocksSent uint64 | |
- DataSent uint64 | |
-} | |
- | |
-// Stat returns aggregated statistics about bitswap operations | |
-func (bs *Server) Stat() (Stat, error) { | |
- bs.counterLk.Lock() | |
- s := bs.counters | |
- bs.counterLk.Unlock() | |
- s.ProvideBufLen = len(bs.newBlocks) | |
- | |
- peers := bs.engine.Peers() | |
- peersStr := make([]string, len(peers)) | |
- for i, p := range peers { | |
- peersStr[i] = p.Pretty() | |
- } | |
- sort.Strings(peersStr) | |
- s.Peers = peersStr | |
- | |
- return s, nil | |
-} | |
- | |
-// NotifyNewBlocks announces the existence of blocks to this bitswap service. The | |
-// service will potentially notify its peers. | |
-// Bitswap itself doesn't store new blocks. It's the caller's responsibility to ensure |
-// that those blocks are available in the blockstore before calling this function. | |
-func (bs *Server) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { | |
- select { | |
- case <-bs.process.Closing(): | |
- return errors.New("bitswap is closed") | |
- default: | |
- } | |
- | |
- // Send wanted blocks to decision engine | |
- bs.engine.NotifyNewBlocks(blks) | |
- | |
- // If the reprovider is enabled, send block to reprovider | |
- if bs.provideEnabled { | |
- for _, blk := range blks { | |
- select { | |
- case bs.newBlocks <- blk.Cid(): | |
- // send block off to be reprovided | |
- case <-bs.process.Closing(): | |
- return bs.process.Close() | |
- } | |
- } | |
- } | |
- | |
- return nil | |
-} | |
- | |
-func (bs *Server) provideCollector(ctx context.Context) { | |
- defer close(bs.provideKeys) | |
- var toProvide []cid.Cid | |
- var nextKey cid.Cid | |
- var keysOut chan cid.Cid | |
- | |
- for { | |
- select { | |
- case blkey, ok := <-bs.newBlocks: | |
- if !ok { | |
- log.Debug("newBlocks channel closed") | |
- return | |
- } | |
- | |
- if keysOut == nil { | |
- nextKey = blkey | |
- keysOut = bs.provideKeys | |
- } else { | |
- toProvide = append(toProvide, blkey) | |
- } | |
- case keysOut <- nextKey: | |
- if len(toProvide) > 0 { | |
- nextKey = toProvide[0] | |
- toProvide = toProvide[1:] | |
- } else { | |
- keysOut = nil | |
- } | |
- case <-ctx.Done(): | |
- return | |
- } | |
- } | |
-} | |
- | |
-func (bs *Server) provideWorker(px process.Process) { | |
- // FIXME: OnClosingContext returns a _custom_ context type. | |
- // Unfortunately, deriving a new cancelable context from this custom | |
- // type fires off a goroutine. To work around this, we create a single | |
- // cancelable context up-front and derive all sub-contexts from that. | |
- // | |
- // See: https://github.com/ipfs/go-ipfs/issues/5810 | |
- ctx := procctx.OnClosingContext(px) | |
- ctx, cancel := context.WithCancel(ctx) | |
- defer cancel() | |
- | |
- limit := make(chan struct{}, provideWorkerMax) | |
- | |
- limitedGoProvide := func(k cid.Cid, wid int) { | |
- defer func() { | |
- // replace token when done | |
- <-limit | |
- }() | |
- | |
- log.Debugw("Bitswap.ProvideWorker.Start", "ID", wid, "cid", k) | |
- defer log.Debugw("Bitswap.ProvideWorker.End", "ID", wid, "cid", k) | |
- | |
- ctx, cancel := context.WithTimeout(ctx, defaults.ProvideTimeout) // timeout ctx | |
- defer cancel() | |
- | |
- if err := bs.network.Provide(ctx, k); err != nil { | |
- log.Warn(err) | |
- } | |
- } | |
- | |
- // worker spawner, reads from bs.provideKeys until it closes, spawning a | |
- // _ratelimited_ number of workers to handle each key. | |
- for wid := 2; ; wid++ { | |
- log.Debug("Bitswap.ProvideWorker.Loop") | |
- | |
- select { | |
- case <-px.Closing(): | |
- return | |
- case k, ok := <-bs.provideKeys: | |
- if !ok { | |
- log.Debug("provideKeys channel closed") | |
- return | |
- } | |
- select { | |
- case <-px.Closing(): | |
- return | |
- case limit <- struct{}{}: | |
- go limitedGoProvide(k, wid) | |
- } | |
- } | |
- } | |
-} | |
- | |
-func (bs *Server) ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) { | |
- // This call records changes to wantlists, blocks received, | |
-	// and number of bytes transferred. |
- bs.engine.MessageReceived(ctx, p, incoming) | |
- // TODO: this is bad, and could be easily abused. | |
- // Should only track *useful* messages in ledger | |
- | |
- if bs.tracer != nil { | |
- bs.tracer.MessageReceived(p, incoming) | |
- } | |
-} | |
- | |
-// ReceivedBlocks notifies the decision engine that a peer is well-behaving |
-// and gave us useful data, potentially increasing its score and making us |
-// send them more data in exchange. | |
-func (bs *Server) ReceivedBlocks(from peer.ID, blks []blocks.Block) { | |
- bs.engine.ReceivedBlocks(from, blks) | |
-} | |
- | |
-func (*Server) ReceiveError(err error) { | |
- log.Infof("Bitswap Client ReceiveError: %s", err) | |
- // TODO log the network error | |
- // TODO bubble the network error up to the parent context/error logger | |
- | |
-} | |
-func (bs *Server) PeerConnected(p peer.ID) { | |
- bs.engine.PeerConnected(p) | |
-} | |
-func (bs *Server) PeerDisconnected(p peer.ID) { | |
- bs.engine.PeerDisconnected(p) | |
-} | |
- | |
-// Close is called to shutdown the Client | |
-func (bs *Server) Close() error { | |
- return bs.process.Close() | |
-} | |
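The provideCollector above is a small decoupling loop: it accepts CIDs from newBlocks without ever blocking the sender, queues them in memory, and feeds them to provideKeys one at a time, disabling the send case of its select by setting the channel to nil while there is nothing to send. A self-contained sketch of that pattern (bufferKeys is an invented name; string keys stand in for CIDs):

package main

import "fmt"

// bufferKeys accepts keys on in without blocking the sender, queueing them in
// memory, and feeds them to out one at a time. Setting a select case's channel
// to nil disables that case, which lets one loop juggle both directions.
func bufferKeys(in <-chan string, out chan<- string) {
	defer close(out)
	var pending []string
	var next string
	var send chan<- string // nil until we have something to send

	for {
		select {
		case k, ok := <-in:
			if !ok {
				return
			}
			if send == nil {
				next = k
				send = out
			} else {
				pending = append(pending, k)
			}
		case send <- next:
			if len(pending) > 0 {
				next = pending[0]
				pending = pending[1:]
			} else {
				send = nil
			}
		}
	}
}

func main() {
	in := make(chan string)
	out := make(chan string)
	go bufferKeys(in, out)

	// The producer can burst all keys without blocking, even though the
	// consumer below reads them one at a time.
	go func() {
		for i := 0; i < 5; i++ {
			in <- fmt.Sprintf("cid-%d", i)
		}
	}()

	for i := 0; i < 5; i++ {
		fmt.Println(<-out)
	}
}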
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/tracer/tracer.go a/vendor/github.com/ipfs/go-bitswap/tracer/tracer.go | |
--- b/vendor/github.com/ipfs/go-bitswap/tracer/tracer.go 2023-01-30 20:34:50.532142371 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/tracer/tracer.go 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,13 +0,0 @@ | |
-package tracer | |
- | |
-import ( | |
- bsmsg "github.com/ipfs/go-bitswap/message" | |
- peer "github.com/libp2p/go-libp2p/core/peer" | |
-) | |
- | |
-// Tracer provides methods to access all messages sent and received by Bitswap. | |
-// This interface can be used to implement various statistics (this is original intent). | |
-type Tracer interface { | |
- MessageReceived(peer.ID, bsmsg.BitSwapMessage) | |
- MessageSent(peer.ID, bsmsg.BitSwapMessage) | |
-} | |
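Implementing the Tracer interface is just two methods; the server and client wire it in through the WithTracer options shown earlier in this patch. A minimal sketch below, using the pre-migration go-bitswap import paths from this hunk (countingTracer is an invented type, and the peer ID is a placeholder string rather than a real identity):

package main

import (
	"fmt"
	"sync/atomic"

	bsmsg "github.com/ipfs/go-bitswap/message"
	"github.com/ipfs/go-bitswap/tracer"
	"github.com/libp2p/go-libp2p/core/peer"
)

// countingTracer counts messages in both directions.
type countingTracer struct {
	sent, received uint64
}

func (t *countingTracer) MessageReceived(p peer.ID, msg bsmsg.BitSwapMessage) {
	atomic.AddUint64(&t.received, 1)
}

func (t *countingTracer) MessageSent(p peer.ID, msg bsmsg.BitSwapMessage) {
	atomic.AddUint64(&t.sent, 1)
}

// Compile-time check that countingTracer satisfies the interface.
var _ tracer.Tracer = (*countingTracer)(nil)

func main() {
	t := &countingTracer{}
	t.MessageSent(peer.ID("placeholder"), bsmsg.New(false))
	fmt.Println(atomic.LoadUint64(&t.sent), atomic.LoadUint64(&t.received)) // 1 0
}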
diff -Naur --color b/vendor/github.com/ipfs/go-bitswap/version.json a/vendor/github.com/ipfs/go-bitswap/version.json | |
--- b/vendor/github.com/ipfs/go-bitswap/version.json 2023-01-30 20:34:50.525475644 +0100 | |
+++ a/vendor/github.com/ipfs/go-bitswap/version.json 1970-01-01 01:00:00.000000000 +0100 | |
@@ -1,3 +0,0 @@ | |
-{ | |
- "version": "v0.10.2" | |
-} | |
diff -Naur --color b/vendor/github.com/ipfs/go-ipfs-routing/none/none_client.go a/vendor/github.com/ipfs/go-ipfs-routing/none/none_client.go | |
--- b/vendor/github.com/ipfs/go-ipfs-routing/none/none_client.go 2023-01-30 20:34:50.562142649 +0100 | |
+++ a/vendor/github.com/ipfs/go-ipfs-routing/none/none_client.go 2023-01-30 20:34:49.292130910 +0100 | |
@@ -5,14 +5,12 @@ | |
"context" | |
"errors" | |
- cid "github.com/ipfs/go-cid" | |
+ "github.com/ipfs/go-cid" | |
ds "github.com/ipfs/go-datastore" | |
- | |
- "github.com/libp2p/go-libp2p-core/host" | |
- "github.com/libp2p/go-libp2p-core/peer" | |
- "github.com/libp2p/go-libp2p-core/routing" | |
- | |
record "github.com/libp2p/go-libp2p-record" | |
+ "github.com/libp2p/go-libp2p/core/host" | |
+ "github.com/libp2p/go-libp2p/core/peer" | |
+ "github.com/libp2p/go-libp2p/core/routing" | |
) | |
type nilclient struct { | |
@@ -53,5 +51,5 @@ | |
return &nilclient{}, nil | |
} | |
-// ensure nilclient satisfies interface | |
+// ensure nilclient satisfies interface | |
var _ routing.Routing = &nilclient{} | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/bitswap.go a/vendor/github.com/ipfs/go-libipfs/bitswap/bitswap.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/bitswap.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/bitswap.go 2023-01-30 20:34:49.302131003 +0100 | |
@@ -0,0 +1,181 @@ | |
+package bitswap | |
+ | |
+import ( | |
+ "context" | |
+ "fmt" | |
+ | |
+ "github.com/ipfs/go-libipfs/bitswap/client" | |
+ "github.com/ipfs/go-libipfs/bitswap/internal/defaults" | |
+ "github.com/ipfs/go-libipfs/bitswap/message" | |
+ "github.com/ipfs/go-libipfs/bitswap/network" | |
+ "github.com/ipfs/go-libipfs/bitswap/server" | |
+ "github.com/ipfs/go-libipfs/bitswap/tracer" | |
+ "github.com/ipfs/go-metrics-interface" | |
+ | |
+ "github.com/ipfs/go-cid" | |
+ blockstore "github.com/ipfs/go-ipfs-blockstore" | |
+ exchange "github.com/ipfs/go-ipfs-exchange-interface" | |
+ blocks "github.com/ipfs/go-libipfs/blocks" | |
+ logging "github.com/ipfs/go-log" | |
+ "github.com/libp2p/go-libp2p/core/peer" | |
+ | |
+ "go.uber.org/multierr" | |
+) | |
+ | |
+var log = logging.Logger("bitswap") | |
+ | |
+// old interface we are targeting | |
+type bitswap interface { | |
+ Close() error | |
+ GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) | |
+ GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) | |
+ GetWantBlocks() []cid.Cid | |
+ GetWantHaves() []cid.Cid | |
+ GetWantlist() []cid.Cid | |
+ IsOnline() bool | |
+ LedgerForPeer(p peer.ID) *server.Receipt | |
+ NewSession(ctx context.Context) exchange.Fetcher | |
+ NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error | |
+ PeerConnected(p peer.ID) | |
+ PeerDisconnected(p peer.ID) | |
+ ReceiveError(err error) | |
+ ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) | |
+ Stat() (*Stat, error) | |
+ WantlistForPeer(p peer.ID) []cid.Cid | |
+} | |
+ | |
+var _ exchange.SessionExchange = (*Bitswap)(nil) | |
+var _ bitswap = (*Bitswap)(nil) | |
+var HasBlockBufferSize = defaults.HasBlockBufferSize | |
+ | |
+type Bitswap struct { | |
+ *client.Client | |
+ *server.Server | |
+ | |
+ tracer tracer.Tracer | |
+ net network.BitSwapNetwork | |
+} | |
+ | |
+func New(ctx context.Context, net network.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Bitswap { | |
+ bs := &Bitswap{ | |
+ net: net, | |
+ } | |
+ | |
+ var serverOptions []server.Option | |
+ var clientOptions []client.Option | |
+ | |
+ for _, o := range options { | |
+ switch typedOption := o.v.(type) { | |
+ case server.Option: | |
+ serverOptions = append(serverOptions, typedOption) | |
+ case client.Option: | |
+ clientOptions = append(clientOptions, typedOption) | |
+ case option: | |
+ typedOption(bs) | |
+ default: | |
+ panic(fmt.Errorf("unknown option type passed to bitswap.New, got: %T, %v; expected: %T, %T or %T", typedOption, typedOption, server.Option(nil), client.Option(nil), option(nil))) | |
+ } | |
+ } | |
+ | |
+ if bs.tracer != nil { | |
+ var tracer tracer.Tracer = nopReceiveTracer{bs.tracer} | |
+ clientOptions = append(clientOptions, client.WithTracer(tracer)) | |
+ serverOptions = append(serverOptions, server.WithTracer(tracer)) | |
+ } | |
+ | |
+ if HasBlockBufferSize != defaults.HasBlockBufferSize { | |
+ serverOptions = append(serverOptions, server.HasBlockBufferSize(HasBlockBufferSize)) | |
+ } | |
+ | |
+ ctx = metrics.CtxSubScope(ctx, "bitswap") | |
+ | |
+ bs.Server = server.New(ctx, net, bstore, serverOptions...) | |
+ bs.Client = client.New(ctx, net, bstore, append(clientOptions, client.WithBlockReceivedNotifier(bs.Server))...) | |
+ net.Start(bs) // use the polyfill receiver to log received errors and trace messages only once | |
+ | |
+ return bs | |
+} | |
+ | |
+func (bs *Bitswap) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { | |
+ return multierr.Combine( | |
+ bs.Client.NotifyNewBlocks(ctx, blks...), | |
+ bs.Server.NotifyNewBlocks(ctx, blks...), | |
+ ) | |
+} | |
+ | |
+type Stat struct { | |
+ Wantlist []cid.Cid | |
+ Peers []string | |
+ BlocksReceived uint64 | |
+ DataReceived uint64 | |
+ DupBlksReceived uint64 | |
+ DupDataReceived uint64 | |
+ MessagesReceived uint64 | |
+ BlocksSent uint64 | |
+ DataSent uint64 | |
+ ProvideBufLen int | |
+} | |
+ | |
+func (bs *Bitswap) Stat() (*Stat, error) { | |
+ cs, err := bs.Client.Stat() | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ ss, err := bs.Server.Stat() | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ return &Stat{ | |
+ Wantlist: cs.Wantlist, | |
+ BlocksReceived: cs.BlocksReceived, | |
+ DataReceived: cs.DataReceived, | |
+ DupBlksReceived: cs.DupBlksReceived, | |
+ DupDataReceived: cs.DupDataReceived, | |
+ MessagesReceived: cs.MessagesReceived, | |
+ Peers: ss.Peers, | |
+ BlocksSent: ss.BlocksSent, | |
+ DataSent: ss.DataSent, | |
+ ProvideBufLen: ss.ProvideBufLen, | |
+ }, nil | |
+} | |
+ | |
+func (bs *Bitswap) Close() error { | |
+ bs.net.Stop() | |
+ return multierr.Combine( | |
+ bs.Client.Close(), | |
+ bs.Server.Close(), | |
+ ) | |
+} | |
+ | |
+func (bs *Bitswap) WantlistForPeer(p peer.ID) []cid.Cid { | |
+ if p == bs.net.Self() { | |
+ return bs.Client.GetWantlist() | |
+ } | |
+ return bs.Server.WantlistForPeer(p) | |
+} | |
+ | |
+func (bs *Bitswap) PeerConnected(p peer.ID) { | |
+ bs.Client.PeerConnected(p) | |
+ bs.Server.PeerConnected(p) | |
+} | |
+ | |
+func (bs *Bitswap) PeerDisconnected(p peer.ID) { | |
+ bs.Client.PeerDisconnected(p) | |
+ bs.Server.PeerDisconnected(p) | |
+} | |
+ | |
+func (bs *Bitswap) ReceiveError(err error) { | |
+ log.Infof("Bitswap Client ReceiveError: %s", err) | |
+ // TODO log the network error | |
+ // TODO bubble the network error up to the parent context/error logger | |
+} | |
+ | |
+func (bs *Bitswap) ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) { | |
+ if bs.tracer != nil { | |
+ bs.tracer.MessageReceived(p, incoming) | |
+ } | |
+ | |
+ bs.Client.ReceiveMessage(ctx, p, incoming) | |
+ bs.Server.ReceiveMessage(ctx, p, incoming) | |
+} | |
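Close and NotifyNewBlocks above fold the client and server results into one error with go.uber.org/multierr. For reference, a tiny standalone example of how Combine behaves (the error text is made up):

package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

func main() {
	// Nil errors are dropped; the remaining ones are folded into a single
	// error value that can be unpacked again with multierr.Errors.
	err := multierr.Combine(nil, errors.New("server: shutting down"))
	fmt.Println(err)                               // server: shutting down
	fmt.Println(multierr.Errors(err))              // [server: shutting down]
	fmt.Println(multierr.Combine(nil, nil) == nil) // true
}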
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/client.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/client.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/client.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/client.go 2023-01-30 20:34:49.302131003 +0100 | |
@@ -0,0 +1,479 @@ | |
+// Package client implements the IPFS exchange interface with the BitSwap |
+// bilateral exchange protocol. |
+package client | |
+ | |
+import ( | |
+ "context" | |
+ "errors" | |
+ | |
+ "sync" | |
+ "time" | |
+ | |
+ delay "github.com/ipfs/go-ipfs-delay" | |
+ "go.opentelemetry.io/otel/attribute" | |
+ "go.opentelemetry.io/otel/trace" | |
+ | |
+ "github.com/ipfs/go-cid" | |
+ blockstore "github.com/ipfs/go-ipfs-blockstore" | |
+ exchange "github.com/ipfs/go-ipfs-exchange-interface" | |
+ bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager" | |
+ bsgetter "github.com/ipfs/go-libipfs/bitswap/client/internal/getter" | |
+ bsmq "github.com/ipfs/go-libipfs/bitswap/client/internal/messagequeue" | |
+ "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications" | |
+ bspm "github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager" | |
+ bspqm "github.com/ipfs/go-libipfs/bitswap/client/internal/providerquerymanager" | |
+ bssession "github.com/ipfs/go-libipfs/bitswap/client/internal/session" | |
+ bssim "github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager" | |
+ bssm "github.com/ipfs/go-libipfs/bitswap/client/internal/sessionmanager" | |
+ bsspm "github.com/ipfs/go-libipfs/bitswap/client/internal/sessionpeermanager" | |
+ "github.com/ipfs/go-libipfs/bitswap/internal" | |
+ "github.com/ipfs/go-libipfs/bitswap/internal/defaults" | |
+ bsmsg "github.com/ipfs/go-libipfs/bitswap/message" | |
+ bmetrics "github.com/ipfs/go-libipfs/bitswap/metrics" | |
+ bsnet "github.com/ipfs/go-libipfs/bitswap/network" | |
+ "github.com/ipfs/go-libipfs/bitswap/tracer" | |
+ blocks "github.com/ipfs/go-libipfs/blocks" | |
+ logging "github.com/ipfs/go-log" | |
+ "github.com/ipfs/go-metrics-interface" | |
+ process "github.com/jbenet/goprocess" | |
+ procctx "github.com/jbenet/goprocess/context" | |
+ "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+var log = logging.Logger("bitswap-client") | |
+ | |
+// Option defines the functional option type that can be used to configure | |
+// bitswap instances | |
+type Option func(*Client) | |
+ | |
+// ProviderSearchDelay overwrites the global provider search delay | |
+func ProviderSearchDelay(newProvSearchDelay time.Duration) Option { | |
+ return func(bs *Client) { | |
+ bs.provSearchDelay = newProvSearchDelay | |
+ } | |
+} | |
+ | |
+// RebroadcastDelay overwrites the global provider rebroadcast delay | |
+func RebroadcastDelay(newRebroadcastDelay delay.D) Option { | |
+ return func(bs *Client) { | |
+ bs.rebroadcastDelay = newRebroadcastDelay | |
+ } | |
+} | |
+ | |
+func SetSimulateDontHavesOnTimeout(send bool) Option { | |
+ return func(bs *Client) { | |
+ bs.simulateDontHavesOnTimeout = send | |
+ } | |
+} | |
+ | |
+// Configures the Client to use given tracer. | |
+// This provides methods to access all messages sent and received by the Client. | |
+// This interface can be used to implement various statistics (this is original intent). | |
+func WithTracer(tap tracer.Tracer) Option { | |
+ return func(bs *Client) { | |
+ bs.tracer = tap | |
+ } | |
+} | |
+ | |
+func WithBlockReceivedNotifier(brn BlockReceivedNotifier) Option { | |
+ return func(bs *Client) { | |
+ bs.blockReceivedNotifier = brn | |
+ } | |
+} | |
+ | |
+type BlockReceivedNotifier interface { | |
+ // ReceivedBlocks notifies the decision engine that a peer is well-behaving | |
+ // and gave us useful data, potentially increasing its score and making us | |
+ // send them more data in exchange. | |
+ ReceivedBlocks(peer.ID, []blocks.Block) | |
+} | |
+ | |
+// New initializes a Bitswap client that runs until client.Close is called. | |
+func New(parent context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Client { | |
+ // important to use provided parent context (since it may include important | |
+ // loggable data). It's probably not a good idea to allow bitswap to be | |
+ // coupled to the concerns of the ipfs daemon in this way. | |
+ // | |
+ // FIXME(btc) Now that bitswap manages itself using a process, it probably | |
+ // shouldn't accept a context anymore. Clients should probably use Close() | |
+ // exclusively. We should probably find another way to share logging data | |
+ ctx, cancelFunc := context.WithCancel(parent) | |
+ | |
+ px := process.WithTeardown(func() error { | |
+ return nil | |
+ }) | |
+ | |
+ // onDontHaveTimeout is called when a want-block is sent to a peer that | |
+ // has an old version of Bitswap that doesn't support DONT_HAVE messages, | |
+ // or when no response is received within a timeout. | |
+ var sm *bssm.SessionManager | |
+ var bs *Client | |
+ onDontHaveTimeout := func(p peer.ID, dontHaves []cid.Cid) { | |
+ // Simulate a message arriving with DONT_HAVEs | |
+ if bs.simulateDontHavesOnTimeout { | |
+ sm.ReceiveFrom(ctx, p, nil, nil, dontHaves) | |
+ } | |
+ } | |
+ peerQueueFactory := func(ctx context.Context, p peer.ID) bspm.PeerQueue { | |
+ return bsmq.New(ctx, p, network, onDontHaveTimeout) | |
+ } | |
+ | |
+ sim := bssim.New() | |
+ bpm := bsbpm.New() | |
+ pm := bspm.New(ctx, peerQueueFactory, network.Self()) | |
+ pqm := bspqm.New(ctx, network) | |
+ | |
+ sessionFactory := func( | |
+ sessctx context.Context, | |
+ sessmgr bssession.SessionManager, | |
+ id uint64, | |
+ spm bssession.SessionPeerManager, | |
+ sim *bssim.SessionInterestManager, | |
+ pm bssession.PeerManager, | |
+ bpm *bsbpm.BlockPresenceManager, | |
+ notif notifications.PubSub, | |
+ provSearchDelay time.Duration, | |
+ rebroadcastDelay delay.D, | |
+ self peer.ID) bssm.Session { | |
+ return bssession.New(sessctx, sessmgr, id, spm, pqm, sim, pm, bpm, notif, provSearchDelay, rebroadcastDelay, self) | |
+ } | |
+ sessionPeerManagerFactory := func(ctx context.Context, id uint64) bssession.SessionPeerManager { | |
+ return bsspm.New(id, network.ConnectionManager()) | |
+ } | |
+ notif := notifications.New() | |
+ sm = bssm.New(ctx, sessionFactory, sim, sessionPeerManagerFactory, bpm, pm, notif, network.Self()) | |
+ | |
+ bs = &Client{ | |
+ blockstore: bstore, | |
+ network: network, | |
+ process: px, | |
+ pm: pm, | |
+ pqm: pqm, | |
+ sm: sm, | |
+ sim: sim, | |
+ notif: notif, | |
+ counters: new(counters), | |
+ dupMetric: bmetrics.DupHist(ctx), | |
+ allMetric: bmetrics.AllHist(ctx), | |
+ provSearchDelay: defaults.ProvSearchDelay, | |
+ rebroadcastDelay: delay.Fixed(time.Minute), | |
+ simulateDontHavesOnTimeout: true, | |
+ } | |
+ | |
+ // apply functional options before starting and running bitswap | |
+ for _, option := range options { | |
+ option(bs) | |
+ } | |
+ | |
+ bs.pqm.Startup() | |
+ | |
+ // bind the context and process. | |
+ // do it over here to avoid closing before all setup is done. | |
+ go func() { | |
+ <-px.Closing() // process closes first | |
+ sm.Shutdown() | |
+ cancelFunc() | |
+ notif.Shutdown() | |
+ }() | |
+ procctx.CloseAfterContext(px, ctx) // parent cancelled first | |
+ | |
+ return bs | |
+} | |
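client.New above uses the usual functional-options pattern: defaults are set first, then each Option is applied in a loop before the workers start. A minimal standalone sketch of the pattern (config and newConfig are invented stand-ins for Client and New):

package main

import (
	"fmt"
	"time"
)

// config stands in for the Client; Option mirrors the option type used by
// both the client and server packages.
type config struct {
	provSearchDelay   time.Duration
	simulateDontHaves bool
}

type Option func(*config)

func ProviderSearchDelay(d time.Duration) Option {
	return func(c *config) { c.provSearchDelay = d }
}

func SetSimulateDontHavesOnTimeout(send bool) Option {
	return func(c *config) { c.simulateDontHaves = send }
}

func newConfig(options ...Option) *config {
	// Defaults first, caller overrides second.
	c := &config{provSearchDelay: time.Second, simulateDontHaves: true}
	for _, o := range options {
		o(c)
	}
	return c
}

func main() {
	c := newConfig(ProviderSearchDelay(5*time.Second), SetSimulateDontHavesOnTimeout(false))
	fmt.Println(c.provSearchDelay, c.simulateDontHaves) // 5s false
}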
+ | |
+// Client instances implement the bitswap protocol. | |
+type Client struct { | |
+ pm *bspm.PeerManager | |
+ | |
+ // the provider query manager manages requests to find providers | |
+ pqm *bspqm.ProviderQueryManager | |
+ | |
+ // network delivers messages on behalf of the session | |
+ network bsnet.BitSwapNetwork | |
+ | |
+ // blockstore is the local database | |
+ // NB: ensure threadsafety | |
+ blockstore blockstore.Blockstore | |
+ | |
+ // manages channels of outgoing blocks for sessions | |
+ notif notifications.PubSub | |
+ | |
+ process process.Process | |
+ | |
+ // Counters for various statistics | |
+ counterLk sync.Mutex | |
+ counters *counters | |
+ | |
+ // Metrics interface metrics | |
+ dupMetric metrics.Histogram | |
+ allMetric metrics.Histogram | |
+ | |
+ // External statistics interface | |
+ tracer tracer.Tracer | |
+ | |
+ // the SessionManager routes requests to interested sessions | |
+ sm *bssm.SessionManager | |
+ | |
+ // the SessionInterestManager keeps track of which sessions are interested | |
+ // in which CIDs | |
+ sim *bssim.SessionInterestManager | |
+ | |
+ // how long to wait before looking for providers in a session | |
+ provSearchDelay time.Duration | |
+ | |
+ // how often to rebroadcast providing requests to find more optimized providers | |
+ rebroadcastDelay delay.D | |
+ | |
+ blockReceivedNotifier BlockReceivedNotifier | |
+ | |
+ // whether we should actually simulate dont haves on request timeout | |
+ simulateDontHavesOnTimeout bool | |
+} | |
+ | |
+type counters struct { | |
+ blocksRecvd uint64 | |
+ dupBlocksRecvd uint64 | |
+ dupDataRecvd uint64 | |
+ dataRecvd uint64 | |
+ messagesRecvd uint64 | |
+} | |
+ | |
+// GetBlock attempts to retrieve a particular block from peers within the | |
+// deadline enforced by the context. | |
+func (bs *Client) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) { | |
+ ctx, span := internal.StartSpan(ctx, "GetBlock", trace.WithAttributes(attribute.String("Key", k.String()))) | |
+ defer span.End() | |
+ return bsgetter.SyncGetBlock(ctx, k, bs.GetBlocks) | |
+} | |
+ | |
+// GetBlocks returns a channel where the caller may receive blocks that | |
+// correspond to the provided |keys|. Returns an error if BitSwap is unable to | |
+// begin this request within the deadline enforced by the context. | |
+// | |
+// NB: Your request remains open until the context expires. To conserve | |
+// resources, provide a context with a reasonably short deadline (ie. not one | |
+// that lasts throughout the lifetime of the server) | |
+func (bs *Client) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { | |
+ ctx, span := internal.StartSpan(ctx, "GetBlocks", trace.WithAttributes(attribute.Int("NumKeys", len(keys)))) | |
+ defer span.End() | |
+ session := bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) | |
+ return session.GetBlocks(ctx, keys) | |
+} | |
+ | |
+// NotifyNewBlocks announces the existence of blocks to this bitswap service. | |
+// Bitswap itself doesn't store new blocks. It's the caller's responsibility to ensure | |
+// that those blocks are available in the blockstore before calling this function. | |
+func (bs *Client) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { | |
+ ctx, span := internal.StartSpan(ctx, "NotifyNewBlocks") | |
+ defer span.End() | |
+ | |
+ select { | |
+ case <-bs.process.Closing(): | |
+ return errors.New("bitswap is closed") | |
+ default: | |
+ } | |
+ | |
+ blkCids := make([]cid.Cid, len(blks)) | |
+ for i, blk := range blks { | |
+ blkCids[i] = blk.Cid() | |
+ } | |
+ | |
+ // Send all block keys (including duplicates) to any sessions that want them. | |
+ // (The duplicates are needed by sessions for accounting purposes) | |
+ bs.sm.ReceiveFrom(ctx, "", blkCids, nil, nil) | |
+ | |
+ // Publish the block to any Bitswap clients that had requested blocks. | |
+ // (the sessions use this pubsub mechanism to inform clients of incoming | |
+ // blocks) | |
+ bs.notif.Publish(blks...) | |
+ | |
+ return nil | |
+} | |
+ | |
+// receiveBlocksFrom processes blocks received from the network | |
+func (bs *Client) receiveBlocksFrom(ctx context.Context, from peer.ID, blks []blocks.Block, haves []cid.Cid, dontHaves []cid.Cid) error { | |
+ select { | |
+ case <-bs.process.Closing(): | |
+ return errors.New("bitswap is closed") | |
+ default: | |
+ } | |
+ | |
+ wanted, notWanted := bs.sim.SplitWantedUnwanted(blks) | |
+ for _, b := range notWanted { | |
+ log.Debugf("[recv] block not in wantlist; cid=%s, peer=%s", b.Cid(), from) | |
+ } | |
+ | |
+ allKs := make([]cid.Cid, 0, len(blks)) | |
+ for _, b := range blks { | |
+ allKs = append(allKs, b.Cid()) | |
+ } | |
+ | |
+ // Inform the PeerManager so that we can calculate per-peer latency | |
+ combined := make([]cid.Cid, 0, len(allKs)+len(haves)+len(dontHaves)) | |
+ combined = append(combined, allKs...) | |
+ combined = append(combined, haves...) | |
+ combined = append(combined, dontHaves...) | |
+ bs.pm.ResponseReceived(from, combined) | |
+ | |
+ // Send all block keys (including duplicates) to any sessions that want them, for accounting purposes. | |
+ bs.sm.ReceiveFrom(ctx, from, allKs, haves, dontHaves) | |
+ | |
+ if bs.blockReceivedNotifier != nil { | |
+ bs.blockReceivedNotifier.ReceivedBlocks(from, wanted) | |
+ } | |
+ | |
+ // Publish the block to any Bitswap clients that had requested blocks. | |
+ // (the sessions use this pubsub mechanism to inform clients of incoming | |
+ // blocks) | |
+ for _, b := range wanted { | |
+ bs.notif.Publish(b) | |
+ } | |
+ | |
+ for _, b := range wanted { | |
+ log.Debugw("Bitswap.GetBlockRequest.End", "cid", b.Cid()) | |
+ } | |
+ | |
+ return nil | |
+} | |
+ | |
+// ReceiveMessage is called by the network interface when a new message is | |
+// received. | |
+func (bs *Client) ReceiveMessage(ctx context.Context, p peer.ID, incoming bsmsg.BitSwapMessage) { | |
+ bs.counterLk.Lock() | |
+ bs.counters.messagesRecvd++ | |
+ bs.counterLk.Unlock() | |
+ | |
+ if bs.tracer != nil { | |
+ bs.tracer.MessageReceived(p, incoming) | |
+ } | |
+ | |
+ iblocks := incoming.Blocks() | |
+ | |
+ if len(iblocks) > 0 { | |
+ bs.updateReceiveCounters(iblocks) | |
+ for _, b := range iblocks { | |
+ log.Debugf("[recv] block; cid=%s, peer=%s", b.Cid(), p) | |
+ } | |
+ } | |
+ | |
+ haves := incoming.Haves() | |
+ dontHaves := incoming.DontHaves() | |
+ if len(iblocks) > 0 || len(haves) > 0 || len(dontHaves) > 0 { | |
+ // Process blocks | |
+ err := bs.receiveBlocksFrom(ctx, p, iblocks, haves, dontHaves) | |
+ if err != nil { | |
+ log.Warnf("ReceiveMessage recvBlockFrom error: %s", err) | |
+ return | |
+ } | |
+ } | |
+} | |
+ | |
+func (bs *Client) updateReceiveCounters(blocks []blocks.Block) { | |
+ // Check which blocks are in the datastore | |
+ // (Note: any errors from the blockstore are simply logged in | |
+ // blockstoreHas()) | |
+ blocksHas := bs.blockstoreHas(blocks) | |
+ | |
+ bs.counterLk.Lock() | |
+ defer bs.counterLk.Unlock() | |
+ | |
+ // Do some accounting for each block | |
+ for i, b := range blocks { | |
+ has := blocksHas[i] | |
+ | |
+ blkLen := len(b.RawData()) | |
+ bs.allMetric.Observe(float64(blkLen)) | |
+ if has { | |
+ bs.dupMetric.Observe(float64(blkLen)) | |
+ } | |
+ | |
+ c := bs.counters | |
+ | |
+ c.blocksRecvd++ | |
+ c.dataRecvd += uint64(blkLen) | |
+ if has { | |
+ c.dupBlocksRecvd++ | |
+ c.dupDataRecvd += uint64(blkLen) | |
+ } | |
+ } | |
+} | |
+ | |
+func (bs *Client) blockstoreHas(blks []blocks.Block) []bool { | |
+ res := make([]bool, len(blks)) | |
+ | |
+ wg := sync.WaitGroup{} | |
+ for i, block := range blks { | |
+ wg.Add(1) | |
+ go func(i int, b blocks.Block) { | |
+ defer wg.Done() | |
+ | |
+ has, err := bs.blockstore.Has(context.TODO(), b.Cid()) | |
+ if err != nil { | |
+ log.Infof("blockstore.Has error: %s", err) | |
+ has = false | |
+ } | |
+ | |
+ res[i] = has | |
+ }(i, block) | |
+ } | |
+ wg.Wait() | |
+ | |
+ return res | |
+} | |
+ | |
+// PeerConnected is called by the network interface | |
+// when a peer initiates a new connection to bitswap. | |
+func (bs *Client) PeerConnected(p peer.ID) { | |
+ bs.pm.Connected(p) | |
+} | |
+ | |
+// PeerDisconnected is called by the network interface when a peer | |
+// closes a connection | |
+func (bs *Client) PeerDisconnected(p peer.ID) { | |
+ bs.pm.Disconnected(p) | |
+} | |
+ | |
+// ReceiveError is called by the network interface when an error happens | |
+// at the network layer. Currently just logs error. | |
+func (bs *Client) ReceiveError(err error) { | |
+ log.Infof("Bitswap Client ReceiveError: %s", err) | |
+ // TODO log the network error | |
+ // TODO bubble the network error up to the parent context/error logger | |
+} | |
+ | |
+// Close is called to shut down the Client | |
+func (bs *Client) Close() error { | |
+ return bs.process.Close() | |
+} | |
+ | |
+// GetWantlist returns the current local wantlist (both want-blocks and | |
+// want-haves). | |
+func (bs *Client) GetWantlist() []cid.Cid { | |
+ return bs.pm.CurrentWants() | |
+} | |
+ | |
+// GetWantBlocks returns the current list of want-blocks. | |
+func (bs *Client) GetWantBlocks() []cid.Cid { | |
+ return bs.pm.CurrentWantBlocks() | |
+} | |
+ | |
+// GetWantHaves returns the current list of want-haves. | |
+func (bs *Client) GetWantHaves() []cid.Cid { | |
+ return bs.pm.CurrentWantHaves() | |
+} | |
+ | |
+// IsOnline is needed to match go-ipfs-exchange-interface | |
+func (bs *Client) IsOnline() bool { | |
+ return true | |
+} | |
+ | |
+// NewSession generates a new Bitswap session. You should use this, rather | |
+// than calling Client.GetBlocks, any time you intend to do several related | |
+// block requests in a row. The session returned will have its own GetBlocks | |
+// method, but the session will use the fact that the requests are related to | |
+// be more efficient in its requests to peers. If you are using a session | |
+// from go-blockservice, it will create a bitswap session automatically. | |
+func (bs *Client) NewSession(ctx context.Context) exchange.Fetcher { | |
+ ctx, span := internal.StartSpan(ctx, "NewSession") | |
+ defer span.End() | |
+ return bs.sm.NewSession(ctx, bs.provSearchDelay, bs.rebroadcastDelay) | |
+} | |
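The Client API above is typically driven as in the following minimal sketch. It assumes a *client.Client and a blockstore have already been constructed elsewhere; fetchOne, fetchMany and putAndAnnounce are hypothetical helper names, not part of the package.

package example

import (
	"context"
	"time"

	"github.com/ipfs/go-cid"
	blockstore "github.com/ipfs/go-ipfs-blockstore"
	"github.com/ipfs/go-libipfs/bitswap/client"
	blocks "github.com/ipfs/go-libipfs/blocks"
)

// fetchOne retrieves a single block, bounding the request with a deadline as
// the GetBlocks doc comment recommends.
func fetchOne(ctx context.Context, bs *client.Client, k cid.Cid) (blocks.Block, error) {
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	return bs.GetBlock(ctx, k)
}

// fetchMany uses a session so that related requests share peer and provider state.
func fetchMany(ctx context.Context, bs *client.Client, keys []cid.Cid) ([]blocks.Block, error) {
	session := bs.NewSession(ctx)
	ch, err := session.GetBlocks(ctx, keys)
	if err != nil {
		return nil, err
	}
	var out []blocks.Block
	for blk := range ch {
		out = append(out, blk)
	}
	return out, nil
}

// putAndAnnounce stores blocks locally first, then tells bitswap about them,
// matching the NotifyNewBlocks contract above.
func putAndAnnounce(ctx context.Context, bs *client.Client, store blockstore.Blockstore, blks []blocks.Block) error {
	if err := store.PutMany(ctx, blks); err != nil {
		return err
	}
	return bs.NotifyNewBlocks(ctx, blks...)
}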
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager/blockpresencemanager.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,121 @@ | |
+package blockpresencemanager | |
+ | |
+import ( | |
+ "sync" | |
+ | |
+ cid "github.com/ipfs/go-cid" | |
+ peer "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+// BlockPresenceManager keeps track of which peers have indicated that they | |
+// have or explicitly don't have a block | |
+type BlockPresenceManager struct { | |
+ sync.RWMutex | |
+ presence map[cid.Cid]map[peer.ID]bool | |
+} | |
+ | |
+func New() *BlockPresenceManager { | |
+ return &BlockPresenceManager{ | |
+ presence: make(map[cid.Cid]map[peer.ID]bool), | |
+ } | |
+} | |
+ | |
+// ReceiveFrom is called when a peer sends us information about which blocks | |
+// it has and does not have | |
+func (bpm *BlockPresenceManager) ReceiveFrom(p peer.ID, haves []cid.Cid, dontHaves []cid.Cid) { | |
+ bpm.Lock() | |
+ defer bpm.Unlock() | |
+ | |
+ for _, c := range haves { | |
+ bpm.updateBlockPresence(p, c, true) | |
+ } | |
+ for _, c := range dontHaves { | |
+ bpm.updateBlockPresence(p, c, false) | |
+ } | |
+} | |
+ | |
+func (bpm *BlockPresenceManager) updateBlockPresence(p peer.ID, c cid.Cid, present bool) { | |
+ _, ok := bpm.presence[c] | |
+ if !ok { | |
+ bpm.presence[c] = make(map[peer.ID]bool) | |
+ } | |
+ | |
+ // Make sure not to change HAVE to DONT_HAVE | |
+ has, pok := bpm.presence[c][p] | |
+ if pok && has { | |
+ return | |
+ } | |
+ bpm.presence[c][p] = present | |
+} | |
+ | |
+// PeerHasBlock indicates whether the given peer has sent a HAVE for the given | |
+// cid | |
+func (bpm *BlockPresenceManager) PeerHasBlock(p peer.ID, c cid.Cid) bool { | |
+ bpm.RLock() | |
+ defer bpm.RUnlock() | |
+ | |
+ return bpm.presence[c][p] | |
+} | |
+ | |
+// PeerDoesNotHaveBlock indicates whether the given peer has sent a DONT_HAVE | |
+// for the given cid | |
+func (bpm *BlockPresenceManager) PeerDoesNotHaveBlock(p peer.ID, c cid.Cid) bool { | |
+ bpm.RLock() | |
+ defer bpm.RUnlock() | |
+ | |
+ have, known := bpm.presence[c][p] | |
+ return known && !have | |
+} | |
+ | |
+// AllPeersDoNotHaveBlock filters the keys, returning those for which every | |
+// one of the given peers has sent a DONT_HAVE. | |
+// This lets us know when we've exhausted all possibilities of finding a | |
+// key with the peers we know about. | |
+func (bpm *BlockPresenceManager) AllPeersDoNotHaveBlock(peers []peer.ID, ks []cid.Cid) []cid.Cid { | |
+ bpm.RLock() | |
+ defer bpm.RUnlock() | |
+ | |
+ var res []cid.Cid | |
+ for _, c := range ks { | |
+ if bpm.allDontHave(peers, c) { | |
+ res = append(res, c) | |
+ } | |
+ } | |
+ return res | |
+} | |
+ | |
+func (bpm *BlockPresenceManager) allDontHave(peers []peer.ID, c cid.Cid) bool { | |
+ // Check if we know anything about the cid's block presence | |
+ ps, cok := bpm.presence[c] | |
+ if !cok { | |
+ return false | |
+ } | |
+ | |
+ // Check if we explicitly know that all the given peers do not have the cid | |
+ for _, p := range peers { | |
+ if has, pok := ps[p]; !pok || has { | |
+ return false | |
+ } | |
+ } | |
+ return true | |
+} | |
+ | |
+// RemoveKeys cleans up the given keys from the block presence map | |
+func (bpm *BlockPresenceManager) RemoveKeys(ks []cid.Cid) { | |
+ bpm.Lock() | |
+ defer bpm.Unlock() | |
+ | |
+ for _, c := range ks { | |
+ delete(bpm.presence, c) | |
+ } | |
+} | |
+ | |
+// HasKey indicates whether the BlockPresenceManager is tracking the given key | |
+// (used by the tests) | |
+func (bpm *BlockPresenceManager) HasKey(c cid.Cid) bool { | |
+ bpm.Lock() | |
+ defer bpm.Unlock() | |
+ | |
+ _, ok := bpm.presence[c] | |
+ return ok | |
+} | |
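A minimal sketch of the BlockPresenceManager call pattern. The package is internal to go-libipfs, so this only compiles from inside that module; the peer IDs and block below are illustrative.

package main

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
	bpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager"
	blocks "github.com/ipfs/go-libipfs/blocks"
	peer "github.com/libp2p/go-libp2p/core/peer"
)

func main() {
	m := bpm.New()

	c := blocks.NewBlock([]byte("example data")).Cid()
	peerA, peerB := peer.ID("peer-A"), peer.ID("peer-B")

	// peer-A told us it has the block, peer-B told us it doesn't.
	m.ReceiveFrom(peerA, []cid.Cid{c}, nil)
	m.ReceiveFrom(peerB, nil, []cid.Cid{c})

	fmt.Println(m.PeerHasBlock(peerA, c))         // true
	fmt.Println(m.PeerDoesNotHaveBlock(peerB, c)) // true

	// No CIDs are returned here, because peer-A still claims to have the block.
	fmt.Println(m.AllPeersDoNotHaveBlock([]peer.ID{peerA, peerB}, []cid.Cid{c}))

	// Once the block is received (or no longer wanted), drop the bookkeeping.
	m.RemoveKeys([]cid.Cid{c})
}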
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/getter/getter.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/getter/getter.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/getter/getter.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/getter/getter.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,138 @@ | |
+package getter | |
+ | |
+import ( | |
+ "context" | |
+ "errors" | |
+ | |
+ "github.com/ipfs/go-libipfs/bitswap/client/internal" | |
+ notifications "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications" | |
+ logging "github.com/ipfs/go-log" | |
+ | |
+ cid "github.com/ipfs/go-cid" | |
+ ipld "github.com/ipfs/go-ipld-format" | |
+ blocks "github.com/ipfs/go-libipfs/blocks" | |
+) | |
+ | |
+var log = logging.Logger("bitswap") | |
+ | |
+// GetBlocksFunc is any function that can take an array of CIDs and return a | |
+// channel of incoming blocks. | |
+type GetBlocksFunc func(context.Context, []cid.Cid) (<-chan blocks.Block, error) | |
+ | |
+// SyncGetBlock takes a block cid and an async function for getting several | |
+// blocks that returns a channel, and uses that function to return the | |
+// block synchronously. | |
+func SyncGetBlock(p context.Context, k cid.Cid, gb GetBlocksFunc) (blocks.Block, error) { | |
+ p, span := internal.StartSpan(p, "Getter.SyncGetBlock") | |
+ defer span.End() | |
+ | |
+ if !k.Defined() { | |
+ log.Error("undefined cid in GetBlock") | |
+ return nil, ipld.ErrNotFound{Cid: k} | |
+ } | |
+ | |
+ // Any async work initiated by this function must end when this function | |
+ // returns. To ensure this, derive a new context. Note that it is okay to | |
+ // listen on parent in this scope, but NOT okay to pass |parent| to | |
+ // functions called by this one. Otherwise those functions won't return | |
+ // when this context's cancel func is executed. This is difficult to | |
+ // enforce. May this comment keep you safe. | |
+ ctx, cancel := context.WithCancel(p) | |
+ defer cancel() | |
+ | |
+ promise, err := gb(ctx, []cid.Cid{k}) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ select { | |
+ case block, ok := <-promise: | |
+ if !ok { | |
+ select { | |
+ case <-ctx.Done(): | |
+ return nil, ctx.Err() | |
+ default: | |
+ return nil, errors.New("promise channel was closed") | |
+ } | |
+ } | |
+ return block, nil | |
+ case <-p.Done(): | |
+ return nil, p.Err() | |
+ } | |
+} | |
+ | |
+// WantFunc is any function that can express a want for a set of blocks. | |
+type WantFunc func(context.Context, []cid.Cid) | |
+ | |
+// AsyncGetBlocks takes a set of block cids, a pubsub channel for incoming | |
+// blocks, a want function, and a close function, and returns a channel of | |
+// incoming blocks. | |
+func AsyncGetBlocks(ctx context.Context, sessctx context.Context, keys []cid.Cid, notif notifications.PubSub, | |
+ want WantFunc, cwants func([]cid.Cid)) (<-chan blocks.Block, error) { | |
+ ctx, span := internal.StartSpan(ctx, "Getter.AsyncGetBlocks") | |
+ defer span.End() | |
+ | |
+ // If there are no keys supplied, just return a closed channel | |
+ if len(keys) == 0 { | |
+ out := make(chan blocks.Block) | |
+ close(out) | |
+ return out, nil | |
+ } | |
+ | |
+ // Use a PubSub notifier to listen for incoming blocks for each key | |
+ remaining := cid.NewSet() | |
+ promise := notif.Subscribe(ctx, keys...) | |
+ for _, k := range keys { | |
+ log.Debugw("Bitswap.GetBlockRequest.Start", "cid", k) | |
+ remaining.Add(k) | |
+ } | |
+ | |
+ // Send the want request for the keys to the network | |
+ want(ctx, keys) | |
+ | |
+ out := make(chan blocks.Block) | |
+ go handleIncoming(ctx, sessctx, remaining, promise, out, cwants) | |
+ return out, nil | |
+} | |
+ | |
+// Listens for incoming blocks, passing them to the out channel. | |
+// If the context is cancelled or the incoming channel closes, calls cfun with | |
+// any keys corresponding to blocks that were never received. | |
+func handleIncoming(ctx context.Context, sessctx context.Context, remaining *cid.Set, | |
+ in <-chan blocks.Block, out chan blocks.Block, cfun func([]cid.Cid)) { | |
+ | |
+ ctx, cancel := context.WithCancel(ctx) | |
+ | |
+ // Clean up before exiting this function, and call the cancel function on | |
+ // any remaining keys | |
+ defer func() { | |
+ cancel() | |
+ close(out) | |
+ // can't just defer this call on its own, arguments are resolved *when* the defer is created | |
+ cfun(remaining.Keys()) | |
+ }() | |
+ | |
+ for { | |
+ select { | |
+ case blk, ok := <-in: | |
+ // If the channel is closed, we're done (note that PubSub closes | |
+ // the channel once all the keys have been received) | |
+ if !ok { | |
+ return | |
+ } | |
+ | |
+ remaining.Remove(blk.Cid()) | |
+ select { | |
+ case out <- blk: | |
+ case <-ctx.Done(): | |
+ return | |
+ case <-sessctx.Done(): | |
+ return | |
+ } | |
+ case <-ctx.Done(): | |
+ return | |
+ case <-sessctx.Done(): | |
+ return | |
+ } | |
+ } | |
+} | |
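A standalone sketch of the sync-over-async pattern that SyncGetBlock implements: a toy GetBlocksFunc delivers blocks on a channel, and the caller resolves the first one synchronously. getBlocksFunc and syncGet are illustrative names mirroring the shapes above, not part of the package.

package main

import (
	"context"
	"errors"
	"fmt"

	cid "github.com/ipfs/go-cid"
	blocks "github.com/ipfs/go-libipfs/blocks"
)

// getBlocksFunc mirrors the GetBlocksFunc shape above.
type getBlocksFunc func(context.Context, []cid.Cid) (<-chan blocks.Block, error)

// syncGet resolves a single block from an async getter, following the same
// select pattern as SyncGetBlock: the first block wins, and a cancelled
// context or closed channel ends the wait with an error.
func syncGet(ctx context.Context, k cid.Cid, gb getBlocksFunc) (blocks.Block, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	ch, err := gb(ctx, []cid.Cid{k})
	if err != nil {
		return nil, err
	}
	select {
	case blk, ok := <-ch:
		if !ok {
			return nil, errors.New("promise channel was closed")
		}
		return blk, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func main() {
	blk := blocks.NewBlock([]byte("hello"))

	// A toy getter that "finds" the block immediately.
	gb := func(ctx context.Context, ks []cid.Cid) (<-chan blocks.Block, error) {
		out := make(chan blocks.Block, len(ks))
		out <- blk
		close(out)
		return out, nil
	}

	got, err := syncGet(context.Background(), blk.Cid(), gb)
	fmt.Println(got.Cid() == blk.Cid(), err) // true <nil>
}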
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/messagequeue/donthavetimeoutmgr.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,398 @@ | |
+package messagequeue | |
+ | |
+import ( | |
+ "context" | |
+ "sync" | |
+ "time" | |
+ | |
+ "github.com/benbjohnson/clock" | |
+ cid "github.com/ipfs/go-cid" | |
+ "github.com/libp2p/go-libp2p/p2p/protocol/ping" | |
+) | |
+ | |
+const ( | |
+ // dontHaveTimeout is used to simulate a DONT_HAVE when communicating with | |
+ // a peer whose Bitswap client doesn't support the DONT_HAVE response, | |
+ // or when the peer takes too long to respond. | |
+ // If the peer doesn't respond to a want-block within the timeout, the | |
+ // local node assumes that the peer doesn't have the block. | |
+ dontHaveTimeout = 5 * time.Second | |
+ | |
+ // maxExpectedWantProcessTime is the maximum amount of time we expect a | |
+ // peer to take to process a want and initiate sending a response to us | |
+ maxExpectedWantProcessTime = 2 * time.Second | |
+ | |
+ // maxTimeout is the maximum allowed timeout, regardless of latency | |
+ maxTimeout = dontHaveTimeout + maxExpectedWantProcessTime | |
+ | |
+ // pingLatencyMultiplier is multiplied by the average ping time to | |
+ // get an upper bound on how long we expect to wait for a peer's response | |
+ // to arrive | |
+ pingLatencyMultiplier = 3 | |
+ | |
+ // messageLatencyAlpha is the alpha supplied to the message latency EWMA | |
+ messageLatencyAlpha = 0.5 | |
+ | |
+ // To give a margin for error, the timeout is calculated as | |
+ // messageLatencyMultiplier * message latency | |
+ messageLatencyMultiplier = 2 | |
+) | |
+ | |
+// PeerConnection is a connection to a peer that can be pinged, and the | |
+// average latency measured | |
+type PeerConnection interface { | |
+ // Ping the peer | |
+ Ping(context.Context) ping.Result | |
+ // The average latency of all pings | |
+ Latency() time.Duration | |
+} | |
+ | |
+// pendingWant keeps track of a want that has been sent and we're waiting | |
+// for a response or for a timeout to expire | |
+type pendingWant struct { | |
+ c cid.Cid | |
+ active bool | |
+ sent time.Time | |
+} | |
+ | |
+// dontHaveTimeoutMgr simulates a DONT_HAVE message if the peer takes too long | |
+// to respond to a message. | |
+// The timeout is based on latency - we start with a default latency, while | |
+// we ping the peer to estimate latency. If we receive a response from the | |
+// peer we use the response latency. | |
+type dontHaveTimeoutMgr struct { | |
+ clock clock.Clock | |
+ ctx context.Context | |
+ shutdown func() | |
+ peerConn PeerConnection | |
+ onDontHaveTimeout func([]cid.Cid) | |
+ defaultTimeout time.Duration | |
+ maxTimeout time.Duration | |
+ pingLatencyMultiplier int | |
+ messageLatencyMultiplier int | |
+ maxExpectedWantProcessTime time.Duration | |
+ | |
+ // All variables below here must be protected by the lock | |
+ lk sync.RWMutex | |
+ // has the timeout manager started | |
+ started bool | |
+ // wants that are active (waiting for a response or timeout) | |
+ activeWants map[cid.Cid]*pendingWant | |
+ // queue of wants, from oldest to newest | |
+ wantQueue []*pendingWant | |
+ // time to wait for a response (depends on latency) | |
+ timeout time.Duration | |
+ // ewma of message latency (time from message sent to response received) | |
+ messageLatency *latencyEwma | |
+ // timer used to wait until want at front of queue expires | |
+ checkForTimeoutsTimer *clock.Timer | |
+ // used for testing -- timeoutsTriggered is signalled when scheduled DONT_HAVE timeouts fire | |
+ timeoutsTriggered chan struct{} | |
+} | |
+ | |
+// newDontHaveTimeoutMgr creates a new dontHaveTimeoutMgr | |
+// onDontHaveTimeout is called when pending keys expire (not cancelled before timeout) | |
+func newDontHaveTimeoutMgr(pc PeerConnection, onDontHaveTimeout func([]cid.Cid), clock clock.Clock) *dontHaveTimeoutMgr { | |
+ return newDontHaveTimeoutMgrWithParams(pc, onDontHaveTimeout, dontHaveTimeout, maxTimeout, | |
+ pingLatencyMultiplier, messageLatencyMultiplier, maxExpectedWantProcessTime, clock, nil) | |
+} | |
+ | |
+// newDontHaveTimeoutMgrWithParams is used by the tests | |
+func newDontHaveTimeoutMgrWithParams( | |
+ pc PeerConnection, | |
+ onDontHaveTimeout func([]cid.Cid), | |
+ defaultTimeout time.Duration, | |
+ maxTimeout time.Duration, | |
+ pingLatencyMultiplier int, | |
+ messageLatencyMultiplier int, | |
+ maxExpectedWantProcessTime time.Duration, | |
+ clock clock.Clock, | |
+ timeoutsTriggered chan struct{}) *dontHaveTimeoutMgr { | |
+ | |
+ ctx, shutdown := context.WithCancel(context.Background()) | |
+ mqp := &dontHaveTimeoutMgr{ | |
+ clock: clock, | |
+ ctx: ctx, | |
+ shutdown: shutdown, | |
+ peerConn: pc, | |
+ activeWants: make(map[cid.Cid]*pendingWant), | |
+ timeout: defaultTimeout, | |
+ messageLatency: &latencyEwma{alpha: messageLatencyAlpha}, | |
+ defaultTimeout: defaultTimeout, | |
+ maxTimeout: maxTimeout, | |
+ pingLatencyMultiplier: pingLatencyMultiplier, | |
+ messageLatencyMultiplier: messageLatencyMultiplier, | |
+ maxExpectedWantProcessTime: maxExpectedWantProcessTime, | |
+ onDontHaveTimeout: onDontHaveTimeout, | |
+ timeoutsTriggered: timeoutsTriggered, | |
+ } | |
+ | |
+ return mqp | |
+} | |
+ | |
+// Shutdown the dontHaveTimeoutMgr. Any subsequent call to Start() will be ignored | |
+func (dhtm *dontHaveTimeoutMgr) Shutdown() { | |
+ dhtm.shutdown() | |
+ | |
+ dhtm.lk.Lock() | |
+ defer dhtm.lk.Unlock() | |
+ | |
+ // Clear any pending check for timeouts | |
+ if dhtm.checkForTimeoutsTimer != nil { | |
+ dhtm.checkForTimeoutsTimer.Stop() | |
+ } | |
+} | |
+ | |
+// Start the dontHaveTimeoutMgr. This method is idempotent | |
+func (dhtm *dontHaveTimeoutMgr) Start() { | |
+ dhtm.lk.Lock() | |
+ defer dhtm.lk.Unlock() | |
+ | |
+ // Make sure the dont have timeout manager hasn't already been started | |
+ if dhtm.started { | |
+ return | |
+ } | |
+ dhtm.started = true | |
+ | |
+ // If we already have a measure of latency to the peer, use it to | |
+ // calculate a reasonable timeout | |
+ latency := dhtm.peerConn.Latency() | |
+ if latency.Nanoseconds() > 0 { | |
+ dhtm.timeout = dhtm.calculateTimeoutFromPingLatency(latency) | |
+ return | |
+ } | |
+ | |
+ // Otherwise measure latency by pinging the peer | |
+ go dhtm.measurePingLatency() | |
+} | |
+ | |
+// UpdateMessageLatency is called when we receive a response from the peer. | |
+// elapsed is the time between sending a request and receiving the | |
+// corresponding response. | |
+func (dhtm *dontHaveTimeoutMgr) UpdateMessageLatency(elapsed time.Duration) { | |
+ dhtm.lk.Lock() | |
+ defer dhtm.lk.Unlock() | |
+ | |
+ // Update the message latency and the timeout | |
+ dhtm.messageLatency.update(elapsed) | |
+ oldTimeout := dhtm.timeout | |
+ dhtm.timeout = dhtm.calculateTimeoutFromMessageLatency() | |
+ | |
+ // If the timeout has decreased | |
+ if dhtm.timeout < oldTimeout { | |
+ // Check if after changing the timeout there are any pending wants that | |
+ // are now over the timeout | |
+ dhtm.checkForTimeouts() | |
+ } | |
+} | |
+ | |
+// measurePingLatency measures the latency to the peer by pinging it | |
+func (dhtm *dontHaveTimeoutMgr) measurePingLatency() { | |
+ // Wait up to defaultTimeout for a response to the ping | |
+ ctx, cancel := context.WithTimeout(dhtm.ctx, dhtm.defaultTimeout) | |
+ defer cancel() | |
+ | |
+ // Ping the peer | |
+ res := dhtm.peerConn.Ping(ctx) | |
+ if res.Error != nil { | |
+ // If there was an error, we'll just leave the timeout as | |
+ // defaultTimeout | |
+ return | |
+ } | |
+ | |
+ // Get the average latency to the peer | |
+ latency := dhtm.peerConn.Latency() | |
+ | |
+ dhtm.lk.Lock() | |
+ defer dhtm.lk.Unlock() | |
+ | |
+ // A message has arrived so we already set the timeout based on message latency | |
+ if dhtm.messageLatency.samples > 0 { | |
+ return | |
+ } | |
+ | |
+ // Calculate a reasonable timeout based on latency | |
+ dhtm.timeout = dhtm.calculateTimeoutFromPingLatency(latency) | |
+ | |
+ // Check if after changing the timeout there are any pending wants that are | |
+ // now over the timeout | |
+ dhtm.checkForTimeouts() | |
+} | |
+ | |
+// checkForTimeouts checks pending wants to see if any are over the timeout. | |
+// Note: this function should only be called within the lock. | |
+func (dhtm *dontHaveTimeoutMgr) checkForTimeouts() { | |
+ | |
+ if len(dhtm.wantQueue) == 0 { | |
+ return | |
+ } | |
+ | |
+ // Figure out which of the blocks that were wanted were not received | |
+ // within the timeout | |
+ expired := make([]cid.Cid, 0, len(dhtm.activeWants)) | |
+ for len(dhtm.wantQueue) > 0 { | |
+ pw := dhtm.wantQueue[0] | |
+ | |
+ // If the want is still active | |
+ if pw.active { | |
+ // The queue is in order from earliest to latest, so if we | |
+ // didn't find an expired entry we can stop iterating | |
+ if dhtm.clock.Since(pw.sent) < dhtm.timeout { | |
+ break | |
+ } | |
+ | |
+ // Add the want to the expired list | |
+ expired = append(expired, pw.c) | |
+ // Remove the want from the activeWants map | |
+ delete(dhtm.activeWants, pw.c) | |
+ } | |
+ | |
+ // Remove expired or cancelled wants from the want queue | |
+ dhtm.wantQueue = dhtm.wantQueue[1:] | |
+ } | |
+ | |
+ // Fire the timeout event for the expired wants | |
+ if len(expired) > 0 { | |
+ go dhtm.fireTimeout(expired) | |
+ } | |
+ | |
+ if len(dhtm.wantQueue) == 0 { | |
+ return | |
+ } | |
+ | |
+ // Make sure the timeout manager is still running | |
+ if dhtm.ctx.Err() != nil { | |
+ return | |
+ } | |
+ | |
+ // Schedule the next check for the moment when the oldest pending want will | |
+ // timeout | |
+ oldestStart := dhtm.wantQueue[0].sent | |
+ until := oldestStart.Add(dhtm.timeout).Sub(dhtm.clock.Now()) | |
+ if dhtm.checkForTimeoutsTimer == nil { | |
+ dhtm.checkForTimeoutsTimer = dhtm.clock.Timer(until) | |
+ go dhtm.consumeTimeouts() | |
+ } else { | |
+ dhtm.checkForTimeoutsTimer.Stop() | |
+ dhtm.checkForTimeoutsTimer.Reset(until) | |
+ } | |
+} | |
+ | |
+func (dhtm *dontHaveTimeoutMgr) consumeTimeouts() { | |
+ for { | |
+ select { | |
+ case <-dhtm.ctx.Done(): | |
+ return | |
+ case <-dhtm.checkForTimeoutsTimer.C: | |
+ dhtm.lk.Lock() | |
+ dhtm.checkForTimeouts() | |
+ dhtm.lk.Unlock() | |
+ } | |
+ } | |
+} | |
+ | |
+// AddPending adds the given keys that will expire if not cancelled before | |
+// the timeout | |
+func (dhtm *dontHaveTimeoutMgr) AddPending(ks []cid.Cid) { | |
+ if len(ks) == 0 { | |
+ return | |
+ } | |
+ | |
+ start := dhtm.clock.Now() | |
+ | |
+ dhtm.lk.Lock() | |
+ defer dhtm.lk.Unlock() | |
+ | |
+ queueWasEmpty := len(dhtm.activeWants) == 0 | |
+ | |
+ // Record the start time for each key | |
+ for _, c := range ks { | |
+ if _, ok := dhtm.activeWants[c]; !ok { | |
+ pw := pendingWant{ | |
+ c: c, | |
+ sent: start, | |
+ active: true, | |
+ } | |
+ dhtm.activeWants[c] = &pw | |
+ dhtm.wantQueue = append(dhtm.wantQueue, &pw) | |
+ } | |
+ } | |
+ | |
+ // If there was already an earlier pending item in the queue, then there | |
+ // must already be a timeout check scheduled. If there is nothing in the | |
+ // queue then we should make sure to schedule a check. | |
+ if queueWasEmpty { | |
+ dhtm.checkForTimeouts() | |
+ } | |
+} | |
+ | |
+// CancelPending is called when we receive a response for a key | |
+func (dhtm *dontHaveTimeoutMgr) CancelPending(ks []cid.Cid) { | |
+ dhtm.lk.Lock() | |
+ defer dhtm.lk.Unlock() | |
+ | |
+ // Mark the wants as cancelled | |
+ for _, c := range ks { | |
+ if pw, ok := dhtm.activeWants[c]; ok { | |
+ pw.active = false | |
+ delete(dhtm.activeWants, c) | |
+ } | |
+ } | |
+} | |
+ | |
+// fireTimeout fires the onDontHaveTimeout method with the timed out keys | |
+func (dhtm *dontHaveTimeoutMgr) fireTimeout(pending []cid.Cid) { | |
+ // Make sure the timeout manager has not been shut down | |
+ if dhtm.ctx.Err() != nil { | |
+ return | |
+ } | |
+ | |
+ // Fire the timeout | |
+ dhtm.onDontHaveTimeout(pending) | |
+ | |
+ // signal a timeout fired | |
+ if dhtm.timeoutsTriggered != nil { | |
+ dhtm.timeoutsTriggered <- struct{}{} | |
+ } | |
+} | |
+ | |
+// calculateTimeoutFromPingLatency calculates a reasonable timeout derived from latency | |
+func (dhtm *dontHaveTimeoutMgr) calculateTimeoutFromPingLatency(latency time.Duration) time.Duration { | |
+ // The maximum expected time for a response is | |
+ // the expected time to process the want + (latency * multiplier) | |
+ // The multiplier is to provide some padding for variable latency. | |
+ timeout := dhtm.maxExpectedWantProcessTime + time.Duration(dhtm.pingLatencyMultiplier)*latency | |
+ if timeout > dhtm.maxTimeout { | |
+ timeout = dhtm.maxTimeout | |
+ } | |
+ return timeout | |
+} | |
+ | |
+// calculateTimeoutFromMessageLatency calculates a timeout derived from message latency | |
+func (dhtm *dontHaveTimeoutMgr) calculateTimeoutFromMessageLatency() time.Duration { | |
+ timeout := dhtm.messageLatency.latency * time.Duration(dhtm.messageLatencyMultiplier) | |
+ if timeout > dhtm.maxTimeout { | |
+ timeout = dhtm.maxTimeout | |
+ } | |
+ return timeout | |
+} | |
+ | |
+// latencyEwma is an EWMA of message latency | |
+type latencyEwma struct { | |
+ alpha float64 | |
+ samples uint64 | |
+ latency time.Duration | |
+} | |
+ | |
+// update the EWMA with the given sample | |
+func (le *latencyEwma) update(elapsed time.Duration) { | |
+ le.samples++ | |
+ | |
+ // Initially set alpha to be 1.0 / <the number of samples> | |
+ alpha := 1.0 / float64(le.samples) | |
+ if alpha < le.alpha { | |
+ // Once we have enough samples, clamp alpha | |
+ alpha = le.alpha | |
+ } | |
+ le.latency = time.Duration(float64(elapsed)*alpha + (1-alpha)*float64(le.latency)) | |
+} | |
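A standalone sketch of the timeout arithmetic above: the ping-based timeout pads the expected want-processing time with a multiple of the ping latency, and the message-based timeout doubles an EWMA of observed request-to-response times; both are capped at maxTimeout. The constants mirror the defaults above; the sample latencies are illustrative.

package main

import (
	"fmt"
	"time"
)

// ewma mirrors latencyEwma: alpha starts at 1/n and is clamped once enough
// samples arrive, so early samples converge quickly and later ones smooth.
type ewma struct {
	alpha   float64
	samples uint64
	latency time.Duration
}

func (e *ewma) update(elapsed time.Duration) {
	e.samples++
	a := 1.0 / float64(e.samples)
	if a < e.alpha {
		a = e.alpha
	}
	e.latency = time.Duration(float64(elapsed)*a + (1-a)*float64(e.latency))
}

func main() {
	const (
		defaultTimeout             = 5 * time.Second
		maxExpectedWantProcessTime = 2 * time.Second
		maxTimeout                 = defaultTimeout + maxExpectedWantProcessTime
		pingLatencyMultiplier      = 3
		messageLatencyMultiplier   = 2
	)

	// Ping-based timeout: processing allowance + padded round-trip estimate.
	ping := 150 * time.Millisecond
	pingTimeout := maxExpectedWantProcessTime + time.Duration(pingLatencyMultiplier)*ping
	if pingTimeout > maxTimeout {
		pingTimeout = maxTimeout
	}
	fmt.Println(pingTimeout) // 2.45s

	// Message-latency timeout: EWMA of request->response times, then padded.
	le := ewma{alpha: 0.5}
	for _, d := range []time.Duration{400 * time.Millisecond, 600 * time.Millisecond, time.Second} {
		le.update(d)
	}
	msgTimeout := le.latency * messageLatencyMultiplier
	if msgTimeout > maxTimeout {
		msgTimeout = maxTimeout
	}
	fmt.Println(le.latency, msgTimeout) // 750ms 1.5s
}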
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/messagequeue/messagequeue.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/messagequeue/messagequeue.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/messagequeue/messagequeue.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/messagequeue/messagequeue.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,843 @@ | |
+package messagequeue | |
+ | |
+import ( | |
+ "context" | |
+ "math" | |
+ "sync" | |
+ "time" | |
+ | |
+ "github.com/benbjohnson/clock" | |
+ cid "github.com/ipfs/go-cid" | |
+ bswl "github.com/ipfs/go-libipfs/bitswap/client/wantlist" | |
+ bsmsg "github.com/ipfs/go-libipfs/bitswap/message" | |
+ pb "github.com/ipfs/go-libipfs/bitswap/message/pb" | |
+ bsnet "github.com/ipfs/go-libipfs/bitswap/network" | |
+ logging "github.com/ipfs/go-log" | |
+ peer "github.com/libp2p/go-libp2p/core/peer" | |
+ "github.com/libp2p/go-libp2p/p2p/protocol/ping" | |
+ "go.uber.org/zap" | |
+) | |
+ | |
+var log = logging.Logger("bitswap") | |
+var sflog = log.Desugar() | |
+ | |
+const ( | |
+ defaultRebroadcastInterval = 30 * time.Second | |
+ // maxRetries is the number of times to attempt to send a message before | |
+ // giving up | |
+ maxRetries = 3 | |
+ sendTimeout = 30 * time.Second | |
+ // maxMessageSize is the maximum message size in bytes | |
+ maxMessageSize = 1024 * 1024 * 2 | |
+ // sendErrorBackoff is the time to wait before retrying to connect after | |
+ // an error when trying to send a message | |
+ sendErrorBackoff = 100 * time.Millisecond | |
+ // maxPriority is the max priority as defined by the bitswap protocol | |
+ maxPriority = math.MaxInt32 | |
+ // sendMessageDebounce is the debounce duration when calling sendMessage() | |
+ sendMessageDebounce = time.Millisecond | |
+ // when we reach sendMessageCutoff wants/cancels, we'll send the message immediately. | |
+ sendMessageCutoff = 256 | |
+ // when we debounce for more than sendMessageMaxDelay, we'll send the | |
+ // message immediately. | |
+ sendMessageMaxDelay = 20 * time.Millisecond | |
+ // The maximum amount of time in which to accept a response as being valid | |
+ // for latency calculation (as opposed to discarding it as an outlier) | |
+ maxValidLatency = 30 * time.Second | |
+) | |
+ | |
+// MessageNetwork is any network that can connect peers and generate a message | |
+// sender. | |
+type MessageNetwork interface { | |
+ ConnectTo(context.Context, peer.ID) error | |
+ NewMessageSender(context.Context, peer.ID, *bsnet.MessageSenderOpts) (bsnet.MessageSender, error) | |
+ Latency(peer.ID) time.Duration | |
+ Ping(context.Context, peer.ID) ping.Result | |
+ Self() peer.ID | |
+} | |
+ | |
+// MessageQueue implements queue of want messages to send to peers. | |
+type MessageQueue struct { | |
+ ctx context.Context | |
+ shutdown func() | |
+ p peer.ID | |
+ network MessageNetwork | |
+ dhTimeoutMgr DontHaveTimeoutManager | |
+ | |
+ // The maximum size of a message in bytes. Any overflow is put into the | |
+ // next message | |
+ maxMessageSize int | |
+ | |
+ // The amount of time to wait when there's an error sending to a peer | |
+ // before retrying | |
+ sendErrorBackoff time.Duration | |
+ | |
+ // The maximum amount of time in which to accept a response as being valid | |
+ // for latency calculation | |
+ maxValidLatency time.Duration | |
+ | |
+ // Signals that there are outgoing wants / cancels ready to be processed | |
+ outgoingWork chan time.Time | |
+ | |
+ // Channel of CIDs of blocks / HAVEs / DONT_HAVEs received from the peer | |
+ responses chan []cid.Cid | |
+ | |
+ // Take lock whenever any of these variables are modified | |
+ wllock sync.Mutex | |
+ bcstWants recallWantlist | |
+ peerWants recallWantlist | |
+ cancels *cid.Set | |
+ priority int32 | |
+ | |
+ // Don't touch any of these variables outside of the run loop | |
+ sender bsnet.MessageSender | |
+ rebroadcastIntervalLk sync.RWMutex | |
+ rebroadcastInterval time.Duration | |
+ rebroadcastTimer *clock.Timer | |
+ // For performance reasons we just clear out the fields of the message | |
+ // instead of creating a new one every time. | |
+ msg bsmsg.BitSwapMessage | |
+ | |
+ // For simulating time -- uses mock in test | |
+ clock clock.Clock | |
+ | |
+ // Used to track things that happen asynchronously -- used only in test | |
+ events chan messageEvent | |
+} | |
+ | |
+// recallWantlist keeps a list of pending wants and a list of sent wants | |
+type recallWantlist struct { | |
+ // The list of wants that have not yet been sent | |
+ pending *bswl.Wantlist | |
+ // The list of wants that have been sent | |
+ sent *bswl.Wantlist | |
+ // The time at which each want was sent | |
+ sentAt map[cid.Cid]time.Time | |
+} | |
+ | |
+func newRecallWantList() recallWantlist { | |
+ return recallWantlist{ | |
+ pending: bswl.New(), | |
+ sent: bswl.New(), | |
+ sentAt: make(map[cid.Cid]time.Time), | |
+ } | |
+} | |
+ | |
+// Add want to the pending list | |
+func (r *recallWantlist) Add(c cid.Cid, priority int32, wtype pb.Message_Wantlist_WantType) { | |
+ r.pending.Add(c, priority, wtype) | |
+} | |
+ | |
+// Remove wants from both the pending list and the list of sent wants | |
+func (r *recallWantlist) Remove(c cid.Cid) { | |
+ r.pending.Remove(c) | |
+ r.sent.Remove(c) | |
+ delete(r.sentAt, c) | |
+} | |
+ | |
+// Remove wants by type from both the pending list and the list of sent wants | |
+func (r *recallWantlist) RemoveType(c cid.Cid, wtype pb.Message_Wantlist_WantType) { | |
+ r.pending.RemoveType(c, wtype) | |
+ r.sent.RemoveType(c, wtype) | |
+ if _, ok := r.sent.Contains(c); !ok { | |
+ delete(r.sentAt, c) | |
+ } | |
+} | |
+ | |
+// MarkSent moves the want from the pending to the sent list | |
+// | |
+// Returns true if the want was marked as sent. Returns false if the want wasn't | |
+// pending. | |
+func (r *recallWantlist) MarkSent(e bswl.Entry) bool { | |
+ if !r.pending.RemoveType(e.Cid, e.WantType) { | |
+ return false | |
+ } | |
+ r.sent.Add(e.Cid, e.Priority, e.WantType) | |
+ return true | |
+} | |
+ | |
+// SentAt records the time at which a want was sent | |
+func (r *recallWantlist) SentAt(c cid.Cid, at time.Time) { | |
+ // The want may have been cancelled in the interim | |
+ if _, ok := r.sent.Contains(c); ok { | |
+ if _, ok := r.sentAt[c]; !ok { | |
+ r.sentAt[c] = at | |
+ } | |
+ } | |
+} | |
+ | |
+// ClearSentAt clears out the record of the time a want was sent. | |
+// We clear the sent at time when we receive a response for a key as we | |
+// only need the first response for latency measurement. | |
+func (r *recallWantlist) ClearSentAt(c cid.Cid) { | |
+ delete(r.sentAt, c) | |
+} | |
+ | |
+type peerConn struct { | |
+ p peer.ID | |
+ network MessageNetwork | |
+} | |
+ | |
+func newPeerConnection(p peer.ID, network MessageNetwork) *peerConn { | |
+ return &peerConn{p, network} | |
+} | |
+ | |
+func (pc *peerConn) Ping(ctx context.Context) ping.Result { | |
+ return pc.network.Ping(ctx, pc.p) | |
+} | |
+ | |
+func (pc *peerConn) Latency() time.Duration { | |
+ return pc.network.Latency(pc.p) | |
+} | |
+ | |
+// Fires when a timeout occurs waiting for a response from a peer running an | |
+// older version of Bitswap that doesn't support DONT_HAVE messages. | |
+type OnDontHaveTimeout func(peer.ID, []cid.Cid) | |
+ | |
+// DontHaveTimeoutManager pings a peer to estimate latency so it can set a reasonable | |
+// upper bound on when to consider a DONT_HAVE request as timed out (when connected to | |
+// a peer that doesn't support DONT_HAVE messages) | |
+type DontHaveTimeoutManager interface { | |
+ // Start the manager (idempotent) | |
+ Start() | |
+ // Shutdown the manager (Shutdown is final, manager cannot be restarted) | |
+ Shutdown() | |
+ // AddPending adds the wants as pending a response. If they are not | |
+ // cancelled before the timeout, the OnDontHaveTimeout method will be called. | |
+ AddPending([]cid.Cid) | |
+ // CancelPending removes the wants | |
+ CancelPending([]cid.Cid) | |
+ // UpdateMessageLatency informs the manager of a new latency measurement | |
+ UpdateMessageLatency(time.Duration) | |
+} | |
+ | |
+// New creates a new MessageQueue. | |
+func New(ctx context.Context, p peer.ID, network MessageNetwork, onDontHaveTimeout OnDontHaveTimeout) *MessageQueue { | |
+ onTimeout := func(ks []cid.Cid) { | |
+ log.Infow("Bitswap: timeout waiting for blocks", "cids", ks, "peer", p) | |
+ onDontHaveTimeout(p, ks) | |
+ } | |
+ clock := clock.New() | |
+ dhTimeoutMgr := newDontHaveTimeoutMgr(newPeerConnection(p, network), onTimeout, clock) | |
+ return newMessageQueue(ctx, p, network, maxMessageSize, sendErrorBackoff, maxValidLatency, dhTimeoutMgr, clock, nil) | |
+} | |
+ | |
+type messageEvent int | |
+ | |
+const ( | |
+ messageQueued messageEvent = iota | |
+ messageFinishedSending | |
+ latenciesRecorded | |
+) | |
+ | |
+// This constructor is used by the tests | |
+func newMessageQueue( | |
+ ctx context.Context, | |
+ p peer.ID, | |
+ network MessageNetwork, | |
+ maxMsgSize int, | |
+ sendErrorBackoff time.Duration, | |
+ maxValidLatency time.Duration, | |
+ dhTimeoutMgr DontHaveTimeoutManager, | |
+ clock clock.Clock, | |
+ events chan messageEvent) *MessageQueue { | |
+ | |
+ ctx, cancel := context.WithCancel(ctx) | |
+ return &MessageQueue{ | |
+ ctx: ctx, | |
+ shutdown: cancel, | |
+ p: p, | |
+ network: network, | |
+ dhTimeoutMgr: dhTimeoutMgr, | |
+ maxMessageSize: maxMsgSize, | |
+ bcstWants: newRecallWantList(), | |
+ peerWants: newRecallWantList(), | |
+ cancels: cid.NewSet(), | |
+ outgoingWork: make(chan time.Time, 1), | |
+ responses: make(chan []cid.Cid, 8), | |
+ rebroadcastInterval: defaultRebroadcastInterval, | |
+ sendErrorBackoff: sendErrorBackoff, | |
+ maxValidLatency: maxValidLatency, | |
+ priority: maxPriority, | |
+ // For performance reasons we just clear out the fields of the message | |
+ // after using it, instead of creating a new one every time. | |
+ msg: bsmsg.New(false), | |
+ clock: clock, | |
+ events: events, | |
+ } | |
+} | |
+ | |
+// Add want-haves that are part of a broadcast to all connected peers | |
+func (mq *MessageQueue) AddBroadcastWantHaves(wantHaves []cid.Cid) { | |
+ if len(wantHaves) == 0 { | |
+ return | |
+ } | |
+ | |
+ mq.wllock.Lock() | |
+ defer mq.wllock.Unlock() | |
+ | |
+ for _, c := range wantHaves { | |
+ mq.bcstWants.Add(c, mq.priority, pb.Message_Wantlist_Have) | |
+ mq.priority-- | |
+ | |
+ // We're adding a want-have for the cid, so clear any pending cancel | |
+ // for the cid | |
+ mq.cancels.Remove(c) | |
+ } | |
+ | |
+ // Schedule a message send | |
+ mq.signalWorkReady() | |
+} | |
+ | |
+// Add want-haves and want-blocks for the peer for this message queue. | |
+func (mq *MessageQueue) AddWants(wantBlocks []cid.Cid, wantHaves []cid.Cid) { | |
+ if len(wantBlocks) == 0 && len(wantHaves) == 0 { | |
+ return | |
+ } | |
+ | |
+ mq.wllock.Lock() | |
+ defer mq.wllock.Unlock() | |
+ | |
+ for _, c := range wantHaves { | |
+ mq.peerWants.Add(c, mq.priority, pb.Message_Wantlist_Have) | |
+ mq.priority-- | |
+ | |
+ // We're adding a want-have for the cid, so clear any pending cancel | |
+ // for the cid | |
+ mq.cancels.Remove(c) | |
+ } | |
+ for _, c := range wantBlocks { | |
+ mq.peerWants.Add(c, mq.priority, pb.Message_Wantlist_Block) | |
+ mq.priority-- | |
+ | |
+ // We're adding a want-block for the cid, so clear any pending cancel | |
+ // for the cid | |
+ mq.cancels.Remove(c) | |
+ } | |
+ | |
+ // Schedule a message send | |
+ mq.signalWorkReady() | |
+} | |
+ | |
+// Add cancel messages for the given keys. | |
+func (mq *MessageQueue) AddCancels(cancelKs []cid.Cid) { | |
+ if len(cancelKs) == 0 { | |
+ return | |
+ } | |
+ | |
+ // Cancel any outstanding DONT_HAVE timers | |
+ mq.dhTimeoutMgr.CancelPending(cancelKs) | |
+ | |
+ mq.wllock.Lock() | |
+ | |
+ workReady := false | |
+ | |
+ // Remove keys from broadcast and peer wants, and add to cancels | |
+ for _, c := range cancelKs { | |
+ // Check if a want for the key was sent | |
+ _, wasSentBcst := mq.bcstWants.sent.Contains(c) | |
+ _, wasSentPeer := mq.peerWants.sent.Contains(c) | |
+ | |
+ // Remove the want from tracking wantlists | |
+ mq.bcstWants.Remove(c) | |
+ mq.peerWants.Remove(c) | |
+ | |
+ // Only send a cancel if a want was sent | |
+ if wasSentBcst || wasSentPeer { | |
+ mq.cancels.Add(c) | |
+ workReady = true | |
+ } | |
+ } | |
+ | |
+ mq.wllock.Unlock() | |
+ | |
+ // Unlock first to be nice to the scheduler. | |
+ | |
+ // Schedule a message send | |
+ if workReady { | |
+ mq.signalWorkReady() | |
+ } | |
+} | |
+ | |
+// ResponseReceived is called when a message is received from the network. | |
+// ks is the set of blocks, HAVEs and DONT_HAVEs in the message | |
+// Note that this is just used to calculate latency. | |
+func (mq *MessageQueue) ResponseReceived(ks []cid.Cid) { | |
+ if len(ks) == 0 { | |
+ return | |
+ } | |
+ | |
+ // These messages are just used to approximate latency, so if we get so | |
+ // many responses that they get backed up, just ignore the overflow. | |
+ select { | |
+ case mq.responses <- ks: | |
+ default: | |
+ } | |
+} | |
+ | |
+// SetRebroadcastInterval sets a new interval on which to rebroadcast the full wantlist | |
+func (mq *MessageQueue) SetRebroadcastInterval(delay time.Duration) { | |
+ mq.rebroadcastIntervalLk.Lock() | |
+ mq.rebroadcastInterval = delay | |
+ if mq.rebroadcastTimer != nil { | |
+ mq.rebroadcastTimer.Reset(delay) | |
+ } | |
+ mq.rebroadcastIntervalLk.Unlock() | |
+} | |
+ | |
+// Startup starts the processing of messages and rebroadcasting. | |
+func (mq *MessageQueue) Startup() { | |
+ mq.rebroadcastIntervalLk.RLock() | |
+ mq.rebroadcastTimer = mq.clock.Timer(mq.rebroadcastInterval) | |
+ mq.rebroadcastIntervalLk.RUnlock() | |
+ go mq.runQueue() | |
+} | |
+ | |
+// Shutdown stops the processing of messages for a message queue. | |
+func (mq *MessageQueue) Shutdown() { | |
+ mq.shutdown() | |
+} | |
+ | |
+func (mq *MessageQueue) onShutdown() { | |
+ // Shut down the DONT_HAVE timeout manager | |
+ mq.dhTimeoutMgr.Shutdown() | |
+ | |
+ // Reset the streamMessageSender | |
+ if mq.sender != nil { | |
+ _ = mq.sender.Reset() | |
+ } | |
+} | |
+ | |
+func (mq *MessageQueue) runQueue() { | |
+ defer mq.onShutdown() | |
+ | |
+ // Create a timer for debouncing scheduled work. | |
+ scheduleWork := mq.clock.Timer(0) | |
+ if !scheduleWork.Stop() { | |
+ // Need to drain the timer if Stop() returns false | |
+ // See: https://golang.org/pkg/time/#Timer.Stop | |
+ <-scheduleWork.C | |
+ } | |
+ | |
+ var workScheduled time.Time | |
+ for mq.ctx.Err() == nil { | |
+ select { | |
+ case <-mq.rebroadcastTimer.C: | |
+ mq.rebroadcastWantlist() | |
+ | |
+ case when := <-mq.outgoingWork: | |
+ // If we have work scheduled, cancel the timer. If we | |
+ // don't, record when the work was scheduled. | |
+ // We send the time on the channel so we accurately | |
+ // track delay. | |
+ if workScheduled.IsZero() { | |
+ workScheduled = when | |
+ } else if !scheduleWork.Stop() { | |
+ // Need to drain the timer if Stop() returns false | |
+ <-scheduleWork.C | |
+ } | |
+ | |
+ // If we have too many updates and/or we've waited too | |
+ // long, send immediately. | |
+ if mq.pendingWorkCount() > sendMessageCutoff || | |
+ mq.clock.Since(workScheduled) >= sendMessageMaxDelay { | |
+ mq.sendIfReady() | |
+ workScheduled = time.Time{} | |
+ } else { | |
+ // Otherwise, extend the timer. | |
+ scheduleWork.Reset(sendMessageDebounce) | |
+ if mq.events != nil { | |
+ mq.events <- messageQueued | |
+ } | |
+ } | |
+ | |
+ case <-scheduleWork.C: | |
+ // We have work scheduled and haven't seen any updates | |
+ // in sendMessageDebounce. Send immediately. | |
+ workScheduled = time.Time{} | |
+ mq.sendIfReady() | |
+ | |
+ case res := <-mq.responses: | |
+ // We received a response from the peer, calculate latency | |
+ mq.handleResponse(res) | |
+ | |
+ case <-mq.ctx.Done(): | |
+ return | |
+ } | |
+ } | |
+} | |
+ | |
+// Periodically resend the list of wants to the peer | |
+func (mq *MessageQueue) rebroadcastWantlist() { | |
+ mq.rebroadcastIntervalLk.RLock() | |
+ mq.rebroadcastTimer.Reset(mq.rebroadcastInterval) | |
+ mq.rebroadcastIntervalLk.RUnlock() | |
+ | |
+ // If some wants were transferred from the rebroadcast list | |
+ if mq.transferRebroadcastWants() { | |
+ // Send them out | |
+ mq.sendMessage() | |
+ } | |
+} | |
+ | |
+// Transfer wants from the rebroadcast lists into the pending lists. | |
+func (mq *MessageQueue) transferRebroadcastWants() bool { | |
+ mq.wllock.Lock() | |
+ defer mq.wllock.Unlock() | |
+ | |
+ // Check if there are any wants to rebroadcast | |
+ if mq.bcstWants.sent.Len() == 0 && mq.peerWants.sent.Len() == 0 { | |
+ return false | |
+ } | |
+ | |
+ // Copy sent wants into pending wants lists | |
+ mq.bcstWants.pending.Absorb(mq.bcstWants.sent) | |
+ mq.peerWants.pending.Absorb(mq.peerWants.sent) | |
+ | |
+ return true | |
+} | |
+ | |
+func (mq *MessageQueue) signalWorkReady() { | |
+ select { | |
+ case mq.outgoingWork <- mq.clock.Now(): | |
+ default: | |
+ } | |
+} | |
+ | |
+func (mq *MessageQueue) sendIfReady() { | |
+ if mq.hasPendingWork() { | |
+ mq.sendMessage() | |
+ } | |
+} | |
+ | |
+func (mq *MessageQueue) sendMessage() { | |
+ sender, err := mq.initializeSender() | |
+ if err != nil { | |
+ // If we fail to initialize the sender, the networking layer will | |
+ // emit a Disconnect event and the MessageQueue will get cleaned up | |
+ log.Infof("Could not open message sender to peer %s: %s", mq.p, err) | |
+ mq.Shutdown() | |
+ return | |
+ } | |
+ | |
+ // Make sure the DONT_HAVE timeout manager has started | |
+ // Note: Start is idempotent | |
+ mq.dhTimeoutMgr.Start() | |
+ | |
+ // Convert want lists to a Bitswap Message | |
+ message, onSent := mq.extractOutgoingMessage(mq.sender.SupportsHave()) | |
+ | |
+ // After processing the message, clear out its fields to save memory | |
+ defer mq.msg.Reset(false) | |
+ | |
+ if message.Empty() { | |
+ return | |
+ } | |
+ | |
+ wantlist := message.Wantlist() | |
+ mq.logOutgoingMessage(wantlist) | |
+ | |
+ if err := sender.SendMsg(mq.ctx, message); err != nil { | |
+ // If the message couldn't be sent, the networking layer will | |
+ // emit a Disconnect event and the MessageQueue will get cleaned up | |
+ log.Infof("Could not send message to peer %s: %s", mq.p, err) | |
+ mq.Shutdown() | |
+ return | |
+ } | |
+ | |
+ // Record sent time so as to calculate message latency | |
+ onSent() | |
+ | |
+ // Set a timer to wait for responses | |
+ mq.simulateDontHaveWithTimeout(wantlist) | |
+ | |
+ // If the message was too big and only a subset of wants could be | |
+ // sent, schedule sending the rest of the wants in the next | |
+ // iteration of the event loop. | |
+ if mq.hasPendingWork() { | |
+ mq.signalWorkReady() | |
+ } | |
+} | |
+ | |
+// If a want-block times out, simulate a DONT_HAVE response. | |
+// This is necessary when making requests to peers running an older version of | |
+// Bitswap that doesn't support the DONT_HAVE response, and is also useful to | |
+// mitigate getting blocked by a peer that takes a long time to respond. | |
+func (mq *MessageQueue) simulateDontHaveWithTimeout(wantlist []bsmsg.Entry) { | |
+ // Get the CID of each want-block that expects a DONT_HAVE response | |
+ wants := make([]cid.Cid, 0, len(wantlist)) | |
+ | |
+ mq.wllock.Lock() | |
+ | |
+ for _, entry := range wantlist { | |
+ if entry.WantType == pb.Message_Wantlist_Block && entry.SendDontHave { | |
+ // Unlikely, but just in case check that the block hasn't been | |
+ // received in the interim | |
+ c := entry.Cid | |
+ if _, ok := mq.peerWants.sent.Contains(c); ok { | |
+ wants = append(wants, c) | |
+ } | |
+ } | |
+ } | |
+ | |
+ mq.wllock.Unlock() | |
+ | |
+ // Add wants to DONT_HAVE timeout manager | |
+ mq.dhTimeoutMgr.AddPending(wants) | |
+} | |
+ | |
+// handleResponse is called when a response is received from the peer, | |
+// with the CIDs of received blocks / HAVEs / DONT_HAVEs | |
+func (mq *MessageQueue) handleResponse(ks []cid.Cid) { | |
+ now := mq.clock.Now() | |
+ earliest := time.Time{} | |
+ | |
+ mq.wllock.Lock() | |
+ | |
+ // Check if the keys in the response correspond to any request that was | |
+ // sent to the peer. | |
+ // | |
+ // - Find the earliest request so as to calculate the longest latency as | |
+ // we want to be conservative when setting the timeout | |
+ // - Ignore latencies that are very long, as these are likely to be outliers | |
+ // caused when | |
+ // - we send a want to peer A | |
+ // - peer A does not have the block | |
+ // - peer A later receives the block from peer B | |
+ // - peer A sends us HAVE / block | |
+ for _, c := range ks { | |
+ if at, ok := mq.bcstWants.sentAt[c]; ok { | |
+ if (earliest.IsZero() || at.Before(earliest)) && now.Sub(at) < mq.maxValidLatency { | |
+ earliest = at | |
+ } | |
+ mq.bcstWants.ClearSentAt(c) | |
+ } | |
+ if at, ok := mq.peerWants.sentAt[c]; ok { | |
+ if (earliest.IsZero() || at.Before(earliest)) && now.Sub(at) < mq.maxValidLatency { | |
+ earliest = at | |
+ } | |
+ // Clear out the sent time for the CID because we only want to | |
+ // record the latency between the request and the first response | |
+ // for that CID (not subsequent responses) | |
+ mq.peerWants.ClearSentAt(c) | |
+ } | |
+ } | |
+ | |
+ mq.wllock.Unlock() | |
+ | |
+ if !earliest.IsZero() { | |
+ // Inform the timeout manager of the calculated latency | |
+ mq.dhTimeoutMgr.UpdateMessageLatency(now.Sub(earliest)) | |
+ } | |
+ if mq.events != nil { | |
+ mq.events <- latenciesRecorded | |
+ } | |
+} | |
+ | |
+func (mq *MessageQueue) logOutgoingMessage(wantlist []bsmsg.Entry) { | |
+ // Save some CPU cycles and allocations if log level is higher than debug | |
+ if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil { | |
+ return | |
+ } | |
+ | |
+ self := mq.network.Self() | |
+ for _, e := range wantlist { | |
+ if e.Cancel { | |
+ if e.WantType == pb.Message_Wantlist_Have { | |
+ log.Debugw("sent message", | |
+ "type", "CANCEL_WANT_HAVE", | |
+ "cid", e.Cid, | |
+ "local", self, | |
+ "to", mq.p, | |
+ ) | |
+ } else { | |
+ log.Debugw("sent message", | |
+ "type", "CANCEL_WANT_BLOCK", | |
+ "cid", e.Cid, | |
+ "local", self, | |
+ "to", mq.p, | |
+ ) | |
+ } | |
+ } else { | |
+ if e.WantType == pb.Message_Wantlist_Have { | |
+ log.Debugw("sent message", | |
+ "type", "WANT_HAVE", | |
+ "cid", e.Cid, | |
+ "local", self, | |
+ "to", mq.p, | |
+ ) | |
+ } else { | |
+ log.Debugw("sent message", | |
+ "type", "WANT_BLOCK", | |
+ "cid", e.Cid, | |
+ "local", self, | |
+ "to", mq.p, | |
+ ) | |
+ } | |
+ } | |
+ } | |
+} | |
+ | |
+// Whether there is work to be processed | |
+func (mq *MessageQueue) hasPendingWork() bool { | |
+ return mq.pendingWorkCount() > 0 | |
+} | |
+ | |
+// The amount of work that is waiting to be processed | |
+func (mq *MessageQueue) pendingWorkCount() int { | |
+ mq.wllock.Lock() | |
+ defer mq.wllock.Unlock() | |
+ | |
+ return mq.bcstWants.pending.Len() + mq.peerWants.pending.Len() + mq.cancels.Len() | |
+} | |
+ | |
+// Convert the lists of wants into a Bitswap message | |
+func (mq *MessageQueue) extractOutgoingMessage(supportsHave bool) (bsmsg.BitSwapMessage, func()) { | |
+ // Get broadcast and regular wantlist entries. | |
+ mq.wllock.Lock() | |
+ peerEntries := mq.peerWants.pending.Entries() | |
+ bcstEntries := mq.bcstWants.pending.Entries() | |
+ cancels := mq.cancels.Keys() | |
+ if !supportsHave { | |
+ filteredPeerEntries := peerEntries[:0] | |
+ // If the remote peer doesn't support HAVE / DONT_HAVE messages, | |
+ // don't send want-haves (only send want-blocks) | |
+ // | |
+ // Doing this here under the lock makes everything else in this | |
+ // function simpler. | |
+ // | |
+ // TODO: We should _try_ to avoid recording these in the first | |
+ // place if possible. | |
+ for _, e := range peerEntries { | |
+ if e.WantType == pb.Message_Wantlist_Have { | |
+ mq.peerWants.RemoveType(e.Cid, pb.Message_Wantlist_Have) | |
+ } else { | |
+ filteredPeerEntries = append(filteredPeerEntries, e) | |
+ } | |
+ } | |
+ peerEntries = filteredPeerEntries | |
+ } | |
+ mq.wllock.Unlock() | |
+ | |
+ // We prioritize cancels, then regular wants, then broadcast wants. | |
+ | |
+ var ( | |
+ msgSize = 0 // size of message so far | |
+ sentCancels = 0 // number of cancels in message | |
+ sentPeerEntries = 0 // number of peer entries in message | |
+ sentBcstEntries = 0 // number of broadcast entries in message | |
+ ) | |
+ | |
+ // Add each cancel to the message | |
+ for _, c := range cancels { | |
+ msgSize += mq.msg.Cancel(c) | |
+ sentCancels++ | |
+ | |
+ if msgSize >= mq.maxMessageSize { | |
+ goto FINISH | |
+ } | |
+ } | |
+ | |
+ // Next, add the wants. If we have too many entries to fit into a single | |
+ // message, sort by priority and include the high priority ones first. | |
+ | |
+ for _, e := range peerEntries { | |
+ msgSize += mq.msg.AddEntry(e.Cid, e.Priority, e.WantType, true) | |
+ sentPeerEntries++ | |
+ | |
+ if msgSize >= mq.maxMessageSize { | |
+ goto FINISH | |
+ } | |
+ } | |
+ | |
+ // Add each broadcast want-have to the message | |
+ for _, e := range bcstEntries { | |
+ // Broadcast wants are sent as want-have | |
+ wantType := pb.Message_Wantlist_Have | |
+ | |
+ // If the remote peer doesn't support HAVE / DONT_HAVE messages, | |
+ // send a want-block instead | |
+ if !supportsHave { | |
+ wantType = pb.Message_Wantlist_Block | |
+ } | |
+ | |
+ msgSize += mq.msg.AddEntry(e.Cid, e.Priority, wantType, false) | |
+ sentBcstEntries++ | |
+ | |
+ if msgSize >= mq.maxMessageSize { | |
+ goto FINISH | |
+ } | |
+ } | |
+ | |
+FINISH: | |
+ | |
+ // Finally, re-take the lock, mark sent and remove any entries from our | |
+ // message that we've decided to cancel at the last minute. | |
+ mq.wllock.Lock() | |
+ for i, e := range peerEntries[:sentPeerEntries] { | |
+ if !mq.peerWants.MarkSent(e) { | |
+ // It changed. | |
+ mq.msg.Remove(e.Cid) | |
+ peerEntries[i].Cid = cid.Undef | |
+ } | |
+ } | |
+ | |
+ for i, e := range bcstEntries[:sentBcstEntries] { | |
+ if !mq.bcstWants.MarkSent(e) { | |
+ mq.msg.Remove(e.Cid) | |
+ bcstEntries[i].Cid = cid.Undef | |
+ } | |
+ } | |
+ | |
+ for _, c := range cancels[:sentCancels] { | |
+ if !mq.cancels.Has(c) { | |
+ mq.msg.Remove(c) | |
+ } else { | |
+ mq.cancels.Remove(c) | |
+ } | |
+ } | |
+ mq.wllock.Unlock() | |
+ | |
+ // When the message has been sent, record the time at which each want was | |
+ // sent so we can calculate message latency | |
+ onSent := func() { | |
+ now := mq.clock.Now() | |
+ | |
+ mq.wllock.Lock() | |
+ defer mq.wllock.Unlock() | |
+ | |
+ for _, e := range peerEntries[:sentPeerEntries] { | |
+ if e.Cid.Defined() { // Check if want was cancelled in the interim | |
+ mq.peerWants.SentAt(e.Cid, now) | |
+ } | |
+ } | |
+ | |
+ for _, e := range bcstEntries[:sentBcstEntries] { | |
+ if e.Cid.Defined() { // Check if want was cancelled in the interim | |
+ mq.bcstWants.SentAt(e.Cid, now) | |
+ } | |
+ } | |
+ if mq.events != nil { | |
+ mq.events <- messageFinishedSending | |
+ } | |
+ } | |
+ | |
+ return mq.msg, onSent | |
+} | |
+ | |
+func (mq *MessageQueue) initializeSender() (bsnet.MessageSender, error) { | |
+ if mq.sender == nil { | |
+ opts := &bsnet.MessageSenderOpts{ | |
+ MaxRetries: maxRetries, | |
+ SendTimeout: sendTimeout, | |
+ SendErrorBackoff: sendErrorBackoff, | |
+ } | |
+ nsender, err := mq.network.NewMessageSender(mq.ctx, mq.p, opts) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ mq.sender = nsender | |
+ } | |
+ return mq.sender, nil | |
+} | |
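
The size-capped, priority-ordered assembly in extractOutgoingMessage above is easier to see in isolation. The following standalone Go sketch (not part of the patch; the entry type, sizes, and keys are made up) reproduces just the ordering and the cap: cancels go first, then targeted wants, then broadcast want-haves, and building stops as soon as the message reaches the size limit.

package main

import "fmt"

// entry stands in for a wantlist entry or cancel; size is the number of
// bytes it adds to the message (illustrative only).
type entry struct {
	key  string
	size int
}

// buildMessage mirrors the cancel -> want -> broadcast ordering used by
// MessageQueue.extractOutgoingMessage, stopping once maxSize is reached.
func buildMessage(cancels, wants, broadcasts []entry, maxSize int) []string {
	var out []string
	size := 0
	add := func(kind string, es []entry) bool {
		for _, e := range es {
			out = append(out, kind+" "+e.key)
			size += e.size
			if size >= maxSize {
				return true // message is full; stop adding entries
			}
		}
		return false
	}
	if add("CANCEL", cancels) {
		return out
	}
	if add("WANT", wants) {
		return out
	}
	add("WANT_HAVE", broadcasts)
	return out
}

func main() {
	msg := buildMessage(
		[]entry{{"c1", 40}},
		[]entry{{"w1", 40}, {"w2", 40}},
		[]entry{{"b1", 40}},
		100, // only the cancel and the first two wants fit under the cap
	)
	fmt.Println(msg) // [CANCEL c1 WANT w1 WANT w2]
}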
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/notifications/notifications.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/notifications/notifications.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/notifications/notifications.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/notifications/notifications.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,139 @@ | |
+package notifications | |
+ | |
+import ( | |
+ "context" | |
+ "sync" | |
+ | |
+ pubsub "github.com/cskr/pubsub" | |
+ cid "github.com/ipfs/go-cid" | |
+ blocks "github.com/ipfs/go-libipfs/blocks" | |
+) | |
+ | |
+const bufferSize = 16 | |
+ | |
+// PubSub is a simple interface for publishing blocks and being able to subscribe | |
+// for cids. It's used internally by bitswap to decouple receiving blocks | |
+// and actually providing them back to the GetBlocks caller. | |
+type PubSub interface { | |
+ Publish(blocks ...blocks.Block) | |
+ Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block | |
+ Shutdown() | |
+} | |
+ | |
+// New generates a new PubSub interface. | |
+func New() PubSub { | |
+ return &impl{ | |
+ wrapped: *pubsub.New(bufferSize), | |
+ closed: make(chan struct{}), | |
+ } | |
+} | |
+ | |
+type impl struct { | |
+ lk sync.RWMutex | |
+ wrapped pubsub.PubSub | |
+ | |
+ closed chan struct{} | |
+} | |
+ | |
+func (ps *impl) Publish(blocks ...blocks.Block) { | |
+ ps.lk.RLock() | |
+ defer ps.lk.RUnlock() | |
+ select { | |
+ case <-ps.closed: | |
+ return | |
+ default: | |
+ } | |
+ | |
+ for _, block := range blocks { | |
+ ps.wrapped.Pub(block, block.Cid().KeyString()) | |
+ } | |
+} | |
+ | |
+func (ps *impl) Shutdown() { | |
+ ps.lk.Lock() | |
+ defer ps.lk.Unlock() | |
+ select { | |
+ case <-ps.closed: | |
+ return | |
+ default: | |
+ } | |
+ close(ps.closed) | |
+ ps.wrapped.Shutdown() | |
+} | |
+ | |
+// Subscribe returns a channel of blocks for the given |keys|. |blockChannel| | |
+// is closed if the |ctx| times out or is cancelled, or after receiving the blocks | |
+// corresponding to |keys|. | |
+func (ps *impl) Subscribe(ctx context.Context, keys ...cid.Cid) <-chan blocks.Block { | |
+ | |
+ blocksCh := make(chan blocks.Block, len(keys)) | |
+ valuesCh := make(chan interface{}, len(keys)) // provide our own channel to control buffer, prevent blocking | |
+ if len(keys) == 0 { | |
+ close(blocksCh) | |
+ return blocksCh | |
+ } | |
+ | |
+ // prevent shutdown | |
+ ps.lk.RLock() | |
+ defer ps.lk.RUnlock() | |
+ | |
+ select { | |
+ case <-ps.closed: | |
+ close(blocksCh) | |
+ return blocksCh | |
+ default: | |
+ } | |
+ | |
+ // AddSubOnceEach listens for each key in the list, and closes the channel | |
+ // once all keys have been received | |
+ ps.wrapped.AddSubOnceEach(valuesCh, toStrings(keys)...) | |
+ go func() { | |
+ defer func() { | |
+ close(blocksCh) | |
+ | |
+ ps.lk.RLock() | |
+ defer ps.lk.RUnlock() | |
+ // Don't touch the pubsub instance if we're | |
+ // already closed. | |
+ select { | |
+ case <-ps.closed: | |
+ return | |
+ default: | |
+ } | |
+ | |
+ ps.wrapped.Unsub(valuesCh) | |
+ }() | |
+ | |
+ for { | |
+ select { | |
+ case <-ctx.Done(): | |
+ return | |
+ case <-ps.closed: | |
+ case val, ok := <-valuesCh: | |
+ if !ok { | |
+ return | |
+ } | |
+ block, ok := val.(blocks.Block) | |
+ if !ok { | |
+ return | |
+ } | |
+ select { | |
+ case <-ctx.Done(): | |
+ return | |
+ case blocksCh <- block: // continue | |
+ case <-ps.closed: | |
+ } | |
+ } | |
+ } | |
+ }() | |
+ | |
+ return blocksCh | |
+} | |
+ | |
+func toStrings(keys []cid.Cid) []string { | |
+ strs := make([]string, 0, len(keys)) | |
+ for _, key := range keys { | |
+ strs = append(strs, key.KeyString()) | |
+ } | |
+ return strs | |
+} | |
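
For reference, the PubSub above is exercised like this. The sketch is written as if it lived inside this (internal) package, since it cannot be imported from outside go-libipfs; everything it calls appears in the file above.

package notifications

import (
	"context"
	"fmt"

	blocks "github.com/ipfs/go-libipfs/blocks"
)

// ExamplePubSub shows the intended flow: subscribe for a CID, publish the
// matching block, receive it on the returned channel, then shut down.
func ExamplePubSub() {
	ps := New()
	defer ps.Shutdown()

	blk := blocks.NewBlock([]byte("hello"))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ch := ps.Subscribe(ctx, blk.Cid())
	ps.Publish(blk)

	got := <-ch // the channel is closed once every subscribed key has arrived
	fmt.Println(got.Cid().Equals(blk.Cid()))
	// Output: true
}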
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager/peermanager.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager/peermanager.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager/peermanager.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager/peermanager.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,246 @@ | |
+package peermanager | |
+ | |
+import ( | |
+ "context" | |
+ "sync" | |
+ | |
+ logging "github.com/ipfs/go-log" | |
+ "github.com/ipfs/go-metrics-interface" | |
+ | |
+ cid "github.com/ipfs/go-cid" | |
+ peer "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+var log = logging.Logger("bs:peermgr") | |
+ | |
+// PeerQueue provides a queue of messages to be sent for a single peer. | |
+type PeerQueue interface { | |
+ AddBroadcastWantHaves([]cid.Cid) | |
+ AddWants([]cid.Cid, []cid.Cid) | |
+ AddCancels([]cid.Cid) | |
+ ResponseReceived(ks []cid.Cid) | |
+ Startup() | |
+ Shutdown() | |
+} | |
+ | |
+type Session interface { | |
+ ID() uint64 | |
+ SignalAvailability(peer.ID, bool) | |
+} | |
+ | |
+// PeerQueueFactory provides a function that will create a PeerQueue. | |
+type PeerQueueFactory func(ctx context.Context, p peer.ID) PeerQueue | |
+ | |
+// PeerManager manages a pool of peers and sends messages to peers in the pool. | |
+type PeerManager struct { | |
+ // sync access to peerQueues and peerWantManager | |
+ pqLk sync.RWMutex | |
+ // peerQueues -- interact through internal utility functions get/set/remove/iterate | |
+ peerQueues map[peer.ID]PeerQueue | |
+ pwm *peerWantManager | |
+ | |
+ createPeerQueue PeerQueueFactory | |
+ ctx context.Context | |
+ | |
+ psLk sync.RWMutex | |
+ sessions map[uint64]Session | |
+ peerSessions map[peer.ID]map[uint64]struct{} | |
+ | |
+ self peer.ID | |
+} | |
+ | |
+// New creates a new PeerManager, given a context and a peerQueueFactory. | |
+func New(ctx context.Context, createPeerQueue PeerQueueFactory, self peer.ID) *PeerManager { | |
+ wantGauge := metrics.NewCtx(ctx, "wantlist_total", "Number of items in wantlist.").Gauge() | |
+ wantBlockGauge := metrics.NewCtx(ctx, "want_blocks_total", "Number of want-blocks in wantlist.").Gauge() | |
+ return &PeerManager{ | |
+ peerQueues: make(map[peer.ID]PeerQueue), | |
+ pwm: newPeerWantManager(wantGauge, wantBlockGauge), | |
+ createPeerQueue: createPeerQueue, | |
+ ctx: ctx, | |
+ self: self, | |
+ | |
+ sessions: make(map[uint64]Session), | |
+ peerSessions: make(map[peer.ID]map[uint64]struct{}), | |
+ } | |
+} | |
+ | |
+func (pm *PeerManager) AvailablePeers() []peer.ID { | |
+ // TODO: Rate-limit peers | |
+ return pm.ConnectedPeers() | |
+} | |
+ | |
+// ConnectedPeers returns a list of peers this PeerManager is managing. | |
+func (pm *PeerManager) ConnectedPeers() []peer.ID { | |
+ pm.pqLk.RLock() | |
+ defer pm.pqLk.RUnlock() | |
+ | |
+ peers := make([]peer.ID, 0, len(pm.peerQueues)) | |
+ for p := range pm.peerQueues { | |
+ peers = append(peers, p) | |
+ } | |
+ return peers | |
+} | |
+ | |
+// Connected is called to add a new peer to the pool, and send it an initial set | |
+// of wants. | |
+func (pm *PeerManager) Connected(p peer.ID) { | |
+ pm.pqLk.Lock() | |
+ defer pm.pqLk.Unlock() | |
+ | |
+ pq := pm.getOrCreate(p) | |
+ | |
+ // Inform the peer want manager that there's a new peer | |
+ pm.pwm.addPeer(pq, p) | |
+ | |
+ // Inform the sessions that the peer has connected | |
+ pm.signalAvailability(p, true) | |
+} | |
+ | |
+// Disconnected is called to remove a peer from the pool. | |
+func (pm *PeerManager) Disconnected(p peer.ID) { | |
+ pm.pqLk.Lock() | |
+ defer pm.pqLk.Unlock() | |
+ | |
+ pq, ok := pm.peerQueues[p] | |
+ | |
+ if !ok { | |
+ return | |
+ } | |
+ | |
+ // Inform the sessions that the peer has disconnected | |
+ pm.signalAvailability(p, false) | |
+ | |
+ // Clean up the peer | |
+ delete(pm.peerQueues, p) | |
+ pq.Shutdown() | |
+ pm.pwm.removePeer(p) | |
+} | |
+ | |
+// ResponseReceived is called when a message is received from the network. | |
+// ks is the set of blocks, HAVEs and DONT_HAVEs in the message | |
+// Note that this is just used to calculate latency. | |
+func (pm *PeerManager) ResponseReceived(p peer.ID, ks []cid.Cid) { | |
+ pm.pqLk.Lock() | |
+ pq, ok := pm.peerQueues[p] | |
+ pm.pqLk.Unlock() | |
+ | |
+ if ok { | |
+ pq.ResponseReceived(ks) | |
+ } | |
+} | |
+ | |
+// BroadcastWantHaves broadcasts want-haves to all peers (used by the session | |
+// to discover seeds). | |
+// For each peer it filters out want-haves that have previously been sent to | |
+// the peer. | |
+func (pm *PeerManager) BroadcastWantHaves(ctx context.Context, wantHaves []cid.Cid) { | |
+ pm.pqLk.Lock() | |
+ defer pm.pqLk.Unlock() | |
+ | |
+ pm.pwm.broadcastWantHaves(wantHaves) | |
+} | |
+ | |
+// SendWants sends the given want-blocks and want-haves to the given peer. | |
+// It filters out wants that have previously been sent to the peer. | |
+func (pm *PeerManager) SendWants(ctx context.Context, p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { | |
+ pm.pqLk.Lock() | |
+ defer pm.pqLk.Unlock() | |
+ | |
+ if _, ok := pm.peerQueues[p]; ok { | |
+ pm.pwm.sendWants(p, wantBlocks, wantHaves) | |
+ } | |
+} | |
+ | |
+// SendCancels sends cancels for the given keys to all peers who had previously | |
+// received a want for those keys. | |
+func (pm *PeerManager) SendCancels(ctx context.Context, cancelKs []cid.Cid) { | |
+ pm.pqLk.Lock() | |
+ defer pm.pqLk.Unlock() | |
+ | |
+ // Send a CANCEL to each peer that has been sent a want-block or want-have | |
+ pm.pwm.sendCancels(cancelKs) | |
+} | |
+ | |
+// CurrentWants returns the list of pending wants (both want-haves and want-blocks). | |
+func (pm *PeerManager) CurrentWants() []cid.Cid { | |
+ pm.pqLk.RLock() | |
+ defer pm.pqLk.RUnlock() | |
+ | |
+ return pm.pwm.getWants() | |
+} | |
+ | |
+// CurrentWantBlocks returns the list of pending want-blocks | |
+func (pm *PeerManager) CurrentWantBlocks() []cid.Cid { | |
+ pm.pqLk.RLock() | |
+ defer pm.pqLk.RUnlock() | |
+ | |
+ return pm.pwm.getWantBlocks() | |
+} | |
+ | |
+// CurrentWantHaves returns the list of pending want-haves | |
+func (pm *PeerManager) CurrentWantHaves() []cid.Cid { | |
+ pm.pqLk.RLock() | |
+ defer pm.pqLk.RUnlock() | |
+ | |
+ return pm.pwm.getWantHaves() | |
+} | |
+ | |
+func (pm *PeerManager) getOrCreate(p peer.ID) PeerQueue { | |
+ pq, ok := pm.peerQueues[p] | |
+ if !ok { | |
+ pq = pm.createPeerQueue(pm.ctx, p) | |
+ pq.Startup() | |
+ pm.peerQueues[p] = pq | |
+ } | |
+ return pq | |
+} | |
+ | |
+// RegisterSession tells the PeerManager that the given session is interested | |
+// in events about the given peer. | |
+func (pm *PeerManager) RegisterSession(p peer.ID, s Session) { | |
+ pm.psLk.Lock() | |
+ defer pm.psLk.Unlock() | |
+ | |
+ if _, ok := pm.sessions[s.ID()]; !ok { | |
+ pm.sessions[s.ID()] = s | |
+ } | |
+ | |
+ if _, ok := pm.peerSessions[p]; !ok { | |
+ pm.peerSessions[p] = make(map[uint64]struct{}) | |
+ } | |
+ pm.peerSessions[p][s.ID()] = struct{}{} | |
+} | |
+ | |
+// UnregisterSession tells the PeerManager that the given session is no longer | |
+// interested in PeerManager events. | |
+func (pm *PeerManager) UnregisterSession(ses uint64) { | |
+ pm.psLk.Lock() | |
+ defer pm.psLk.Unlock() | |
+ | |
+ for p := range pm.peerSessions { | |
+ delete(pm.peerSessions[p], ses) | |
+ if len(pm.peerSessions[p]) == 0 { | |
+ delete(pm.peerSessions, p) | |
+ } | |
+ } | |
+ | |
+ delete(pm.sessions, ses) | |
+} | |
+ | |
+// signalAvailability is called when a peer's connectivity changes. | |
+// It informs interested sessions. | |
+func (pm *PeerManager) signalAvailability(p peer.ID, isConnected bool) { | |
+ pm.psLk.Lock() | |
+ defer pm.psLk.Unlock() | |
+ | |
+ sesIds, ok := pm.peerSessions[p] | |
+ if !ok { | |
+ return | |
+ } | |
+ for sesId := range sesIds { | |
+ if s, ok := pm.sessions[sesId]; ok { | |
+ s.SignalAvailability(p, isConnected) | |
+ } | |
+ } | |
+} | |
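
A rough sketch of how connectivity events reach sessions, written as if inside this package: a session registers interest in a peer, and Connected/Disconnected fan out through signalAvailability. The nopPeerQueue and printSession types below are illustrative stand-ins, not types from the patch.

package peermanager

import (
	"context"
	"fmt"

	cid "github.com/ipfs/go-cid"
	peer "github.com/libp2p/go-libp2p/core/peer"
)

// nopPeerQueue satisfies PeerQueue but does nothing; a real queue would be a
// bitswap MessageQueue.
type nopPeerQueue struct{}

func (nopPeerQueue) AddBroadcastWantHaves([]cid.Cid) {}
func (nopPeerQueue) AddWants([]cid.Cid, []cid.Cid)   {}
func (nopPeerQueue) AddCancels([]cid.Cid)            {}
func (nopPeerQueue) ResponseReceived([]cid.Cid)      {}
func (nopPeerQueue) Startup()                        {}
func (nopPeerQueue) Shutdown()                       {}

// printSession records availability signals it receives from the PeerManager.
type printSession struct{ id uint64 }

func (s *printSession) ID() uint64 { return s.id }
func (s *printSession) SignalAvailability(p peer.ID, connected bool) {
	fmt.Println("session", s.id, "connected:", connected)
}

func ExamplePeerManager_availability() {
	ctx := context.Background()
	pm := New(ctx, func(ctx context.Context, p peer.ID) PeerQueue { return nopPeerQueue{} }, "self")

	p := peer.ID("remote")
	pm.RegisterSession(p, &printSession{id: 1})

	pm.Connected(p)
	pm.Disconnected(p)
	// Output:
	// session 1 connected: true
	// session 1 connected: false
}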
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager/peerwantmanager.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager/peerwantmanager.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager/peerwantmanager.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager/peerwantmanager.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,464 @@ | |
+package peermanager | |
+ | |
+import ( | |
+ "bytes" | |
+ "fmt" | |
+ | |
+ cid "github.com/ipfs/go-cid" | |
+ peer "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+// Gauge can be used to keep track of a metric that increases and decreases | |
+// incrementally. It is used by the peerWantManager to track the number of | |
+// want-blocks that are active (ie sent but no response received) | |
+type Gauge interface { | |
+ Inc() | |
+ Dec() | |
+} | |
+ | |
+// peerWantManager keeps track of which want-haves and want-blocks have been | |
+// sent to each peer, so that the PeerManager doesn't send duplicates. | |
+type peerWantManager struct { | |
+ // peerWants maps peers to outstanding wants. | |
+ // A peer's wants is the _union_ of the broadcast wants and the wants in | |
+ // this list. | |
+ peerWants map[peer.ID]*peerWant | |
+ | |
+ // Reverse index of all wants in peerWants. | |
+ wantPeers map[cid.Cid]map[peer.ID]struct{} | |
+ | |
+ // broadcastWants tracks all the current broadcast wants. | |
+ broadcastWants *cid.Set | |
+ | |
+ // Keeps track of the number of active want-haves & want-blocks | |
+ wantGauge Gauge | |
+ // Keeps track of the number of active want-blocks | |
+ wantBlockGauge Gauge | |
+} | |
+ | |
+type peerWant struct { | |
+ wantBlocks *cid.Set | |
+ wantHaves *cid.Set | |
+ peerQueue PeerQueue | |
+} | |
+ | |
+// newPeerWantManager creates a new peerWantManager with Gauges that keep | |
+// track of the number of active wants and want-blocks (ie sent but no | |
+// response received) | |
+func newPeerWantManager(wantGauge Gauge, wantBlockGauge Gauge) *peerWantManager { | |
+ return &peerWantManager{ | |
+ broadcastWants: cid.NewSet(), | |
+ peerWants: make(map[peer.ID]*peerWant), | |
+ wantPeers: make(map[cid.Cid]map[peer.ID]struct{}), | |
+ wantGauge: wantGauge, | |
+ wantBlockGauge: wantBlockGauge, | |
+ } | |
+} | |
+ | |
+// addPeer adds a peer whose wants we need to keep track of. It sends the | |
+// current list of broadcast wants to the peer. | |
+func (pwm *peerWantManager) addPeer(peerQueue PeerQueue, p peer.ID) { | |
+ if _, ok := pwm.peerWants[p]; ok { | |
+ return | |
+ } | |
+ | |
+ pwm.peerWants[p] = &peerWant{ | |
+ wantBlocks: cid.NewSet(), | |
+ wantHaves: cid.NewSet(), | |
+ peerQueue: peerQueue, | |
+ } | |
+ | |
+ // Broadcast any live want-haves to the newly connected peer | |
+ if pwm.broadcastWants.Len() > 0 { | |
+ wants := pwm.broadcastWants.Keys() | |
+ peerQueue.AddBroadcastWantHaves(wants) | |
+ } | |
+} | |
+ | |
+// removePeer removes a peer and its associated wants from tracking | |
+func (pwm *peerWantManager) removePeer(p peer.ID) { | |
+ pws, ok := pwm.peerWants[p] | |
+ if !ok { | |
+ return | |
+ } | |
+ | |
+ // Clean up want-blocks | |
+ _ = pws.wantBlocks.ForEach(func(c cid.Cid) error { | |
+ // Clean up want-blocks from the reverse index | |
+ pwm.reverseIndexRemove(c, p) | |
+ | |
+ // Decrement the gauges by the number of pending want-blocks to the peer | |
+ peerCounts := pwm.wantPeerCounts(c) | |
+ if peerCounts.wantBlock == 0 { | |
+ pwm.wantBlockGauge.Dec() | |
+ } | |
+ if !peerCounts.wanted() { | |
+ pwm.wantGauge.Dec() | |
+ } | |
+ | |
+ return nil | |
+ }) | |
+ | |
+ // Clean up want-haves | |
+ _ = pws.wantHaves.ForEach(func(c cid.Cid) error { | |
+ // Clean up want-haves from the reverse index | |
+ pwm.reverseIndexRemove(c, p) | |
+ | |
+ // Decrement the gauge by the number of pending want-haves to the peer | |
+ peerCounts := pwm.wantPeerCounts(c) | |
+ if !peerCounts.wanted() { | |
+ pwm.wantGauge.Dec() | |
+ } | |
+ return nil | |
+ }) | |
+ | |
+ delete(pwm.peerWants, p) | |
+} | |
+ | |
+// broadcastWantHaves sends want-haves to any peers that have not yet been sent them. | |
+func (pwm *peerWantManager) broadcastWantHaves(wantHaves []cid.Cid) { | |
+ unsent := make([]cid.Cid, 0, len(wantHaves)) | |
+ for _, c := range wantHaves { | |
+ if pwm.broadcastWants.Has(c) { | |
+ // Already a broadcast want, skip it. | |
+ continue | |
+ } | |
+ pwm.broadcastWants.Add(c) | |
+ unsent = append(unsent, c) | |
+ | |
+ // If no peer has a pending want for the key | |
+ if _, ok := pwm.wantPeers[c]; !ok { | |
+ // Increment the total wants gauge | |
+ pwm.wantGauge.Inc() | |
+ } | |
+ } | |
+ | |
+ if len(unsent) == 0 { | |
+ return | |
+ } | |
+ | |
+ // Allocate a single buffer to filter broadcast wants for each peer | |
+ bcstWantsBuffer := make([]cid.Cid, 0, len(unsent)) | |
+ | |
+ // Send broadcast wants to each peer | |
+ for _, pws := range pwm.peerWants { | |
+ peerUnsent := bcstWantsBuffer[:0] | |
+ for _, c := range unsent { | |
+ // If we've already sent a want to this peer, skip them. | |
+ if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { | |
+ peerUnsent = append(peerUnsent, c) | |
+ } | |
+ } | |
+ | |
+ if len(peerUnsent) > 0 { | |
+ pws.peerQueue.AddBroadcastWantHaves(peerUnsent) | |
+ } | |
+ } | |
+} | |
+ | |
+// sendWants only sends the peer the want-blocks and want-haves that have not | |
+// already been sent to it. | |
+func (pwm *peerWantManager) sendWants(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { | |
+ fltWantBlks := make([]cid.Cid, 0, len(wantBlocks)) | |
+ fltWantHvs := make([]cid.Cid, 0, len(wantHaves)) | |
+ | |
+ // Get the existing want-blocks and want-haves for the peer | |
+ pws, ok := pwm.peerWants[p] | |
+ if !ok { | |
+ // In practice this should never happen | |
+ log.Errorf("sendWants() called with peer %s but peer not found in peerWantManager", string(p)) | |
+ return | |
+ } | |
+ | |
+ // Iterate over the requested want-blocks | |
+ for _, c := range wantBlocks { | |
+ // If the want-block hasn't been sent to the peer | |
+ if pws.wantBlocks.Has(c) { | |
+ continue | |
+ } | |
+ | |
+ // Increment the want gauges | |
+ peerCounts := pwm.wantPeerCounts(c) | |
+ if peerCounts.wantBlock == 0 { | |
+ pwm.wantBlockGauge.Inc() | |
+ } | |
+ if !peerCounts.wanted() { | |
+ pwm.wantGauge.Inc() | |
+ } | |
+ | |
+ // Make sure the CID is no longer recorded as a want-have | |
+ pws.wantHaves.Remove(c) | |
+ | |
+ // Record that the CID was sent as a want-block | |
+ pws.wantBlocks.Add(c) | |
+ | |
+ // Add the CID to the results | |
+ fltWantBlks = append(fltWantBlks, c) | |
+ | |
+ // Update the reverse index | |
+ pwm.reverseIndexAdd(c, p) | |
+ } | |
+ | |
+ // Iterate over the requested want-haves | |
+ for _, c := range wantHaves { | |
+ // If we've already broadcasted this want, don't bother with a | |
+ // want-have. | |
+ if pwm.broadcastWants.Has(c) { | |
+ continue | |
+ } | |
+ | |
+ // If the CID has not been sent as a want-block or want-have | |
+ if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { | |
+ // Increment the total wants gauge | |
+ peerCounts := pwm.wantPeerCounts(c) | |
+ if !peerCounts.wanted() { | |
+ pwm.wantGauge.Inc() | |
+ } | |
+ | |
+ // Record that the CID was sent as a want-have | |
+ pws.wantHaves.Add(c) | |
+ | |
+ // Add the CID to the results | |
+ fltWantHvs = append(fltWantHvs, c) | |
+ | |
+ // Update the reverse index | |
+ pwm.reverseIndexAdd(c, p) | |
+ } | |
+ } | |
+ | |
+ // Send the want-blocks and want-haves to the peer | |
+ pws.peerQueue.AddWants(fltWantBlks, fltWantHvs) | |
+} | |
+ | |
+// sendCancels sends a cancel to each peer to which a corresponding want was | |
+// sent | |
+func (pwm *peerWantManager) sendCancels(cancelKs []cid.Cid) { | |
+ if len(cancelKs) == 0 { | |
+ return | |
+ } | |
+ | |
+ // Record how many peers have a pending want-block and want-have for each | |
+ // key to be cancelled | |
+ peerCounts := make(map[cid.Cid]wantPeerCnts, len(cancelKs)) | |
+ for _, c := range cancelKs { | |
+ peerCounts[c] = pwm.wantPeerCounts(c) | |
+ } | |
+ | |
+ // Create a buffer to use for filtering cancels per peer, with the | |
+ // broadcast wants at the front of the buffer (broadcast wants are sent to | |
+ // all peers) | |
+ broadcastCancels := make([]cid.Cid, 0, len(cancelKs)) | |
+ for _, c := range cancelKs { | |
+ if pwm.broadcastWants.Has(c) { | |
+ broadcastCancels = append(broadcastCancels, c) | |
+ } | |
+ } | |
+ | |
+ // Send cancels to a particular peer | |
+ send := func(p peer.ID, pws *peerWant) { | |
+ // Start from the broadcast cancels | |
+ toCancel := broadcastCancels | |
+ | |
+ // For each key to be cancelled | |
+ for _, c := range cancelKs { | |
+ // Check if a want was sent for the key | |
+ if !pws.wantBlocks.Has(c) && !pws.wantHaves.Has(c) { | |
+ continue | |
+ } | |
+ | |
+ // Unconditionally remove from the want lists. | |
+ pws.wantBlocks.Remove(c) | |
+ pws.wantHaves.Remove(c) | |
+ | |
+ // If it's a broadcast want, we've already added it to | |
+ // the peer cancels. | |
+ if !pwm.broadcastWants.Has(c) { | |
+ toCancel = append(toCancel, c) | |
+ } | |
+ } | |
+ | |
+ // Send cancels to the peer | |
+ if len(toCancel) > 0 { | |
+ pws.peerQueue.AddCancels(toCancel) | |
+ } | |
+ } | |
+ | |
+ if len(broadcastCancels) > 0 { | |
+ // If a broadcast want is being cancelled, send the cancel to all | |
+ // peers | |
+ for p, pws := range pwm.peerWants { | |
+ send(p, pws) | |
+ } | |
+ } else { | |
+ // Only send cancels to peers that received a corresponding want | |
+ cancelPeers := make(map[peer.ID]struct{}, len(pwm.wantPeers[cancelKs[0]])) | |
+ for _, c := range cancelKs { | |
+ for p := range pwm.wantPeers[c] { | |
+ cancelPeers[p] = struct{}{} | |
+ } | |
+ } | |
+ for p := range cancelPeers { | |
+ pws, ok := pwm.peerWants[p] | |
+ if !ok { | |
+ // Should never happen but check just in case | |
+ log.Errorf("sendCancels - peerWantManager index missing peer %s", p) | |
+ continue | |
+ } | |
+ | |
+ send(p, pws) | |
+ } | |
+ } | |
+ | |
+ // Decrement the wants gauges | |
+ for _, c := range cancelKs { | |
+ peerCnts := peerCounts[c] | |
+ | |
+ // If there were any peers that had a pending want-block for the key | |
+ if peerCnts.wantBlock > 0 { | |
+ // Decrement the want-block gauge | |
+ pwm.wantBlockGauge.Dec() | |
+ } | |
+ | |
+ // If there was a peer that had a pending want or it was a broadcast want | |
+ if peerCnts.wanted() { | |
+ // Decrement the total wants gauge | |
+ pwm.wantGauge.Dec() | |
+ } | |
+ } | |
+ | |
+ // Remove cancelled broadcast wants | |
+ for _, c := range broadcastCancels { | |
+ pwm.broadcastWants.Remove(c) | |
+ } | |
+ | |
+ // Batch-remove the reverse-index. There's no need to clear this index | |
+ // peer-by-peer. | |
+ for _, c := range cancelKs { | |
+ delete(pwm.wantPeers, c) | |
+ } | |
+} | |
+ | |
+// wantPeerCnts stores the number of peers that have pending wants for a CID | |
+type wantPeerCnts struct { | |
+ // number of peers that have a pending want-block for the CID | |
+ wantBlock int | |
+ // number of peers that have a pending want-have for the CID | |
+ wantHave int | |
+ // whether the CID is a broadcast want | |
+ isBroadcast bool | |
+} | |
+ | |
+// wanted returns true if any peer wants the CID or it's a broadcast want | |
+func (pwm *wantPeerCnts) wanted() bool { | |
+ return pwm.wantBlock > 0 || pwm.wantHave > 0 || pwm.isBroadcast | |
+} | |
+ | |
+// wantPeerCounts counts how many peers have a pending want-block and want-have | |
+// for the given CID | |
+func (pwm *peerWantManager) wantPeerCounts(c cid.Cid) wantPeerCnts { | |
+ blockCount := 0 | |
+ haveCount := 0 | |
+ for p := range pwm.wantPeers[c] { | |
+ pws, ok := pwm.peerWants[p] | |
+ if !ok { | |
+ log.Errorf("reverse index has extra peer %s for key %s in peerWantManager", string(p), c) | |
+ continue | |
+ } | |
+ | |
+ if pws.wantBlocks.Has(c) { | |
+ blockCount++ | |
+ } else if pws.wantHaves.Has(c) { | |
+ haveCount++ | |
+ } | |
+ } | |
+ | |
+ return wantPeerCnts{blockCount, haveCount, pwm.broadcastWants.Has(c)} | |
+} | |
+ | |
+// Add the peer to the list of peers that have sent a want with the cid | |
+func (pwm *peerWantManager) reverseIndexAdd(c cid.Cid, p peer.ID) bool { | |
+ peers, ok := pwm.wantPeers[c] | |
+ if !ok { | |
+ peers = make(map[peer.ID]struct{}, 10) | |
+ pwm.wantPeers[c] = peers | |
+ } | |
+ peers[p] = struct{}{} | |
+ return !ok | |
+} | |
+ | |
+// Remove the peer from the list of peers that have sent a want with the cid | |
+func (pwm *peerWantManager) reverseIndexRemove(c cid.Cid, p peer.ID) { | |
+ if peers, ok := pwm.wantPeers[c]; ok { | |
+ delete(peers, p) | |
+ if len(peers) == 0 { | |
+ delete(pwm.wantPeers, c) | |
+ } | |
+ } | |
+} | |
+ | |
+// getWantBlocks returns the set of all want-blocks sent to all peers | |
+func (pwm *peerWantManager) getWantBlocks() []cid.Cid { | |
+ res := cid.NewSet() | |
+ | |
+ // Iterate over all known peers | |
+ for _, pws := range pwm.peerWants { | |
+ // Iterate over all want-blocks | |
+ _ = pws.wantBlocks.ForEach(func(c cid.Cid) error { | |
+ // Add the CID to the results | |
+ res.Add(c) | |
+ return nil | |
+ }) | |
+ } | |
+ | |
+ return res.Keys() | |
+} | |
+ | |
+// getWantHaves returns the set of all want-haves sent to all peers | |
+func (pwm *peerWantManager) getWantHaves() []cid.Cid { | |
+ res := cid.NewSet() | |
+ | |
+ // Iterate over all peers with active wants. | |
+ for _, pws := range pwm.peerWants { | |
+ // Iterate over all want-haves | |
+ _ = pws.wantHaves.ForEach(func(c cid.Cid) error { | |
+ // Add the CID to the results | |
+ res.Add(c) | |
+ return nil | |
+ }) | |
+ } | |
+ _ = pwm.broadcastWants.ForEach(func(c cid.Cid) error { | |
+ res.Add(c) | |
+ return nil | |
+ }) | |
+ | |
+ return res.Keys() | |
+} | |
+ | |
+// getWants returns the set of all wants (both want-blocks and want-haves). | |
+func (pwm *peerWantManager) getWants() []cid.Cid { | |
+ res := pwm.broadcastWants.Keys() | |
+ | |
+ // Iterate over all targeted wants, removing ones that are also in the | |
+ // broadcast list. | |
+ for c := range pwm.wantPeers { | |
+ if pwm.broadcastWants.Has(c) { | |
+ continue | |
+ } | |
+ res = append(res, c) | |
+ } | |
+ | |
+ return res | |
+} | |
+ | |
+func (pwm *peerWantManager) String() string { | |
+ var b bytes.Buffer | |
+ for p, ws := range pwm.peerWants { | |
+ b.WriteString(fmt.Sprintf("Peer %s: %d want-have / %d want-block:\n", p, ws.wantHaves.Len(), ws.wantBlocks.Len())) | |
+ for _, c := range ws.wantHaves.Keys() { | |
+ b.WriteString(fmt.Sprintf(" want-have %s\n", c)) | |
+ } | |
+ for _, c := range ws.wantBlocks.Keys() { | |
+ b.WriteString(fmt.Sprintf(" want-block %s\n", c)) | |
+ } | |
+ } | |
+ return b.String() | |
+} | |
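
The two gauges handed to newPeerWantManager count distinct CIDs, not per-peer entries: wantGauge tracks CIDs still wanted by at least one peer (or the broadcast list), wantBlockGauge tracks CIDs with at least one outstanding want-block. A standalone sketch, written as if inside this package (countingGauge and discardQueue are illustrative stand-ins), makes that visible.

package peermanager

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
	blocks "github.com/ipfs/go-libipfs/blocks"
	peer "github.com/libp2p/go-libp2p/core/peer"
)

// countingGauge is a minimal Gauge; the real code uses go-metrics-interface.
type countingGauge int

func (g *countingGauge) Inc() { *g++ }
func (g *countingGauge) Dec() { *g-- }

// discardQueue satisfies PeerQueue and drops everything it is given.
type discardQueue struct{}

func (discardQueue) AddBroadcastWantHaves([]cid.Cid) {}
func (discardQueue) AddWants([]cid.Cid, []cid.Cid)   {}
func (discardQueue) AddCancels([]cid.Cid)            {}
func (discardQueue) ResponseReceived([]cid.Cid)      {}
func (discardQueue) Startup()                        {}
func (discardQueue) Shutdown()                       {}

func Example_wantGauges() {
	var wantGauge, wantBlockGauge countingGauge
	pwm := newPeerWantManager(&wantGauge, &wantBlockGauge)

	p1, p2 := peer.ID("p1"), peer.ID("p2")
	pwm.addPeer(discardQueue{}, p1)
	pwm.addPeer(discardQueue{}, p2)

	// The same want-block sent to two peers is counted once per CID.
	c := blocks.NewBlock([]byte("data")).Cid()
	pwm.sendWants(p1, []cid.Cid{c}, nil)
	pwm.sendWants(p2, []cid.Cid{c}, nil)
	fmt.Println(wantGauge, wantBlockGauge)

	// Cancelling drops both gauges back to zero.
	pwm.sendCancels([]cid.Cid{c})
	fmt.Println(wantGauge, wantBlockGauge)
	// Output:
	// 1 1
	// 0 0
}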
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/providerquerymanager/providerquerymanager.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/providerquerymanager/providerquerymanager.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/providerquerymanager/providerquerymanager.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/providerquerymanager/providerquerymanager.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,430 @@ | |
+package providerquerymanager | |
+ | |
+import ( | |
+ "context" | |
+ "fmt" | |
+ "sync" | |
+ "time" | |
+ | |
+ "github.com/ipfs/go-cid" | |
+ logging "github.com/ipfs/go-log" | |
+ peer "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+var log = logging.Logger("bitswap") | |
+ | |
+const ( | |
+ maxProviders = 10 | |
+ maxInProcessRequests = 6 | |
+ defaultTimeout = 10 * time.Second | |
+) | |
+ | |
+type inProgressRequestStatus struct { | |
+ ctx context.Context | |
+ cancelFn func() | |
+ providersSoFar []peer.ID | |
+ listeners map[chan peer.ID]struct{} | |
+} | |
+ | |
+type findProviderRequest struct { | |
+ k cid.Cid | |
+ ctx context.Context | |
+} | |
+ | |
+// ProviderQueryNetwork is an interface for finding providers and connecting to | |
+// peers. | |
+type ProviderQueryNetwork interface { | |
+ ConnectTo(context.Context, peer.ID) error | |
+ FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID | |
+} | |
+ | |
+type providerQueryMessage interface { | |
+ debugMessage() string | |
+ handle(pqm *ProviderQueryManager) | |
+} | |
+ | |
+type receivedProviderMessage struct { | |
+ ctx context.Context | |
+ k cid.Cid | |
+ p peer.ID | |
+} | |
+ | |
+type finishedProviderQueryMessage struct { | |
+ ctx context.Context | |
+ k cid.Cid | |
+} | |
+ | |
+type newProvideQueryMessage struct { | |
+ ctx context.Context | |
+ k cid.Cid | |
+ inProgressRequestChan chan<- inProgressRequest | |
+} | |
+ | |
+type cancelRequestMessage struct { | |
+ incomingProviders chan peer.ID | |
+ k cid.Cid | |
+} | |
+ | |
+// ProviderQueryManager manages requests to find more providers for blocks | |
+// for bitswap sessions. Its main goals are to: | |
+// - rate limit requests -- don't have too many find provider calls running | |
+// simultaneously | |
+// - connect to found peers and filter them if it can't connect | |
+// - ensure two findprovider calls for the same block don't run concurrently | |
+// - manage timeouts | |
+type ProviderQueryManager struct { | |
+ ctx context.Context | |
+ network ProviderQueryNetwork | |
+ providerQueryMessages chan providerQueryMessage | |
+ providerRequestsProcessing chan *findProviderRequest | |
+ incomingFindProviderRequests chan *findProviderRequest | |
+ | |
+ findProviderTimeout time.Duration | |
+ timeoutMutex sync.RWMutex | |
+ | |
+ // do not touch outside the run loop | |
+ inProgressRequestStatuses map[cid.Cid]*inProgressRequestStatus | |
+} | |
+ | |
+// New initializes a new ProviderQueryManager for a given context and a given | |
+// network provider. | |
+func New(ctx context.Context, network ProviderQueryNetwork) *ProviderQueryManager { | |
+ return &ProviderQueryManager{ | |
+ ctx: ctx, | |
+ network: network, | |
+ providerQueryMessages: make(chan providerQueryMessage, 16), | |
+ providerRequestsProcessing: make(chan *findProviderRequest), | |
+ incomingFindProviderRequests: make(chan *findProviderRequest), | |
+ inProgressRequestStatuses: make(map[cid.Cid]*inProgressRequestStatus), | |
+ findProviderTimeout: defaultTimeout, | |
+ } | |
+} | |
+ | |
+// Startup starts processing for the ProviderQueryManager. | |
+func (pqm *ProviderQueryManager) Startup() { | |
+ go pqm.run() | |
+} | |
+ | |
+type inProgressRequest struct { | |
+ providersSoFar []peer.ID | |
+ incoming chan peer.ID | |
+} | |
+ | |
+// SetFindProviderTimeout changes the timeout for finding providers | |
+func (pqm *ProviderQueryManager) SetFindProviderTimeout(findProviderTimeout time.Duration) { | |
+ pqm.timeoutMutex.Lock() | |
+ pqm.findProviderTimeout = findProviderTimeout | |
+ pqm.timeoutMutex.Unlock() | |
+} | |
+ | |
+// FindProvidersAsync finds providers for the given block. | |
+func (pqm *ProviderQueryManager) FindProvidersAsync(sessionCtx context.Context, k cid.Cid) <-chan peer.ID { | |
+ inProgressRequestChan := make(chan inProgressRequest) | |
+ | |
+ select { | |
+ case pqm.providerQueryMessages <- &newProvideQueryMessage{ | |
+ ctx: sessionCtx, | |
+ k: k, | |
+ inProgressRequestChan: inProgressRequestChan, | |
+ }: | |
+ case <-pqm.ctx.Done(): | |
+ ch := make(chan peer.ID) | |
+ close(ch) | |
+ return ch | |
+ case <-sessionCtx.Done(): | |
+ ch := make(chan peer.ID) | |
+ close(ch) | |
+ return ch | |
+ } | |
+ | |
+ // DO NOT select on sessionCtx. We only want to abort here if we're | |
+ // shutting down because we can't actually _cancel_ the request till we | |
+ // get to receiveProviders. | |
+ var receivedInProgressRequest inProgressRequest | |
+ select { | |
+ case <-pqm.ctx.Done(): | |
+ ch := make(chan peer.ID) | |
+ close(ch) | |
+ return ch | |
+ case receivedInProgressRequest = <-inProgressRequestChan: | |
+ } | |
+ | |
+ return pqm.receiveProviders(sessionCtx, k, receivedInProgressRequest) | |
+} | |
+ | |
+func (pqm *ProviderQueryManager) receiveProviders(sessionCtx context.Context, k cid.Cid, receivedInProgressRequest inProgressRequest) <-chan peer.ID { | |
+	// maintains an unbuffered queue of incoming providers for a given request for a given session | |
+ // essentially, as a provider comes in, for a given CID, we want to immediately broadcast to all | |
+ // sessions that queried that CID, without worrying about whether the client code is actually | |
+ // reading from the returned channel -- so that the broadcast never blocks | |
+ // based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd | |
+ returnedProviders := make(chan peer.ID) | |
+ receivedProviders := append([]peer.ID(nil), receivedInProgressRequest.providersSoFar[0:]...) | |
+ incomingProviders := receivedInProgressRequest.incoming | |
+ | |
+ go func() { | |
+ defer close(returnedProviders) | |
+ outgoingProviders := func() chan<- peer.ID { | |
+ if len(receivedProviders) == 0 { | |
+ return nil | |
+ } | |
+ return returnedProviders | |
+ } | |
+ nextProvider := func() peer.ID { | |
+ if len(receivedProviders) == 0 { | |
+ return "" | |
+ } | |
+ return receivedProviders[0] | |
+ } | |
+ for len(receivedProviders) > 0 || incomingProviders != nil { | |
+ select { | |
+ case <-pqm.ctx.Done(): | |
+ return | |
+ case <-sessionCtx.Done(): | |
+ if incomingProviders != nil { | |
+ pqm.cancelProviderRequest(k, incomingProviders) | |
+ } | |
+ return | |
+ case provider, ok := <-incomingProviders: | |
+ if !ok { | |
+ incomingProviders = nil | |
+ } else { | |
+ receivedProviders = append(receivedProviders, provider) | |
+ } | |
+ case outgoingProviders() <- nextProvider(): | |
+ receivedProviders = receivedProviders[1:] | |
+ } | |
+ } | |
+ }() | |
+ return returnedProviders | |
+} | |
+ | |
+func (pqm *ProviderQueryManager) cancelProviderRequest(k cid.Cid, incomingProviders chan peer.ID) { | |
+ cancelMessageChannel := pqm.providerQueryMessages | |
+ for { | |
+ select { | |
+ case cancelMessageChannel <- &cancelRequestMessage{ | |
+ incomingProviders: incomingProviders, | |
+ k: k, | |
+ }: | |
+ cancelMessageChannel = nil | |
+		// clear out any remaining providers, in case any "incoming provider" | |
+		// messages get processed before our cancel message | |
+ case _, ok := <-incomingProviders: | |
+ if !ok { | |
+ return | |
+ } | |
+ case <-pqm.ctx.Done(): | |
+ return | |
+ } | |
+ } | |
+} | |
+ | |
+func (pqm *ProviderQueryManager) findProviderWorker() { | |
+	// findProviderWorker just cycles through incoming provider queries one | |
+	// at a time. We run maxInProcessRequests of these workers concurrently | |
+	// to let requests go in parallel while keeping them rate limited | |
+ for { | |
+ select { | |
+ case fpr, ok := <-pqm.providerRequestsProcessing: | |
+ if !ok { | |
+ return | |
+ } | |
+ k := fpr.k | |
+ log.Debugf("Beginning Find Provider Request for cid: %s", k.String()) | |
+ pqm.timeoutMutex.RLock() | |
+ findProviderCtx, cancel := context.WithTimeout(fpr.ctx, pqm.findProviderTimeout) | |
+ pqm.timeoutMutex.RUnlock() | |
+ providers := pqm.network.FindProvidersAsync(findProviderCtx, k, maxProviders) | |
+ wg := &sync.WaitGroup{} | |
+ for p := range providers { | |
+ wg.Add(1) | |
+ go func(p peer.ID) { | |
+ defer wg.Done() | |
+ err := pqm.network.ConnectTo(findProviderCtx, p) | |
+ if err != nil { | |
+ log.Debugf("failed to connect to provider %s: %s", p, err) | |
+ return | |
+ } | |
+ select { | |
+ case pqm.providerQueryMessages <- &receivedProviderMessage{ | |
+ ctx: findProviderCtx, | |
+ k: k, | |
+ p: p, | |
+ }: | |
+ case <-pqm.ctx.Done(): | |
+ return | |
+ } | |
+ }(p) | |
+ } | |
+ wg.Wait() | |
+ cancel() | |
+ select { | |
+ case pqm.providerQueryMessages <- &finishedProviderQueryMessage{ | |
+ ctx: findProviderCtx, | |
+ k: k, | |
+ }: | |
+ case <-pqm.ctx.Done(): | |
+ } | |
+ case <-pqm.ctx.Done(): | |
+ return | |
+ } | |
+ } | |
+} | |
+ | |
+func (pqm *ProviderQueryManager) providerRequestBufferWorker() { | |
+ // the provider request buffer worker just maintains an unbounded | |
+ // buffer for incoming provider queries and dispatches to the find | |
+ // provider workers as they become available | |
+ // based on: https://medium.com/capital-one-tech/building-an-unbounded-channel-in-go-789e175cd2cd | |
+ var providerQueryRequestBuffer []*findProviderRequest | |
+ nextProviderQuery := func() *findProviderRequest { | |
+ if len(providerQueryRequestBuffer) == 0 { | |
+ return nil | |
+ } | |
+ return providerQueryRequestBuffer[0] | |
+ } | |
+ outgoingRequests := func() chan<- *findProviderRequest { | |
+ if len(providerQueryRequestBuffer) == 0 { | |
+ return nil | |
+ } | |
+ return pqm.providerRequestsProcessing | |
+ } | |
+ | |
+ for { | |
+ select { | |
+ case incomingRequest, ok := <-pqm.incomingFindProviderRequests: | |
+ if !ok { | |
+ return | |
+ } | |
+ providerQueryRequestBuffer = append(providerQueryRequestBuffer, incomingRequest) | |
+ case outgoingRequests() <- nextProviderQuery(): | |
+ providerQueryRequestBuffer = providerQueryRequestBuffer[1:] | |
+ case <-pqm.ctx.Done(): | |
+ return | |
+ } | |
+ } | |
+} | |
+ | |
+func (pqm *ProviderQueryManager) cleanupInProcessRequests() { | |
+ for _, requestStatus := range pqm.inProgressRequestStatuses { | |
+ for listener := range requestStatus.listeners { | |
+ close(listener) | |
+ } | |
+ requestStatus.cancelFn() | |
+ } | |
+} | |
+ | |
+func (pqm *ProviderQueryManager) run() { | |
+ defer pqm.cleanupInProcessRequests() | |
+ | |
+ go pqm.providerRequestBufferWorker() | |
+ for i := 0; i < maxInProcessRequests; i++ { | |
+ go pqm.findProviderWorker() | |
+ } | |
+ | |
+ for { | |
+ select { | |
+ case nextMessage := <-pqm.providerQueryMessages: | |
+ log.Debug(nextMessage.debugMessage()) | |
+ nextMessage.handle(pqm) | |
+ case <-pqm.ctx.Done(): | |
+ return | |
+ } | |
+ } | |
+} | |
+ | |
+func (rpm *receivedProviderMessage) debugMessage() string { | |
+ return fmt.Sprintf("Received provider (%s) for cid (%s)", rpm.p.String(), rpm.k.String()) | |
+} | |
+ | |
+func (rpm *receivedProviderMessage) handle(pqm *ProviderQueryManager) { | |
+ requestStatus, ok := pqm.inProgressRequestStatuses[rpm.k] | |
+ if !ok { | |
+ log.Errorf("Received provider (%s) for cid (%s) not requested", rpm.p.String(), rpm.k.String()) | |
+ return | |
+ } | |
+ requestStatus.providersSoFar = append(requestStatus.providersSoFar, rpm.p) | |
+ for listener := range requestStatus.listeners { | |
+ select { | |
+ case listener <- rpm.p: | |
+ case <-pqm.ctx.Done(): | |
+ return | |
+ } | |
+ } | |
+} | |
+ | |
+func (fpqm *finishedProviderQueryMessage) debugMessage() string { | |
+ return fmt.Sprintf("Finished Provider Query on cid: %s", fpqm.k.String()) | |
+} | |
+ | |
+func (fpqm *finishedProviderQueryMessage) handle(pqm *ProviderQueryManager) { | |
+ requestStatus, ok := pqm.inProgressRequestStatuses[fpqm.k] | |
+ if !ok { | |
+ // we canceled the request as it finished. | |
+ return | |
+ } | |
+ for listener := range requestStatus.listeners { | |
+ close(listener) | |
+ } | |
+ delete(pqm.inProgressRequestStatuses, fpqm.k) | |
+ requestStatus.cancelFn() | |
+} | |
+ | |
+func (npqm *newProvideQueryMessage) debugMessage() string { | |
+ return fmt.Sprintf("New Provider Query on cid: %s", npqm.k.String()) | |
+} | |
+ | |
+func (npqm *newProvideQueryMessage) handle(pqm *ProviderQueryManager) { | |
+ requestStatus, ok := pqm.inProgressRequestStatuses[npqm.k] | |
+ if !ok { | |
+ | |
+ ctx, cancelFn := context.WithCancel(pqm.ctx) | |
+ requestStatus = &inProgressRequestStatus{ | |
+ listeners: make(map[chan peer.ID]struct{}), | |
+ ctx: ctx, | |
+ cancelFn: cancelFn, | |
+ } | |
+ pqm.inProgressRequestStatuses[npqm.k] = requestStatus | |
+ select { | |
+ case pqm.incomingFindProviderRequests <- &findProviderRequest{ | |
+ k: npqm.k, | |
+ ctx: ctx, | |
+ }: | |
+ case <-pqm.ctx.Done(): | |
+ return | |
+ } | |
+ } | |
+ inProgressChan := make(chan peer.ID) | |
+ requestStatus.listeners[inProgressChan] = struct{}{} | |
+ select { | |
+ case npqm.inProgressRequestChan <- inProgressRequest{ | |
+ providersSoFar: requestStatus.providersSoFar, | |
+ incoming: inProgressChan, | |
+ }: | |
+ case <-pqm.ctx.Done(): | |
+ } | |
+} | |
+ | |
+func (crm *cancelRequestMessage) debugMessage() string { | |
+ return fmt.Sprintf("Cancel provider query on cid: %s", crm.k.String()) | |
+} | |
+ | |
+func (crm *cancelRequestMessage) handle(pqm *ProviderQueryManager) { | |
+ requestStatus, ok := pqm.inProgressRequestStatuses[crm.k] | |
+ if !ok { | |
+ // Request finished while queued. | |
+ return | |
+ } | |
+ _, ok = requestStatus.listeners[crm.incomingProviders] | |
+ if !ok { | |
+ // Request finished and _restarted_ while queued. | |
+ return | |
+ } | |
+ delete(requestStatus.listeners, crm.incomingProviders) | |
+ close(crm.incomingProviders) | |
+ if len(requestStatus.listeners) == 0 { | |
+ delete(pqm.inProgressRequestStatuses, crm.k) | |
+ requestStatus.cancelFn() | |
+ } | |
+} | |
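
Both receiveProviders and providerRequestBufferWorker above lean on the same "unbounded buffer" trick from the linked article: a goroutine selects on the inbound channel and on an outbound channel that is set to nil while the buffer is empty, so producers are never blocked by a slow consumer. A standalone sketch of that pattern (not from the patch), using plain ints:

package main

import "fmt"

// unbounded forwards everything from in to the returned channel, buffering
// internally so sends on in never wait for the downstream reader.
func unbounded(in <-chan int) <-chan int {
	out := make(chan int)
	go func() {
		defer close(out)
		var buf []int
		outOrNil := func() chan int {
			if len(buf) == 0 {
				return nil // a nil channel disables that select case
			}
			return out
		}
		next := func() int {
			if len(buf) == 0 {
				return 0
			}
			return buf[0]
		}
		for in != nil || len(buf) > 0 {
			select {
			case v, ok := <-in:
				if !ok {
					in = nil // stop reading, keep draining the buffer
					continue
				}
				buf = append(buf, v)
			case outOrNil() <- next():
				buf = buf[1:]
			}
		}
	}()
	return out
}

func main() {
	in := make(chan int)
	out := unbounded(in)
	go func() {
		for i := 1; i <= 3; i++ {
			in <- i
		}
		close(in)
	}()
	for v := range out {
		fmt.Println(v) // 1, 2, 3
	}
}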
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/cidqueue.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/cidqueue.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/cidqueue.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/cidqueue.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,63 @@ | |
+package session | |
+ | |
+import cid "github.com/ipfs/go-cid" | |
+ | |
+type cidQueue struct { | |
+ elems []cid.Cid | |
+ eset *cid.Set | |
+} | |
+ | |
+func newCidQueue() *cidQueue { | |
+ return &cidQueue{eset: cid.NewSet()} | |
+} | |
+ | |
+func (cq *cidQueue) Pop() cid.Cid { | |
+ for { | |
+ if len(cq.elems) == 0 { | |
+ return cid.Cid{} | |
+ } | |
+ | |
+ out := cq.elems[0] | |
+ cq.elems = cq.elems[1:] | |
+ | |
+ if cq.eset.Has(out) { | |
+ cq.eset.Remove(out) | |
+ return out | |
+ } | |
+ } | |
+} | |
+ | |
+func (cq *cidQueue) Cids() []cid.Cid { | |
+ // Lazily delete from the list any cids that were removed from the set | |
+ if len(cq.elems) > cq.eset.Len() { | |
+ i := 0 | |
+ for _, c := range cq.elems { | |
+ if cq.eset.Has(c) { | |
+ cq.elems[i] = c | |
+ i++ | |
+ } | |
+ } | |
+ cq.elems = cq.elems[:i] | |
+ } | |
+ | |
+ // Make a copy of the cids | |
+ return append([]cid.Cid{}, cq.elems...) | |
+} | |
+ | |
+func (cq *cidQueue) Push(c cid.Cid) { | |
+ if cq.eset.Visit(c) { | |
+ cq.elems = append(cq.elems, c) | |
+ } | |
+} | |
+ | |
+func (cq *cidQueue) Remove(c cid.Cid) { | |
+ cq.eset.Remove(c) | |
+} | |
+ | |
+func (cq *cidQueue) Has(c cid.Cid) bool { | |
+ return cq.eset.Has(c) | |
+} | |
+ | |
+func (cq *cidQueue) Len() int { | |
+ return cq.eset.Len() | |
+} | |
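
cidQueue removes lazily: Remove only drops the CID from the set, while the slice keeps the stale entry until Pop or Cids skips past it. A short usage sketch, written as if inside this package:

package session

import (
	"fmt"

	blocks "github.com/ipfs/go-libipfs/blocks"
)

func Example_cidQueue() {
	q := newCidQueue()

	a := blocks.NewBlock([]byte("a")).Cid()
	b := blocks.NewBlock([]byte("b")).Cid()

	q.Push(a)
	q.Push(b)
	q.Remove(a) // lazy: a stays in q.elems but is gone from q.eset

	fmt.Println(q.Len())           // Len reports the set, not the slice
	fmt.Println(q.Pop().Equals(b)) // Pop silently skips the removed entry
	// Output:
	// 1
	// true
}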
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/peerresponsetracker.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/peerresponsetracker.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/peerresponsetracker.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/peerresponsetracker.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,70 @@ | |
+package session | |
+ | |
+import ( | |
+ "math/rand" | |
+ | |
+ peer "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+// peerResponseTracker keeps track of how many times each peer was the first | |
+// to send us a block for a given CID (used to rank peers) | |
+type peerResponseTracker struct { | |
+ firstResponder map[peer.ID]int | |
+} | |
+ | |
+func newPeerResponseTracker() *peerResponseTracker { | |
+ return &peerResponseTracker{ | |
+ firstResponder: make(map[peer.ID]int), | |
+ } | |
+} | |
+ | |
+// receivedBlockFrom is called when a block is received from a peer | |
+// (only called first time block is received) | |
+func (prt *peerResponseTracker) receivedBlockFrom(from peer.ID) { | |
+ prt.firstResponder[from]++ | |
+} | |
+ | |
+// choose picks a peer from the list of candidate peers, favouring those peers | |
+// that were first to send us previous blocks | |
+func (prt *peerResponseTracker) choose(peers []peer.ID) peer.ID { | |
+ if len(peers) == 0 { | |
+ return "" | |
+ } | |
+ | |
+ rnd := rand.Float64() | |
+ | |
+ // Find the total received blocks for all candidate peers | |
+ total := 0 | |
+ for _, p := range peers { | |
+ total += prt.getPeerCount(p) | |
+ } | |
+ | |
+ // Choose one of the peers with a chance proportional to the number | |
+ // of blocks received from that peer | |
+ counted := 0.0 | |
+ for _, p := range peers { | |
+ counted += float64(prt.getPeerCount(p)) / float64(total) | |
+ if counted > rnd { | |
+ return p | |
+ } | |
+ } | |
+ | |
+ // We shouldn't get here unless there is some weirdness with floating point | |
+ // math that doesn't quite cover the whole range of peers in the for loop | |
+ // so just choose the last peer. | |
+ index := len(peers) - 1 | |
+ return peers[index] | |
+} | |
+ | |
+// getPeerCount returns the number of times the peer was first to send us a | |
+// block | |
+func (prt *peerResponseTracker) getPeerCount(p peer.ID) int { | |
+ count, ok := prt.firstResponder[p] | |
+ if ok { | |
+ return count | |
+ } | |
+ | |
+ // Make sure there is always at least a small chance a new peer | |
+ // will be chosen | |
+ return 1 | |
+} | |
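
The weighting in choose above is proportional: a peer's chance of being picked is its first-responder count over the total, and a peer we have never heard from still counts as 1 so it keeps a small chance. A worked sketch, written as if inside this package, that just prints those probabilities:

package session

import (
	"fmt"

	peer "github.com/libp2p/go-libp2p/core/peer"
)

func Example_peerResponseWeights() {
	prt := newPeerResponseTracker()
	a, b := peer.ID("a"), peer.ID("b")

	// Peer a was first to deliver three blocks; peer b none so far.
	prt.receivedBlockFrom(a)
	prt.receivedBlockFrom(a)
	prt.receivedBlockFrom(a)

	total := prt.getPeerCount(a) + prt.getPeerCount(b) // 3 + 1
	fmt.Printf("P(a)=%.2f P(b)=%.2f\n",
		float64(prt.getPeerCount(a))/float64(total),
		float64(prt.getPeerCount(b))/float64(total))
	// Output: P(a)=0.75 P(b)=0.25
}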
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/sentwantblockstracker.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/sentwantblockstracker.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/sentwantblockstracker.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/sentwantblockstracker.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,33 @@ | |
+package session | |
+ | |
+import ( | |
+ cid "github.com/ipfs/go-cid" | |
+ peer "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+// sentWantBlocksTracker keeps track of which peers we've sent a want-block to | |
+type sentWantBlocksTracker struct { | |
+ sentWantBlocks map[peer.ID]map[cid.Cid]struct{} | |
+} | |
+ | |
+func newSentWantBlocksTracker() *sentWantBlocksTracker { | |
+ return &sentWantBlocksTracker{ | |
+ sentWantBlocks: make(map[peer.ID]map[cid.Cid]struct{}), | |
+ } | |
+} | |
+ | |
+func (s *sentWantBlocksTracker) addSentWantBlocksTo(p peer.ID, ks []cid.Cid) { | |
+ cids, ok := s.sentWantBlocks[p] | |
+ if !ok { | |
+ cids = make(map[cid.Cid]struct{}, len(ks)) | |
+ s.sentWantBlocks[p] = cids | |
+ } | |
+ for _, c := range ks { | |
+ cids[c] = struct{}{} | |
+ } | |
+} | |
+ | |
+func (s *sentWantBlocksTracker) haveSentWantBlockTo(p peer.ID, c cid.Cid) bool { | |
+ _, ok := s.sentWantBlocks[p][c] | |
+ return ok | |
+} | |
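
sentWantBlocksTracker is just a per-peer set of CIDs answering "have we already sent this peer a want-block for this key?". A tiny usage sketch, written as if inside this package:

package session

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
	blocks "github.com/ipfs/go-libipfs/blocks"
	peer "github.com/libp2p/go-libp2p/core/peer"
)

func Example_sentWantBlocksTracker() {
	swbt := newSentWantBlocksTracker()
	p := peer.ID("p")
	c := blocks.NewBlock([]byte("x")).Cid()

	fmt.Println(swbt.haveSentWantBlockTo(p, c))
	swbt.addSentWantBlocksTo(p, []cid.Cid{c})
	fmt.Println(swbt.haveSentWantBlockTo(p, c))
	// Output:
	// false
	// true
}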
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/session.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/session.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/session.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/session.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,508 @@ | |
+package session | |
+ | |
+import ( | |
+ "context" | |
+ "time" | |
+ | |
+ cid "github.com/ipfs/go-cid" | |
+ delay "github.com/ipfs/go-ipfs-delay" | |
+ "github.com/ipfs/go-libipfs/bitswap/client/internal" | |
+ bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager" | |
+ bsgetter "github.com/ipfs/go-libipfs/bitswap/client/internal/getter" | |
+ notifications "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications" | |
+ bspm "github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager" | |
+ bssim "github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager" | |
+ blocks "github.com/ipfs/go-libipfs/blocks" | |
+ logging "github.com/ipfs/go-log" | |
+ peer "github.com/libp2p/go-libp2p/core/peer" | |
+ "go.uber.org/zap" | |
+) | |
+ | |
+var log = logging.Logger("bs:sess") | |
+var sflog = log.Desugar() | |
+ | |
+const ( | |
+ broadcastLiveWantsLimit = 64 | |
+) | |
+ | |
+// PeerManager keeps track of which sessions are interested in which peers | |
+// and takes care of sending wants for the sessions | |
+type PeerManager interface { | |
+ // RegisterSession tells the PeerManager that the session is interested | |
+ // in a peer's connection state | |
+ RegisterSession(peer.ID, bspm.Session) | |
+ // UnregisterSession tells the PeerManager that the session is no longer | |
+ // interested in a peer's connection state | |
+ UnregisterSession(uint64) | |
+ // SendWants tells the PeerManager to send wants to the given peer | |
+ SendWants(ctx context.Context, peerId peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) | |
+ // BroadcastWantHaves sends want-haves to all connected peers (used for | |
+ // session discovery) | |
+ BroadcastWantHaves(context.Context, []cid.Cid) | |
+ // SendCancels tells the PeerManager to send cancels to all peers | |
+ SendCancels(context.Context, []cid.Cid) | |
+} | |
+ | |
+// SessionManager manages all the sessions | |
+type SessionManager interface { | |
+ // Remove a session (called when the session shuts down) | |
+ RemoveSession(sesid uint64) | |
+ // Cancel wants (called when a call to GetBlocks() is cancelled) | |
+ CancelSessionWants(sid uint64, wants []cid.Cid) | |
+} | |
+ | |
+// SessionPeerManager keeps track of peers in the session | |
+type SessionPeerManager interface { | |
+ // PeersDiscovered indicates if any peers have been discovered yet | |
+ PeersDiscovered() bool | |
+ // Shutdown the SessionPeerManager | |
+ Shutdown() | |
+ // Adds a peer to the session, returning true if the peer is new | |
+ AddPeer(peer.ID) bool | |
+ // Removes a peer from the session, returning true if the peer existed | |
+ RemovePeer(peer.ID) bool | |
+ // All peers in the session | |
+ Peers() []peer.ID | |
+ // Whether there are any peers in the session | |
+ HasPeers() bool | |
+ // Protect connection from being pruned by the connection manager | |
+ ProtectConnection(peer.ID) | |
+} | |
+ | |
+// ProviderFinder is used to find providers for a given key | |
+type ProviderFinder interface { | |
+ // FindProvidersAsync searches for peers that provide the given CID | |
+ FindProvidersAsync(ctx context.Context, k cid.Cid) <-chan peer.ID | |
+} | |
+ | |
+// opType is the kind of operation that is being processed by the event loop | |
+type opType int | |
+ | |
+const ( | |
+ // Receive blocks | |
+ opReceive opType = iota | |
+ // Want blocks | |
+ opWant | |
+ // Cancel wants | |
+ opCancel | |
+ // Broadcast want-haves | |
+ opBroadcast | |
+ // Wants sent to peers | |
+ opWantsSent | |
+) | |
+ | |
+type op struct { | |
+ op opType | |
+ keys []cid.Cid | |
+} | |
+ | |
+// Session holds state for an individual bitswap transfer operation. | |
+// This allows bitswap to make smarter decisions about who to send wantlist | |
+// info to, and who to request blocks from. | |
+type Session struct { | |
+ // dependencies | |
+ ctx context.Context | |
+ shutdown func() | |
+ sm SessionManager | |
+ pm PeerManager | |
+ sprm SessionPeerManager | |
+ providerFinder ProviderFinder | |
+ sim *bssim.SessionInterestManager | |
+ | |
+ sw sessionWants | |
+ sws sessionWantSender | |
+ | |
+ latencyTrkr latencyTracker | |
+ | |
+ // channels | |
+ incoming chan op | |
+ tickDelayReqs chan time.Duration | |
+ | |
+ // do not touch outside run loop | |
+ idleTick *time.Timer | |
+ periodicSearchTimer *time.Timer | |
+ baseTickDelay time.Duration | |
+ consecutiveTicks int | |
+ initialSearchDelay time.Duration | |
+ periodicSearchDelay delay.D | |
+ // identifiers | |
+ notif notifications.PubSub | |
+ id uint64 | |
+ | |
+ self peer.ID | |
+} | |
+ | |
+// New creates a new bitswap session whose lifetime is bounded by the | |
+// given context. | |
+func New( | |
+ ctx context.Context, | |
+ sm SessionManager, | |
+ id uint64, | |
+ sprm SessionPeerManager, | |
+ providerFinder ProviderFinder, | |
+ sim *bssim.SessionInterestManager, | |
+ pm PeerManager, | |
+ bpm *bsbpm.BlockPresenceManager, | |
+ notif notifications.PubSub, | |
+ initialSearchDelay time.Duration, | |
+ periodicSearchDelay delay.D, | |
+ self peer.ID) *Session { | |
+ | |
+ ctx, cancel := context.WithCancel(ctx) | |
+ s := &Session{ | |
+ sw: newSessionWants(broadcastLiveWantsLimit), | |
+ tickDelayReqs: make(chan time.Duration), | |
+ ctx: ctx, | |
+ shutdown: cancel, | |
+ sm: sm, | |
+ pm: pm, | |
+ sprm: sprm, | |
+ providerFinder: providerFinder, | |
+ sim: sim, | |
+ incoming: make(chan op, 128), | |
+ latencyTrkr: latencyTracker{}, | |
+ notif: notif, | |
+ baseTickDelay: time.Millisecond * 500, | |
+ id: id, | |
+ initialSearchDelay: initialSearchDelay, | |
+ periodicSearchDelay: periodicSearchDelay, | |
+ self: self, | |
+ } | |
+ s.sws = newSessionWantSender(id, pm, sprm, sm, bpm, s.onWantsSent, s.onPeersExhausted) | |
+ | |
+ go s.run(ctx) | |
+ | |
+ return s | |
+} | |
+ | |
+func (s *Session) ID() uint64 { | |
+ return s.id | |
+} | |
+ | |
+func (s *Session) Shutdown() { | |
+ s.shutdown() | |
+} | |
+ | |
+// ReceiveFrom receives incoming blocks from the given peer. | |
+func (s *Session) ReceiveFrom(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { | |
+ // The SessionManager tells each Session about all keys that it may be | |
+ // interested in. Here the Session filters the keys to the ones that this | |
+ // particular Session is interested in. | |
+ interestedRes := s.sim.FilterSessionInterested(s.id, ks, haves, dontHaves) | |
+ ks = interestedRes[0] | |
+ haves = interestedRes[1] | |
+ dontHaves = interestedRes[2] | |
+ s.logReceiveFrom(from, ks, haves, dontHaves) | |
+ | |
+ // Inform the session want sender that a message has been received | |
+ s.sws.Update(from, ks, haves, dontHaves) | |
+ | |
+ if len(ks) == 0 { | |
+ return | |
+ } | |
+ | |
+ // Inform the session that blocks have been received | |
+ select { | |
+ case s.incoming <- op{op: opReceive, keys: ks}: | |
+ case <-s.ctx.Done(): | |
+ } | |
+} | |
+ | |
+func (s *Session) logReceiveFrom(from peer.ID, interestedKs []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { | |
+ // Save some CPU cycles if log level is higher than debug | |
+ if ce := sflog.Check(zap.DebugLevel, "Bitswap <- rcv message"); ce == nil { | |
+ return | |
+ } | |
+ | |
+ for _, c := range interestedKs { | |
+ log.Debugw("Bitswap <- block", "local", s.self, "from", from, "cid", c, "session", s.id) | |
+ } | |
+ for _, c := range haves { | |
+ log.Debugw("Bitswap <- HAVE", "local", s.self, "from", from, "cid", c, "session", s.id) | |
+ } | |
+ for _, c := range dontHaves { | |
+ log.Debugw("Bitswap <- DONT_HAVE", "local", s.self, "from", from, "cid", c, "session", s.id) | |
+ } | |
+} | |
+ | |
+// GetBlock fetches a single block. | |
+func (s *Session) GetBlock(ctx context.Context, k cid.Cid) (blocks.Block, error) { | |
+ ctx, span := internal.StartSpan(ctx, "Session.GetBlock") | |
+ defer span.End() | |
+ return bsgetter.SyncGetBlock(ctx, k, s.GetBlocks) | |
+} | |
+ | |
+// GetBlocks fetches a set of blocks within the context of this session and | |
+// returns a channel that found blocks will be returned on. No order is | |
+// guaranteed on the returned blocks. | |
+func (s *Session) GetBlocks(ctx context.Context, keys []cid.Cid) (<-chan blocks.Block, error) { | |
+ ctx, span := internal.StartSpan(ctx, "Session.GetBlocks") | |
+ defer span.End() | |
+ | |
+ return bsgetter.AsyncGetBlocks(ctx, s.ctx, keys, s.notif, | |
+ func(ctx context.Context, keys []cid.Cid) { | |
+ select { | |
+ case s.incoming <- op{op: opWant, keys: keys}: | |
+ case <-ctx.Done(): | |
+ case <-s.ctx.Done(): | |
+ } | |
+ }, | |
+ func(keys []cid.Cid) { | |
+ select { | |
+ case s.incoming <- op{op: opCancel, keys: keys}: | |
+ case <-s.ctx.Done(): | |
+ } | |
+ }, | |
+ ) | |
+} | |
+ | |
+// SetBaseTickDelay changes the rate at which ticks happen. | |
+func (s *Session) SetBaseTickDelay(baseTickDelay time.Duration) { | |
+ select { | |
+ case s.tickDelayReqs <- baseTickDelay: | |
+ case <-s.ctx.Done(): | |
+ } | |
+} | |
+ | |
+// onWantsSent is called when wants are sent to a peer by the session wants sender | |
+func (s *Session) onWantsSent(p peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) { | |
+ allBlks := append(wantBlocks[:len(wantBlocks):len(wantBlocks)], wantHaves...) | |
+ s.nonBlockingEnqueue(op{op: opWantsSent, keys: allBlks}) | |
+} | |
+ | |
+// onPeersExhausted is called when all available peers have sent DONT_HAVE for | |
+// a set of cids (or all peers become unavailable) | |
+func (s *Session) onPeersExhausted(ks []cid.Cid) { | |
+ s.nonBlockingEnqueue(op{op: opBroadcast, keys: ks}) | |
+} | |
+ | |
+// We don't want to block the sessionWantSender if the incoming channel | |
+// is full. So if we can't immediately send on the incoming channel, spin | |
+// the send off into a goroutine. | |
+func (s *Session) nonBlockingEnqueue(o op) { | |
+ select { | |
+ case s.incoming <- o: | |
+ default: | |
+ go func() { | |
+ select { | |
+ case s.incoming <- o: | |
+ case <-s.ctx.Done(): | |
+ } | |
+ }() | |
+ } | |
+} | |
+ | |
+// Session run loop -- nothing in this function should be called from | |
+// outside of this loop | |
+func (s *Session) run(ctx context.Context) { | |
+ go s.sws.Run() | |
+ | |
+ s.idleTick = time.NewTimer(s.initialSearchDelay) | |
+ s.periodicSearchTimer = time.NewTimer(s.periodicSearchDelay.NextWaitTime()) | |
+ for { | |
+ select { | |
+ case oper := <-s.incoming: | |
+ switch oper.op { | |
+ case opReceive: | |
+ // Received blocks | |
+ s.handleReceive(oper.keys) | |
+ case opWant: | |
+ // Client wants blocks | |
+ s.wantBlocks(ctx, oper.keys) | |
+ case opCancel: | |
+ // Wants were cancelled | |
+ s.sw.CancelPending(oper.keys) | |
+ s.sws.Cancel(oper.keys) | |
+ case opWantsSent: | |
+ // Wants were sent to a peer | |
+ s.sw.WantsSent(oper.keys) | |
+ case opBroadcast: | |
+ // Broadcast want-haves to all peers | |
+ s.broadcast(ctx, oper.keys) | |
+ default: | |
+ panic("unhandled operation") | |
+ } | |
+ case <-s.idleTick.C: | |
+ // The session hasn't received blocks for a while, broadcast | |
+ s.broadcast(ctx, nil) | |
+ case <-s.periodicSearchTimer.C: | |
+ // Periodically search for a random live want | |
+ s.handlePeriodicSearch(ctx) | |
+ case baseTickDelay := <-s.tickDelayReqs: | |
+ // Set the base tick delay | |
+ s.baseTickDelay = baseTickDelay | |
+ case <-ctx.Done(): | |
+ // Shutdown | |
+ s.handleShutdown() | |
+ return | |
+ } | |
+ } | |
+} | |
+ | |
+// Called when the session hasn't received any blocks for some time, or when | |
+// all peers in the session have sent DONT_HAVE for a particular set of CIDs. | |
+// Send want-haves to all connected peers, and search for new peers with the CID. | |
+func (s *Session) broadcast(ctx context.Context, wants []cid.Cid) { | |
+ // If this broadcast is because of an idle timeout (we haven't received | |
+ // any blocks for a while) then broadcast all pending wants | |
+ if wants == nil { | |
+ wants = s.sw.PrepareBroadcast() | |
+ } | |
+ | |
+ // Broadcast a want-have for the live wants to everyone we're connected to | |
+ s.broadcastWantHaves(ctx, wants) | |
+ | |
+ // do not find providers on consecutive ticks | |
+ // -- just rely on periodic search widening | |
+ if len(wants) > 0 && (s.consecutiveTicks == 0) { | |
+ // Search for providers who have the first want in the list. | |
+ // Typically if the provider has the first block they will have | |
+ // the rest of the blocks also. | |
+ log.Debugw("FindMorePeers", "session", s.id, "cid", wants[0], "pending", len(wants)) | |
+ s.findMorePeers(ctx, wants[0]) | |
+ } | |
+ s.resetIdleTick() | |
+ | |
+ // If we have live wants record a consecutive tick | |
+ if s.sw.HasLiveWants() { | |
+ s.consecutiveTicks++ | |
+ } | |
+} | |
+ | |
+// handlePeriodicSearch is called periodically to search for providers of a | |
+// randomly chosen CID in the session. | |
+func (s *Session) handlePeriodicSearch(ctx context.Context) { | |
+ randomWant := s.sw.RandomLiveWant() | |
+ if !randomWant.Defined() { | |
+ return | |
+ } | |
+ | |
+ // TODO: come up with a better strategy for determining when to search | |
+ // for new providers for blocks. | |
+ s.findMorePeers(ctx, randomWant) | |
+ | |
+ s.broadcastWantHaves(ctx, []cid.Cid{randomWant}) | |
+ | |
+ s.periodicSearchTimer.Reset(s.periodicSearchDelay.NextWaitTime()) | |
+} | |
+ | |
+// findMorePeers attempts to find more peers for a session by searching for | |
+// providers for the given Cid | |
+func (s *Session) findMorePeers(ctx context.Context, c cid.Cid) { | |
+ go func(k cid.Cid) { | |
+ for p := range s.providerFinder.FindProvidersAsync(ctx, k) { | |
+ // When a provider indicates that it has a cid, it's equivalent to | |
+ // the providing peer sending a HAVE | |
+ s.sws.Update(p, nil, []cid.Cid{c}, nil) | |
+ } | |
+ }(c) | |
+} | |
+ | |
+// handleShutdown is called when the session shuts down | |
+func (s *Session) handleShutdown() { | |
+ // Stop the idle timer | |
+ s.idleTick.Stop() | |
+ // Shut down the session peer manager | |
+ s.sprm.Shutdown() | |
+ // Shut down the sessionWantSender (blocks until sessionWantSender stops | |
+ // sending) | |
+ s.sws.Shutdown() | |
+	// Signal to the SessionManager that the session has been shut down | |
+ // and can be cleaned up | |
+ s.sm.RemoveSession(s.id) | |
+} | |
+ | |
+// handleReceive is called when the session receives blocks from a peer | |
+func (s *Session) handleReceive(ks []cid.Cid) { | |
+ // Record which blocks have been received and figure out the total latency | |
+ // for fetching the blocks | |
+ wanted, totalLatency := s.sw.BlocksReceived(ks) | |
+ if len(wanted) == 0 { | |
+ return | |
+ } | |
+ | |
+ // Record latency | |
+ s.latencyTrkr.receiveUpdate(len(wanted), totalLatency) | |
+ | |
+ // Inform the SessionInterestManager that this session is no longer | |
+ // expecting to receive the wanted keys | |
+ s.sim.RemoveSessionWants(s.id, wanted) | |
+ | |
+ s.idleTick.Stop() | |
+ | |
+ // We've received new wanted blocks, so reset the number of ticks | |
+ // that have occurred since the last new block | |
+ s.consecutiveTicks = 0 | |
+ | |
+ s.resetIdleTick() | |
+} | |
+ | |
+// wantBlocks is called when blocks are requested by the client | |
+func (s *Session) wantBlocks(ctx context.Context, newks []cid.Cid) { | |
+ if len(newks) > 0 { | |
+ // Inform the SessionInterestManager that this session is interested in the keys | |
+ s.sim.RecordSessionInterest(s.id, newks) | |
+		// Tell the sessionWants tracker that the wants have been requested | |
+ s.sw.BlocksRequested(newks) | |
+ // Tell the sessionWantSender that the blocks have been requested | |
+ s.sws.Add(newks) | |
+ } | |
+ | |
+ // If we have discovered peers already, the sessionWantSender will | |
+ // send wants to them | |
+ if s.sprm.PeersDiscovered() { | |
+ return | |
+ } | |
+ | |
+ // No peers discovered yet, broadcast some want-haves | |
+ ks := s.sw.GetNextWants() | |
+ if len(ks) > 0 { | |
+ log.Infow("No peers - broadcasting", "session", s.id, "want-count", len(ks)) | |
+ s.broadcastWantHaves(ctx, ks) | |
+ } | |
+} | |
+ | |
+// Send want-haves to all connected peers | |
+func (s *Session) broadcastWantHaves(ctx context.Context, wants []cid.Cid) { | |
+ log.Debugw("broadcastWantHaves", "session", s.id, "cids", wants) | |
+ s.pm.BroadcastWantHaves(ctx, wants) | |
+} | |
+ | |
+// The session will broadcast if it has outstanding wants and doesn't receive | |
+// any blocks for some time. | |
+// The length of time is calculated | |
+// - initially | |
+// as a fixed delay | |
+// - once some blocks are received | |
+// from a base delay and average latency, with a backoff | |
+func (s *Session) resetIdleTick() { | |
+ var tickDelay time.Duration | |
+ if !s.latencyTrkr.hasLatency() { | |
+ tickDelay = s.initialSearchDelay | |
+ } else { | |
+ avLat := s.latencyTrkr.averageLatency() | |
+ tickDelay = s.baseTickDelay + (3 * avLat) | |
+ } | |
+ tickDelay = tickDelay * time.Duration(1+s.consecutiveTicks) | |
+ s.idleTick.Reset(tickDelay) | |
+} | |
+ | |
+// latencyTracker keeps track of the average latency between sending a want | |
+// and receiving the corresponding block | |
+type latencyTracker struct { | |
+ totalLatency time.Duration | |
+ count int | |
+} | |
+ | |
+func (lt *latencyTracker) hasLatency() bool { | |
+ return lt.totalLatency > 0 && lt.count > 0 | |
+} | |
+ | |
+func (lt *latencyTracker) averageLatency() time.Duration { | |
+ return lt.totalLatency / time.Duration(lt.count) | |
+} | |
+ | |
+func (lt *latencyTracker) receiveUpdate(count int, totalLatency time.Duration) { | |
+ lt.totalLatency += totalLatency | |
+ lt.count += count | |
+} | |
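
Not part of the patch: the idle-tick backoff implemented by resetIdleTick and latencyTracker above reduces to a small arithmetic rule. The standalone Go sketch below mirrors that rule with purely illustrative numbers; the function name idleTickDelay and all of the values are assumptions made for the example, not code from this change.

package main

import (
	"fmt"
	"time"
)

// idleTickDelay mirrors the arithmetic in (*Session).resetIdleTick: before
// any latency samples exist the initial search delay is used; afterwards the
// delay is the base tick delay plus three times the average latency, scaled
// up linearly with the number of consecutive idle ticks.
func idleTickDelay(initialSearchDelay, baseTickDelay, avgLatency time.Duration, hasLatency bool, consecutiveTicks int) time.Duration {
	var d time.Duration
	if !hasLatency {
		d = initialSearchDelay
	} else {
		d = baseTickDelay + 3*avgLatency
	}
	return d * time.Duration(1+consecutiveTicks)
}

func main() {
	// Illustrative values only: 1s initial delay, 500ms base delay, 200ms
	// average latency, two consecutive ticks without receiving a block.
	fmt.Println(idleTickDelay(time.Second, 500*time.Millisecond, 200*time.Millisecond, true, 2))
	// Prints: 3.3s
}
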
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/sessionwantsender.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/sessionwantsender.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/sessionwantsender.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/sessionwantsender.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,766 @@ | |
+package session | |
+ | |
+import ( | |
+ "context" | |
+ | |
+ bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager" | |
+ | |
+ cid "github.com/ipfs/go-cid" | |
+ peer "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+const ( | |
+ // Maximum number of changes to accept before blocking | |
+ changesBufferSize = 128 | |
+ // If the session receives this many DONT_HAVEs in a row from a peer, | |
+ // it prunes the peer from the session | |
+ peerDontHaveLimit = 16 | |
+) | |
+ | |
+// BlockPresence indicates whether a peer has a block. | |
+// Note that the order is important: we decide which peer to send a want to | |
+// based on whether the peer has the block, e.g. we're more likely to send | |
+// a want to a peer that has the block than to a peer that doesn't have it, | |
+// so BPHave > BPDontHave. | |
+type BlockPresence int | |
+ | |
+const ( | |
+ BPDontHave BlockPresence = iota | |
+ BPUnknown | |
+ BPHave | |
+) | |
+ | |
+// SessionWantsCanceller provides a method to cancel wants | |
+type SessionWantsCanceller interface { | |
+ // Cancel wants for this session | |
+ CancelSessionWants(sid uint64, wants []cid.Cid) | |
+} | |
+ | |
+// update encapsulates a message received by the session | |
+type update struct { | |
+ // Which peer sent the update | |
+ from peer.ID | |
+ // cids of blocks received | |
+ ks []cid.Cid | |
+ // HAVE message | |
+ haves []cid.Cid | |
+ // DONT_HAVE message | |
+ dontHaves []cid.Cid | |
+} | |
+ | |
+// peerAvailability indicates a peer's connection state | |
+type peerAvailability struct { | |
+ target peer.ID | |
+ available bool | |
+} | |
+ | |
+// change can be new wants, a new message received by the session, | |
+// or a change in the connect status of a peer | |
+type change struct { | |
+ // new wants requested | |
+ add []cid.Cid | |
+ // wants cancelled | |
+ cancel []cid.Cid | |
+ // new message received by session (blocks / HAVEs / DONT_HAVEs) | |
+ update update | |
+ // peer has connected / disconnected | |
+ availability peerAvailability | |
+} | |
+ | |
+type onSendFn func(to peer.ID, wantBlocks []cid.Cid, wantHaves []cid.Cid) | |
+type onPeersExhaustedFn func([]cid.Cid) | |
+ | |
+// sessionWantSender is responsible for sending want-have and want-block to | |
+// peers. For each want, it sends a single optimistic want-block request to | |
+// one peer and want-have requests to all other peers in the session. | |
+// To choose the best peer for the optimistic want-block it maintains a list | |
+// of how peers have responded to each want (HAVE / DONT_HAVE / Unknown) and | |
+// consults the peer response tracker (records which peers sent us blocks). | |
+type sessionWantSender struct { | |
+ // The context is used when sending wants | |
+ ctx context.Context | |
+ // Called to shutdown the sessionWantSender | |
+ shutdown func() | |
+ // The sessionWantSender uses the closed channel to signal when it's | |
+ // finished shutting down | |
+ closed chan struct{} | |
+ // The session ID | |
+ sessionID uint64 | |
+ // A channel that collects incoming changes (events) | |
+ changes chan change | |
+ // Information about each want indexed by CID | |
+ wants map[cid.Cid]*wantInfo | |
+ // Keeps track of how many consecutive DONT_HAVEs a peer has sent | |
+ peerConsecutiveDontHaves map[peer.ID]int | |
+	// Tracks which peers we have sent want-blocks to | |
+ swbt *sentWantBlocksTracker | |
+ // Tracks the number of blocks each peer sent us | |
+ peerRspTrkr *peerResponseTracker | |
+ // Sends wants to peers | |
+ pm PeerManager | |
+ // Keeps track of peers in the session | |
+ spm SessionPeerManager | |
+ // Cancels wants | |
+ canceller SessionWantsCanceller | |
+ // Keeps track of which peer has / doesn't have a block | |
+ bpm *bsbpm.BlockPresenceManager | |
+ // Called when wants are sent | |
+ onSend onSendFn | |
+ // Called when all peers explicitly don't have a block | |
+ onPeersExhausted onPeersExhaustedFn | |
+} | |
+ | |
+func newSessionWantSender(sid uint64, pm PeerManager, spm SessionPeerManager, canceller SessionWantsCanceller, | |
+ bpm *bsbpm.BlockPresenceManager, onSend onSendFn, onPeersExhausted onPeersExhaustedFn) sessionWantSender { | |
+ | |
+ ctx, cancel := context.WithCancel(context.Background()) | |
+ sws := sessionWantSender{ | |
+ ctx: ctx, | |
+ shutdown: cancel, | |
+ closed: make(chan struct{}), | |
+ sessionID: sid, | |
+ changes: make(chan change, changesBufferSize), | |
+ wants: make(map[cid.Cid]*wantInfo), | |
+ peerConsecutiveDontHaves: make(map[peer.ID]int), | |
+ swbt: newSentWantBlocksTracker(), | |
+ peerRspTrkr: newPeerResponseTracker(), | |
+ | |
+ pm: pm, | |
+ spm: spm, | |
+ canceller: canceller, | |
+ bpm: bpm, | |
+ onSend: onSend, | |
+ onPeersExhausted: onPeersExhausted, | |
+ } | |
+ | |
+ return sws | |
+} | |
+ | |
+func (sws *sessionWantSender) ID() uint64 { | |
+ return sws.sessionID | |
+} | |
+ | |
+// Add is called when new wants are added to the session | |
+func (sws *sessionWantSender) Add(ks []cid.Cid) { | |
+ if len(ks) == 0 { | |
+ return | |
+ } | |
+ sws.addChange(change{add: ks}) | |
+} | |
+ | |
+// Cancel is called when a request is cancelled | |
+func (sws *sessionWantSender) Cancel(ks []cid.Cid) { | |
+ if len(ks) == 0 { | |
+ return | |
+ } | |
+ sws.addChange(change{cancel: ks}) | |
+} | |
+ | |
+// Update is called when the session receives a message with incoming blocks | |
+// or HAVE / DONT_HAVE | |
+func (sws *sessionWantSender) Update(from peer.ID, ks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { | |
+ hasUpdate := len(ks) > 0 || len(haves) > 0 || len(dontHaves) > 0 | |
+ if !hasUpdate { | |
+ return | |
+ } | |
+ | |
+ sws.addChange(change{ | |
+ update: update{from, ks, haves, dontHaves}, | |
+ }) | |
+} | |
+ | |
+// SignalAvailability is called by the PeerManager to signal that a peer has | |
+// connected / disconnected | |
+func (sws *sessionWantSender) SignalAvailability(p peer.ID, isAvailable bool) { | |
+ availability := peerAvailability{p, isAvailable} | |
+ // Add the change in a non-blocking manner to avoid the possibility of a | |
+ // deadlock | |
+ sws.addChangeNonBlocking(change{availability: availability}) | |
+} | |
+ | |
+// Run is the main loop for processing incoming changes | |
+func (sws *sessionWantSender) Run() { | |
+ for { | |
+ select { | |
+ case ch := <-sws.changes: | |
+ sws.onChange([]change{ch}) | |
+ case <-sws.ctx.Done(): | |
+ // Unregister the session with the PeerManager | |
+ sws.pm.UnregisterSession(sws.sessionID) | |
+ | |
+ // Close the 'closed' channel to signal to Shutdown() that the run | |
+ // loop has exited | |
+ close(sws.closed) | |
+ return | |
+ } | |
+ } | |
+} | |
+ | |
+// Shutdown the sessionWantSender | |
+func (sws *sessionWantSender) Shutdown() { | |
+ // Signal to the run loop to stop processing | |
+ sws.shutdown() | |
+ // Wait for run loop to complete | |
+ <-sws.closed | |
+} | |
+ | |
+// addChange adds a new change to the queue | |
+func (sws *sessionWantSender) addChange(c change) { | |
+ select { | |
+ case sws.changes <- c: | |
+ case <-sws.ctx.Done(): | |
+ } | |
+} | |
+ | |
+// addChangeNonBlocking adds a new change to the queue, falling back to a | |
+// go-routine if the channel send would block, so as to avoid potential deadlocks | |
+func (sws *sessionWantSender) addChangeNonBlocking(c change) { | |
+ select { | |
+ case sws.changes <- c: | |
+ default: | |
+ // changes channel is full, so add change in a go routine instead | |
+ go func() { | |
+ select { | |
+ case sws.changes <- c: | |
+ case <-sws.ctx.Done(): | |
+ } | |
+ }() | |
+ } | |
+} | |
+ | |
+// collectChanges collects all the changes that have occurred since the last | |
+// invocation of onChange | |
+func (sws *sessionWantSender) collectChanges(changes []change) []change { | |
+ for len(changes) < changesBufferSize { | |
+ select { | |
+ case next := <-sws.changes: | |
+ changes = append(changes, next) | |
+ default: | |
+ return changes | |
+ } | |
+ } | |
+ return changes | |
+} | |
+ | |
+// onChange processes the next set of changes | |
+func (sws *sessionWantSender) onChange(changes []change) { | |
+ // Several changes may have been recorded since the last time we checked, | |
+ // so pop all outstanding changes from the channel | |
+ changes = sws.collectChanges(changes) | |
+ | |
+ // Apply each change | |
+ availability := make(map[peer.ID]bool, len(changes)) | |
+ cancels := make([]cid.Cid, 0) | |
+ var updates []update | |
+ for _, chng := range changes { | |
+ // Initialize info for new wants | |
+ for _, c := range chng.add { | |
+ sws.trackWant(c) | |
+ } | |
+ | |
+ // Remove cancelled wants | |
+ for _, c := range chng.cancel { | |
+ sws.untrackWant(c) | |
+ cancels = append(cancels, c) | |
+ } | |
+ | |
+ // Consolidate updates and changes to availability | |
+ if chng.update.from != "" { | |
+ // If the update includes blocks or haves, treat it as signaling that | |
+ // the peer is available | |
+ if len(chng.update.ks) > 0 || len(chng.update.haves) > 0 { | |
+ p := chng.update.from | |
+ availability[p] = true | |
+ | |
+ // Register with the PeerManager | |
+ sws.pm.RegisterSession(p, sws) | |
+ } | |
+ | |
+ updates = append(updates, chng.update) | |
+ } | |
+ if chng.availability.target != "" { | |
+ availability[chng.availability.target] = chng.availability.available | |
+ } | |
+ } | |
+ | |
+ // Update peer availability | |
+ newlyAvailable, newlyUnavailable := sws.processAvailability(availability) | |
+ | |
+ // Update wants | |
+ dontHaves := sws.processUpdates(updates) | |
+ | |
+ // Check if there are any wants for which all peers have indicated they | |
+	// don't have the block | |
+ sws.checkForExhaustedWants(dontHaves, newlyUnavailable) | |
+ | |
+ // If there are any cancels, send them | |
+ if len(cancels) > 0 { | |
+ sws.canceller.CancelSessionWants(sws.sessionID, cancels) | |
+ } | |
+ | |
+ // If there are some connected peers, send any pending wants | |
+ if sws.spm.HasPeers() { | |
+ sws.sendNextWants(newlyAvailable) | |
+ } | |
+} | |
+ | |
+// processAvailability updates the want queue with any changes in | |
+// peer availability | |
+// It returns the peers that have become | |
+// - newly available | |
+// - newly unavailable | |
+func (sws *sessionWantSender) processAvailability(availability map[peer.ID]bool) (avail []peer.ID, unavail []peer.ID) { | |
+ var newlyAvailable []peer.ID | |
+ var newlyUnavailable []peer.ID | |
+ for p, isNowAvailable := range availability { | |
+ stateChange := false | |
+ if isNowAvailable { | |
+ isNewPeer := sws.spm.AddPeer(p) | |
+ if isNewPeer { | |
+ stateChange = true | |
+ newlyAvailable = append(newlyAvailable, p) | |
+ } | |
+ } else { | |
+ wasAvailable := sws.spm.RemovePeer(p) | |
+ if wasAvailable { | |
+ stateChange = true | |
+ newlyUnavailable = append(newlyUnavailable, p) | |
+ } | |
+ } | |
+ | |
+ // If the state has changed | |
+ if stateChange { | |
+ sws.updateWantsPeerAvailability(p, isNowAvailable) | |
+ // Reset the count of consecutive DONT_HAVEs received from the | |
+ // peer | |
+ delete(sws.peerConsecutiveDontHaves, p) | |
+ } | |
+ } | |
+ | |
+ return newlyAvailable, newlyUnavailable | |
+} | |
+ | |
+// trackWant creates a new entry in the map of CID -> want info | |
+func (sws *sessionWantSender) trackWant(c cid.Cid) { | |
+ if _, ok := sws.wants[c]; ok { | |
+ return | |
+ } | |
+ | |
+ // Create the want info | |
+ wi := newWantInfo(sws.peerRspTrkr) | |
+ sws.wants[c] = wi | |
+ | |
+ // For each available peer, register any information we know about | |
+ // whether the peer has the block | |
+ for _, p := range sws.spm.Peers() { | |
+ sws.updateWantBlockPresence(c, p) | |
+ } | |
+} | |
+ | |
+// untrackWant removes an entry from the map of CID -> want info | |
+func (sws *sessionWantSender) untrackWant(c cid.Cid) { | |
+ delete(sws.wants, c) | |
+} | |
+ | |
+// processUpdates processes incoming blocks and HAVE / DONT_HAVEs. | |
+// It returns all DONT_HAVEs. | |
+func (sws *sessionWantSender) processUpdates(updates []update) []cid.Cid { | |
+	// Process received block keys | |
+ blkCids := cid.NewSet() | |
+ for _, upd := range updates { | |
+ for _, c := range upd.ks { | |
+ blkCids.Add(c) | |
+ | |
+ // Remove the want | |
+ removed := sws.removeWant(c) | |
+ if removed != nil { | |
+ // Inform the peer tracker that this peer was the first to send | |
+ // us the block | |
+ sws.peerRspTrkr.receivedBlockFrom(upd.from) | |
+ | |
+ // Protect the connection to this peer so that we can ensure | |
+ // that the connection doesn't get pruned by the connection | |
+ // manager | |
+ sws.spm.ProtectConnection(upd.from) | |
+ } | |
+ delete(sws.peerConsecutiveDontHaves, upd.from) | |
+ } | |
+ } | |
+ | |
+ // Process received DONT_HAVEs | |
+ dontHaves := cid.NewSet() | |
+ prunePeers := make(map[peer.ID]struct{}) | |
+ for _, upd := range updates { | |
+ for _, c := range upd.dontHaves { | |
+			// Track the number of consecutive DONT_HAVEs received from each peer | |
+ if sws.peerConsecutiveDontHaves[upd.from] == peerDontHaveLimit { | |
+ prunePeers[upd.from] = struct{}{} | |
+ } else { | |
+ sws.peerConsecutiveDontHaves[upd.from]++ | |
+ } | |
+ | |
+ // If we already received a block for the want, there's no need to | |
+ // update block presence etc | |
+ if blkCids.Has(c) { | |
+ continue | |
+ } | |
+ | |
+ dontHaves.Add(c) | |
+ | |
+ // Update the block presence for the peer | |
+ sws.updateWantBlockPresence(c, upd.from) | |
+ | |
+ // Check if the DONT_HAVE is in response to a want-block | |
+ // (could also be in response to want-have) | |
+ if sws.swbt.haveSentWantBlockTo(upd.from, c) { | |
+ // If we were waiting for a response from this peer, clear | |
+ // sentTo so that we can send the want to another peer | |
+ if sentTo, ok := sws.getWantSentTo(c); ok && sentTo == upd.from { | |
+ sws.setWantSentTo(c, "") | |
+ } | |
+ } | |
+ } | |
+ } | |
+ | |
+ // Process received HAVEs | |
+ for _, upd := range updates { | |
+ for _, c := range upd.haves { | |
+ // If we haven't already received a block for the want | |
+ if !blkCids.Has(c) { | |
+ // Update the block presence for the peer | |
+ sws.updateWantBlockPresence(c, upd.from) | |
+ } | |
+ | |
+ // Clear the consecutive DONT_HAVE count for the peer | |
+ delete(sws.peerConsecutiveDontHaves, upd.from) | |
+ delete(prunePeers, upd.from) | |
+ } | |
+ } | |
+ | |
+ // If any peers have sent us too many consecutive DONT_HAVEs, remove them | |
+ // from the session | |
+ for p := range prunePeers { | |
+ // Before removing the peer from the session, check if the peer | |
+ // sent us a HAVE for a block that we want | |
+ for c := range sws.wants { | |
+ if sws.bpm.PeerHasBlock(p, c) { | |
+ delete(prunePeers, p) | |
+ break | |
+ } | |
+ } | |
+ } | |
+ if len(prunePeers) > 0 { | |
+ go func() { | |
+ for p := range prunePeers { | |
+ // Peer doesn't have anything we want, so remove it | |
+ log.Infof("peer %s sent too many dont haves, removing from session %d", p, sws.ID()) | |
+ sws.SignalAvailability(p, false) | |
+ } | |
+ }() | |
+ } | |
+ | |
+ return dontHaves.Keys() | |
+} | |
+ | |
+// checkForExhaustedWants checks if there are any wants for which all peers | |
+// have sent a DONT_HAVE. We call these "exhausted" wants. | |
+func (sws *sessionWantSender) checkForExhaustedWants(dontHaves []cid.Cid, newlyUnavailable []peer.ID) { | |
+ // If there are no new DONT_HAVEs, and no peers became unavailable, then | |
+ // we don't need to check for exhausted wants | |
+ if len(dontHaves) == 0 && len(newlyUnavailable) == 0 { | |
+ return | |
+ } | |
+ | |
+ // We need to check each want for which we just received a DONT_HAVE | |
+ wants := dontHaves | |
+ | |
+ // If a peer just became unavailable, then we need to check all wants | |
+ // (because it may be the last peer who hadn't sent a DONT_HAVE for a CID) | |
+ if len(newlyUnavailable) > 0 { | |
+ // Collect all pending wants | |
+		wants = make([]cid.Cid, 0, len(sws.wants)) | |
+ for c := range sws.wants { | |
+ wants = append(wants, c) | |
+ } | |
+ | |
+ // If the last available peer in the session has become unavailable | |
+ // then we need to broadcast all pending wants | |
+ if !sws.spm.HasPeers() { | |
+ sws.processExhaustedWants(wants) | |
+ return | |
+ } | |
+ } | |
+ | |
+ // If all available peers for a cid sent a DONT_HAVE, signal to the session | |
+ // that we've exhausted available peers | |
+ if len(wants) > 0 { | |
+ exhausted := sws.bpm.AllPeersDoNotHaveBlock(sws.spm.Peers(), wants) | |
+ sws.processExhaustedWants(exhausted) | |
+ } | |
+} | |
+ | |
+// processExhaustedWants filters the list so that only those wants that haven't | |
+// already been marked as exhausted are passed to onPeersExhausted() | |
+func (sws *sessionWantSender) processExhaustedWants(exhausted []cid.Cid) { | |
+ newlyExhausted := sws.newlyExhausted(exhausted) | |
+ if len(newlyExhausted) > 0 { | |
+ sws.onPeersExhausted(newlyExhausted) | |
+ } | |
+} | |
+ | |
+// convenience structs for passing around want-blocks and want-haves for a peer | |
+type wantSets struct { | |
+ wantBlocks *cid.Set | |
+ wantHaves *cid.Set | |
+} | |
+ | |
+type allWants map[peer.ID]*wantSets | |
+ | |
+func (aw allWants) forPeer(p peer.ID) *wantSets { | |
+ if _, ok := aw[p]; !ok { | |
+ aw[p] = &wantSets{ | |
+ wantBlocks: cid.NewSet(), | |
+ wantHaves: cid.NewSet(), | |
+ } | |
+ } | |
+ return aw[p] | |
+} | |
+ | |
+// sendNextWants sends wants to peers according to the latest information | |
+// about which peers have / don't have blocks | |
+func (sws *sessionWantSender) sendNextWants(newlyAvailable []peer.ID) { | |
+ toSend := make(allWants) | |
+ | |
+ for c, wi := range sws.wants { | |
+ // Ensure we send want-haves to any newly available peers | |
+ for _, p := range newlyAvailable { | |
+ toSend.forPeer(p).wantHaves.Add(c) | |
+ } | |
+ | |
+		// We've already sent a want-block to a peer and haven't yet received | |
+		// a response | |
+ if wi.sentTo != "" { | |
+ continue | |
+ } | |
+ | |
+ // All the peers have indicated that they don't have the block | |
+ // corresponding to this want, so we must wait to discover more peers | |
+ if wi.bestPeer == "" { | |
+ // TODO: work this out in real time instead of using bestP? | |
+ continue | |
+ } | |
+ | |
+ // Record that we are sending a want-block for this want to the peer | |
+ sws.setWantSentTo(c, wi.bestPeer) | |
+ | |
+ // Send a want-block to the chosen peer | |
+ toSend.forPeer(wi.bestPeer).wantBlocks.Add(c) | |
+ | |
+ // Send a want-have to each other peer | |
+ for _, op := range sws.spm.Peers() { | |
+ if op != wi.bestPeer { | |
+ toSend.forPeer(op).wantHaves.Add(c) | |
+ } | |
+ } | |
+ } | |
+ | |
+ // Send any wants we've collected | |
+ sws.sendWants(toSend) | |
+} | |
+ | |
+// sendWants sends want-have and want-blocks to the appropriate peers | |
+func (sws *sessionWantSender) sendWants(sends allWants) { | |
+ // For each peer we're sending a request to | |
+ for p, snd := range sends { | |
+ // Piggyback some other want-haves onto the request to the peer | |
+ for _, c := range sws.getPiggybackWantHaves(p, snd.wantBlocks) { | |
+ snd.wantHaves.Add(c) | |
+ } | |
+ | |
+ // Send the wants to the peer. | |
+		// Note that the PeerManager ensures that we don't send duplicate | |
+ // want-haves / want-blocks to a peer, and that want-blocks take | |
+ // precedence over want-haves. | |
+ wblks := snd.wantBlocks.Keys() | |
+ whaves := snd.wantHaves.Keys() | |
+ sws.pm.SendWants(sws.ctx, p, wblks, whaves) | |
+ | |
+ // Inform the session that we've sent the wants | |
+ sws.onSend(p, wblks, whaves) | |
+ | |
+		// Record which peers we sent want-blocks to | |
+ sws.swbt.addSentWantBlocksTo(p, wblks) | |
+ } | |
+} | |
+ | |
+// getPiggybackWantHaves gets the want-haves that should be piggybacked onto | |
+// a request that we are making to send want-blocks to a peer | |
+func (sws *sessionWantSender) getPiggybackWantHaves(p peer.ID, wantBlocks *cid.Set) []cid.Cid { | |
+ var whs []cid.Cid | |
+ for c := range sws.wants { | |
+ // Don't send want-have if we're already sending a want-block | |
+ // (or have previously) | |
+ if !wantBlocks.Has(c) && !sws.swbt.haveSentWantBlockTo(p, c) { | |
+ whs = append(whs, c) | |
+ } | |
+ } | |
+ return whs | |
+} | |
+ | |
+// newlyExhausted filters the list of keys for wants that have not already | |
+// been marked as exhausted (all peers indicated they don't have the block) | |
+func (sws *sessionWantSender) newlyExhausted(ks []cid.Cid) []cid.Cid { | |
+ var res []cid.Cid | |
+ for _, c := range ks { | |
+ if wi, ok := sws.wants[c]; ok { | |
+ if !wi.exhausted { | |
+ res = append(res, c) | |
+ wi.exhausted = true | |
+ } | |
+ } | |
+ } | |
+ return res | |
+} | |
+ | |
+// removeWant is called when the corresponding block is received | |
+func (sws *sessionWantSender) removeWant(c cid.Cid) *wantInfo { | |
+ if wi, ok := sws.wants[c]; ok { | |
+ delete(sws.wants, c) | |
+ return wi | |
+ } | |
+ return nil | |
+} | |
+ | |
+// updateWantsPeerAvailability is called when the availability changes for a | |
+// peer. It updates all the wants accordingly. | |
+func (sws *sessionWantSender) updateWantsPeerAvailability(p peer.ID, isNowAvailable bool) { | |
+ for c, wi := range sws.wants { | |
+ if isNowAvailable { | |
+ sws.updateWantBlockPresence(c, p) | |
+ } else { | |
+ wi.removePeer(p) | |
+ } | |
+ } | |
+} | |
+ | |
+// updateWantBlockPresence is called when a HAVE / DONT_HAVE is received for the given | |
+// want / peer | |
+func (sws *sessionWantSender) updateWantBlockPresence(c cid.Cid, p peer.ID) { | |
+ wi, ok := sws.wants[c] | |
+ if !ok { | |
+ return | |
+ } | |
+ | |
+ // If the peer sent us a HAVE or DONT_HAVE for the cid, adjust the | |
+ // block presence for the peer / cid combination | |
+ if sws.bpm.PeerHasBlock(p, c) { | |
+ wi.setPeerBlockPresence(p, BPHave) | |
+ } else if sws.bpm.PeerDoesNotHaveBlock(p, c) { | |
+ wi.setPeerBlockPresence(p, BPDontHave) | |
+ } else { | |
+ wi.setPeerBlockPresence(p, BPUnknown) | |
+ } | |
+} | |
+ | |
+// Which peer was the want sent to | |
+func (sws *sessionWantSender) getWantSentTo(c cid.Cid) (peer.ID, bool) { | |
+ if wi, ok := sws.wants[c]; ok { | |
+ return wi.sentTo, true | |
+ } | |
+ return "", false | |
+} | |
+ | |
+// Record which peer the want was sent to | |
+func (sws *sessionWantSender) setWantSentTo(c cid.Cid, p peer.ID) { | |
+ if wi, ok := sws.wants[c]; ok { | |
+ wi.sentTo = p | |
+ } | |
+} | |
+ | |
+// wantInfo keeps track of the information for a want | |
+type wantInfo struct { | |
+ // Tracks HAVE / DONT_HAVE sent to us for the want by each peer | |
+ blockPresence map[peer.ID]BlockPresence | |
+ // The peer that we've sent a want-block to (cleared when we get a response) | |
+ sentTo peer.ID | |
+ // The "best" peer to send the want to next | |
+ bestPeer peer.ID | |
+ // Keeps track of how many hits / misses each peer has sent us for wants | |
+ // in the session | |
+ peerRspTrkr *peerResponseTracker | |
+ // true if all known peers have sent a DONT_HAVE for this want | |
+ exhausted bool | |
+} | |
+ | |
+// func newWantInfo(prt *peerResponseTracker, c cid.Cid, startIndex int) *wantInfo { | |
+func newWantInfo(prt *peerResponseTracker) *wantInfo { | |
+ return &wantInfo{ | |
+ blockPresence: make(map[peer.ID]BlockPresence), | |
+ peerRspTrkr: prt, | |
+ exhausted: false, | |
+ } | |
+} | |
+ | |
+// setPeerBlockPresence sets the block presence for the given peer | |
+func (wi *wantInfo) setPeerBlockPresence(p peer.ID, bp BlockPresence) { | |
+ wi.blockPresence[p] = bp | |
+ wi.calculateBestPeer() | |
+ | |
+ // If a peer informed us that it has a block then make sure the want is no | |
+ // longer flagged as exhausted (exhausted means no peers have the block) | |
+ if bp == BPHave { | |
+ wi.exhausted = false | |
+ } | |
+} | |
+ | |
+// removePeer deletes the given peer from the want info | |
+func (wi *wantInfo) removePeer(p peer.ID) { | |
+ // If we were waiting to hear back from the peer that is being removed, | |
+ // clear the sentTo field so we no longer wait | |
+ if p == wi.sentTo { | |
+ wi.sentTo = "" | |
+ } | |
+ delete(wi.blockPresence, p) | |
+ wi.calculateBestPeer() | |
+} | |
+ | |
+// calculateBestPeer finds the best peer to send the want to next | |
+func (wi *wantInfo) calculateBestPeer() { | |
+ // Recalculate the best peer | |
+ bestBP := BPDontHave | |
+ bestPeer := peer.ID("") | |
+ | |
+ // Find the peer with the best block presence, recording how many peers | |
+ // share the block presence | |
+ countWithBest := 0 | |
+ for p, bp := range wi.blockPresence { | |
+ if bp > bestBP { | |
+ bestBP = bp | |
+ bestPeer = p | |
+ countWithBest = 1 | |
+ } else if bp == bestBP { | |
+ countWithBest++ | |
+ } | |
+ } | |
+ wi.bestPeer = bestPeer | |
+ | |
+ // If no peer has a block presence better than DONT_HAVE, bail out | |
+ if bestPeer == "" { | |
+ return | |
+ } | |
+ | |
+ // If there was only one peer with the best block presence, we're done | |
+ if countWithBest <= 1 { | |
+ return | |
+ } | |
+ | |
+ // There were multiple peers with the best block presence, so choose one of | |
+ // them to be the best | |
+ var peersWithBest []peer.ID | |
+ for p, bp := range wi.blockPresence { | |
+ if bp == bestBP { | |
+ peersWithBest = append(peersWithBest, p) | |
+ } | |
+ } | |
+ wi.bestPeer = wi.peerRspTrkr.choose(peersWithBest) | |
+} | |
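
Not part of the patch: a minimal sketch of the peer-ranking idea behind wantInfo.calculateBestPeer above. The string peer IDs and the bestPeers helper are assumptions made for illustration, and the sketch stops at returning the tied candidates rather than consulting a peerResponseTracker as the real code does.

package main

import "fmt"

// BlockPresence mirrors the ordering used by sessionWantSender: a peer known
// to have the block ranks above one whose state is unknown, which ranks
// above one that sent DONT_HAVE.
type BlockPresence int

const (
	BPDontHave BlockPresence = iota
	BPUnknown
	BPHave
)

// bestPeers returns every peer tied for the highest block presence, leaving
// the final tie-break to the caller.
func bestPeers(presence map[string]BlockPresence) []string {
	best := BPDontHave
	var tied []string
	for p, bp := range presence {
		switch {
		case bp > best:
			best = bp
			tied = []string{p}
		case bp == best && best > BPDontHave:
			tied = append(tied, p)
		}
	}
	return tied
}

func main() {
	fmt.Println(bestPeers(map[string]BlockPresence{
		"peerA": BPUnknown,
		"peerB": BPHave,
		"peerC": BPDontHave,
	}))
	// Prints: [peerB]
}
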
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/sessionwants.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/sessionwants.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/sessionwants.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/session/sessionwants.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,193 @@ | |
+package session | |
+ | |
+import ( | |
+ "fmt" | |
+ "math/rand" | |
+ "time" | |
+ | |
+ cid "github.com/ipfs/go-cid" | |
+) | |
+ | |
+// liveWantsOrder and liveWants will get out of sync as blocks are received. | |
+// This constant is the maximum amount to allow them to be out of sync before | |
+// cleaning up the ordering array. | |
+const liveWantsOrderGCLimit = 32 | |
+ | |
+// sessionWants keeps track of which cids are waiting to be sent out, and which | |
+// wants are "live" - i.e. we've sent a request but haven't received a block yet | |
+type sessionWants struct { | |
+ // The wants that have not yet been sent out | |
+ toFetch *cidQueue | |
+ // Wants that have been sent but have not received a response | |
+ liveWants map[cid.Cid]time.Time | |
+ // The order in which wants were requested | |
+ liveWantsOrder []cid.Cid | |
+ // The maximum number of want-haves to send in a broadcast | |
+ broadcastLimit int | |
+} | |
+ | |
+func newSessionWants(broadcastLimit int) sessionWants { | |
+ return sessionWants{ | |
+ toFetch: newCidQueue(), | |
+ liveWants: make(map[cid.Cid]time.Time), | |
+ broadcastLimit: broadcastLimit, | |
+ } | |
+} | |
+ | |
+func (sw *sessionWants) String() string { | |
+ return fmt.Sprintf("%d pending / %d live", sw.toFetch.Len(), len(sw.liveWants)) | |
+} | |
+ | |
+// BlocksRequested is called when the client makes a request for blocks | |
+func (sw *sessionWants) BlocksRequested(newWants []cid.Cid) { | |
+ for _, k := range newWants { | |
+ sw.toFetch.Push(k) | |
+ } | |
+} | |
+ | |
+// GetNextWants is called when the session has not yet discovered peers with | |
+// the blocks that it wants. It moves as many CIDs from the fetch queue to | |
+// the live wants queue as possible (given the broadcast limit). | |
+// Returns the newly live wants. | |
+func (sw *sessionWants) GetNextWants() []cid.Cid { | |
+ now := time.Now() | |
+ | |
+ // Move CIDs from fetch queue to the live wants queue (up to the broadcast | |
+ // limit) | |
+ currentLiveCount := len(sw.liveWants) | |
+ toAdd := sw.broadcastLimit - currentLiveCount | |
+ | |
+ var live []cid.Cid | |
+ for ; toAdd > 0 && sw.toFetch.Len() > 0; toAdd-- { | |
+ c := sw.toFetch.Pop() | |
+ live = append(live, c) | |
+ sw.liveWantsOrder = append(sw.liveWantsOrder, c) | |
+ sw.liveWants[c] = now | |
+ } | |
+ | |
+ return live | |
+} | |
+ | |
+// WantsSent is called when wants are sent to a peer | |
+func (sw *sessionWants) WantsSent(ks []cid.Cid) { | |
+ now := time.Now() | |
+ for _, c := range ks { | |
+ if _, ok := sw.liveWants[c]; !ok && sw.toFetch.Has(c) { | |
+ sw.toFetch.Remove(c) | |
+ sw.liveWantsOrder = append(sw.liveWantsOrder, c) | |
+ sw.liveWants[c] = now | |
+ } | |
+ } | |
+} | |
+ | |
+// BlocksReceived removes received block CIDs from the live wants list and | |
+// measures latency. It returns the CIDs of blocks that were actually | |
+// wanted (as opposed to duplicates) and the total latency for all incoming blocks. | |
+func (sw *sessionWants) BlocksReceived(ks []cid.Cid) ([]cid.Cid, time.Duration) { | |
+ wanted := make([]cid.Cid, 0, len(ks)) | |
+ totalLatency := time.Duration(0) | |
+ if len(ks) == 0 { | |
+ return wanted, totalLatency | |
+ } | |
+ | |
+ // Filter for blocks that were actually wanted (as opposed to duplicates) | |
+ now := time.Now() | |
+ for _, c := range ks { | |
+ if sw.isWanted(c) { | |
+ wanted = append(wanted, c) | |
+ | |
+ // Measure latency | |
+ sentAt, ok := sw.liveWants[c] | |
+ if ok && !sentAt.IsZero() { | |
+ totalLatency += now.Sub(sentAt) | |
+ } | |
+ | |
+ // Remove the CID from the live wants / toFetch queue | |
+ delete(sw.liveWants, c) | |
+ sw.toFetch.Remove(c) | |
+ } | |
+ } | |
+ | |
+ // If the live wants ordering array is a long way out of sync with the | |
+ // live wants map, clean up the ordering array | |
+ if len(sw.liveWantsOrder)-len(sw.liveWants) > liveWantsOrderGCLimit { | |
+ cleaned := sw.liveWantsOrder[:0] | |
+ for _, c := range sw.liveWantsOrder { | |
+ if _, ok := sw.liveWants[c]; ok { | |
+ cleaned = append(cleaned, c) | |
+ } | |
+ } | |
+ sw.liveWantsOrder = cleaned | |
+ } | |
+ | |
+ return wanted, totalLatency | |
+} | |
+ | |
+// PrepareBroadcast saves the current time for each live want and returns the | |
+// live want CIDs up to the broadcast limit. | |
+func (sw *sessionWants) PrepareBroadcast() []cid.Cid { | |
+ now := time.Now() | |
+ live := make([]cid.Cid, 0, len(sw.liveWants)) | |
+ for _, c := range sw.liveWantsOrder { | |
+ if _, ok := sw.liveWants[c]; ok { | |
+ // No response was received for the want, so reset the sent time | |
+ // to now as we're about to broadcast | |
+ sw.liveWants[c] = now | |
+ | |
+ live = append(live, c) | |
+ if len(live) == sw.broadcastLimit { | |
+ break | |
+ } | |
+ } | |
+ } | |
+ | |
+ return live | |
+} | |
+ | |
+// CancelPending removes the given CIDs from the fetch queue. | |
+func (sw *sessionWants) CancelPending(keys []cid.Cid) { | |
+ for _, k := range keys { | |
+ sw.toFetch.Remove(k) | |
+ } | |
+} | |
+ | |
+// LiveWants returns a list of live wants | |
+func (sw *sessionWants) LiveWants() []cid.Cid { | |
+ live := make([]cid.Cid, 0, len(sw.liveWants)) | |
+ for c := range sw.liveWants { | |
+ live = append(live, c) | |
+ } | |
+ | |
+ return live | |
+} | |
+ | |
+// RandomLiveWant returns a randomly selected live want | |
+func (sw *sessionWants) RandomLiveWant() cid.Cid { | |
+ if len(sw.liveWants) == 0 { | |
+ return cid.Cid{} | |
+ } | |
+ | |
+ // picking a random live want | |
+ i := rand.Intn(len(sw.liveWants)) | |
+ for k := range sw.liveWants { | |
+ if i == 0 { | |
+ return k | |
+ } | |
+ i-- | |
+ } | |
+ return cid.Cid{} | |
+} | |
+ | |
+// HasLiveWants indicates whether there are any live wants | |
+func (sw *sessionWants) HasLiveWants() bool { | |
+ return len(sw.liveWants) > 0 | |
+} | |
+ | |
+// isWanted indicates whether the want is in either the fetch or live queues | |
+func (sw *sessionWants) isWanted(c cid.Cid) bool { | |
+ _, ok := sw.liveWants[c] | |
+ if !ok { | |
+ ok = sw.toFetch.Has(c) | |
+ } | |
+ return ok | |
+} | |
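
Not part of the patch: RandomLiveWant above uses the common Go idiom of drawing a random index and counting it down while ranging over a map. A self-contained sketch of that idiom, with hypothetical string keys standing in for CIDs:

package main

import (
	"fmt"
	"math/rand"
)

// randomKey picks a uniformly random entry from a map by drawing an index
// and decrementing it while iterating, returning false if the map is empty.
func randomKey(m map[string]struct{}) (string, bool) {
	if len(m) == 0 {
		return "", false
	}
	i := rand.Intn(len(m))
	for k := range m {
		if i == 0 {
			return k, true
		}
		i--
	}
	return "", false // not reached when the map is unchanged during iteration
}

func main() {
	live := map[string]struct{}{"cid-1": {}, "cid-2": {}, "cid-3": {}}
	if k, ok := randomKey(live); ok {
		fmt.Println("broadcasting periodic search for", k)
	}
}
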
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager/sessioninterestmanager.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,201 @@ | |
+package sessioninterestmanager | |
+ | |
+import ( | |
+ "sync" | |
+ | |
+ blocks "github.com/ipfs/go-libipfs/blocks" | |
+ | |
+ cid "github.com/ipfs/go-cid" | |
+) | |
+ | |
+// SessionInterestManager records the CIDs that each session is interested in. | |
+type SessionInterestManager struct { | |
+ lk sync.RWMutex | |
+ wants map[cid.Cid]map[uint64]bool | |
+} | |
+ | |
+// New initializes a new SessionInterestManager. | |
+func New() *SessionInterestManager { | |
+ return &SessionInterestManager{ | |
+ // Map of cids -> sessions -> bool | |
+ // | |
+ // The boolean indicates whether the session still wants the block | |
+ // or is just interested in receiving messages about it. | |
+ // | |
+ // Note that once the block is received the session no longer wants | |
+ // the block, but still wants to receive messages from peers who have | |
+ // the block as they may have other blocks the session is interested in. | |
+ wants: make(map[cid.Cid]map[uint64]bool), | |
+ } | |
+} | |
+ | |
+// When the client asks the session for blocks, the session calls | |
+// RecordSessionInterest() with those cids. | |
+func (sim *SessionInterestManager) RecordSessionInterest(ses uint64, ks []cid.Cid) { | |
+ sim.lk.Lock() | |
+ defer sim.lk.Unlock() | |
+ | |
+ // For each key | |
+ for _, c := range ks { | |
+ // Record that the session wants the blocks | |
+ if want, ok := sim.wants[c]; ok { | |
+ want[ses] = true | |
+ } else { | |
+ sim.wants[c] = map[uint64]bool{ses: true} | |
+ } | |
+ } | |
+} | |
+ | |
+// When the session shuts down it calls RemoveSession(). | |
+// Returns the keys that no session is interested in any more. | |
+func (sim *SessionInterestManager) RemoveSession(ses uint64) []cid.Cid { | |
+ sim.lk.Lock() | |
+ defer sim.lk.Unlock() | |
+ | |
+ // The keys that no session is interested in | |
+ deletedKs := make([]cid.Cid, 0) | |
+ | |
+ // For each known key | |
+ for c := range sim.wants { | |
+ // Remove the session from the list of sessions that want the key | |
+ delete(sim.wants[c], ses) | |
+ | |
+ // If there are no more sessions that want the key | |
+ if len(sim.wants[c]) == 0 { | |
+ // Clean up the list memory | |
+ delete(sim.wants, c) | |
+ // Add the key to the list of keys that no session is interested in | |
+ deletedKs = append(deletedKs, c) | |
+ } | |
+ } | |
+ | |
+ return deletedKs | |
+} | |
+ | |
+// When the session receives blocks, it calls RemoveSessionWants(). | |
+func (sim *SessionInterestManager) RemoveSessionWants(ses uint64, ks []cid.Cid) { | |
+ sim.lk.Lock() | |
+ defer sim.lk.Unlock() | |
+ | |
+ // For each key | |
+ for _, c := range ks { | |
+ // If the session wanted the block | |
+ if wanted, ok := sim.wants[c][ses]; ok && wanted { | |
+ // Mark the block as unwanted | |
+ sim.wants[c][ses] = false | |
+ } | |
+ } | |
+} | |
+ | |
+// When a request is cancelled, the session calls RemoveSessionInterested(). | |
+// Returns the keys that no session is interested in any more. | |
+func (sim *SessionInterestManager) RemoveSessionInterested(ses uint64, ks []cid.Cid) []cid.Cid { | |
+ sim.lk.Lock() | |
+ defer sim.lk.Unlock() | |
+ | |
+ // The keys that no session is interested in | |
+ deletedKs := make([]cid.Cid, 0, len(ks)) | |
+ | |
+ // For each key | |
+ for _, c := range ks { | |
+ // If there is a list of sessions that want the key | |
+ if _, ok := sim.wants[c]; ok { | |
+ // Remove the session from the list of sessions that want the key | |
+ delete(sim.wants[c], ses) | |
+ | |
+ // If there are no more sessions that want the key | |
+ if len(sim.wants[c]) == 0 { | |
+ // Clean up the list memory | |
+ delete(sim.wants, c) | |
+ // Add the key to the list of keys that no session is interested in | |
+ deletedKs = append(deletedKs, c) | |
+ } | |
+ } | |
+ } | |
+ | |
+ return deletedKs | |
+} | |
+ | |
+// The session calls FilterSessionInterested() to filter the sets of keys for | |
+// those that the session is interested in | |
+func (sim *SessionInterestManager) FilterSessionInterested(ses uint64, ksets ...[]cid.Cid) [][]cid.Cid { | |
+ sim.lk.RLock() | |
+ defer sim.lk.RUnlock() | |
+ | |
+ // For each set of keys | |
+ kres := make([][]cid.Cid, len(ksets)) | |
+ for i, ks := range ksets { | |
+		// The subset of keys that this session is interested in | |
+ has := make([]cid.Cid, 0, len(ks)) | |
+ | |
+ // For each key in the list | |
+ for _, c := range ks { | |
+ // If there is a session that's interested, add the key to the set | |
+ if _, ok := sim.wants[c][ses]; ok { | |
+ has = append(has, c) | |
+ } | |
+ } | |
+ kres[i] = has | |
+ } | |
+ return kres | |
+} | |
+ | |
+// When bitswap receives blocks it calls SplitWantedUnwanted() to discard | |
+// unwanted blocks | |
+func (sim *SessionInterestManager) SplitWantedUnwanted(blks []blocks.Block) ([]blocks.Block, []blocks.Block) { | |
+ sim.lk.RLock() | |
+ defer sim.lk.RUnlock() | |
+ | |
+ // Get the wanted block keys as a set | |
+ wantedKs := cid.NewSet() | |
+ for _, b := range blks { | |
+ c := b.Cid() | |
+ // For each session that is interested in the key | |
+ for ses := range sim.wants[c] { | |
+ // If the session wants the key (rather than just being interested) | |
+ if wanted, ok := sim.wants[c][ses]; ok && wanted { | |
+ // Add the key to the set | |
+ wantedKs.Add(c) | |
+ } | |
+ } | |
+ } | |
+ | |
+ // Separate the blocks into wanted and unwanted | |
+ wantedBlks := make([]blocks.Block, 0, len(blks)) | |
+ notWantedBlks := make([]blocks.Block, 0) | |
+ for _, b := range blks { | |
+ if wantedKs.Has(b.Cid()) { | |
+ wantedBlks = append(wantedBlks, b) | |
+ } else { | |
+ notWantedBlks = append(notWantedBlks, b) | |
+ } | |
+ } | |
+ return wantedBlks, notWantedBlks | |
+} | |
+ | |
+// When the SessionManager receives a message it calls InterestedSessions() to | |
+// find out which sessions are interested in the message. | |
+func (sim *SessionInterestManager) InterestedSessions(blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) []uint64 { | |
+ sim.lk.RLock() | |
+ defer sim.lk.RUnlock() | |
+ | |
+ ks := make([]cid.Cid, 0, len(blks)+len(haves)+len(dontHaves)) | |
+ ks = append(ks, blks...) | |
+ ks = append(ks, haves...) | |
+ ks = append(ks, dontHaves...) | |
+ | |
+ // Create a set of sessions that are interested in the keys | |
+ sesSet := make(map[uint64]struct{}) | |
+ for _, c := range ks { | |
+ for s := range sim.wants[c] { | |
+ sesSet[s] = struct{}{} | |
+ } | |
+ } | |
+ | |
+ // Convert the set into a list | |
+ ses := make([]uint64, 0, len(sesSet)) | |
+ for s := range sesSet { | |
+ ses = append(ses, s) | |
+ } | |
+ return ses | |
+} | |
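
Not part of the patch: the core of the SessionInterestManager is the map[cid]map[session]bool set up in New() above, where the boolean separates "still wants the block" from "merely interested in messages about it". A toy sketch of that distinction, using strings in place of CIDs; interestMap, record and blockReceived are illustrative names, not part of the real API.

package main

import "fmt"

// interestMap is a stand-in for the wants map: the boolean marks whether the
// session still wants the block (true) or is only interested in messages
// about it (false).
type interestMap map[string]map[uint64]bool

// record notes that a session wants the given keys.
func (m interestMap) record(ses uint64, keys ...string) {
	for _, k := range keys {
		if m[k] == nil {
			m[k] = map[uint64]bool{}
		}
		m[k][ses] = true // wanted
	}
}

// blockReceived downgrades a want to mere interest once the block arrives.
func (m interestMap) blockReceived(ses uint64, k string) {
	if want, ok := m[k][ses]; ok && want {
		m[k][ses] = false // still interested, no longer wanted
	}
}

func main() {
	m := interestMap{}
	m.record(1, "blockA")
	m.blockReceived(1, "blockA")
	fmt.Println(m["blockA"][1]) // false: interested but no longer wanted
}
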
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/sessionmanager/sessionmanager.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/sessionmanager/sessionmanager.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/sessionmanager/sessionmanager.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/sessionmanager/sessionmanager.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,196 @@ | |
+package sessionmanager | |
+ | |
+import ( | |
+ "context" | |
+ "strconv" | |
+ "sync" | |
+ "time" | |
+ | |
+ cid "github.com/ipfs/go-cid" | |
+ delay "github.com/ipfs/go-ipfs-delay" | |
+ "go.opentelemetry.io/otel/attribute" | |
+ "go.opentelemetry.io/otel/trace" | |
+ | |
+ exchange "github.com/ipfs/go-ipfs-exchange-interface" | |
+ "github.com/ipfs/go-libipfs/bitswap/client/internal" | |
+ bsbpm "github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager" | |
+ notifications "github.com/ipfs/go-libipfs/bitswap/client/internal/notifications" | |
+ bssession "github.com/ipfs/go-libipfs/bitswap/client/internal/session" | |
+ bssim "github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager" | |
+ peer "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+// Session is a session that is managed by the session manager | |
+type Session interface { | |
+ exchange.Fetcher | |
+ ID() uint64 | |
+ ReceiveFrom(peer.ID, []cid.Cid, []cid.Cid, []cid.Cid) | |
+ Shutdown() | |
+} | |
+ | |
+// SessionFactory generates a new session for the SessionManager to track. | |
+type SessionFactory func( | |
+ ctx context.Context, | |
+ sm bssession.SessionManager, | |
+ id uint64, | |
+ sprm bssession.SessionPeerManager, | |
+ sim *bssim.SessionInterestManager, | |
+ pm bssession.PeerManager, | |
+ bpm *bsbpm.BlockPresenceManager, | |
+ notif notifications.PubSub, | |
+ provSearchDelay time.Duration, | |
+ rebroadcastDelay delay.D, | |
+ self peer.ID) Session | |
+ | |
+// PeerManagerFactory generates a new peer manager for a session. | |
+type PeerManagerFactory func(ctx context.Context, id uint64) bssession.SessionPeerManager | |
+ | |
+// SessionManager is responsible for creating, managing, and dispatching to | |
+// sessions. | |
+type SessionManager struct { | |
+ ctx context.Context | |
+ sessionFactory SessionFactory | |
+ sessionInterestManager *bssim.SessionInterestManager | |
+ peerManagerFactory PeerManagerFactory | |
+ blockPresenceManager *bsbpm.BlockPresenceManager | |
+ peerManager bssession.PeerManager | |
+ notif notifications.PubSub | |
+ | |
+ // Sessions | |
+ sessLk sync.RWMutex | |
+ sessions map[uint64]Session | |
+ | |
+ // Session Index | |
+ sessIDLk sync.Mutex | |
+ sessID uint64 | |
+ | |
+ self peer.ID | |
+} | |
+ | |
+// New creates a new SessionManager. | |
+func New(ctx context.Context, sessionFactory SessionFactory, sessionInterestManager *bssim.SessionInterestManager, peerManagerFactory PeerManagerFactory, | |
+ blockPresenceManager *bsbpm.BlockPresenceManager, peerManager bssession.PeerManager, notif notifications.PubSub, self peer.ID) *SessionManager { | |
+ | |
+ return &SessionManager{ | |
+ ctx: ctx, | |
+ sessionFactory: sessionFactory, | |
+ sessionInterestManager: sessionInterestManager, | |
+ peerManagerFactory: peerManagerFactory, | |
+ blockPresenceManager: blockPresenceManager, | |
+ peerManager: peerManager, | |
+ notif: notif, | |
+ sessions: make(map[uint64]Session), | |
+ self: self, | |
+ } | |
+} | |
+ | |
+// NewSession initializes a session with the given context, and adds it to | |
+// the session manager. | |
+func (sm *SessionManager) NewSession(ctx context.Context, | |
+ provSearchDelay time.Duration, | |
+ rebroadcastDelay delay.D) exchange.Fetcher { | |
+ id := sm.GetNextSessionID() | |
+ | |
+ ctx, span := internal.StartSpan(ctx, "SessionManager.NewSession", trace.WithAttributes(attribute.String("ID", strconv.FormatUint(id, 10)))) | |
+ defer span.End() | |
+ | |
+ pm := sm.peerManagerFactory(ctx, id) | |
+ session := sm.sessionFactory(ctx, sm, id, pm, sm.sessionInterestManager, sm.peerManager, sm.blockPresenceManager, sm.notif, provSearchDelay, rebroadcastDelay, sm.self) | |
+ | |
+ sm.sessLk.Lock() | |
+ if sm.sessions != nil { // check if SessionManager was shutdown | |
+ sm.sessions[id] = session | |
+ } | |
+ sm.sessLk.Unlock() | |
+ | |
+ return session | |
+} | |
+ | |
+func (sm *SessionManager) Shutdown() { | |
+ sm.sessLk.Lock() | |
+ | |
+ sessions := make([]Session, 0, len(sm.sessions)) | |
+ for _, ses := range sm.sessions { | |
+ sessions = append(sessions, ses) | |
+ } | |
+ | |
+ // Ensure that if Shutdown() is called twice we only shut down | |
+ // the sessions once | |
+ sm.sessions = nil | |
+ | |
+ sm.sessLk.Unlock() | |
+ | |
+ for _, ses := range sessions { | |
+ ses.Shutdown() | |
+ } | |
+} | |
+ | |
+func (sm *SessionManager) RemoveSession(sesid uint64) { | |
+ // Remove session from SessionInterestManager - returns the keys that no | |
+ // session is interested in anymore. | |
+ cancelKs := sm.sessionInterestManager.RemoveSession(sesid) | |
+ | |
+ // Cancel keys that no session is interested in anymore | |
+ sm.cancelWants(cancelKs) | |
+ | |
+ sm.sessLk.Lock() | |
+ defer sm.sessLk.Unlock() | |
+ | |
+ // Clean up session | |
+ if sm.sessions != nil { // check if SessionManager was shutdown | |
+ delete(sm.sessions, sesid) | |
+ } | |
+} | |
+ | |
+// GetNextSessionID returns the next sequential identifier for a session. | |
+func (sm *SessionManager) GetNextSessionID() uint64 { | |
+ sm.sessIDLk.Lock() | |
+ defer sm.sessIDLk.Unlock() | |
+ | |
+ sm.sessID++ | |
+ return sm.sessID | |
+} | |
+ | |
+// ReceiveFrom is called when a new message is received | |
+func (sm *SessionManager) ReceiveFrom(ctx context.Context, p peer.ID, blks []cid.Cid, haves []cid.Cid, dontHaves []cid.Cid) { | |
+ // Record block presence for HAVE / DONT_HAVE | |
+ sm.blockPresenceManager.ReceiveFrom(p, haves, dontHaves) | |
+ | |
+ // Notify each session that is interested in the blocks / HAVEs / DONT_HAVEs | |
+ for _, id := range sm.sessionInterestManager.InterestedSessions(blks, haves, dontHaves) { | |
+ sm.sessLk.RLock() | |
+ if sm.sessions == nil { // check if SessionManager was shutdown | |
+ sm.sessLk.RUnlock() | |
+ return | |
+ } | |
+ sess, ok := sm.sessions[id] | |
+ sm.sessLk.RUnlock() | |
+ | |
+ if ok { | |
+ sess.ReceiveFrom(p, blks, haves, dontHaves) | |
+ } | |
+ } | |
+ | |
+ // Send CANCEL to all peers with want-have / want-block | |
+ sm.peerManager.SendCancels(ctx, blks) | |
+} | |
+ | |
+// CancelSessionWants is called when a session cancels wants because a call to | |
+// GetBlocks() is cancelled | |
+func (sm *SessionManager) CancelSessionWants(sesid uint64, wants []cid.Cid) { | |
+ // Remove session's interest in the given blocks - returns the keys that no | |
+ // session is interested in anymore. | |
+ cancelKs := sm.sessionInterestManager.RemoveSessionInterested(sesid, wants) | |
+ sm.cancelWants(cancelKs) | |
+} | |
+ | |
+func (sm *SessionManager) cancelWants(wants []cid.Cid) { | |
+ // Free up block presence tracking for keys that no session is interested | |
+ // in anymore | |
+ sm.blockPresenceManager.RemoveKeys(wants) | |
+ | |
+ // Send CANCEL to all peers for blocks that no session is interested in | |
+ // anymore. | |
+ // Note: use bitswap context because session context may already be Done. | |
+ sm.peerManager.SendCancels(sm.ctx, wants) | |
+} | |
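
Note on the pattern above: Shutdown() nils out sm.sessions under the lock, and NewSession / RemoveSession / ReceiveFrom treat a nil map as "already shut down", so late registrations are silently dropped and a second Shutdown() has nothing left to do. A minimal, self-contained sketch of that shutdown-safe map idiom (the registry type and names below are illustrative, not part of this patch):

package main

import (
	"fmt"
	"sync"
)

// registry mimics SessionManager's sessions map: nil means "shut down",
// so writers must re-check under the lock before inserting.
type registry struct {
	lk       sync.Mutex
	sessions map[uint64]string
}

func newRegistry() *registry {
	return &registry{sessions: make(map[uint64]string)}
}

func (r *registry) add(id uint64, name string) bool {
	r.lk.Lock()
	defer r.lk.Unlock()
	if r.sessions == nil { // registered after shutdown: drop it
		return false
	}
	r.sessions[id] = name
	return true
}

func (r *registry) shutdown() []string {
	r.lk.Lock()
	live := make([]string, 0, len(r.sessions))
	for _, s := range r.sessions {
		live = append(live, s)
	}
	r.sessions = nil // a second shutdown() finds nothing to do
	r.lk.Unlock()
	return live // shut these down outside the lock, as the real code does
}

func main() {
	r := newRegistry()
	r.add(1, "session-1")
	fmt.Println(r.shutdown())          // [session-1]
	fmt.Println(r.add(2, "session-2")) // false
}
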
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/sessionpeermanager/sessionpeermanager.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,150 @@ | |
+package sessionpeermanager | |
+ | |
+import ( | |
+ "fmt" | |
+ "sync" | |
+ | |
+ logging "github.com/ipfs/go-log" | |
+ | |
+ peer "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+var log = logging.Logger("bs:sprmgr") | |
+ | |
+const ( | |
+ // Connection Manager tag value for session peers. Indicates to connection | |
+ // manager that it should keep the connection to the peer. | |
+ sessionPeerTagValue = 5 | |
+) | |
+ | |
+// PeerTagger is an interface for tagging peers with metadata | |
+type PeerTagger interface { | |
+ TagPeer(peer.ID, string, int) | |
+ UntagPeer(p peer.ID, tag string) | |
+ Protect(peer.ID, string) | |
+ Unprotect(peer.ID, string) bool | |
+} | |
+ | |
+// SessionPeerManager keeps track of peers for a session, and takes care of | |
+// ConnectionManager tagging. | |
+type SessionPeerManager struct { | |
+ tagger PeerTagger | |
+ tag string | |
+ | |
+ id uint64 | |
+ plk sync.RWMutex | |
+ peers map[peer.ID]struct{} | |
+ peersDiscovered bool | |
+} | |
+ | |
+// New creates a new SessionPeerManager | |
+func New(id uint64, tagger PeerTagger) *SessionPeerManager { | |
+ return &SessionPeerManager{ | |
+ id: id, | |
+ tag: fmt.Sprint("bs-ses-", id), | |
+ tagger: tagger, | |
+ peers: make(map[peer.ID]struct{}), | |
+ } | |
+} | |
+ | |
+// AddPeer adds the peer to the SessionPeerManager. | |
+// Returns true if the peer is a new peer, false if it already existed. | |
+func (spm *SessionPeerManager) AddPeer(p peer.ID) bool { | |
+ spm.plk.Lock() | |
+ defer spm.plk.Unlock() | |
+ | |
+ // Check if the peer is a new peer | |
+ if _, ok := spm.peers[p]; ok { | |
+ return false | |
+ } | |
+ | |
+ spm.peers[p] = struct{}{} | |
+ spm.peersDiscovered = true | |
+ | |
+ // Tag the peer with the ConnectionManager so it doesn't discard the | |
+ // connection | |
+ spm.tagger.TagPeer(p, spm.tag, sessionPeerTagValue) | |
+ | |
+ log.Debugw("Bitswap: Added peer to session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) | |
+ return true | |
+} | |
+ | |
+// Protect connection to this peer from being pruned by the connection manager | |
+func (spm *SessionPeerManager) ProtectConnection(p peer.ID) { | |
+ spm.plk.Lock() | |
+ defer spm.plk.Unlock() | |
+ | |
+ if _, ok := spm.peers[p]; !ok { | |
+ return | |
+ } | |
+ | |
+ spm.tagger.Protect(p, spm.tag) | |
+} | |
+ | |
+// RemovePeer removes the peer from the SessionPeerManager. | |
+// Returns true if the peer was removed, false if it did not exist. | |
+func (spm *SessionPeerManager) RemovePeer(p peer.ID) bool { | |
+ spm.plk.Lock() | |
+ defer spm.plk.Unlock() | |
+ | |
+ if _, ok := spm.peers[p]; !ok { | |
+ return false | |
+ } | |
+ | |
+ delete(spm.peers, p) | |
+ spm.tagger.UntagPeer(p, spm.tag) | |
+ spm.tagger.Unprotect(p, spm.tag) | |
+ | |
+ log.Debugw("Bitswap: removed peer from session", "session", spm.id, "peer", p, "peerCount", len(spm.peers)) | |
+ return true | |
+} | |
+ | |
+// PeersDiscovered indicates whether peers have been discovered yet. | |
+// Returns true once a peer has been discovered by the session (even if all | |
+// peers are later removed from the session). | |
+func (spm *SessionPeerManager) PeersDiscovered() bool { | |
+ spm.plk.RLock() | |
+ defer spm.plk.RUnlock() | |
+ | |
+ return spm.peersDiscovered | |
+} | |
+ | |
+func (spm *SessionPeerManager) Peers() []peer.ID { | |
+ spm.plk.RLock() | |
+ defer spm.plk.RUnlock() | |
+ | |
+ peers := make([]peer.ID, 0, len(spm.peers)) | |
+ for p := range spm.peers { | |
+ peers = append(peers, p) | |
+ } | |
+ | |
+ return peers | |
+} | |
+ | |
+func (spm *SessionPeerManager) HasPeers() bool { | |
+ spm.plk.RLock() | |
+ defer spm.plk.RUnlock() | |
+ | |
+ return len(spm.peers) > 0 | |
+} | |
+ | |
+func (spm *SessionPeerManager) HasPeer(p peer.ID) bool { | |
+ spm.plk.RLock() | |
+ defer spm.plk.RUnlock() | |
+ | |
+ _, ok := spm.peers[p] | |
+ return ok | |
+} | |
+ | |
+// Shutdown untags all the peers | |
+func (spm *SessionPeerManager) Shutdown() { | |
+ spm.plk.Lock() | |
+ defer spm.plk.Unlock() | |
+ | |
+ // Untag the peers with the ConnectionManager so that it can release | |
+ // connections to those peers | |
+ for p := range spm.peers { | |
+ spm.tagger.UntagPeer(p, spm.tag) | |
+ spm.tagger.Unprotect(p, spm.tag) | |
+ } | |
+} | |
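
The SessionPeerManager touches the connection manager only through the PeerTagger interface above, so the tag-on-add / untag-on-remove behaviour can be exercised in isolation. A rough standalone sketch under that assumption (logTagger and peerSet are invented names; the real code passes peer.ID values and delegates to libp2p's connection manager):

package main

import "fmt"

// tagger is the subset of PeerTagger exercised here.
type tagger interface {
	TagPeer(p, tag string, val int)
	UntagPeer(p, tag string)
}

// logTagger records calls instead of talking to a connection manager.
type logTagger struct{ calls []string }

func (t *logTagger) TagPeer(p, tag string, val int) {
	t.calls = append(t.calls, fmt.Sprintf("tag %s %s=%d", p, tag, val))
}
func (t *logTagger) UntagPeer(p, tag string) {
	t.calls = append(t.calls, fmt.Sprintf("untag %s %s", p, tag))
}

// peerSet mirrors AddPeer/RemovePeer: tag a peer the first time it is
// added, untag it when it is removed.
type peerSet struct {
	tag   string
	t     tagger
	peers map[string]struct{}
}

func (s *peerSet) add(p string) bool {
	if _, ok := s.peers[p]; ok {
		return false // already tracked, no double tagging
	}
	s.peers[p] = struct{}{}
	s.t.TagPeer(p, s.tag, 5) // 5 == sessionPeerTagValue
	return true
}

func (s *peerSet) remove(p string) {
	if _, ok := s.peers[p]; ok {
		delete(s.peers, p)
		s.t.UntagPeer(p, s.tag)
	}
}

func main() {
	lt := &logTagger{}
	s := &peerSet{tag: "bs-ses-1", t: lt, peers: map[string]struct{}{}}
	s.add("peerA")
	s.add("peerA") // no-op
	s.remove("peerA")
	fmt.Println(lt.calls) // [tag peerA bs-ses-1=5 untag peerA bs-ses-1]
}
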
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/tracing.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/tracing.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/tracing.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/internal/tracing.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,13 @@ | |
+package internal | |
+ | |
+import ( | |
+ "context" | |
+ "fmt" | |
+ | |
+ "go.opentelemetry.io/otel" | |
+ "go.opentelemetry.io/otel/trace" | |
+) | |
+ | |
+func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { | |
+ return otel.Tracer("go-bitswap").Start(ctx, fmt.Sprintf("Bitswap.%s", name), opts...) | |
+} | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/stat.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/stat.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/stat.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/stat.go 2023-01-30 20:34:49.302131003 +0100 | |
@@ -0,0 +1,30 @@ | |
+package client | |
+ | |
+import ( | |
+ cid "github.com/ipfs/go-cid" | |
+) | |
+ | |
+// Stat is a struct that provides various statistics on bitswap operations | |
+type Stat struct { | |
+ Wantlist []cid.Cid | |
+ BlocksReceived uint64 | |
+ DataReceived uint64 | |
+ DupBlksReceived uint64 | |
+ DupDataReceived uint64 | |
+ MessagesReceived uint64 | |
+} | |
+ | |
+// Stat returns aggregated statistics about bitswap operations | |
+func (bs *Client) Stat() (st Stat, err error) { | |
+ bs.counterLk.Lock() | |
+ c := bs.counters | |
+ st.BlocksReceived = c.blocksRecvd | |
+ st.DupBlksReceived = c.dupBlocksRecvd | |
+ st.DupDataReceived = c.dupDataRecvd | |
+ st.DataReceived = c.dataRecvd | |
+ st.MessagesReceived = c.messagesRecvd | |
+ bs.counterLk.Unlock() | |
+ st.Wantlist = bs.GetWantlist() | |
+ | |
+ return st, nil | |
+} | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/client/wantlist/wantlist.go a/vendor/github.com/ipfs/go-libipfs/bitswap/client/wantlist/wantlist.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/client/wantlist/wantlist.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/client/wantlist/wantlist.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,142 @@ | |
+// Package wantlist implements an object for bitswap that contains the keys | |
+// that a given peer wants. | |
+package wantlist | |
+ | |
+import ( | |
+ "sort" | |
+ | |
+ pb "github.com/ipfs/go-libipfs/bitswap/message/pb" | |
+ | |
+ cid "github.com/ipfs/go-cid" | |
+) | |
+ | |
+// Wantlist is a raw list of wanted blocks and their priorities | |
+type Wantlist struct { | |
+ set map[cid.Cid]Entry | |
+ | |
+ // Re-computing this can get expensive so we memoize it. | |
+ cached []Entry | |
+} | |
+ | |
+// Entry is an entry in a want list, consisting of a cid and its priority | |
+type Entry struct { | |
+ Cid cid.Cid | |
+ Priority int32 | |
+ WantType pb.Message_Wantlist_WantType | |
+} | |
+ | |
+// NewRefEntry creates a new reference tracked wantlist entry. | |
+func NewRefEntry(c cid.Cid, p int32) Entry { | |
+ return Entry{ | |
+ Cid: c, | |
+ Priority: p, | |
+ WantType: pb.Message_Wantlist_Block, | |
+ } | |
+} | |
+ | |
+type entrySlice []Entry | |
+ | |
+func (es entrySlice) Len() int { return len(es) } | |
+func (es entrySlice) Swap(i, j int) { es[i], es[j] = es[j], es[i] } | |
+func (es entrySlice) Less(i, j int) bool { return es[i].Priority > es[j].Priority } | |
+ | |
+// New generates a new raw Wantlist | |
+func New() *Wantlist { | |
+ return &Wantlist{ | |
+ set: make(map[cid.Cid]Entry), | |
+ } | |
+} | |
+ | |
+// Len returns the number of entries in a wantlist. | |
+func (w *Wantlist) Len() int { | |
+ return len(w.set) | |
+} | |
+ | |
+// Add adds an entry to the wantlist for the given CID, priority and want type, returning false if an equal or stronger want already exists. | |
+func (w *Wantlist) Add(c cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType) bool { | |
+ e, ok := w.set[c] | |
+ | |
+ // Adding want-have should not override want-block | |
+ if ok && (e.WantType == pb.Message_Wantlist_Block || wantType == pb.Message_Wantlist_Have) { | |
+ return false | |
+ } | |
+ | |
+ w.put(c, Entry{ | |
+ Cid: c, | |
+ Priority: priority, | |
+ WantType: wantType, | |
+ }) | |
+ | |
+ return true | |
+} | |
+ | |
+// Remove removes the given cid from the wantlist. | |
+func (w *Wantlist) Remove(c cid.Cid) bool { | |
+ _, ok := w.set[c] | |
+ if !ok { | |
+ return false | |
+ } | |
+ | |
+ w.delete(c) | |
+ return true | |
+} | |
+ | |
+// RemoveType removes the given cid from the wantlist, respecting the want type: | |
+// removing with want-have will not remove an existing want-block. | |
+func (w *Wantlist) RemoveType(c cid.Cid, wantType pb.Message_Wantlist_WantType) bool { | |
+ e, ok := w.set[c] | |
+ if !ok { | |
+ return false | |
+ } | |
+ | |
+ // Removing want-have should not remove want-block | |
+ if e.WantType == pb.Message_Wantlist_Block && wantType == pb.Message_Wantlist_Have { | |
+ return false | |
+ } | |
+ | |
+ w.delete(c) | |
+ return true | |
+} | |
+ | |
+func (w *Wantlist) delete(c cid.Cid) { | |
+ delete(w.set, c) | |
+ w.cached = nil | |
+} | |
+ | |
+func (w *Wantlist) put(c cid.Cid, e Entry) { | |
+ w.cached = nil | |
+ w.set[c] = e | |
+} | |
+ | |
+// Contains returns the entry, if present, for the given CID, plus whether it | |
+// was present. | |
+func (w *Wantlist) Contains(c cid.Cid) (Entry, bool) { | |
+ e, ok := w.set[c] | |
+ return e, ok | |
+} | |
+ | |
+// Entries returns all wantlist entries for a want list, sorted by priority. | |
+// | |
+// DO NOT MODIFY. The returned list is cached. | |
+func (w *Wantlist) Entries() []Entry { | |
+ if w.cached != nil { | |
+ return w.cached | |
+ } | |
+ es := make([]Entry, 0, len(w.set)) | |
+ for _, e := range w.set { | |
+ es = append(es, e) | |
+ } | |
+ sort.Sort(entrySlice(es)) | |
+ w.cached = es | |
+ return es[0:len(es):len(es)] | |
+} | |
+ | |
+// Absorb adds all the entries from the other wantlist into this one. | |
+func (w *Wantlist) Absorb(other *Wantlist) { | |
+ // Invalidate the cache up-front to avoid doing any work trying to keep it up-to-date. | |
+ w.cached = nil | |
+ | |
+ for _, e := range other.Entries() { | |
+ w.Add(e.Cid, e.Priority, e.WantType) | |
+ } | |
+} | |
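
The invariant enforced by Add and RemoveType above is simply that want-block outranks want-have: a want-have can neither override nor cancel an existing want-block. A simplified, self-contained sketch of that rule (a bool stands in for pb.Message_Wantlist_WantType, and the types here are illustrative only):

package main

import "fmt"

// entry is a stripped-down wantlist entry: wantBlock=true corresponds to
// Message_Wantlist_Block, false to Message_Wantlist_Have.
type entry struct {
	priority  int32
	wantBlock bool
}

type wantlist map[string]entry

// add mirrors Wantlist.Add: adding a want-have never downgrades want-block.
func (w wantlist) add(key string, priority int32, wantBlock bool) bool {
	if e, ok := w[key]; ok && (e.wantBlock || !wantBlock) {
		return false
	}
	w[key] = entry{priority, wantBlock}
	return true
}

// removeType mirrors Wantlist.RemoveType: a want-have cancel does not
// remove an existing want-block.
func (w wantlist) removeType(key string, wantBlock bool) bool {
	e, ok := w[key]
	if !ok || (e.wantBlock && !wantBlock) {
		return false
	}
	delete(w, key)
	return true
}

func main() {
	w := wantlist{}
	fmt.Println(w.add("cid1", 10, false))    // true: new want-have
	fmt.Println(w.add("cid1", 20, true))     // true: upgraded to want-block
	fmt.Println(w.add("cid1", 30, false))    // false: want-have can't override
	fmt.Println(w.removeType("cid1", false)) // false: want-have can't cancel want-block
	fmt.Println(w.removeType("cid1", true))  // true
}
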
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/forward.go a/vendor/github.com/ipfs/go-libipfs/bitswap/forward.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/forward.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/forward.go 2023-01-30 20:34:49.302131003 +0100 | |
@@ -0,0 +1,17 @@ | |
+package bitswap | |
+ | |
+import ( | |
+ "github.com/ipfs/go-libipfs/bitswap/server" | |
+ "github.com/ipfs/go-libipfs/bitswap/tracer" | |
+) | |
+ | |
+type ( | |
+ // DEPRECATED | |
+ PeerBlockRequestFilter = server.PeerBlockRequestFilter | |
+ // DEPRECATED | |
+ TaskComparator = server.TaskComparator | |
+ // DEPRECATED | |
+ TaskInfo = server.TaskInfo | |
+ // DEPRECATED | |
+ Tracer = tracer.Tracer | |
+) | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/.gitignore a/vendor/github.com/ipfs/go-libipfs/bitswap/.gitignore | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/.gitignore 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/.gitignore 2023-01-30 20:34:49.302131003 +0100 | |
@@ -0,0 +1 @@ | |
+tmp | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/internal/defaults/defaults.go a/vendor/github.com/ipfs/go-libipfs/bitswap/internal/defaults/defaults.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/internal/defaults/defaults.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/internal/defaults/defaults.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,27 @@ | |
+package defaults | |
+ | |
+import ( | |
+ "time" | |
+) | |
+ | |
+const ( | |
+ // these requests take at _least_ two minutes at the moment. | |
+ ProvideTimeout = time.Minute * 3 | |
+ ProvSearchDelay = time.Second | |
+ | |
+ // Number of concurrent workers in decision engine that process requests to the blockstore | |
+ BitswapEngineBlockstoreWorkerCount = 128 | |
+ // the total number of simultaneous threads sending outgoing messages | |
+ BitswapTaskWorkerCount = 8 | |
+	// the number of worker threads started for the decision engine task worker | |
+	BitswapEngineTaskWorkerCount = 8 | |
+	// the maximum number of bytes a peer may have outstanding; used by the decision engine | |
+	BitswapMaxOutstandingBytesPerPeer = 1 << 20 | |
+	// the target size, in bytes, of each outgoing bitswap message | |
+	BitswapEngineTargetMessageSize = 16 * 1024 | |
+	// HasBlockBufferSize is the buffer size of the channel for new blocks | |
+	// that need to be provided. They should get pulled over by the | |
+	// provideCollector even before they are actually provided. | |
+	// TODO: Does this need to be this large given that? | |
+ HasBlockBufferSize = 256 | |
+) | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/internal/tracing.go a/vendor/github.com/ipfs/go-libipfs/bitswap/internal/tracing.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/internal/tracing.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/internal/tracing.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,13 @@ | |
+package internal | |
+ | |
+import ( | |
+ "context" | |
+ "fmt" | |
+ | |
+ "go.opentelemetry.io/otel" | |
+ "go.opentelemetry.io/otel/trace" | |
+) | |
+ | |
+func StartSpan(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { | |
+ return otel.Tracer("go-bitswap").Start(ctx, fmt.Sprintf("Bitswap.%s", name), opts...) | |
+} | |
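
Both copies of StartSpan are thin wrappers over the OpenTelemetry tracer; callers follow the `ctx, span := StartSpan(...); defer span.End()` pattern seen in SessionManager.NewSession earlier in this patch. A rough sketch of the same pattern written directly against the otel API (the function and attribute names are made up for illustration; spans are no-ops until a tracer provider is registered):

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func doWork(ctx context.Context, id string) {
	// Equivalent to internal.StartSpan(ctx, "DoWork", ...).
	ctx, span := otel.Tracer("go-bitswap").Start(ctx, "Bitswap.DoWork",
		trace.WithAttributes(attribute.String("ID", id)))
	defer span.End()

	_ = ctx // pass ctx on to child operations so their spans nest under this one
}

func main() {
	doWork(context.Background(), "42")
}
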
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/message/message.go a/vendor/github.com/ipfs/go-libipfs/bitswap/message/message.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/message/message.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/message/message.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,500 @@ | |
+package message | |
+ | |
+import ( | |
+ "encoding/binary" | |
+ "errors" | |
+ "io" | |
+ | |
+ "github.com/ipfs/go-libipfs/bitswap/client/wantlist" | |
+ pb "github.com/ipfs/go-libipfs/bitswap/message/pb" | |
+ | |
+ cid "github.com/ipfs/go-cid" | |
+ blocks "github.com/ipfs/go-libipfs/blocks" | |
+ pool "github.com/libp2p/go-buffer-pool" | |
+ msgio "github.com/libp2p/go-msgio" | |
+ | |
+ u "github.com/ipfs/go-ipfs-util" | |
+ "github.com/libp2p/go-libp2p/core/network" | |
+) | |
+ | |
+// BitSwapMessage is the basic interface for building, encoding, and | |
+// decoding messages sent over the BitSwap protocol. | |
+type BitSwapMessage interface { | |
+ // Wantlist returns a slice of unique keys that represent data wanted by | |
+ // the sender. | |
+ Wantlist() []Entry | |
+ | |
+ // Blocks returns a slice of unique blocks. | |
+ Blocks() []blocks.Block | |
+ // BlockPresences returns the list of HAVE / DONT_HAVE in the message | |
+ BlockPresences() []BlockPresence | |
+ // Haves returns the Cids for each HAVE | |
+ Haves() []cid.Cid | |
+ // DontHaves returns the Cids for each DONT_HAVE | |
+ DontHaves() []cid.Cid | |
+ // PendingBytes returns the number of outstanding bytes of data that the | |
+ // engine has yet to send to the client (because they didn't fit in this | |
+ // message) | |
+ PendingBytes() int32 | |
+ | |
+ // AddEntry adds an entry to the Wantlist. | |
+ AddEntry(key cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int | |
+ | |
+ // Cancel adds a CANCEL for the given CID to the message | |
+ // Returns the size of the CANCEL entry in the protobuf | |
+ Cancel(key cid.Cid) int | |
+ | |
+ // Remove removes any entries for the given CID. Useful when the want | |
+ // status for the CID changes when preparing a message. | |
+ Remove(key cid.Cid) | |
+ | |
+ // Empty indicates whether the message has any information | |
+ Empty() bool | |
+ // Size returns the size of the message in bytes | |
+ Size() int | |
+ | |
+ // A full wantlist is an authoritative copy, a 'non-full' wantlist is a patch-set | |
+ Full() bool | |
+ | |
+ // AddBlock adds a block to the message | |
+ AddBlock(blocks.Block) | |
+ // AddBlockPresence adds a HAVE / DONT_HAVE for the given Cid to the message | |
+ AddBlockPresence(cid.Cid, pb.Message_BlockPresenceType) | |
+ // AddHave adds a HAVE for the given Cid to the message | |
+ AddHave(cid.Cid) | |
+ // AddDontHave adds a DONT_HAVE for the given Cid to the message | |
+ AddDontHave(cid.Cid) | |
+ // SetPendingBytes sets the number of bytes of data that are yet to be sent | |
+ // to the client (because they didn't fit in this message) | |
+ SetPendingBytes(int32) | |
+ Exportable | |
+ | |
+ Loggable() map[string]interface{} | |
+ | |
+ // Reset the values in the message back to defaults, so it can be reused | |
+ Reset(bool) | |
+ | |
+ // Clone the message fields | |
+ Clone() BitSwapMessage | |
+} | |
+ | |
+// Exportable is an interface for structures that can be | |
+// encoded in a bitswap protobuf. | |
+type Exportable interface { | |
+ // Note that older Bitswap versions use a different wire format, so we need | |
+ // to convert the message to the appropriate format depending on which | |
+ // version of the protocol the remote peer supports. | |
+ ToProtoV0() *pb.Message | |
+ ToProtoV1() *pb.Message | |
+ ToNetV0(w io.Writer) error | |
+ ToNetV1(w io.Writer) error | |
+} | |
+ | |
+// BlockPresence represents a HAVE / DONT_HAVE for a given Cid | |
+type BlockPresence struct { | |
+ Cid cid.Cid | |
+ Type pb.Message_BlockPresenceType | |
+} | |
+ | |
+// Entry is a wantlist entry in a Bitswap message, with flags indicating | |
+// - whether message is a cancel | |
+// - whether requester wants a DONT_HAVE message | |
+// - whether requester wants a HAVE message (instead of the block) | |
+type Entry struct { | |
+ wantlist.Entry | |
+ Cancel bool | |
+ SendDontHave bool | |
+} | |
+ | |
+// Get the size of the entry on the wire | |
+func (e *Entry) Size() int { | |
+ epb := e.ToPB() | |
+ return epb.Size() | |
+} | |
+ | |
+// Get the entry in protobuf form | |
+func (e *Entry) ToPB() pb.Message_Wantlist_Entry { | |
+ return pb.Message_Wantlist_Entry{ | |
+ Block: pb.Cid{Cid: e.Cid}, | |
+ Priority: int32(e.Priority), | |
+ Cancel: e.Cancel, | |
+ WantType: e.WantType, | |
+ SendDontHave: e.SendDontHave, | |
+ } | |
+} | |
+ | |
+var MaxEntrySize = maxEntrySize() | |
+ | |
+func maxEntrySize() int { | |
+ var maxInt32 int32 = (1 << 31) - 1 | |
+ | |
+ c := cid.NewCidV0(u.Hash([]byte("cid"))) | |
+ e := Entry{ | |
+ Entry: wantlist.Entry{ | |
+ Cid: c, | |
+ Priority: maxInt32, | |
+ WantType: pb.Message_Wantlist_Have, | |
+ }, | |
+ SendDontHave: true, // true takes up more space than false | |
+ Cancel: true, | |
+ } | |
+ return e.Size() | |
+} | |
+ | |
+type impl struct { | |
+ full bool | |
+ wantlist map[cid.Cid]*Entry | |
+ blocks map[cid.Cid]blocks.Block | |
+ blockPresences map[cid.Cid]pb.Message_BlockPresenceType | |
+ pendingBytes int32 | |
+} | |
+ | |
+// New returns a new, empty bitswap message | |
+func New(full bool) BitSwapMessage { | |
+ return newMsg(full) | |
+} | |
+ | |
+func newMsg(full bool) *impl { | |
+ return &impl{ | |
+ full: full, | |
+ wantlist: make(map[cid.Cid]*Entry), | |
+ blocks: make(map[cid.Cid]blocks.Block), | |
+ blockPresences: make(map[cid.Cid]pb.Message_BlockPresenceType), | |
+ } | |
+} | |
+ | |
+// Clone the message fields | |
+func (m *impl) Clone() BitSwapMessage { | |
+ msg := newMsg(m.full) | |
+ for k := range m.wantlist { | |
+ msg.wantlist[k] = m.wantlist[k] | |
+ } | |
+ for k := range m.blocks { | |
+ msg.blocks[k] = m.blocks[k] | |
+ } | |
+ for k := range m.blockPresences { | |
+ msg.blockPresences[k] = m.blockPresences[k] | |
+ } | |
+ msg.pendingBytes = m.pendingBytes | |
+ return msg | |
+} | |
+ | |
+// Reset the values in the message back to defaults, so it can be reused | |
+func (m *impl) Reset(full bool) { | |
+ m.full = full | |
+ for k := range m.wantlist { | |
+ delete(m.wantlist, k) | |
+ } | |
+ for k := range m.blocks { | |
+ delete(m.blocks, k) | |
+ } | |
+ for k := range m.blockPresences { | |
+ delete(m.blockPresences, k) | |
+ } | |
+ m.pendingBytes = 0 | |
+} | |
+ | |
+var errCidMissing = errors.New("missing cid") | |
+ | |
+func newMessageFromProto(pbm pb.Message) (BitSwapMessage, error) { | |
+ m := newMsg(pbm.Wantlist.Full) | |
+ for _, e := range pbm.Wantlist.Entries { | |
+ if !e.Block.Cid.Defined() { | |
+ return nil, errCidMissing | |
+ } | |
+ m.addEntry(e.Block.Cid, e.Priority, e.Cancel, e.WantType, e.SendDontHave) | |
+ } | |
+ | |
+ // deprecated | |
+ for _, d := range pbm.Blocks { | |
+ // CIDv0, sha256, protobuf only | |
+ b := blocks.NewBlock(d) | |
+ m.AddBlock(b) | |
+ } | |
+ // | |
+ | |
+ for _, b := range pbm.GetPayload() { | |
+ pref, err := cid.PrefixFromBytes(b.GetPrefix()) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ c, err := pref.Sum(b.GetData()) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ blk, err := blocks.NewBlockWithCid(b.GetData(), c) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ m.AddBlock(blk) | |
+ } | |
+ | |
+ for _, bi := range pbm.GetBlockPresences() { | |
+ if !bi.Cid.Cid.Defined() { | |
+ return nil, errCidMissing | |
+ } | |
+ m.AddBlockPresence(bi.Cid.Cid, bi.Type) | |
+ } | |
+ | |
+ m.pendingBytes = pbm.PendingBytes | |
+ | |
+ return m, nil | |
+} | |
+ | |
+func (m *impl) Full() bool { | |
+ return m.full | |
+} | |
+ | |
+func (m *impl) Empty() bool { | |
+ return len(m.blocks) == 0 && len(m.wantlist) == 0 && len(m.blockPresences) == 0 | |
+} | |
+ | |
+func (m *impl) Wantlist() []Entry { | |
+ out := make([]Entry, 0, len(m.wantlist)) | |
+ for _, e := range m.wantlist { | |
+ out = append(out, *e) | |
+ } | |
+ return out | |
+} | |
+ | |
+func (m *impl) Blocks() []blocks.Block { | |
+ bs := make([]blocks.Block, 0, len(m.blocks)) | |
+ for _, block := range m.blocks { | |
+ bs = append(bs, block) | |
+ } | |
+ return bs | |
+} | |
+ | |
+func (m *impl) BlockPresences() []BlockPresence { | |
+ bps := make([]BlockPresence, 0, len(m.blockPresences)) | |
+ for c, t := range m.blockPresences { | |
+ bps = append(bps, BlockPresence{c, t}) | |
+ } | |
+ return bps | |
+} | |
+ | |
+func (m *impl) Haves() []cid.Cid { | |
+ return m.getBlockPresenceByType(pb.Message_Have) | |
+} | |
+ | |
+func (m *impl) DontHaves() []cid.Cid { | |
+ return m.getBlockPresenceByType(pb.Message_DontHave) | |
+} | |
+ | |
+func (m *impl) getBlockPresenceByType(t pb.Message_BlockPresenceType) []cid.Cid { | |
+ cids := make([]cid.Cid, 0, len(m.blockPresences)) | |
+ for c, bpt := range m.blockPresences { | |
+ if bpt == t { | |
+ cids = append(cids, c) | |
+ } | |
+ } | |
+ return cids | |
+} | |
+ | |
+func (m *impl) PendingBytes() int32 { | |
+ return m.pendingBytes | |
+} | |
+ | |
+func (m *impl) SetPendingBytes(pendingBytes int32) { | |
+ m.pendingBytes = pendingBytes | |
+} | |
+ | |
+func (m *impl) Remove(k cid.Cid) { | |
+ delete(m.wantlist, k) | |
+} | |
+ | |
+func (m *impl) Cancel(k cid.Cid) int { | |
+ return m.addEntry(k, 0, true, pb.Message_Wantlist_Block, false) | |
+} | |
+ | |
+func (m *impl) AddEntry(k cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { | |
+ return m.addEntry(k, priority, false, wantType, sendDontHave) | |
+} | |
+ | |
+func (m *impl) addEntry(c cid.Cid, priority int32, cancel bool, wantType pb.Message_Wantlist_WantType, sendDontHave bool) int { | |
+ e, exists := m.wantlist[c] | |
+ if exists { | |
+ // Only change priority if want is of the same type | |
+ if e.WantType == wantType { | |
+ e.Priority = priority | |
+ } | |
+ // Only change from "dont cancel" to "do cancel" | |
+ if cancel { | |
+ e.Cancel = cancel | |
+ } | |
+ // Only change from "dont send" to "do send" DONT_HAVE | |
+ if sendDontHave { | |
+ e.SendDontHave = sendDontHave | |
+ } | |
+ // want-block overrides existing want-have | |
+ if wantType == pb.Message_Wantlist_Block && e.WantType == pb.Message_Wantlist_Have { | |
+ e.WantType = wantType | |
+ } | |
+ m.wantlist[c] = e | |
+ return 0 | |
+ } | |
+ | |
+ e = &Entry{ | |
+ Entry: wantlist.Entry{ | |
+ Cid: c, | |
+ Priority: priority, | |
+ WantType: wantType, | |
+ }, | |
+ SendDontHave: sendDontHave, | |
+ Cancel: cancel, | |
+ } | |
+ m.wantlist[c] = e | |
+ | |
+ return e.Size() | |
+} | |
+ | |
+func (m *impl) AddBlock(b blocks.Block) { | |
+ delete(m.blockPresences, b.Cid()) | |
+ m.blocks[b.Cid()] = b | |
+} | |
+ | |
+func (m *impl) AddBlockPresence(c cid.Cid, t pb.Message_BlockPresenceType) { | |
+ if _, ok := m.blocks[c]; ok { | |
+ return | |
+ } | |
+ m.blockPresences[c] = t | |
+} | |
+ | |
+func (m *impl) AddHave(c cid.Cid) { | |
+ m.AddBlockPresence(c, pb.Message_Have) | |
+} | |
+ | |
+func (m *impl) AddDontHave(c cid.Cid) { | |
+ m.AddBlockPresence(c, pb.Message_DontHave) | |
+} | |
+ | |
+func (m *impl) Size() int { | |
+ size := 0 | |
+ for _, block := range m.blocks { | |
+ size += len(block.RawData()) | |
+ } | |
+ for c := range m.blockPresences { | |
+ size += BlockPresenceSize(c) | |
+ } | |
+ for _, e := range m.wantlist { | |
+ size += e.Size() | |
+ } | |
+ | |
+ return size | |
+} | |
+ | |
+func BlockPresenceSize(c cid.Cid) int { | |
+ return (&pb.Message_BlockPresence{ | |
+ Cid: pb.Cid{Cid: c}, | |
+ Type: pb.Message_Have, | |
+ }).Size() | |
+} | |
+ | |
+// FromNet generates a new BitSwapMessage from incoming data on an io.Reader. | |
+func FromNet(r io.Reader) (BitSwapMessage, error) { | |
+ reader := msgio.NewVarintReaderSize(r, network.MessageSizeMax) | |
+ return FromMsgReader(reader) | |
+} | |
+ | |
+// FromMsgReader generates a new BitSwapMessage from a msgio.Reader. | |
+func FromMsgReader(r msgio.Reader) (BitSwapMessage, error) { | |
+ msg, err := r.ReadMsg() | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ var pb pb.Message | |
+ err = pb.Unmarshal(msg) | |
+ r.ReleaseMsg(msg) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ return newMessageFromProto(pb) | |
+} | |
+ | |
+func (m *impl) ToProtoV0() *pb.Message { | |
+ pbm := new(pb.Message) | |
+ pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) | |
+ for _, e := range m.wantlist { | |
+ pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, e.ToPB()) | |
+ } | |
+ pbm.Wantlist.Full = m.full | |
+ | |
+ blocks := m.Blocks() | |
+ pbm.Blocks = make([][]byte, 0, len(blocks)) | |
+ for _, b := range blocks { | |
+ pbm.Blocks = append(pbm.Blocks, b.RawData()) | |
+ } | |
+ return pbm | |
+} | |
+ | |
+func (m *impl) ToProtoV1() *pb.Message { | |
+ pbm := new(pb.Message) | |
+ pbm.Wantlist.Entries = make([]pb.Message_Wantlist_Entry, 0, len(m.wantlist)) | |
+ for _, e := range m.wantlist { | |
+ pbm.Wantlist.Entries = append(pbm.Wantlist.Entries, e.ToPB()) | |
+ } | |
+ pbm.Wantlist.Full = m.full | |
+ | |
+ blocks := m.Blocks() | |
+ pbm.Payload = make([]pb.Message_Block, 0, len(blocks)) | |
+ for _, b := range blocks { | |
+ pbm.Payload = append(pbm.Payload, pb.Message_Block{ | |
+ Data: b.RawData(), | |
+ Prefix: b.Cid().Prefix().Bytes(), | |
+ }) | |
+ } | |
+ | |
+ pbm.BlockPresences = make([]pb.Message_BlockPresence, 0, len(m.blockPresences)) | |
+ for c, t := range m.blockPresences { | |
+ pbm.BlockPresences = append(pbm.BlockPresences, pb.Message_BlockPresence{ | |
+ Cid: pb.Cid{Cid: c}, | |
+ Type: t, | |
+ }) | |
+ } | |
+ | |
+ pbm.PendingBytes = m.PendingBytes() | |
+ | |
+ return pbm | |
+} | |
+ | |
+func (m *impl) ToNetV0(w io.Writer) error { | |
+ return write(w, m.ToProtoV0()) | |
+} | |
+ | |
+func (m *impl) ToNetV1(w io.Writer) error { | |
+ return write(w, m.ToProtoV1()) | |
+} | |
+ | |
+func write(w io.Writer, m *pb.Message) error { | |
+ size := m.Size() | |
+ | |
+ buf := pool.Get(size + binary.MaxVarintLen64) | |
+ defer pool.Put(buf) | |
+ | |
+ n := binary.PutUvarint(buf, uint64(size)) | |
+ | |
+ written, err := m.MarshalTo(buf[n:]) | |
+ if err != nil { | |
+ return err | |
+ } | |
+ n += written | |
+ | |
+ _, err = w.Write(buf[:n]) | |
+ return err | |
+} | |
+ | |
+func (m *impl) Loggable() map[string]interface{} { | |
+ blocks := make([]string, 0, len(m.blocks)) | |
+ for _, v := range m.blocks { | |
+ blocks = append(blocks, v.Cid().String()) | |
+ } | |
+ return map[string]interface{}{ | |
+ "blocks": blocks, | |
+ "wants": m.Wantlist(), | |
+ } | |
+} | |
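
On the wire (see write() and FromNet above) every bitswap message is a uvarint length prefix followed by the marshalled protobuf, read back via msgio's varint reader. A standalone sketch of just that framing, using a plain byte slice in place of the gogo-protobuf Message (helper names are invented):

package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeFrame mirrors write() above: uvarint length prefix, then the payload.
func writeFrame(w io.Writer, payload []byte) error {
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(payload)))
	if _, err := w.Write(lenBuf[:n]); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

// readFrame mirrors what the varint reader does in FromNet: read the
// length, then exactly that many payload bytes.
func readFrame(r *bufio.Reader) ([]byte, error) {
	size, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, err
	}
	buf := make([]byte, size)
	_, err = io.ReadFull(r, buf)
	return buf, err
}

func main() {
	var network bytes.Buffer
	_ = writeFrame(&network, []byte("bitswap protobuf bytes"))

	msg, _ := readFrame(bufio.NewReader(&network))
	fmt.Printf("%s\n", msg) // bitswap protobuf bytes
}
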
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/cid.go a/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/cid.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/cid.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/cid.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,44 @@ | |
+package bitswap_message_pb | |
+ | |
+import ( | |
+ "github.com/ipfs/go-cid" | |
+) | |
+ | |
+// NOTE: Don't "embed" the cid, wrap it like we're doing here. Otherwise, gogo | |
+// will try to use the Bytes() function. | |
+ | |
+// Cid is a custom type for CIDs in protobufs, that allows us to avoid | |
+// reallocating. | |
+type Cid struct { | |
+ Cid cid.Cid | |
+} | |
+ | |
+func (c Cid) Marshal() ([]byte, error) { | |
+ return c.Cid.Bytes(), nil | |
+} | |
+ | |
+func (c *Cid) MarshalTo(data []byte) (int, error) { | |
+ // intentionally using KeyString here to avoid allocating. | |
+ return copy(data[:c.Size()], c.Cid.KeyString()), nil | |
+} | |
+ | |
+func (c *Cid) Unmarshal(data []byte) (err error) { | |
+ c.Cid, err = cid.Cast(data) | |
+ return err | |
+} | |
+ | |
+func (c *Cid) Size() int { | |
+ return len(c.Cid.KeyString()) | |
+} | |
+ | |
+func (c Cid) MarshalJSON() ([]byte, error) { | |
+ return c.Cid.MarshalJSON() | |
+} | |
+ | |
+func (c *Cid) UnmarshalJSON(data []byte) error { | |
+ return c.Cid.UnmarshalJSON(data) | |
+} | |
+ | |
+func (c Cid) Equal(other Cid) bool { | |
+	return c.Cid.Equals(other.Cid) | |
+} | |
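
A quick round-trip through the wrapper above shows why it exists: wrapping (rather than embedding) the CID means gogo uses these Marshal/Unmarshal methods, so the raw CID bytes go on the wire without an extra allocation. A small usage sketch, assuming the vendored import paths from this patch:

package main

import (
	"fmt"

	cid "github.com/ipfs/go-cid"
	u "github.com/ipfs/go-ipfs-util"
	pb "github.com/ipfs/go-libipfs/bitswap/message/pb"
)

func main() {
	// Build a CIDv0 the same way maxEntrySize() does in message.go above.
	c := cid.NewCidV0(u.Hash([]byte("example")))

	// Marshal writes the raw CID bytes; Unmarshal casts them back.
	wrapped := pb.Cid{Cid: c}
	raw, _ := wrapped.Marshal()

	var decoded pb.Cid
	_ = decoded.Unmarshal(raw)
	fmt.Println(decoded.Cid.Equals(c)) // true
}
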
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/Makefile a/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/Makefile | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/Makefile 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/Makefile 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,11 @@ | |
+PB = $(wildcard *.proto) | |
+GO = $(PB:.proto=.pb.go) | |
+ | |
+all: $(GO) | |
+ | |
+%.pb.go: %.proto | |
+ protoc --proto_path=$(GOPATH)/src:. --gogofaster_out=. $< | |
+ | |
+clean: | |
+ rm -f *.pb.go | |
+ rm -f *.go | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/message.pb.go a/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/message.pb.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/message.pb.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/message.pb.go 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,1569 @@ | |
+// Code generated by protoc-gen-gogo. DO NOT EDIT. | |
+// source: message.proto | |
+ | |
+package bitswap_message_pb | |
+ | |
+import ( | |
+ fmt "fmt" | |
+ _ "github.com/gogo/protobuf/gogoproto" | |
+ proto "github.com/gogo/protobuf/proto" | |
+ io "io" | |
+ math "math" | |
+ math_bits "math/bits" | |
+) | |
+ | |
+// Reference imports to suppress errors if they are not otherwise used. | |
+var _ = proto.Marshal | |
+var _ = fmt.Errorf | |
+var _ = math.Inf | |
+ | |
+// This is a compile-time assertion to ensure that this generated file | |
+// is compatible with the proto package it is being compiled against. | |
+// A compilation error at this line likely means your copy of the | |
+// proto package needs to be updated. | |
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package | |
+ | |
+type Message_BlockPresenceType int32 | |
+ | |
+const ( | |
+ Message_Have Message_BlockPresenceType = 0 | |
+ Message_DontHave Message_BlockPresenceType = 1 | |
+) | |
+ | |
+var Message_BlockPresenceType_name = map[int32]string{ | |
+ 0: "Have", | |
+ 1: "DontHave", | |
+} | |
+ | |
+var Message_BlockPresenceType_value = map[string]int32{ | |
+ "Have": 0, | |
+ "DontHave": 1, | |
+} | |
+ | |
+func (x Message_BlockPresenceType) String() string { | |
+ return proto.EnumName(Message_BlockPresenceType_name, int32(x)) | |
+} | |
+ | |
+func (Message_BlockPresenceType) EnumDescriptor() ([]byte, []int) { | |
+ return fileDescriptor_33c57e4bae7b9afd, []int{0, 0} | |
+} | |
+ | |
+type Message_Wantlist_WantType int32 | |
+ | |
+const ( | |
+ Message_Wantlist_Block Message_Wantlist_WantType = 0 | |
+ Message_Wantlist_Have Message_Wantlist_WantType = 1 | |
+) | |
+ | |
+var Message_Wantlist_WantType_name = map[int32]string{ | |
+ 0: "Block", | |
+ 1: "Have", | |
+} | |
+ | |
+var Message_Wantlist_WantType_value = map[string]int32{ | |
+ "Block": 0, | |
+ "Have": 1, | |
+} | |
+ | |
+func (x Message_Wantlist_WantType) String() string { | |
+ return proto.EnumName(Message_Wantlist_WantType_name, int32(x)) | |
+} | |
+ | |
+func (Message_Wantlist_WantType) EnumDescriptor() ([]byte, []int) { | |
+ return fileDescriptor_33c57e4bae7b9afd, []int{0, 0, 0} | |
+} | |
+ | |
+type Message struct { | |
+ Wantlist Message_Wantlist `protobuf:"bytes,1,opt,name=wantlist,proto3" json:"wantlist"` | |
+ Blocks [][]byte `protobuf:"bytes,2,rep,name=blocks,proto3" json:"blocks,omitempty"` | |
+ Payload []Message_Block `protobuf:"bytes,3,rep,name=payload,proto3" json:"payload"` | |
+ BlockPresences []Message_BlockPresence `protobuf:"bytes,4,rep,name=blockPresences,proto3" json:"blockPresences"` | |
+ PendingBytes int32 `protobuf:"varint,5,opt,name=pendingBytes,proto3" json:"pendingBytes,omitempty"` | |
+} | |
+ | |
+func (m *Message) Reset() { *m = Message{} } | |
+func (m *Message) String() string { return proto.CompactTextString(m) } | |
+func (*Message) ProtoMessage() {} | |
+func (*Message) Descriptor() ([]byte, []int) { | |
+ return fileDescriptor_33c57e4bae7b9afd, []int{0} | |
+} | |
+func (m *Message) XXX_Unmarshal(b []byte) error { | |
+ return m.Unmarshal(b) | |
+} | |
+func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
+ if deterministic { | |
+ return xxx_messageInfo_Message.Marshal(b, m, deterministic) | |
+ } else { | |
+ b = b[:cap(b)] | |
+ n, err := m.MarshalToSizedBuffer(b) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ return b[:n], nil | |
+ } | |
+} | |
+func (m *Message) XXX_Merge(src proto.Message) { | |
+ xxx_messageInfo_Message.Merge(m, src) | |
+} | |
+func (m *Message) XXX_Size() int { | |
+ return m.Size() | |
+} | |
+func (m *Message) XXX_DiscardUnknown() { | |
+ xxx_messageInfo_Message.DiscardUnknown(m) | |
+} | |
+ | |
+var xxx_messageInfo_Message proto.InternalMessageInfo | |
+ | |
+func (m *Message) GetWantlist() Message_Wantlist { | |
+ if m != nil { | |
+ return m.Wantlist | |
+ } | |
+ return Message_Wantlist{} | |
+} | |
+ | |
+func (m *Message) GetBlocks() [][]byte { | |
+ if m != nil { | |
+ return m.Blocks | |
+ } | |
+ return nil | |
+} | |
+ | |
+func (m *Message) GetPayload() []Message_Block { | |
+ if m != nil { | |
+ return m.Payload | |
+ } | |
+ return nil | |
+} | |
+ | |
+func (m *Message) GetBlockPresences() []Message_BlockPresence { | |
+ if m != nil { | |
+ return m.BlockPresences | |
+ } | |
+ return nil | |
+} | |
+ | |
+func (m *Message) GetPendingBytes() int32 { | |
+ if m != nil { | |
+ return m.PendingBytes | |
+ } | |
+ return 0 | |
+} | |
+ | |
+type Message_Wantlist struct { | |
+ Entries []Message_Wantlist_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries"` | |
+ Full bool `protobuf:"varint,2,opt,name=full,proto3" json:"full,omitempty"` | |
+} | |
+ | |
+func (m *Message_Wantlist) Reset() { *m = Message_Wantlist{} } | |
+func (m *Message_Wantlist) String() string { return proto.CompactTextString(m) } | |
+func (*Message_Wantlist) ProtoMessage() {} | |
+func (*Message_Wantlist) Descriptor() ([]byte, []int) { | |
+ return fileDescriptor_33c57e4bae7b9afd, []int{0, 0} | |
+} | |
+func (m *Message_Wantlist) XXX_Unmarshal(b []byte) error { | |
+ return m.Unmarshal(b) | |
+} | |
+func (m *Message_Wantlist) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
+ if deterministic { | |
+ return xxx_messageInfo_Message_Wantlist.Marshal(b, m, deterministic) | |
+ } else { | |
+ b = b[:cap(b)] | |
+ n, err := m.MarshalToSizedBuffer(b) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ return b[:n], nil | |
+ } | |
+} | |
+func (m *Message_Wantlist) XXX_Merge(src proto.Message) { | |
+ xxx_messageInfo_Message_Wantlist.Merge(m, src) | |
+} | |
+func (m *Message_Wantlist) XXX_Size() int { | |
+ return m.Size() | |
+} | |
+func (m *Message_Wantlist) XXX_DiscardUnknown() { | |
+ xxx_messageInfo_Message_Wantlist.DiscardUnknown(m) | |
+} | |
+ | |
+var xxx_messageInfo_Message_Wantlist proto.InternalMessageInfo | |
+ | |
+func (m *Message_Wantlist) GetEntries() []Message_Wantlist_Entry { | |
+ if m != nil { | |
+ return m.Entries | |
+ } | |
+ return nil | |
+} | |
+ | |
+func (m *Message_Wantlist) GetFull() bool { | |
+ if m != nil { | |
+ return m.Full | |
+ } | |
+ return false | |
+} | |
+ | |
+type Message_Wantlist_Entry struct { | |
+ Block Cid `protobuf:"bytes,1,opt,name=block,proto3,customtype=Cid" json:"block"` | |
+ Priority int32 `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"` | |
+ Cancel bool `protobuf:"varint,3,opt,name=cancel,proto3" json:"cancel,omitempty"` | |
+ WantType Message_Wantlist_WantType `protobuf:"varint,4,opt,name=wantType,proto3,enum=bitswap.message.pb.Message_Wantlist_WantType" json:"wantType,omitempty"` | |
+ SendDontHave bool `protobuf:"varint,5,opt,name=sendDontHave,proto3" json:"sendDontHave,omitempty"` | |
+} | |
+ | |
+func (m *Message_Wantlist_Entry) Reset() { *m = Message_Wantlist_Entry{} } | |
+func (m *Message_Wantlist_Entry) String() string { return proto.CompactTextString(m) } | |
+func (*Message_Wantlist_Entry) ProtoMessage() {} | |
+func (*Message_Wantlist_Entry) Descriptor() ([]byte, []int) { | |
+ return fileDescriptor_33c57e4bae7b9afd, []int{0, 0, 0} | |
+} | |
+func (m *Message_Wantlist_Entry) XXX_Unmarshal(b []byte) error { | |
+ return m.Unmarshal(b) | |
+} | |
+func (m *Message_Wantlist_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
+ if deterministic { | |
+ return xxx_messageInfo_Message_Wantlist_Entry.Marshal(b, m, deterministic) | |
+ } else { | |
+ b = b[:cap(b)] | |
+ n, err := m.MarshalToSizedBuffer(b) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ return b[:n], nil | |
+ } | |
+} | |
+func (m *Message_Wantlist_Entry) XXX_Merge(src proto.Message) { | |
+ xxx_messageInfo_Message_Wantlist_Entry.Merge(m, src) | |
+} | |
+func (m *Message_Wantlist_Entry) XXX_Size() int { | |
+ return m.Size() | |
+} | |
+func (m *Message_Wantlist_Entry) XXX_DiscardUnknown() { | |
+ xxx_messageInfo_Message_Wantlist_Entry.DiscardUnknown(m) | |
+} | |
+ | |
+var xxx_messageInfo_Message_Wantlist_Entry proto.InternalMessageInfo | |
+ | |
+func (m *Message_Wantlist_Entry) GetPriority() int32 { | |
+ if m != nil { | |
+ return m.Priority | |
+ } | |
+ return 0 | |
+} | |
+ | |
+func (m *Message_Wantlist_Entry) GetCancel() bool { | |
+ if m != nil { | |
+ return m.Cancel | |
+ } | |
+ return false | |
+} | |
+ | |
+func (m *Message_Wantlist_Entry) GetWantType() Message_Wantlist_WantType { | |
+ if m != nil { | |
+ return m.WantType | |
+ } | |
+ return Message_Wantlist_Block | |
+} | |
+ | |
+func (m *Message_Wantlist_Entry) GetSendDontHave() bool { | |
+ if m != nil { | |
+ return m.SendDontHave | |
+ } | |
+ return false | |
+} | |
+ | |
+type Message_Block struct { | |
+ Prefix []byte `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` | |
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` | |
+} | |
+ | |
+func (m *Message_Block) Reset() { *m = Message_Block{} } | |
+func (m *Message_Block) String() string { return proto.CompactTextString(m) } | |
+func (*Message_Block) ProtoMessage() {} | |
+func (*Message_Block) Descriptor() ([]byte, []int) { | |
+ return fileDescriptor_33c57e4bae7b9afd, []int{0, 1} | |
+} | |
+func (m *Message_Block) XXX_Unmarshal(b []byte) error { | |
+ return m.Unmarshal(b) | |
+} | |
+func (m *Message_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
+ if deterministic { | |
+ return xxx_messageInfo_Message_Block.Marshal(b, m, deterministic) | |
+ } else { | |
+ b = b[:cap(b)] | |
+ n, err := m.MarshalToSizedBuffer(b) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ return b[:n], nil | |
+ } | |
+} | |
+func (m *Message_Block) XXX_Merge(src proto.Message) { | |
+ xxx_messageInfo_Message_Block.Merge(m, src) | |
+} | |
+func (m *Message_Block) XXX_Size() int { | |
+ return m.Size() | |
+} | |
+func (m *Message_Block) XXX_DiscardUnknown() { | |
+ xxx_messageInfo_Message_Block.DiscardUnknown(m) | |
+} | |
+ | |
+var xxx_messageInfo_Message_Block proto.InternalMessageInfo | |
+ | |
+func (m *Message_Block) GetPrefix() []byte { | |
+ if m != nil { | |
+ return m.Prefix | |
+ } | |
+ return nil | |
+} | |
+ | |
+func (m *Message_Block) GetData() []byte { | |
+ if m != nil { | |
+ return m.Data | |
+ } | |
+ return nil | |
+} | |
+ | |
+type Message_BlockPresence struct { | |
+ Cid Cid `protobuf:"bytes,1,opt,name=cid,proto3,customtype=Cid" json:"cid"` | |
+ Type Message_BlockPresenceType `protobuf:"varint,2,opt,name=type,proto3,enum=bitswap.message.pb.Message_BlockPresenceType" json:"type,omitempty"` | |
+} | |
+ | |
+func (m *Message_BlockPresence) Reset() { *m = Message_BlockPresence{} } | |
+func (m *Message_BlockPresence) String() string { return proto.CompactTextString(m) } | |
+func (*Message_BlockPresence) ProtoMessage() {} | |
+func (*Message_BlockPresence) Descriptor() ([]byte, []int) { | |
+ return fileDescriptor_33c57e4bae7b9afd, []int{0, 2} | |
+} | |
+func (m *Message_BlockPresence) XXX_Unmarshal(b []byte) error { | |
+ return m.Unmarshal(b) | |
+} | |
+func (m *Message_BlockPresence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { | |
+ if deterministic { | |
+ return xxx_messageInfo_Message_BlockPresence.Marshal(b, m, deterministic) | |
+ } else { | |
+ b = b[:cap(b)] | |
+ n, err := m.MarshalToSizedBuffer(b) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ return b[:n], nil | |
+ } | |
+} | |
+func (m *Message_BlockPresence) XXX_Merge(src proto.Message) { | |
+ xxx_messageInfo_Message_BlockPresence.Merge(m, src) | |
+} | |
+func (m *Message_BlockPresence) XXX_Size() int { | |
+ return m.Size() | |
+} | |
+func (m *Message_BlockPresence) XXX_DiscardUnknown() { | |
+ xxx_messageInfo_Message_BlockPresence.DiscardUnknown(m) | |
+} | |
+ | |
+var xxx_messageInfo_Message_BlockPresence proto.InternalMessageInfo | |
+ | |
+func (m *Message_BlockPresence) GetType() Message_BlockPresenceType { | |
+ if m != nil { | |
+ return m.Type | |
+ } | |
+ return Message_Have | |
+} | |
+ | |
+func init() { | |
+ proto.RegisterEnum("bitswap.message.pb.Message_BlockPresenceType", Message_BlockPresenceType_name, Message_BlockPresenceType_value) | |
+ proto.RegisterEnum("bitswap.message.pb.Message_Wantlist_WantType", Message_Wantlist_WantType_name, Message_Wantlist_WantType_value) | |
+ proto.RegisterType((*Message)(nil), "bitswap.message.pb.Message") | |
+ proto.RegisterType((*Message_Wantlist)(nil), "bitswap.message.pb.Message.Wantlist") | |
+ proto.RegisterType((*Message_Wantlist_Entry)(nil), "bitswap.message.pb.Message.Wantlist.Entry") | |
+ proto.RegisterType((*Message_Block)(nil), "bitswap.message.pb.Message.Block") | |
+ proto.RegisterType((*Message_BlockPresence)(nil), "bitswap.message.pb.Message.BlockPresence") | |
+} | |
+ | |
+func init() { proto.RegisterFile("message.proto", fileDescriptor_33c57e4bae7b9afd) } | |
+ | |
+var fileDescriptor_33c57e4bae7b9afd = []byte{ | |
+ // 497 bytes of a gzipped FileDescriptorProto | |
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xdf, 0x8a, 0xd3, 0x40, | |
+ 0x14, 0xc6, 0x33, 0x4d, 0xd2, 0xc6, 0xd3, 0xee, 0x52, 0xe7, 0x42, 0x42, 0xc0, 0x34, 0x5b, 0xbc, | |
+ 0x88, 0xca, 0x66, 0xa1, 0xfb, 0x04, 0x5b, 0xff, 0xa0, 0x82, 0x20, 0x83, 0xd0, 0xeb, 0xfc, 0x99, | |
+ 0xd6, 0xc1, 0x6c, 0x12, 0x33, 0x53, 0xd7, 0xbe, 0x85, 0x8f, 0xb5, 0x37, 0xc2, 0x5e, 0x8a, 0xca, | |
+ 0x22, 0xed, 0x8b, 0x48, 0x4e, 0xa6, 0x85, 0xba, 0xe2, 0xee, 0xdd, 0x9c, 0x33, 0xe7, 0xfb, 0x65, | |
+ 0xbe, 0xef, 0x10, 0x38, 0x38, 0xe7, 0x52, 0xc6, 0x0b, 0x1e, 0x55, 0x75, 0xa9, 0x4a, 0x4a, 0x13, | |
+ 0xa1, 0xe4, 0x45, 0x5c, 0x45, 0xbb, 0x76, 0xe2, 0x1d, 0x2f, 0x84, 0xfa, 0xb0, 0x4c, 0xa2, 0xb4, | |
+ 0x3c, 0x3f, 0x59, 0x94, 0x8b, 0xf2, 0x04, 0x47, 0x93, 0xe5, 0x1c, 0x2b, 0x2c, 0xf0, 0xd4, 0x22, | |
+ 0xc6, 0xbf, 0xba, 0xd0, 0x7b, 0xdb, 0xaa, 0xe9, 0x4b, 0x70, 0x2e, 0xe2, 0x42, 0xe5, 0x42, 0x2a, | |
+ 0x97, 0x04, 0x24, 0xec, 0x4f, 0x1e, 0x45, 0x37, 0xbf, 0x10, 0xe9, 0xf1, 0x68, 0xa6, 0x67, 0xa7, | |
+ 0xd6, 0xe5, 0xf5, 0xc8, 0x60, 0x3b, 0x2d, 0x7d, 0x00, 0xdd, 0x24, 0x2f, 0xd3, 0x8f, 0xd2, 0xed, | |
+ 0x04, 0x66, 0x38, 0x60, 0xba, 0xa2, 0x67, 0xd0, 0xab, 0xe2, 0x55, 0x5e, 0xc6, 0x99, 0x6b, 0x06, | |
+ 0x66, 0xd8, 0x9f, 0x1c, 0xfd, 0x0f, 0x3f, 0x6d, 0x44, 0x9a, 0xbd, 0xd5, 0xd1, 0x19, 0x1c, 0x22, | |
+ 0xec, 0x5d, 0xcd, 0x25, 0x2f, 0x52, 0x2e, 0x5d, 0x0b, 0x49, 0x8f, 0x6f, 0x25, 0x6d, 0x15, 0x9a, | |
+ 0xf8, 0x17, 0x86, 0x8e, 0x61, 0x50, 0xf1, 0x22, 0x13, 0xc5, 0x62, 0xba, 0x52, 0x5c, 0xba, 0x76, | |
+ 0x40, 0x42, 0x9b, 0xed, 0xf5, 0xbc, 0x9f, 0x1d, 0x70, 0xb6, 0xa6, 0xe9, 0x1b, 0xe8, 0xf1, 0x42, | |
+ 0xd5, 0x82, 0x4b, 0x97, 0xe0, 0x13, 0x9e, 0xdc, 0x25, 0xab, 0xe8, 0x45, 0xa1, 0xea, 0xd5, 0xd6, | |
+ 0x95, 0x06, 0x50, 0x0a, 0xd6, 0x7c, 0x99, 0xe7, 0x6e, 0x27, 0x20, 0xa1, 0xc3, 0xf0, 0xec, 0x7d, | |
+ 0x23, 0x60, 0xe3, 0x30, 0x3d, 0x02, 0x1b, 0x1f, 0x8b, 0x3b, 0x19, 0x4c, 0xfb, 0x8d, 0xf6, 0xc7, | |
+ 0xf5, 0xc8, 0x7c, 0x26, 0x32, 0xd6, 0xde, 0x50, 0x0f, 0x9c, 0xaa, 0x16, 0x65, 0x2d, 0xd4, 0x0a, | |
+ 0x21, 0x36, 0xdb, 0xd5, 0xcd, 0x36, 0xd2, 0xb8, 0x48, 0x79, 0xee, 0x9a, 0x88, 0xd7, 0x15, 0x7d, | |
+ 0xdd, 0x6e, 0xfb, 0xfd, 0xaa, 0xe2, 0xae, 0x15, 0x90, 0xf0, 0x70, 0x72, 0x7c, 0x27, 0x07, 0x33, | |
+ 0x2d, 0x62, 0x3b, 0x79, 0x13, 0x9e, 0xe4, 0x45, 0xf6, 0xbc, 0x2c, 0xd4, 0xab, 0xf8, 0x33, 0xc7, | |
+ 0xf0, 0x1c, 0xb6, 0xd7, 0x1b, 0x8f, 0xda, 0xec, 0x70, 0xfe, 0x1e, 0xd8, 0xb8, 0x93, 0xa1, 0x41, | |
+ 0x1d, 0xb0, 0x9a, 0xeb, 0x21, 0xf1, 0x4e, 0x75, 0xb3, 0x79, 0x70, 0x55, 0xf3, 0xb9, 0xf8, 0xd2, | |
+ 0x1a, 0x66, 0xba, 0x6a, 0x52, 0xca, 0x62, 0x15, 0xa3, 0xc1, 0x01, 0xc3, 0xb3, 0xf7, 0x09, 0x0e, | |
+ 0xf6, 0xb6, 0x4b, 0x1f, 0x82, 0x99, 0x8a, 0xec, 0x5f, 0x51, 0x35, 0x7d, 0x7a, 0x06, 0x96, 0x6a, | |
+ 0x0c, 0x77, 0x6e, 0x37, 0xbc, 0xc7, 0x45, 0xc3, 0x28, 0x1d, 0x3f, 0x85, 0xfb, 0x37, 0xae, 0x76, | |
+ 0x36, 0x0c, 0x3a, 0x00, 0x67, 0xeb, 0x79, 0x48, 0xa6, 0xee, 0xe5, 0xda, 0x27, 0x57, 0x6b, 0x9f, | |
+ 0xfc, 0x5e, 0xfb, 0xe4, 0xeb, 0xc6, 0x37, 0xae, 0x36, 0xbe, 0xf1, 0x7d, 0xe3, 0x1b, 0x49, 0x17, | |
+ 0xff, 0xbf, 0xd3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x8a, 0xaf, 0x83, 0xd3, 0x03, 0x00, | |
+ 0x00, | |
+} | |
+ | |
+func (m *Message) Marshal() (dAtA []byte, err error) { | |
+ size := m.Size() | |
+ dAtA = make([]byte, size) | |
+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ return dAtA[:n], nil | |
+} | |
+ | |
+func (m *Message) MarshalTo(dAtA []byte) (int, error) { | |
+ size := m.Size() | |
+ return m.MarshalToSizedBuffer(dAtA[:size]) | |
+} | |
+ | |
+func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { | |
+ i := len(dAtA) | |
+ _ = i | |
+ var l int | |
+ _ = l | |
+ if m.PendingBytes != 0 { | |
+ i = encodeVarintMessage(dAtA, i, uint64(m.PendingBytes)) | |
+ i-- | |
+ dAtA[i] = 0x28 | |
+ } | |
+ if len(m.BlockPresences) > 0 { | |
+ for iNdEx := len(m.BlockPresences) - 1; iNdEx >= 0; iNdEx-- { | |
+ { | |
+ size, err := m.BlockPresences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) | |
+ if err != nil { | |
+ return 0, err | |
+ } | |
+ i -= size | |
+ i = encodeVarintMessage(dAtA, i, uint64(size)) | |
+ } | |
+ i-- | |
+ dAtA[i] = 0x22 | |
+ } | |
+ } | |
+ if len(m.Payload) > 0 { | |
+ for iNdEx := len(m.Payload) - 1; iNdEx >= 0; iNdEx-- { | |
+ { | |
+ size, err := m.Payload[iNdEx].MarshalToSizedBuffer(dAtA[:i]) | |
+ if err != nil { | |
+ return 0, err | |
+ } | |
+ i -= size | |
+ i = encodeVarintMessage(dAtA, i, uint64(size)) | |
+ } | |
+ i-- | |
+ dAtA[i] = 0x1a | |
+ } | |
+ } | |
+ if len(m.Blocks) > 0 { | |
+ for iNdEx := len(m.Blocks) - 1; iNdEx >= 0; iNdEx-- { | |
+ i -= len(m.Blocks[iNdEx]) | |
+ copy(dAtA[i:], m.Blocks[iNdEx]) | |
+ i = encodeVarintMessage(dAtA, i, uint64(len(m.Blocks[iNdEx]))) | |
+ i-- | |
+ dAtA[i] = 0x12 | |
+ } | |
+ } | |
+ { | |
+ size, err := m.Wantlist.MarshalToSizedBuffer(dAtA[:i]) | |
+ if err != nil { | |
+ return 0, err | |
+ } | |
+ i -= size | |
+ i = encodeVarintMessage(dAtA, i, uint64(size)) | |
+ } | |
+ i-- | |
+ dAtA[i] = 0xa | |
+ return len(dAtA) - i, nil | |
+} | |
+ | |
+func (m *Message_Wantlist) Marshal() (dAtA []byte, err error) { | |
+ size := m.Size() | |
+ dAtA = make([]byte, size) | |
+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ return dAtA[:n], nil | |
+} | |
+ | |
+func (m *Message_Wantlist) MarshalTo(dAtA []byte) (int, error) { | |
+ size := m.Size() | |
+ return m.MarshalToSizedBuffer(dAtA[:size]) | |
+} | |
+ | |
+func (m *Message_Wantlist) MarshalToSizedBuffer(dAtA []byte) (int, error) { | |
+ i := len(dAtA) | |
+ _ = i | |
+ var l int | |
+ _ = l | |
+ if m.Full { | |
+ i-- | |
+ if m.Full { | |
+ dAtA[i] = 1 | |
+ } else { | |
+ dAtA[i] = 0 | |
+ } | |
+ i-- | |
+ dAtA[i] = 0x10 | |
+ } | |
+ if len(m.Entries) > 0 { | |
+ for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { | |
+ { | |
+ size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) | |
+ if err != nil { | |
+ return 0, err | |
+ } | |
+ i -= size | |
+ i = encodeVarintMessage(dAtA, i, uint64(size)) | |
+ } | |
+ i-- | |
+ dAtA[i] = 0xa | |
+ } | |
+ } | |
+ return len(dAtA) - i, nil | |
+} | |
+ | |
+func (m *Message_Wantlist_Entry) Marshal() (dAtA []byte, err error) { | |
+ size := m.Size() | |
+ dAtA = make([]byte, size) | |
+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ return dAtA[:n], nil | |
+} | |
+ | |
+func (m *Message_Wantlist_Entry) MarshalTo(dAtA []byte) (int, error) { | |
+ size := m.Size() | |
+ return m.MarshalToSizedBuffer(dAtA[:size]) | |
+} | |
+ | |
+func (m *Message_Wantlist_Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) { | |
+ i := len(dAtA) | |
+ _ = i | |
+ var l int | |
+ _ = l | |
+ if m.SendDontHave { | |
+ i-- | |
+ if m.SendDontHave { | |
+ dAtA[i] = 1 | |
+ } else { | |
+ dAtA[i] = 0 | |
+ } | |
+ i-- | |
+ dAtA[i] = 0x28 | |
+ } | |
+ if m.WantType != 0 { | |
+ i = encodeVarintMessage(dAtA, i, uint64(m.WantType)) | |
+ i-- | |
+ dAtA[i] = 0x20 | |
+ } | |
+ if m.Cancel { | |
+ i-- | |
+ if m.Cancel { | |
+ dAtA[i] = 1 | |
+ } else { | |
+ dAtA[i] = 0 | |
+ } | |
+ i-- | |
+ dAtA[i] = 0x18 | |
+ } | |
+ if m.Priority != 0 { | |
+ i = encodeVarintMessage(dAtA, i, uint64(m.Priority)) | |
+ i-- | |
+ dAtA[i] = 0x10 | |
+ } | |
+ { | |
+ size := m.Block.Size() | |
+ i -= size | |
+ if _, err := m.Block.MarshalTo(dAtA[i:]); err != nil { | |
+ return 0, err | |
+ } | |
+ i = encodeVarintMessage(dAtA, i, uint64(size)) | |
+ } | |
+ i-- | |
+ dAtA[i] = 0xa | |
+ return len(dAtA) - i, nil | |
+} | |
+ | |
+func (m *Message_Block) Marshal() (dAtA []byte, err error) { | |
+ size := m.Size() | |
+ dAtA = make([]byte, size) | |
+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ return dAtA[:n], nil | |
+} | |
+ | |
+func (m *Message_Block) MarshalTo(dAtA []byte) (int, error) { | |
+ size := m.Size() | |
+ return m.MarshalToSizedBuffer(dAtA[:size]) | |
+} | |
+ | |
+func (m *Message_Block) MarshalToSizedBuffer(dAtA []byte) (int, error) { | |
+ i := len(dAtA) | |
+ _ = i | |
+ var l int | |
+ _ = l | |
+ if len(m.Data) > 0 { | |
+ i -= len(m.Data) | |
+ copy(dAtA[i:], m.Data) | |
+ i = encodeVarintMessage(dAtA, i, uint64(len(m.Data))) | |
+ i-- | |
+ dAtA[i] = 0x12 | |
+ } | |
+ if len(m.Prefix) > 0 { | |
+ i -= len(m.Prefix) | |
+ copy(dAtA[i:], m.Prefix) | |
+ i = encodeVarintMessage(dAtA, i, uint64(len(m.Prefix))) | |
+ i-- | |
+ dAtA[i] = 0xa | |
+ } | |
+ return len(dAtA) - i, nil | |
+} | |
+ | |
+func (m *Message_BlockPresence) Marshal() (dAtA []byte, err error) { | |
+ size := m.Size() | |
+ dAtA = make([]byte, size) | |
+ n, err := m.MarshalToSizedBuffer(dAtA[:size]) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ return dAtA[:n], nil | |
+} | |
+ | |
+func (m *Message_BlockPresence) MarshalTo(dAtA []byte) (int, error) { | |
+ size := m.Size() | |
+ return m.MarshalToSizedBuffer(dAtA[:size]) | |
+} | |
+ | |
+func (m *Message_BlockPresence) MarshalToSizedBuffer(dAtA []byte) (int, error) { | |
+ i := len(dAtA) | |
+ _ = i | |
+ var l int | |
+ _ = l | |
+ if m.Type != 0 { | |
+ i = encodeVarintMessage(dAtA, i, uint64(m.Type)) | |
+ i-- | |
+ dAtA[i] = 0x10 | |
+ } | |
+ { | |
+ size := m.Cid.Size() | |
+ i -= size | |
+ if _, err := m.Cid.MarshalTo(dAtA[i:]); err != nil { | |
+ return 0, err | |
+ } | |
+ i = encodeVarintMessage(dAtA, i, uint64(size)) | |
+ } | |
+ i-- | |
+ dAtA[i] = 0xa | |
+ return len(dAtA) - i, nil | |
+} | |
+ | |
+func encodeVarintMessage(dAtA []byte, offset int, v uint64) int { | |
+ offset -= sovMessage(v) | |
+ base := offset | |
+ for v >= 1<<7 { | |
+ dAtA[offset] = uint8(v&0x7f | 0x80) | |
+ v >>= 7 | |
+ offset++ | |
+ } | |
+ dAtA[offset] = uint8(v) | |
+ return base | |
+} | |
+func (m *Message) Size() (n int) { | |
+ if m == nil { | |
+ return 0 | |
+ } | |
+ var l int | |
+ _ = l | |
+ l = m.Wantlist.Size() | |
+ n += 1 + l + sovMessage(uint64(l)) | |
+ if len(m.Blocks) > 0 { | |
+ for _, b := range m.Blocks { | |
+ l = len(b) | |
+ n += 1 + l + sovMessage(uint64(l)) | |
+ } | |
+ } | |
+ if len(m.Payload) > 0 { | |
+ for _, e := range m.Payload { | |
+ l = e.Size() | |
+ n += 1 + l + sovMessage(uint64(l)) | |
+ } | |
+ } | |
+ if len(m.BlockPresences) > 0 { | |
+ for _, e := range m.BlockPresences { | |
+ l = e.Size() | |
+ n += 1 + l + sovMessage(uint64(l)) | |
+ } | |
+ } | |
+ if m.PendingBytes != 0 { | |
+ n += 1 + sovMessage(uint64(m.PendingBytes)) | |
+ } | |
+ return n | |
+} | |
+ | |
+func (m *Message_Wantlist) Size() (n int) { | |
+ if m == nil { | |
+ return 0 | |
+ } | |
+ var l int | |
+ _ = l | |
+ if len(m.Entries) > 0 { | |
+ for _, e := range m.Entries { | |
+ l = e.Size() | |
+ n += 1 + l + sovMessage(uint64(l)) | |
+ } | |
+ } | |
+ if m.Full { | |
+ n += 2 | |
+ } | |
+ return n | |
+} | |
+ | |
+func (m *Message_Wantlist_Entry) Size() (n int) { | |
+ if m == nil { | |
+ return 0 | |
+ } | |
+ var l int | |
+ _ = l | |
+ l = m.Block.Size() | |
+ n += 1 + l + sovMessage(uint64(l)) | |
+ if m.Priority != 0 { | |
+ n += 1 + sovMessage(uint64(m.Priority)) | |
+ } | |
+ if m.Cancel { | |
+ n += 2 | |
+ } | |
+ if m.WantType != 0 { | |
+ n += 1 + sovMessage(uint64(m.WantType)) | |
+ } | |
+ if m.SendDontHave { | |
+ n += 2 | |
+ } | |
+ return n | |
+} | |
+ | |
+func (m *Message_Block) Size() (n int) { | |
+ if m == nil { | |
+ return 0 | |
+ } | |
+ var l int | |
+ _ = l | |
+ l = len(m.Prefix) | |
+ if l > 0 { | |
+ n += 1 + l + sovMessage(uint64(l)) | |
+ } | |
+ l = len(m.Data) | |
+ if l > 0 { | |
+ n += 1 + l + sovMessage(uint64(l)) | |
+ } | |
+ return n | |
+} | |
+ | |
+func (m *Message_BlockPresence) Size() (n int) { | |
+ if m == nil { | |
+ return 0 | |
+ } | |
+ var l int | |
+ _ = l | |
+ l = m.Cid.Size() | |
+ n += 1 + l + sovMessage(uint64(l)) | |
+ if m.Type != 0 { | |
+ n += 1 + sovMessage(uint64(m.Type)) | |
+ } | |
+ return n | |
+} | |
+ | |
+func sovMessage(x uint64) (n int) { | |
+ return (math_bits.Len64(x|1) + 6) / 7 | |
+} | |
+func sozMessage(x uint64) (n int) { | |
+ return sovMessage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) | |
+} | |
+func (m *Message) Unmarshal(dAtA []byte) error { | |
+ l := len(dAtA) | |
+ iNdEx := 0 | |
+ for iNdEx < l { | |
+ preIndex := iNdEx | |
+ var wire uint64 | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ wire |= uint64(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ fieldNum := int32(wire >> 3) | |
+ wireType := int(wire & 0x7) | |
+ if wireType == 4 { | |
+ return fmt.Errorf("proto: Message: wiretype end group for non-group") | |
+ } | |
+ if fieldNum <= 0 { | |
+ return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) | |
+ } | |
+ switch fieldNum { | |
+ case 1: | |
+ if wireType != 2 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field Wantlist", wireType) | |
+ } | |
+ var msglen int | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ msglen |= int(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ if msglen < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ postIndex := iNdEx + msglen | |
+ if postIndex < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ if postIndex > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ if err := m.Wantlist.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { | |
+ return err | |
+ } | |
+ iNdEx = postIndex | |
+ case 2: | |
+ if wireType != 2 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) | |
+ } | |
+ var byteLen int | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ byteLen |= int(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ if byteLen < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ postIndex := iNdEx + byteLen | |
+ if postIndex < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ if postIndex > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ m.Blocks = append(m.Blocks, make([]byte, postIndex-iNdEx)) | |
+ copy(m.Blocks[len(m.Blocks)-1], dAtA[iNdEx:postIndex]) | |
+ iNdEx = postIndex | |
+ case 3: | |
+ if wireType != 2 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) | |
+ } | |
+ var msglen int | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ msglen |= int(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ if msglen < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ postIndex := iNdEx + msglen | |
+ if postIndex < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ if postIndex > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ m.Payload = append(m.Payload, Message_Block{}) | |
+ if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { | |
+ return err | |
+ } | |
+ iNdEx = postIndex | |
+ case 4: | |
+ if wireType != 2 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field BlockPresences", wireType) | |
+ } | |
+ var msglen int | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ msglen |= int(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ if msglen < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ postIndex := iNdEx + msglen | |
+ if postIndex < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ if postIndex > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ m.BlockPresences = append(m.BlockPresences, Message_BlockPresence{}) | |
+ if err := m.BlockPresences[len(m.BlockPresences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { | |
+ return err | |
+ } | |
+ iNdEx = postIndex | |
+ case 5: | |
+ if wireType != 0 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field PendingBytes", wireType) | |
+ } | |
+ m.PendingBytes = 0 | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ m.PendingBytes |= int32(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ default: | |
+ iNdEx = preIndex | |
+ skippy, err := skipMessage(dAtA[iNdEx:]) | |
+ if err != nil { | |
+ return err | |
+ } | |
+ if (skippy < 0) || (iNdEx+skippy) < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ if (iNdEx + skippy) > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ iNdEx += skippy | |
+ } | |
+ } | |
+ | |
+ if iNdEx > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ return nil | |
+} | |
+func (m *Message_Wantlist) Unmarshal(dAtA []byte) error { | |
+ l := len(dAtA) | |
+ iNdEx := 0 | |
+ for iNdEx < l { | |
+ preIndex := iNdEx | |
+ var wire uint64 | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ wire |= uint64(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ fieldNum := int32(wire >> 3) | |
+ wireType := int(wire & 0x7) | |
+ if wireType == 4 { | |
+ return fmt.Errorf("proto: Wantlist: wiretype end group for non-group") | |
+ } | |
+ if fieldNum <= 0 { | |
+ return fmt.Errorf("proto: Wantlist: illegal tag %d (wire type %d)", fieldNum, wire) | |
+ } | |
+ switch fieldNum { | |
+ case 1: | |
+ if wireType != 2 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) | |
+ } | |
+ var msglen int | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ msglen |= int(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ if msglen < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ postIndex := iNdEx + msglen | |
+ if postIndex < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ if postIndex > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ m.Entries = append(m.Entries, Message_Wantlist_Entry{}) | |
+ if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { | |
+ return err | |
+ } | |
+ iNdEx = postIndex | |
+ case 2: | |
+ if wireType != 0 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field Full", wireType) | |
+ } | |
+ var v int | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ v |= int(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ m.Full = bool(v != 0) | |
+ default: | |
+ iNdEx = preIndex | |
+ skippy, err := skipMessage(dAtA[iNdEx:]) | |
+ if err != nil { | |
+ return err | |
+ } | |
+ if (skippy < 0) || (iNdEx+skippy) < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ if (iNdEx + skippy) > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ iNdEx += skippy | |
+ } | |
+ } | |
+ | |
+ if iNdEx > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ return nil | |
+} | |
+func (m *Message_Wantlist_Entry) Unmarshal(dAtA []byte) error { | |
+ l := len(dAtA) | |
+ iNdEx := 0 | |
+ for iNdEx < l { | |
+ preIndex := iNdEx | |
+ var wire uint64 | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ wire |= uint64(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ fieldNum := int32(wire >> 3) | |
+ wireType := int(wire & 0x7) | |
+ if wireType == 4 { | |
+ return fmt.Errorf("proto: Entry: wiretype end group for non-group") | |
+ } | |
+ if fieldNum <= 0 { | |
+ return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) | |
+ } | |
+ switch fieldNum { | |
+ case 1: | |
+ if wireType != 2 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) | |
+ } | |
+ var byteLen int | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ byteLen |= int(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ if byteLen < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ postIndex := iNdEx + byteLen | |
+ if postIndex < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ if postIndex > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ if err := m.Block.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { | |
+ return err | |
+ } | |
+ iNdEx = postIndex | |
+ case 2: | |
+ if wireType != 0 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) | |
+ } | |
+ m.Priority = 0 | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ m.Priority |= int32(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ case 3: | |
+ if wireType != 0 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field Cancel", wireType) | |
+ } | |
+ var v int | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ v |= int(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ m.Cancel = bool(v != 0) | |
+ case 4: | |
+ if wireType != 0 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field WantType", wireType) | |
+ } | |
+ m.WantType = 0 | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ m.WantType |= Message_Wantlist_WantType(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ case 5: | |
+ if wireType != 0 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field SendDontHave", wireType) | |
+ } | |
+ var v int | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ v |= int(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ m.SendDontHave = bool(v != 0) | |
+ default: | |
+ iNdEx = preIndex | |
+ skippy, err := skipMessage(dAtA[iNdEx:]) | |
+ if err != nil { | |
+ return err | |
+ } | |
+ if (skippy < 0) || (iNdEx+skippy) < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ if (iNdEx + skippy) > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ iNdEx += skippy | |
+ } | |
+ } | |
+ | |
+ if iNdEx > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ return nil | |
+} | |
+func (m *Message_Block) Unmarshal(dAtA []byte) error { | |
+ l := len(dAtA) | |
+ iNdEx := 0 | |
+ for iNdEx < l { | |
+ preIndex := iNdEx | |
+ var wire uint64 | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ wire |= uint64(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ fieldNum := int32(wire >> 3) | |
+ wireType := int(wire & 0x7) | |
+ if wireType == 4 { | |
+ return fmt.Errorf("proto: Block: wiretype end group for non-group") | |
+ } | |
+ if fieldNum <= 0 { | |
+ return fmt.Errorf("proto: Block: illegal tag %d (wire type %d)", fieldNum, wire) | |
+ } | |
+ switch fieldNum { | |
+ case 1: | |
+ if wireType != 2 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) | |
+ } | |
+ var byteLen int | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ byteLen |= int(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ if byteLen < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ postIndex := iNdEx + byteLen | |
+ if postIndex < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ if postIndex > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ m.Prefix = append(m.Prefix[:0], dAtA[iNdEx:postIndex]...) | |
+ if m.Prefix == nil { | |
+ m.Prefix = []byte{} | |
+ } | |
+ iNdEx = postIndex | |
+ case 2: | |
+ if wireType != 2 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) | |
+ } | |
+ var byteLen int | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ byteLen |= int(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ if byteLen < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ postIndex := iNdEx + byteLen | |
+ if postIndex < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ if postIndex > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) | |
+ if m.Data == nil { | |
+ m.Data = []byte{} | |
+ } | |
+ iNdEx = postIndex | |
+ default: | |
+ iNdEx = preIndex | |
+ skippy, err := skipMessage(dAtA[iNdEx:]) | |
+ if err != nil { | |
+ return err | |
+ } | |
+ if (skippy < 0) || (iNdEx+skippy) < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ if (iNdEx + skippy) > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ iNdEx += skippy | |
+ } | |
+ } | |
+ | |
+ if iNdEx > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ return nil | |
+} | |
+func (m *Message_BlockPresence) Unmarshal(dAtA []byte) error { | |
+ l := len(dAtA) | |
+ iNdEx := 0 | |
+ for iNdEx < l { | |
+ preIndex := iNdEx | |
+ var wire uint64 | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ wire |= uint64(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ fieldNum := int32(wire >> 3) | |
+ wireType := int(wire & 0x7) | |
+ if wireType == 4 { | |
+ return fmt.Errorf("proto: BlockPresence: wiretype end group for non-group") | |
+ } | |
+ if fieldNum <= 0 { | |
+ return fmt.Errorf("proto: BlockPresence: illegal tag %d (wire type %d)", fieldNum, wire) | |
+ } | |
+ switch fieldNum { | |
+ case 1: | |
+ if wireType != 2 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) | |
+ } | |
+ var byteLen int | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ byteLen |= int(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ if byteLen < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ postIndex := iNdEx + byteLen | |
+ if postIndex < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ if postIndex > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ if err := m.Cid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { | |
+ return err | |
+ } | |
+ iNdEx = postIndex | |
+ case 2: | |
+ if wireType != 0 { | |
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) | |
+ } | |
+ m.Type = 0 | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ m.Type |= Message_BlockPresenceType(b&0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ default: | |
+ iNdEx = preIndex | |
+ skippy, err := skipMessage(dAtA[iNdEx:]) | |
+ if err != nil { | |
+ return err | |
+ } | |
+ if (skippy < 0) || (iNdEx+skippy) < 0 { | |
+ return ErrInvalidLengthMessage | |
+ } | |
+ if (iNdEx + skippy) > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ iNdEx += skippy | |
+ } | |
+ } | |
+ | |
+ if iNdEx > l { | |
+ return io.ErrUnexpectedEOF | |
+ } | |
+ return nil | |
+} | |
+func skipMessage(dAtA []byte) (n int, err error) { | |
+ l := len(dAtA) | |
+ iNdEx := 0 | |
+ depth := 0 | |
+ for iNdEx < l { | |
+ var wire uint64 | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return 0, ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return 0, io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ wire |= (uint64(b) & 0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ wireType := int(wire & 0x7) | |
+ switch wireType { | |
+ case 0: | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return 0, ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return 0, io.ErrUnexpectedEOF | |
+ } | |
+ iNdEx++ | |
+ if dAtA[iNdEx-1] < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ case 1: | |
+ iNdEx += 8 | |
+ case 2: | |
+ var length int | |
+ for shift := uint(0); ; shift += 7 { | |
+ if shift >= 64 { | |
+ return 0, ErrIntOverflowMessage | |
+ } | |
+ if iNdEx >= l { | |
+ return 0, io.ErrUnexpectedEOF | |
+ } | |
+ b := dAtA[iNdEx] | |
+ iNdEx++ | |
+ length |= (int(b) & 0x7F) << shift | |
+ if b < 0x80 { | |
+ break | |
+ } | |
+ } | |
+ if length < 0 { | |
+ return 0, ErrInvalidLengthMessage | |
+ } | |
+ iNdEx += length | |
+ case 3: | |
+ depth++ | |
+ case 4: | |
+ if depth == 0 { | |
+ return 0, ErrUnexpectedEndOfGroupMessage | |
+ } | |
+ depth-- | |
+ case 5: | |
+ iNdEx += 4 | |
+ default: | |
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType) | |
+ } | |
+ if iNdEx < 0 { | |
+ return 0, ErrInvalidLengthMessage | |
+ } | |
+ if depth == 0 { | |
+ return iNdEx, nil | |
+ } | |
+ } | |
+ return 0, io.ErrUnexpectedEOF | |
+} | |
+ | |
+var ( | |
+ ErrInvalidLengthMessage = fmt.Errorf("proto: negative length found during unmarshaling") | |
+ ErrIntOverflowMessage = fmt.Errorf("proto: integer overflow") | |
+ ErrUnexpectedEndOfGroupMessage = fmt.Errorf("proto: unexpected end of group") | |
+) | |
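The generated marshalers above all funnel through the same varint arithmetic: `sovMessage` computes how many bytes a value occupies as a base-128 varint, and `encodeVarintMessage` writes those bytes (right-to-left, since the gogo marshalers fill buffers from the end). A minimal standalone sketch, not part of the patch, just to make the math concrete:

```golang
package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors sovMessage: the number of bytes x occupies as a protobuf varint.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// putVarint appends x as a little-endian base-128 varint, the same layout
// encodeVarintMessage produces (it just writes into the buffer from the right).
func putVarint(dst []byte, x uint64) []byte {
	for x >= 1<<7 {
		dst = append(dst, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(dst, byte(x))
}

func main() {
	v := uint64(300)
	enc := putVarint(nil, v)
	fmt.Printf("varint(%d) = % x (len %d, sov=%d)\n", v, enc, len(enc), sov(v))
	// Output: varint(300) = ac 02 (len 2, sov=2)
}
```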
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/message.proto a/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/message.proto | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/message.proto 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/message/pb/message.proto 2023-01-30 20:34:49.305464367 +0100 | |
@@ -0,0 +1,46 @@ | |
+syntax = "proto3"; | |
+ | |
+package bitswap.message.pb; | |
+ | |
+import "github.com/gogo/protobuf/gogoproto/gogo.proto"; | |
+ | |
+message Message { | |
+ | |
+ message Wantlist { | |
+ enum WantType { | |
+ Block = 0; | |
+ Have = 1; | |
+ } | |
+ | |
+ message Entry { | |
+ bytes block = 1 [(gogoproto.customtype) = "Cid", (gogoproto.nullable) = false]; // the block cid (cidV0 in bitswap 1.0.0, cidV1 in bitswap 1.1.0) | |
+ int32 priority = 2; // the priority (normalized). defaults to 1 | |
+ bool cancel = 3; // whether this revokes an entry | |
+ WantType wantType = 4; // Note: defaults to enum 0, i.e. Block | |
+ bool sendDontHave = 5; // Note: defaults to false | |
+ } | |
+ | |
+ repeated Entry entries = 1 [(gogoproto.nullable) = false]; // a list of wantlist entries | |
+ bool full = 2; // whether this is the full wantlist. defaults to false | |
+ } | |
+ | |
+ message Block { | |
+ bytes prefix = 1; // CID prefix (cid version, multicodec and multihash prefix (type + length)) | |
+ bytes data = 2; | |
+ } | |
+ | |
+ enum BlockPresenceType { | |
+ Have = 0; | |
+ DontHave = 1; | |
+ } | |
+ message BlockPresence { | |
+ bytes cid = 1 [(gogoproto.customtype) = "Cid", (gogoproto.nullable) = false]; | |
+ BlockPresenceType type = 2; | |
+ } | |
+ | |
+ Wantlist wantlist = 1 [(gogoproto.nullable) = false]; | |
+ repeated bytes blocks = 2; // used to send Blocks in bitswap 1.0.0 | |
+ repeated Block payload = 3 [(gogoproto.nullable) = false]; // used to send Blocks in bitswap 1.1.0 | |
+ repeated BlockPresence blockPresences = 4 [(gogoproto.nullable) = false]; | |
+ int32 pendingBytes = 5; | |
+} | |
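Since this schema is compiled with gogo-protobuf, the generated `Marshal`/`Unmarshal` methods shown earlier in the patch can be used directly. A hedged round-trip sketch follows; the import path and `pb` alias are assumptions based on the file location, and the CID-typed fields are left at their zero values to keep the example self-contained:

```golang
package main

import (
	"fmt"

	pb "github.com/ipfs/go-libipfs/bitswap/message/pb" // assumed import path
)

func main() {
	// Build a bitswap 1.1.0-style message carrying one block in the payload.
	msg := pb.Message{
		Wantlist: pb.Message_Wantlist{Full: false},
		Payload: []pb.Message_Block{
			{
				Prefix: []byte{0x01, 0x55, 0x12, 0x20}, // example CIDv1 / raw / sha2-256 / 32-byte prefix
				Data:   []byte("hello bitswap"),
			},
		},
	}

	raw, err := msg.Marshal()
	if err != nil {
		panic(err)
	}

	var decoded pb.Message
	if err := decoded.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped %d payload block(s), %d data bytes\n",
		len(decoded.Payload), len(decoded.Payload[0].Data))
}
```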
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/metrics/metrics.go a/vendor/github.com/ipfs/go-libipfs/bitswap/metrics/metrics.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/metrics/metrics.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/metrics/metrics.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,46 @@ | |
+package metrics | |
+ | |
+import ( | |
+ "context" | |
+ | |
+ "github.com/ipfs/go-metrics-interface" | |
+) | |
+ | |
+var ( | |
+ // the 1<<18+15 bucket is there to catch old file chunks that are 1<<18 + 14 bytes in size | |
+ metricsBuckets = []float64{1 << 6, 1 << 10, 1 << 14, 1 << 18, 1<<18 + 15, 1 << 22} | |
+ | |
+ timeMetricsBuckets = []float64{1, 10, 30, 60, 90, 120, 600} | |
+) | |
+ | |
+func DupHist(ctx context.Context) metrics.Histogram { | |
+ return metrics.NewCtx(ctx, "recv_dup_blocks_bytes", "Summary of duplicate data blocks received").Histogram(metricsBuckets) | |
+} | |
+ | |
+func AllHist(ctx context.Context) metrics.Histogram { | |
+ return metrics.NewCtx(ctx, "recv_all_blocks_bytes", "Summary of all data blocks received").Histogram(metricsBuckets) | |
+} | |
+ | |
+func SentHist(ctx context.Context) metrics.Histogram { | |
+ return metrics.NewCtx(ctx, "sent_all_blocks_bytes", "Histogram of blocks sent by this bitswap").Histogram(metricsBuckets) | |
+} | |
+ | |
+func SendTimeHist(ctx context.Context) metrics.Histogram { | |
+ return metrics.NewCtx(ctx, "send_times", "Histogram of how long it takes to send messages in this bitswap").Histogram(timeMetricsBuckets) | |
+} | |
+ | |
+func PendingEngineGauge(ctx context.Context) metrics.Gauge { | |
+ return metrics.NewCtx(ctx, "pending_tasks", "Total number of pending tasks").Gauge() | |
+} | |
+ | |
+func ActiveEngineGauge(ctx context.Context) metrics.Gauge { | |
+ return metrics.NewCtx(ctx, "active_tasks", "Total number of active tasks").Gauge() | |
+} | |
+ | |
+func PendingBlocksGauge(ctx context.Context) metrics.Gauge { | |
+ return metrics.NewCtx(ctx, "pending_block_tasks", "Total number of pending blockstore tasks").Gauge() | |
+} | |
+ | |
+func ActiveBlocksGauge(ctx context.Context) metrics.Gauge { | |
+ return metrics.NewCtx(ctx, "active_block_tasks", "Total number of active blockstore tasks").Gauge() | |
+} | |
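These constructors only declare the metrics; they return whatever implementation is bound to the context by go-metrics-interface (a no-op unless an exporter is injected). A hedged usage sketch — the `Observe`/`Inc`/`Dec` calls are assumed from the go-metrics-interface `Histogram` and `Gauge` interfaces:

```golang
package main

import (
	"context"

	bsmetrics "github.com/ipfs/go-libipfs/bitswap/metrics" // assumed import path
)

func main() {
	ctx := context.Background() // bind a real metrics scope here to actually export values

	// Histogram of received duplicate-block sizes.
	dup := bsmetrics.DupHist(ctx)
	dup.Observe(1 << 18) // e.g. a 256 KiB duplicate block

	// Gauge of pending engine tasks.
	pending := bsmetrics.PendingEngineGauge(ctx)
	pending.Inc()
	defer pending.Dec()
}
```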
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/network/connecteventmanager.go a/vendor/github.com/ipfs/go-libipfs/bitswap/network/connecteventmanager.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/network/connecteventmanager.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/network/connecteventmanager.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,218 @@ | |
+package network | |
+ | |
+import ( | |
+ "sync" | |
+ | |
+ "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+type ConnectionListener interface { | |
+ PeerConnected(peer.ID) | |
+ PeerDisconnected(peer.ID) | |
+} | |
+ | |
+type state byte | |
+ | |
+const ( | |
+ stateDisconnected = iota | |
+ stateResponsive | |
+ stateUnresponsive | |
+) | |
+ | |
+type connectEventManager struct { | |
+ connListeners []ConnectionListener | |
+ lk sync.RWMutex | |
+ cond sync.Cond | |
+ peers map[peer.ID]*peerState | |
+ | |
+ changeQueue []peer.ID | |
+ stop bool | |
+ done chan struct{} | |
+} | |
+ | |
+type peerState struct { | |
+ newState, curState state | |
+ pending bool | |
+} | |
+ | |
+func newConnectEventManager(connListeners ...ConnectionListener) *connectEventManager { | |
+ evtManager := &connectEventManager{ | |
+ connListeners: connListeners, | |
+ peers: make(map[peer.ID]*peerState), | |
+ done: make(chan struct{}), | |
+ } | |
+ evtManager.cond = sync.Cond{L: &evtManager.lk} | |
+ return evtManager | |
+} | |
+ | |
+func (c *connectEventManager) Start() { | |
+ go c.worker() | |
+} | |
+ | |
+func (c *connectEventManager) Stop() { | |
+ c.lk.Lock() | |
+ c.stop = true | |
+ c.lk.Unlock() | |
+ c.cond.Broadcast() | |
+ | |
+ <-c.done | |
+} | |
+ | |
+func (c *connectEventManager) getState(p peer.ID) state { | |
+ if state, ok := c.peers[p]; ok { | |
+ return state.newState | |
+ } else { | |
+ return stateDisconnected | |
+ } | |
+} | |
+ | |
+func (c *connectEventManager) setState(p peer.ID, newState state) { | |
+ state, ok := c.peers[p] | |
+ if !ok { | |
+ state = new(peerState) | |
+ c.peers[p] = state | |
+ } | |
+ state.newState = newState | |
+ if !state.pending && state.newState != state.curState { | |
+ state.pending = true | |
+ c.changeQueue = append(c.changeQueue, p) | |
+ c.cond.Broadcast() | |
+ } | |
+} | |
+ | |
+// Waits for a change to be enqueued, or for the event manager to be stopped. Returns false if the | |
+// connect event manager has been stopped. | |
+func (c *connectEventManager) waitChange() bool { | |
+ for !c.stop && len(c.changeQueue) == 0 { | |
+ c.cond.Wait() | |
+ } | |
+ return !c.stop | |
+} | |
+ | |
+func (c *connectEventManager) worker() { | |
+ c.lk.Lock() | |
+ defer c.lk.Unlock() | |
+ defer close(c.done) | |
+ | |
+ for c.waitChange() { | |
+ pid := c.changeQueue[0] | |
+ c.changeQueue[0] = peer.ID("") // free the peer ID (slicing won't do that) | |
+ c.changeQueue = c.changeQueue[1:] | |
+ | |
+ state, ok := c.peers[pid] | |
+ // If we've disconnected and forgotten, continue. | |
+ if !ok { | |
+ // This shouldn't be possible because _this_ thread is responsible for | |
+ // removing peers from this map, and we shouldn't get duplicate entries in | |
+ // the change queue. | |
+ log.Error("a change was enqueued for a peer we're not tracking") | |
+ continue | |
+ } | |
+ | |
+ // Record the fact that this "state" is no longer in the queue. | |
+ state.pending = false | |
+ | |
+ // Then, if there's nothing to do, continue. | |
+ if state.curState == state.newState { | |
+ continue | |
+ } | |
+ | |
+ // Or record the state update, then apply it. | |
+ oldState := state.curState | |
+ state.curState = state.newState | |
+ | |
+ switch state.newState { | |
+ case stateDisconnected: | |
+ delete(c.peers, pid) | |
+ fallthrough | |
+ case stateUnresponsive: | |
+ // Only trigger a disconnect event if the peer was responsive. | |
+ // We could be transitioning from unresponsive to disconnected. | |
+ if oldState == stateResponsive { | |
+ c.lk.Unlock() | |
+ for _, v := range c.connListeners { | |
+ v.PeerDisconnected(pid) | |
+ } | |
+ c.lk.Lock() | |
+ } | |
+ case stateResponsive: | |
+ c.lk.Unlock() | |
+ for _, v := range c.connListeners { | |
+ v.PeerConnected(pid) | |
+ } | |
+ c.lk.Lock() | |
+ } | |
+ } | |
+} | |
+ | |
+// Called whenever we receive a new connection. May be called many times. | |
+func (c *connectEventManager) Connected(p peer.ID) { | |
+ c.lk.Lock() | |
+ defer c.lk.Unlock() | |
+ | |
+ // !responsive -> responsive | |
+ | |
+ if c.getState(p) == stateResponsive { | |
+ return | |
+ } | |
+ c.setState(p, stateResponsive) | |
+} | |
+ | |
+// Called when we drop the final connection to a peer. | |
+func (c *connectEventManager) Disconnected(p peer.ID) { | |
+ c.lk.Lock() | |
+ defer c.lk.Unlock() | |
+ | |
+ // !disconnected -> disconnected | |
+ | |
+ if c.getState(p) == stateDisconnected { | |
+ return | |
+ } | |
+ | |
+ c.setState(p, stateDisconnected) | |
+} | |
+ | |
+// Called whenever a peer is unresponsive. | |
+func (c *connectEventManager) MarkUnresponsive(p peer.ID) { | |
+ c.lk.Lock() | |
+ defer c.lk.Unlock() | |
+ | |
+ // responsive -> unresponsive | |
+ | |
+ if c.getState(p) != stateResponsive { | |
+ return | |
+ } | |
+ | |
+ c.setState(p, stateUnresponsive) | |
+} | |
+ | |
+// Called whenever we receive a message from a peer. | |
+// | |
+// - When we're connected to the peer, this will mark the peer as responsive (from unresponsive). | |
+// - When not connected, we ignore this call. Unfortunately, a peer may disconnect before we | |
+//   process the "on message" event, so we can't treat this as evidence of a connection. | |
+func (c *connectEventManager) OnMessage(p peer.ID) { | |
+ c.lk.RLock() | |
+ unresponsive := c.getState(p) == stateUnresponsive | |
+ c.lk.RUnlock() | |
+ | |
+ // Only continue if both connected, and unresponsive. | |
+ if !unresponsive { | |
+ return | |
+ } | |
+ | |
+ // unresponsive -> responsive | |
+ | |
+ // We need to make a modification so now take a write lock | |
+ c.lk.Lock() | |
+ defer c.lk.Unlock() | |
+ | |
+ // Note: state may have changed in the time between when read lock | |
+ // was released and write lock taken, so check again | |
+ if c.getState(p) != stateUnresponsive { | |
+ return | |
+ } | |
+ | |
+ c.setState(p, stateResponsive) | |
+} | |
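The connect event manager is a small state machine (disconnected / responsive / unresponsive) whose transitions are delivered to listeners asynchronously by the worker goroutine, with rapid back-to-back changes coalesced. A test-style sketch, assumed to sit inside the `network` package since the type is unexported; the fake peer ID and the sleeps are only there to make the asynchronous delivery observable:

```golang
package network

import (
	"fmt"
	"time"

	"github.com/libp2p/go-libp2p/core/peer"
)

// printListener is a toy ConnectionListener used only for illustration.
type printListener struct{}

func (printListener) PeerConnected(p peer.ID)    { fmt.Println("connected:", p) }
func (printListener) PeerDisconnected(p peer.ID) { fmt.Println("disconnected:", p) }

func ExampleConnectEventManager() {
	mgr := newConnectEventManager(printListener{})
	mgr.Start()
	defer mgr.Stop()

	p := peer.ID("not-a-real-peer-id")                                // fine for illustration
	step := func(f func()) { f(); time.Sleep(50 * time.Millisecond) } // let the worker drain each change

	step(func() { mgr.Connected(p) })        // disconnected -> responsive: PeerConnected fires
	step(func() { mgr.MarkUnresponsive(p) }) // responsive -> unresponsive: PeerDisconnected fires
	step(func() { mgr.OnMessage(p) })        // unresponsive -> responsive: PeerConnected fires again
	step(func() { mgr.Disconnected(p) })     // responsive -> disconnected: PeerDisconnected fires, peer forgotten
}
```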
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/network/interface.go a/vendor/github.com/ipfs/go-libipfs/bitswap/network/interface.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/network/interface.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/network/interface.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,111 @@ | |
+package network | |
+ | |
+import ( | |
+ "context" | |
+ "time" | |
+ | |
+ bsmsg "github.com/ipfs/go-libipfs/bitswap/message" | |
+ "github.com/ipfs/go-libipfs/bitswap/network/internal" | |
+ | |
+ cid "github.com/ipfs/go-cid" | |
+ | |
+ "github.com/libp2p/go-libp2p/core/connmgr" | |
+ "github.com/libp2p/go-libp2p/core/peer" | |
+ "github.com/libp2p/go-libp2p/p2p/protocol/ping" | |
+) | |
+ | |
+var ( | |
+ // ProtocolBitswapNoVers is equivalent to the legacy bitswap protocol | |
+ ProtocolBitswapNoVers = internal.ProtocolBitswapNoVers | |
+ // ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol | |
+ ProtocolBitswapOneZero = internal.ProtocolBitswapOneZero | |
+ // ProtocolBitswapOneOne is the prefix for version 1.1.0 | |
+ ProtocolBitswapOneOne = internal.ProtocolBitswapOneOne | |
+ // ProtocolBitswap is the current version of the bitswap protocol: 1.2.0 | |
+ ProtocolBitswap = internal.ProtocolBitswap | |
+) | |
+ | |
+// BitSwapNetwork provides network connectivity for BitSwap sessions. | |
+type BitSwapNetwork interface { | |
+ Self() peer.ID | |
+ | |
+ // SendMessage sends a BitSwap message to a peer. | |
+ SendMessage( | |
+ context.Context, | |
+ peer.ID, | |
+ bsmsg.BitSwapMessage) error | |
+ | |
+ // Start registers the Receiver and starts handling new messages, connectivity events, etc. | |
+ Start(...Receiver) | |
+ // Stop stops the network service. | |
+ Stop() | |
+ | |
+ ConnectTo(context.Context, peer.ID) error | |
+ DisconnectFrom(context.Context, peer.ID) error | |
+ | |
+ NewMessageSender(context.Context, peer.ID, *MessageSenderOpts) (MessageSender, error) | |
+ | |
+ ConnectionManager() connmgr.ConnManager | |
+ | |
+ Stats() Stats | |
+ | |
+ Routing | |
+ | |
+ Pinger | |
+} | |
+ | |
+// MessageSender is an interface for sending a series of messages over the bitswap | |
+// network | |
+type MessageSender interface { | |
+ SendMsg(context.Context, bsmsg.BitSwapMessage) error | |
+ Close() error | |
+ Reset() error | |
+ // Indicates whether the remote peer supports HAVE / DONT_HAVE messages | |
+ SupportsHave() bool | |
+} | |
+ | |
+type MessageSenderOpts struct { | |
+ MaxRetries int | |
+ SendTimeout time.Duration | |
+ SendErrorBackoff time.Duration | |
+} | |
+ | |
+// Receiver is an interface that can receive messages from the BitSwapNetwork. | |
+type Receiver interface { | |
+ ReceiveMessage( | |
+ ctx context.Context, | |
+ sender peer.ID, | |
+ incoming bsmsg.BitSwapMessage) | |
+ | |
+ ReceiveError(error) | |
+ | |
+ // Connected/Disconnected warns bitswap about peer connections. | |
+ PeerConnected(peer.ID) | |
+ PeerDisconnected(peer.ID) | |
+} | |
+ | |
+// Routing is an interface to providing and finding providers on a bitswap | |
+// network. | |
+type Routing interface { | |
+ // FindProvidersAsync returns a channel of providers for the given key. | |
+ FindProvidersAsync(context.Context, cid.Cid, int) <-chan peer.ID | |
+ | |
+ // Provide provides the key to the network. | |
+ Provide(context.Context, cid.Cid) error | |
+} | |
+ | |
+// Pinger is an interface to ping a peer and get the average latency of all pings | |
+type Pinger interface { | |
+ // Ping a peer | |
+ Ping(context.Context, peer.ID) ping.Result | |
+ // Get the average latency of all pings | |
+ Latency(peer.ID) time.Duration | |
+} | |
+ | |
+// Stats is a container for statistics about the bitswap network | |
+// the numbers inside are specific to bitswap, and not any other protocols | |
+// using the same underlying network. | |
+type Stats struct { | |
+ MessagesSent uint64 | |
+ MessagesRecvd uint64 | |
+} | |
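`Receiver` is the hook through which the network hands inbound traffic back to bitswap, or to anything else registered via `Start`. A minimal sketch of an implementation; the `Blocks()` and `Wantlist()` accessors on `BitSwapMessage` are assumed from the message package, and the wiring is shown only as a comment:

```golang
package main

import (
	"context"
	"fmt"

	bsmsg "github.com/ipfs/go-libipfs/bitswap/message"
	bsnet "github.com/ipfs/go-libipfs/bitswap/network"
	"github.com/libp2p/go-libp2p/core/peer"
)

// loggingReceiver just logs what the network hands it.
type loggingReceiver struct{}

func (loggingReceiver) ReceiveMessage(ctx context.Context, sender peer.ID, incoming bsmsg.BitSwapMessage) {
	fmt.Printf("message from %s: %d block(s), %d wantlist entries\n",
		sender, len(incoming.Blocks()), len(incoming.Wantlist()))
}

func (loggingReceiver) ReceiveError(err error)     { fmt.Println("bitswap network error:", err) }
func (loggingReceiver) PeerConnected(p peer.ID)    { fmt.Println("peer connected:", p) }
func (loggingReceiver) PeerDisconnected(p peer.ID) { fmt.Println("peer disconnected:", p) }

var _ bsnet.Receiver = loggingReceiver{}

func main() {
	// With an existing libp2p host and content router:
	//   nw := bsnet.NewFromIpfsHost(host, router)
	//   nw.Start(loggingReceiver{})
	//   defer nw.Stop()
}
```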
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/network/internal/default.go a/vendor/github.com/ipfs/go-libipfs/bitswap/network/internal/default.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/network/internal/default.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/network/internal/default.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,23 @@ | |
+package internal | |
+ | |
+import ( | |
+ "github.com/libp2p/go-libp2p/core/protocol" | |
+) | |
+ | |
+var ( | |
+ // ProtocolBitswapNoVers is equivalent to the legacy bitswap protocol | |
+ ProtocolBitswapNoVers protocol.ID = "/ipfs/bitswap" | |
+ // ProtocolBitswapOneZero is the prefix for the legacy bitswap protocol | |
+ ProtocolBitswapOneZero protocol.ID = "/ipfs/bitswap/1.0.0" | |
+ // ProtocolBitswapOneOne is the prefix for version 1.1.0 | |
+ ProtocolBitswapOneOne protocol.ID = "/ipfs/bitswap/1.1.0" | |
+ // ProtocolBitswap is the current version of the bitswap protocol: 1.2.0 | |
+ ProtocolBitswap protocol.ID = "/ipfs/bitswap/1.2.0" | |
+) | |
+ | |
+var DefaultProtocols = []protocol.ID{ | |
+ ProtocolBitswap, | |
+ ProtocolBitswapOneOne, | |
+ ProtocolBitswapOneZero, | |
+ ProtocolBitswapNoVers, | |
+} | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/network/ipfs_impl.go a/vendor/github.com/ipfs/go-libipfs/bitswap/network/ipfs_impl.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/network/ipfs_impl.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/network/ipfs_impl.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,472 @@ | |
+package network | |
+ | |
+import ( | |
+ "context" | |
+ "errors" | |
+ "fmt" | |
+ "io" | |
+ "sync/atomic" | |
+ "time" | |
+ | |
+ bsmsg "github.com/ipfs/go-libipfs/bitswap/message" | |
+ "github.com/ipfs/go-libipfs/bitswap/network/internal" | |
+ | |
+ cid "github.com/ipfs/go-cid" | |
+ logging "github.com/ipfs/go-log" | |
+ "github.com/libp2p/go-libp2p/core/connmgr" | |
+ "github.com/libp2p/go-libp2p/core/host" | |
+ "github.com/libp2p/go-libp2p/core/network" | |
+ "github.com/libp2p/go-libp2p/core/peer" | |
+ peerstore "github.com/libp2p/go-libp2p/core/peerstore" | |
+ "github.com/libp2p/go-libp2p/core/protocol" | |
+ "github.com/libp2p/go-libp2p/core/routing" | |
+ "github.com/libp2p/go-libp2p/p2p/protocol/ping" | |
+ msgio "github.com/libp2p/go-msgio" | |
+ ma "github.com/multiformats/go-multiaddr" | |
+ "github.com/multiformats/go-multistream" | |
+) | |
+ | |
+var log = logging.Logger("bitswap_network") | |
+ | |
+var connectTimeout = time.Second * 5 | |
+ | |
+var maxSendTimeout = 2 * time.Minute | |
+var minSendTimeout = 10 * time.Second | |
+var sendLatency = 2 * time.Second | |
+var minSendRate = (100 * 1000) / 8 // 100kbit/s | |
+ | |
+// NewFromIpfsHost returns a BitSwapNetwork supported by underlying IPFS host. | |
+func NewFromIpfsHost(host host.Host, r routing.ContentRouting, opts ...NetOpt) BitSwapNetwork { | |
+ s := processSettings(opts...) | |
+ | |
+ bitswapNetwork := impl{ | |
+ host: host, | |
+ routing: r, | |
+ | |
+ protocolBitswapNoVers: s.ProtocolPrefix + ProtocolBitswapNoVers, | |
+ protocolBitswapOneZero: s.ProtocolPrefix + ProtocolBitswapOneZero, | |
+ protocolBitswapOneOne: s.ProtocolPrefix + ProtocolBitswapOneOne, | |
+ protocolBitswap: s.ProtocolPrefix + ProtocolBitswap, | |
+ | |
+ supportedProtocols: s.SupportedProtocols, | |
+ } | |
+ | |
+ return &bitswapNetwork | |
+} | |
+ | |
+func processSettings(opts ...NetOpt) Settings { | |
+ s := Settings{SupportedProtocols: append([]protocol.ID(nil), internal.DefaultProtocols...)} | |
+ for _, opt := range opts { | |
+ opt(&s) | |
+ } | |
+ for i, proto := range s.SupportedProtocols { | |
+ s.SupportedProtocols[i] = s.ProtocolPrefix + proto | |
+ } | |
+ return s | |
+} | |
+ | |
+// impl transforms the ipfs network interface, which sends and receives | |
+// NetMessage objects, into the bitswap network interface. | |
+type impl struct { | |
+ // NOTE: Stats must be at the top of the heap allocation to ensure 64bit | |
+ // alignment. | |
+ stats Stats | |
+ | |
+ host host.Host | |
+ routing routing.ContentRouting | |
+ connectEvtMgr *connectEventManager | |
+ | |
+ protocolBitswapNoVers protocol.ID | |
+ protocolBitswapOneZero protocol.ID | |
+ protocolBitswapOneOne protocol.ID | |
+ protocolBitswap protocol.ID | |
+ | |
+ supportedProtocols []protocol.ID | |
+ | |
+ // inbound messages from the network are forwarded to the receiver | |
+ receivers []Receiver | |
+} | |
+ | |
+type streamMessageSender struct { | |
+ to peer.ID | |
+ stream network.Stream | |
+ connected bool | |
+ bsnet *impl | |
+ opts *MessageSenderOpts | |
+} | |
+ | |
+// Open a stream to the remote peer | |
+func (s *streamMessageSender) Connect(ctx context.Context) (network.Stream, error) { | |
+ if s.connected { | |
+ return s.stream, nil | |
+ } | |
+ | |
+ tctx, cancel := context.WithTimeout(ctx, s.opts.SendTimeout) | |
+ defer cancel() | |
+ | |
+ if err := s.bsnet.ConnectTo(tctx, s.to); err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ stream, err := s.bsnet.newStreamToPeer(tctx, s.to) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ s.stream = stream | |
+ s.connected = true | |
+ return s.stream, nil | |
+} | |
+ | |
+// Reset the stream | |
+func (s *streamMessageSender) Reset() error { | |
+ if s.stream != nil { | |
+ err := s.stream.Reset() | |
+ s.connected = false | |
+ return err | |
+ } | |
+ return nil | |
+} | |
+ | |
+// Close the stream | |
+func (s *streamMessageSender) Close() error { | |
+ return s.stream.Close() | |
+} | |
+ | |
+// Indicates whether the peer supports HAVE / DONT_HAVE messages | |
+func (s *streamMessageSender) SupportsHave() bool { | |
+ return s.bsnet.SupportsHave(s.stream.Protocol()) | |
+} | |
+ | |
+// Send a message to the peer, attempting multiple times | |
+func (s *streamMessageSender) SendMsg(ctx context.Context, msg bsmsg.BitSwapMessage) error { | |
+ return s.multiAttempt(ctx, func() error { | |
+ return s.send(ctx, msg) | |
+ }) | |
+} | |
+ | |
+// Perform a function with multiple attempts, and a timeout | |
+func (s *streamMessageSender) multiAttempt(ctx context.Context, fn func() error) error { | |
+ // Try to call the function repeatedly | |
+ var err error | |
+ for i := 0; i < s.opts.MaxRetries; i++ { | |
+ if err = fn(); err == nil { | |
+ // Attempt was successful | |
+ return nil | |
+ } | |
+ | |
+ // Attempt failed | |
+ | |
+ // If the sender has been closed or the context cancelled, just bail out | |
+ select { | |
+ case <-ctx.Done(): | |
+ return ctx.Err() | |
+ default: | |
+ } | |
+ | |
+ // Protocol is not supported, so no need to try multiple times | |
+ if errors.Is(err, multistream.ErrNotSupported) { | |
+ s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) | |
+ return err | |
+ } | |
+ | |
+ // Failed to send so reset stream and try again | |
+ _ = s.Reset() | |
+ | |
+ // Failed too many times so mark the peer as unresponsive and return an error | |
+ if i == s.opts.MaxRetries-1 { | |
+ s.bsnet.connectEvtMgr.MarkUnresponsive(s.to) | |
+ return err | |
+ } | |
+ | |
+ select { | |
+ case <-ctx.Done(): | |
+ return ctx.Err() | |
+ case <-time.After(s.opts.SendErrorBackoff): | |
+ // wait a short time in case disconnect notifications are still propagating | |
+ log.Infof("send message to %s failed but context was not Done: %s", s.to, err) | |
+ } | |
+ } | |
+ return err | |
+} | |
+ | |
+// Send a message to the peer | |
+func (s *streamMessageSender) send(ctx context.Context, msg bsmsg.BitSwapMessage) error { | |
+ start := time.Now() | |
+ stream, err := s.Connect(ctx) | |
+ if err != nil { | |
+ log.Infof("failed to open stream to %s: %s", s.to, err) | |
+ return err | |
+ } | |
+ | |
+ // The send timeout includes the time required to connect | |
+ // (although usually we will already have connected - we only need to | |
+ // connect after a failed attempt to send) | |
+ timeout := s.opts.SendTimeout - time.Since(start) | |
+ if err = s.bsnet.msgToStream(ctx, stream, msg, timeout); err != nil { | |
+ log.Infof("failed to send message to %s: %s", s.to, err) | |
+ return err | |
+ } | |
+ | |
+ return nil | |
+} | |
+ | |
+func (bsnet *impl) Self() peer.ID { | |
+ return bsnet.host.ID() | |
+} | |
+ | |
+func (bsnet *impl) Ping(ctx context.Context, p peer.ID) ping.Result { | |
+ ctx, cancel := context.WithCancel(ctx) | |
+ defer cancel() | |
+ res := <-ping.Ping(ctx, bsnet.host, p) | |
+ return res | |
+} | |
+ | |
+func (bsnet *impl) Latency(p peer.ID) time.Duration { | |
+ return bsnet.host.Peerstore().LatencyEWMA(p) | |
+} | |
+ | |
+// Indicates whether the given protocol supports HAVE / DONT_HAVE messages | |
+func (bsnet *impl) SupportsHave(proto protocol.ID) bool { | |
+ switch proto { | |
+ case bsnet.protocolBitswapOneOne, bsnet.protocolBitswapOneZero, bsnet.protocolBitswapNoVers: | |
+ return false | |
+ } | |
+ return true | |
+} | |
+ | |
+func (bsnet *impl) msgToStream(ctx context.Context, s network.Stream, msg bsmsg.BitSwapMessage, timeout time.Duration) error { | |
+ deadline := time.Now().Add(timeout) | |
+ if dl, ok := ctx.Deadline(); ok && dl.Before(deadline) { | |
+ deadline = dl | |
+ } | |
+ | |
+ if err := s.SetWriteDeadline(deadline); err != nil { | |
+ log.Warnf("error setting deadline: %s", err) | |
+ } | |
+ | |
+ // Older Bitswap versions use a slightly different wire format so we need | |
+ // to convert the message to the appropriate format depending on the remote | |
+ // peer's Bitswap version. | |
+ switch s.Protocol() { | |
+ case bsnet.protocolBitswapOneOne, bsnet.protocolBitswap: | |
+ if err := msg.ToNetV1(s); err != nil { | |
+ log.Debugf("error: %s", err) | |
+ return err | |
+ } | |
+ case bsnet.protocolBitswapOneZero, bsnet.protocolBitswapNoVers: | |
+ if err := msg.ToNetV0(s); err != nil { | |
+ log.Debugf("error: %s", err) | |
+ return err | |
+ } | |
+ default: | |
+ return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol()) | |
+ } | |
+ | |
+ atomic.AddUint64(&bsnet.stats.MessagesSent, 1) | |
+ | |
+ if err := s.SetWriteDeadline(time.Time{}); err != nil { | |
+ log.Warnf("error resetting deadline: %s", err) | |
+ } | |
+ return nil | |
+} | |
+ | |
+func (bsnet *impl) NewMessageSender(ctx context.Context, p peer.ID, opts *MessageSenderOpts) (MessageSender, error) { | |
+ opts = setDefaultOpts(opts) | |
+ | |
+ sender := &streamMessageSender{ | |
+ to: p, | |
+ bsnet: bsnet, | |
+ opts: opts, | |
+ } | |
+ | |
+ err := sender.multiAttempt(ctx, func() error { | |
+ _, err := sender.Connect(ctx) | |
+ return err | |
+ }) | |
+ | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ return sender, nil | |
+} | |
+ | |
+func setDefaultOpts(opts *MessageSenderOpts) *MessageSenderOpts { | |
+ copy := *opts | |
+ if opts.MaxRetries == 0 { | |
+ copy.MaxRetries = 3 | |
+ } | |
+ if opts.SendTimeout == 0 { | |
+ copy.SendTimeout = maxSendTimeout | |
+ } | |
+ if opts.SendErrorBackoff == 0 { | |
+ copy.SendErrorBackoff = 100 * time.Millisecond | |
+ } | |
+ return &copy | |
+} | |
+ | |
+func sendTimeout(size int) time.Duration { | |
+ timeout := sendLatency | |
+ timeout += time.Duration((uint64(time.Second) * uint64(size)) / uint64(minSendRate)) | |
+ if timeout > maxSendTimeout { | |
+ timeout = maxSendTimeout | |
+ } else if timeout < minSendTimeout { | |
+ timeout = minSendTimeout | |
+ } | |
+ return timeout | |
+} | |
+ | |
+func (bsnet *impl) SendMessage( | |
+ ctx context.Context, | |
+ p peer.ID, | |
+ outgoing bsmsg.BitSwapMessage) error { | |
+ | |
+ tctx, cancel := context.WithTimeout(ctx, connectTimeout) | |
+ defer cancel() | |
+ | |
+ s, err := bsnet.newStreamToPeer(tctx, p) | |
+ if err != nil { | |
+ return err | |
+ } | |
+ | |
+ timeout := sendTimeout(outgoing.Size()) | |
+ if err = bsnet.msgToStream(ctx, s, outgoing, timeout); err != nil { | |
+ _ = s.Reset() | |
+ return err | |
+ } | |
+ | |
+ return s.Close() | |
+} | |
+ | |
+func (bsnet *impl) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stream, error) { | |
+ return bsnet.host.NewStream(ctx, p, bsnet.supportedProtocols...) | |
+} | |
+ | |
+func (bsnet *impl) Start(r ...Receiver) { | |
+ bsnet.receivers = r | |
+ { | |
+ connectionListeners := make([]ConnectionListener, len(r)) | |
+ for i, v := range r { | |
+ connectionListeners[i] = v | |
+ } | |
+ bsnet.connectEvtMgr = newConnectEventManager(connectionListeners...) | |
+ } | |
+ for _, proto := range bsnet.supportedProtocols { | |
+ bsnet.host.SetStreamHandler(proto, bsnet.handleNewStream) | |
+ } | |
+ bsnet.host.Network().Notify((*netNotifiee)(bsnet)) | |
+ bsnet.connectEvtMgr.Start() | |
+ | |
+} | |
+ | |
+func (bsnet *impl) Stop() { | |
+ bsnet.connectEvtMgr.Stop() | |
+ bsnet.host.Network().StopNotify((*netNotifiee)(bsnet)) | |
+} | |
+ | |
+func (bsnet *impl) ConnectTo(ctx context.Context, p peer.ID) error { | |
+ return bsnet.host.Connect(ctx, peer.AddrInfo{ID: p}) | |
+} | |
+ | |
+func (bsnet *impl) DisconnectFrom(ctx context.Context, p peer.ID) error { | |
+ panic("Not implemented: DisconnectFrom() is only used by tests") | |
+} | |
+ | |
+// FindProvidersAsync returns a channel of providers for the given key. | |
+func (bsnet *impl) FindProvidersAsync(ctx context.Context, k cid.Cid, max int) <-chan peer.ID { | |
+ out := make(chan peer.ID, max) | |
+ go func() { | |
+ defer close(out) | |
+ providers := bsnet.routing.FindProvidersAsync(ctx, k, max) | |
+ for info := range providers { | |
+ if info.ID == bsnet.host.ID() { | |
+ continue // ignore self as provider | |
+ } | |
+ bsnet.host.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.TempAddrTTL) | |
+ select { | |
+ case <-ctx.Done(): | |
+ return | |
+ case out <- info.ID: | |
+ } | |
+ } | |
+ }() | |
+ return out | |
+} | |
+ | |
+// Provide provides the key to the network | |
+func (bsnet *impl) Provide(ctx context.Context, k cid.Cid) error { | |
+ return bsnet.routing.Provide(ctx, k, true) | |
+} | |
+ | |
+// handleNewStream receives a new stream from the network. | |
+func (bsnet *impl) handleNewStream(s network.Stream) { | |
+ defer s.Close() | |
+ | |
+ if len(bsnet.receivers) == 0 { | |
+ _ = s.Reset() | |
+ return | |
+ } | |
+ | |
+ reader := msgio.NewVarintReaderSize(s, network.MessageSizeMax) | |
+ for { | |
+ received, err := bsmsg.FromMsgReader(reader) | |
+ if err != nil { | |
+ if err != io.EOF { | |
+ _ = s.Reset() | |
+ for _, v := range bsnet.receivers { | |
+ v.ReceiveError(err) | |
+ } | |
+ log.Debugf("bitswap net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) | |
+ } | |
+ return | |
+ } | |
+ | |
+ p := s.Conn().RemotePeer() | |
+ ctx := context.Background() | |
+ log.Debugf("bitswap net handleNewStream from %s", s.Conn().RemotePeer()) | |
+ bsnet.connectEvtMgr.OnMessage(s.Conn().RemotePeer()) | |
+ atomic.AddUint64(&bsnet.stats.MessagesRecvd, 1) | |
+ for _, v := range bsnet.receivers { | |
+ v.ReceiveMessage(ctx, p, received) | |
+ } | |
+ } | |
+} | |
+ | |
+func (bsnet *impl) ConnectionManager() connmgr.ConnManager { | |
+ return bsnet.host.ConnManager() | |
+} | |
+ | |
+func (bsnet *impl) Stats() Stats { | |
+ return Stats{ | |
+ MessagesRecvd: atomic.LoadUint64(&bsnet.stats.MessagesRecvd), | |
+ MessagesSent: atomic.LoadUint64(&bsnet.stats.MessagesSent), | |
+ } | |
+} | |
+ | |
+type netNotifiee impl | |
+ | |
+func (nn *netNotifiee) impl() *impl { | |
+ return (*impl)(nn) | |
+} | |
+ | |
+func (nn *netNotifiee) Connected(n network.Network, v network.Conn) { | |
+ // ignore transient connections | |
+ if v.Stat().Transient { | |
+ return | |
+ } | |
+ | |
+ nn.impl().connectEvtMgr.Connected(v.RemotePeer()) | |
+} | |
+func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) { | |
+ // Only record a "disconnect" when we actually disconnect. | |
+ if n.Connectedness(v.RemotePeer()) == network.Connected { | |
+ return | |
+ } | |
+ | |
+ nn.impl().connectEvtMgr.Disconnected(v.RemotePeer()) | |
+} | |
+func (nn *netNotifiee) OpenedStream(n network.Network, s network.Stream) {} | |
+func (nn *netNotifiee) ClosedStream(n network.Network, v network.Stream) {} | |
+func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {} | |
+func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {} | |
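One detail worth calling out in `ipfs_impl.go` is how `SendMessage` sizes its write deadline: `sendTimeout` adds a fixed 2 s latency allowance to the time a 100 kbit/s link would need for the message, then clamps the result to the [10 s, 2 m] window. A standalone copy of that arithmetic (the function itself is unexported), just to show the clamp in action:

```golang
package main

import (
	"fmt"
	"time"
)

// sendTimeoutCopy mirrors sendTimeout's clamp, using the constants from ipfs_impl.go:
// 2s fixed latency allowance, 12500 B/s (100 kbit/s) minimum rate, clamped to [10s, 2m].
func sendTimeoutCopy(size int) time.Duration {
	const (
		sendLatency    = 2 * time.Second
		minSendRate    = (100 * 1000) / 8 // bytes per second
		minSendTimeout = 10 * time.Second
		maxSendTimeout = 2 * time.Minute
	)
	timeout := sendLatency + time.Duration(uint64(time.Second)*uint64(size)/uint64(minSendRate))
	if timeout > maxSendTimeout {
		return maxSendTimeout
	}
	if timeout < minSendTimeout {
		return minSendTimeout
	}
	return timeout
}

func main() {
	fmt.Println(sendTimeoutCopy(16 << 10)) // ~3.3s raw, clamped up to 10s
	fmt.Println(sendTimeoutCopy(1 << 20))  // ~85.9s, already within the [10s, 2m] window
	fmt.Println(sendTimeoutCopy(4 << 20))  // ~337s raw, clamped down to 2m0s
}
```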
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/network/options.go a/vendor/github.com/ipfs/go-libipfs/bitswap/network/options.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/network/options.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/network/options.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,22 @@ | |
+package network | |
+ | |
+import "github.com/libp2p/go-libp2p/core/protocol" | |
+ | |
+type NetOpt func(*Settings) | |
+ | |
+type Settings struct { | |
+ ProtocolPrefix protocol.ID | |
+ SupportedProtocols []protocol.ID | |
+} | |
+ | |
+func Prefix(prefix protocol.ID) NetOpt { | |
+ return func(settings *Settings) { | |
+ settings.ProtocolPrefix = prefix | |
+ } | |
+} | |
+ | |
+func SupportedProtocols(protos []protocol.ID) NetOpt { | |
+ return func(settings *Settings) { | |
+ settings.SupportedProtocols = protos | |
+ } | |
+} | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/options.go a/vendor/github.com/ipfs/go-libipfs/bitswap/options.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/options.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/options.go 2023-01-30 20:34:49.302131003 +0100 | |
@@ -0,0 +1,79 @@ | |
+package bitswap | |
+ | |
+import ( | |
+ "time" | |
+ | |
+ delay "github.com/ipfs/go-ipfs-delay" | |
+ "github.com/ipfs/go-libipfs/bitswap/client" | |
+ "github.com/ipfs/go-libipfs/bitswap/server" | |
+ "github.com/ipfs/go-libipfs/bitswap/tracer" | |
+) | |
+ | |
+type option func(*Bitswap) | |
+ | |
+// Option is interface{} of server.Option or client.Option or func(*Bitswap) | |
+// wrapped in a struct to gain strong type checking. | |
+type Option struct { | |
+ v interface{} | |
+} | |
+ | |
+func EngineBlockstoreWorkerCount(count int) Option { | |
+ return Option{server.EngineBlockstoreWorkerCount(count)} | |
+} | |
+ | |
+func EngineTaskWorkerCount(count int) Option { | |
+ return Option{server.EngineTaskWorkerCount(count)} | |
+} | |
+ | |
+func MaxOutstandingBytesPerPeer(count int) Option { | |
+ return Option{server.MaxOutstandingBytesPerPeer(count)} | |
+} | |
+ | |
+func TaskWorkerCount(count int) Option { | |
+ return Option{server.TaskWorkerCount(count)} | |
+} | |
+ | |
+func ProvideEnabled(enabled bool) Option { | |
+ return Option{server.ProvideEnabled(enabled)} | |
+} | |
+ | |
+func SetSendDontHaves(send bool) Option { | |
+ return Option{server.SetSendDontHaves(send)} | |
+} | |
+ | |
+func WithPeerBlockRequestFilter(pbrf server.PeerBlockRequestFilter) Option { | |
+ return Option{server.WithPeerBlockRequestFilter(pbrf)} | |
+} | |
+ | |
+func WithScoreLedger(scoreLedger server.ScoreLedger) Option { | |
+ return Option{server.WithScoreLedger(scoreLedger)} | |
+} | |
+ | |
+func WithTargetMessageSize(tms int) Option { | |
+ return Option{server.WithTargetMessageSize(tms)} | |
+} | |
+ | |
+func WithTaskComparator(comparator server.TaskComparator) Option { | |
+ return Option{server.WithTaskComparator(comparator)} | |
+} | |
+ | |
+func ProviderSearchDelay(newProvSearchDelay time.Duration) Option { | |
+ return Option{client.ProviderSearchDelay(newProvSearchDelay)} | |
+} | |
+ | |
+func RebroadcastDelay(newRebroadcastDelay delay.D) Option { | |
+ return Option{client.RebroadcastDelay(newRebroadcastDelay)} | |
+} | |
+ | |
+func SetSimulateDontHavesOnTimeout(send bool) Option { | |
+ return Option{client.SetSimulateDontHavesOnTimeout(send)} | |
+} | |
+ | |
+func WithTracer(tap tracer.Tracer) Option { | |
+ // Only trace the server; the client and server receive the same messages anyway | |
+ return Option{ | |
+ option(func(bs *Bitswap) { | |
+ bs.tracer = tap | |
+ }), | |
+ } | |
+} | |
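Because `Option` is just a typed wrapper, server knobs, client knobs, and the tracer hook can be mixed freely in a single `New` call and are dispatched by their underlying type. A hedged construction sketch: it assumes the go-libipfs polyfill keeps go-bitswap's `New(ctx, network, blockstore, options...)` signature and that a libp2p host, content router, and blockstore already exist; the option values are purely illustrative.

```golang
package example

import (
	"context"
	"time"

	blockstore "github.com/ipfs/go-ipfs-blockstore"
	bitswap "github.com/ipfs/go-libipfs/bitswap"
	bsnet "github.com/ipfs/go-libipfs/bitswap/network"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/routing"
)

func newExchange(ctx context.Context, h host.Host, r routing.ContentRouting, bstore blockstore.Blockstore) *bitswap.Bitswap {
	nw := bsnet.NewFromIpfsHost(h, r)
	return bitswap.New(ctx, nw, bstore,
		// server-side options
		bitswap.EngineBlockstoreWorkerCount(128),
		bitswap.MaxOutstandingBytesPerPeer(1<<20),
		bitswap.WithTargetMessageSize(16<<10),
		// client-side options
		bitswap.ProviderSearchDelay(time.Second),
		bitswap.SetSimulateDontHavesOnTimeout(false),
	)
}
```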
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/README.md a/vendor/github.com/ipfs/go-libipfs/bitswap/README.md | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/README.md 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/README.md 2023-01-30 20:34:49.302131003 +0100 | |
@@ -0,0 +1,120 @@ | |
+go-bitswap | |
+================== | |
+ | |
+> An implementation of the bitswap protocol in Go! | |
+ | |
+## Table of Contents | |
+ | |
+- [Background](#background) | |
+- [Usage](#usage) | |
+- [Implementation](#implementation) | |
+- [Contribute](#contribute) | |
+- [License](#license) | |
+ | |
+## Background | |
+ | |
+Bitswap is the data trading module for IPFS. It manages requesting and sending | |
+blocks to and from other peers in the network. Bitswap has two main jobs: | |
+- to acquire blocks requested by the client from the network | |
+- to judiciously send blocks in its possession to other peers who want them | |
+ | |
+Bitswap is a message-based protocol, as opposed to request-response. All messages | |
+contain wantlists or blocks. | |
+ | |
+A node sends a wantlist to tell peers which blocks it wants. When a node receives | |
+a wantlist it should check which blocks it has from the wantlist, and consider | |
+sending the matching blocks to the requestor. | |
+ | |
+When a node receives blocks that it asked for, the node should send out a | |
+notification called a 'Cancel' to tell its peers that the node no longer | |
+wants those blocks. | |
+ | |
+`go-bitswap` provides an implementation of the Bitswap protocol in go. | |
+ | |
+[Learn more about how Bitswap works](./docs/how-bitswap-works.md) | |
+ | |
+## Usage | |
+ | |
+### Initializing a Bitswap Exchange | |
+ | |
+```golang | |
+import ( | |
+ "context" | |
+ bitswap "github.com/ipfs/go-libipfs/bitswap" | |
+ bsnet "github.com/ipfs/go-libipfs/bitswap/network" | |
+ blockstore "github.com/ipfs/go-ipfs-blockstore" | |
+ "github.com/libp2p/go-libp2p-core/routing" | |
+ "github.com/libp2p/go-libp2p-core/host" | |
+) | |
+ | |
+var ctx context.Context | |
+var host host.Host | |
+var router routing.ContentRouting | |
+var bstore blockstore.Blockstore | |
+ | |
+network := bsnet.NewFromIpfsHost(host, router) | |
+exchange := bitswap.New(ctx, network, bstore) | |
+``` | |
+ | |
+Parameter Notes: | |
+ | |
+1. `ctx` is just the parent context for all of Bitswap | |
+2. `network` is a network abstraction provided to Bitswap on top of libp2p & content routing. | |
+3. `bstore` is an IPFS blockstore | |
+ | |
+### Get A Block Synchronously | |
+ | |
+```golang | |
+var c cid.Cid | |
+var ctx context.Context | |
+var exchange bitswap.Bitswap | |
+ | |
+block, err := exchange.GetBlock(ctx, c) | |
+``` | |
+ | |
+Parameter Notes: | |
+ | |
+1. `ctx` is the context for this request, which can be cancelled to cancel the request | |
+2. `c` is the content ID of the block you're requesting | |
+ | |
+### Get Several Blocks Asynchronously | |
+ | |
+```golang | |
+var cids []cid.Cid | |
+var ctx context.Context | |
+var exchange bitswap.Bitswap | |
+ | |
+blockChannel, err := exchange.GetBlocks(ctx, cids) | |
+``` | |
+ | |
+Parameter Notes: | |
+ | |
+1. `ctx` is the context for this request, which can be cancelled to cancel the request | |
+2. `cids` is a slice of content IDs for the blocks you're requesting | |
+ | |
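+The returned channel delivers blocks as they arrive; a consumer typically just | |
+ranges over it until it is closed (requires importing `fmt` for this example): | |
+ | |
+```golang | |
+for block := range blockChannel { | |
+    fmt.Println("got block", block.Cid()) | |
+} | |
+``` | |
+ | |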
+### Get Related Blocks Faster With Sessions | |
+ | |
+In IPFS, content blocks are often connected to each other through a MerkleDAG. If you know ahead of time that block requests are related, Bitswap can make several optimizations internally in how it requests those blocks in order to get them faster. Bitswap provides a mechanism called a Bitswap Session to manage a series of block requests as part of a single higher level operation. You should initialize a Bitswap Session any time you intend to make a series of block requests that are related -- and whose responses are likely to come from the same peers. | |
+ | |
+```golang | |
+var ctx context.Context | |
+var cids []cid.Cid | |
+var exchange bitswap.Bitswap | |
+ | |
+session := exchange.NewSession(ctx) | |
+blocksChannel, err := session.GetBlocks(ctx, cids) | |
+// later | |
+var relatedCids []cid.Cid | |
+relatedBlocksChannel, err := session.GetBlocks(ctx, relatedCids) | |
+``` | |
+ | |
+Note that `NewSession` returns an interface with `GetBlock` and `GetBlocks` methods that have the same signature as the overall Bitswap exchange. | |
+ | |
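+For example, a single related block can then be fetched through the session | |
+instead of the top-level exchange: | |
+ | |
+```golang | |
+var c cid.Cid | |
+block, err := session.GetBlock(ctx, c) | |
+``` | |
+ | |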
+### Tell bitswap a new block was added to the local datastore | |
+ | |
+```golang | |
+var blk blocks.Block | |
+var exchange bitswap.Bitswap | |
+ | |
+err := exchange.HasBlock(blk) | |
+``` | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/sendOnlyTracer.go a/vendor/github.com/ipfs/go-libipfs/bitswap/sendOnlyTracer.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/sendOnlyTracer.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/sendOnlyTracer.go 2023-01-30 20:34:49.302131003 +0100 | |
@@ -0,0 +1,20 @@ | |
+package bitswap | |
+ | |
+import ( | |
+ "github.com/ipfs/go-libipfs/bitswap/message" | |
+ "github.com/ipfs/go-libipfs/bitswap/tracer" | |
+ "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+type sendOnlyTracer interface { | |
+ MessageSent(peer.ID, message.BitSwapMessage) | |
+} | |
+ | |
+var _ tracer.Tracer = nopReceiveTracer{} | |
+ | |
+// We only need to trace sends here because receives are already traced by the polyfill object (so they are not traced twice). | |
+type nopReceiveTracer struct { | |
+ sendOnlyTracer | |
+} | |
+ | |
+func (nopReceiveTracer) MessageReceived(peer.ID, message.BitSwapMessage) {} | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/server/forward.go a/vendor/github.com/ipfs/go-libipfs/bitswap/server/forward.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/server/forward.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/server/forward.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,14 @@ | |
+package server | |
+ | |
+import ( | |
+ "github.com/ipfs/go-libipfs/bitswap/server/internal/decision" | |
+) | |
+ | |
+type ( | |
+ Receipt = decision.Receipt | |
+ PeerBlockRequestFilter = decision.PeerBlockRequestFilter | |
+ TaskComparator = decision.TaskComparator | |
+ TaskInfo = decision.TaskInfo | |
+ ScoreLedger = decision.ScoreLedger | |
+ ScorePeerFunc = decision.ScorePeerFunc | |
+) | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/blockstoremanager.go a/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/blockstoremanager.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/blockstoremanager.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/blockstoremanager.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,149 @@ | |
+package decision | |
+ | |
+import ( | |
+ "context" | |
+ "fmt" | |
+ "sync" | |
+ | |
+ cid "github.com/ipfs/go-cid" | |
+ bstore "github.com/ipfs/go-ipfs-blockstore" | |
+ ipld "github.com/ipfs/go-ipld-format" | |
+ blocks "github.com/ipfs/go-libipfs/blocks" | |
+ "github.com/ipfs/go-metrics-interface" | |
+) | |
+ | |
+// blockstoreManager maintains a pool of workers that make requests to the blockstore. | |
+type blockstoreManager struct { | |
+ bs bstore.Blockstore | |
+ workerCount int | |
+ jobs chan func() | |
+ pendingGauge metrics.Gauge | |
+ activeGauge metrics.Gauge | |
+ | |
+ workerWG sync.WaitGroup | |
+ stopChan chan struct{} | |
+ stopOnce sync.Once | |
+} | |
+ | |
+// newBlockstoreManager creates a new blockstoreManager with the given | |
+// blockstore, worker count, and metrics gauges | |
+func newBlockstoreManager( | |
+ bs bstore.Blockstore, | |
+ workerCount int, | |
+ pendingGauge metrics.Gauge, | |
+ activeGauge metrics.Gauge, | |
+) *blockstoreManager { | |
+ return &blockstoreManager{ | |
+ bs: bs, | |
+ workerCount: workerCount, | |
+ jobs: make(chan func()), | |
+ pendingGauge: pendingGauge, | |
+ activeGauge: activeGauge, | |
+ stopChan: make(chan struct{}), | |
+ } | |
+} | |
+ | |
+func (bsm *blockstoreManager) start() { | |
+ bsm.workerWG.Add(bsm.workerCount) | |
+ for i := 0; i < bsm.workerCount; i++ { | |
+ go bsm.worker() | |
+ } | |
+} | |
+ | |
+func (bsm *blockstoreManager) stop() { | |
+ bsm.stopOnce.Do(func() { | |
+ close(bsm.stopChan) | |
+ }) | |
+ bsm.workerWG.Wait() | |
+} | |
+ | |
+func (bsm *blockstoreManager) worker() { | |
+ defer bsm.workerWG.Done() | |
+ for { | |
+ select { | |
+ case <-bsm.stopChan: | |
+ return | |
+ case job := <-bsm.jobs: | |
+ bsm.pendingGauge.Dec() | |
+ bsm.activeGauge.Inc() | |
+ job() | |
+ bsm.activeGauge.Dec() | |
+ } | |
+ } | |
+} | |
+ | |
+func (bsm *blockstoreManager) addJob(ctx context.Context, job func()) error { | |
+ select { | |
+ case <-ctx.Done(): | |
+ return ctx.Err() | |
+ case <-bsm.stopChan: | |
+ return fmt.Errorf("shutting down") | |
+ case bsm.jobs <- job: | |
+ bsm.pendingGauge.Inc() | |
+ return nil | |
+ } | |
+} | |
+ | |
+func (bsm *blockstoreManager) getBlockSizes(ctx context.Context, ks []cid.Cid) (map[cid.Cid]int, error) { | |
+ res := make(map[cid.Cid]int) | |
+ if len(ks) == 0 { | |
+ return res, nil | |
+ } | |
+ | |
+ var lk sync.Mutex | |
+ return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) { | |
+ size, err := bsm.bs.GetSize(ctx, c) | |
+ if err != nil { | |
+ if !ipld.IsNotFound(err) { | |
+ // Note: this isn't a fatal error. We shouldn't abort the request | |
+ log.Errorf("blockstore.GetSize(%s) error: %s", c, err) | |
+ } | |
+ } else { | |
+ lk.Lock() | |
+ res[c] = size | |
+ lk.Unlock() | |
+ } | |
+ }) | |
+} | |
+ | |
+func (bsm *blockstoreManager) getBlocks(ctx context.Context, ks []cid.Cid) (map[cid.Cid]blocks.Block, error) { | |
+ res := make(map[cid.Cid]blocks.Block, len(ks)) | |
+ if len(ks) == 0 { | |
+ return res, nil | |
+ } | |
+ | |
+ var lk sync.Mutex | |
+ return res, bsm.jobPerKey(ctx, ks, func(c cid.Cid) { | |
+ blk, err := bsm.bs.Get(ctx, c) | |
+ if err != nil { | |
+ if !ipld.IsNotFound(err) { | |
+ // Note: this isn't a fatal error. We shouldn't abort the request | |
+ log.Errorf("blockstore.Get(%s) error: %s", c, err) | |
+ } | |
+ return | |
+ } | |
+ | |
+ lk.Lock() | |
+ res[c] = blk | |
+ lk.Unlock() | |
+ }) | |
+} | |
+ | |
+func (bsm *blockstoreManager) jobPerKey(ctx context.Context, ks []cid.Cid, jobFn func(c cid.Cid)) error { | |
+ var err error | |
+ var wg sync.WaitGroup | |
+ for _, k := range ks { | |
+ c := k | |
+ wg.Add(1) | |
+ err = bsm.addJob(ctx, func() { | |
+ jobFn(c) | |
+ wg.Done() | |
+ }) | |
+ if err != nil { | |
+ wg.Done() | |
+ break | |
+ } | |
+ } | |
+ wg.Wait() | |
+ return err | |
+} | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/engine.go a/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/engine.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/engine.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/engine.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,1026 @@ | |
+// Package decision implements the decision engine for the bitswap service. | |
+package decision | |
+ | |
+import ( | |
+ "context" | |
+ "fmt" | |
+ "sync" | |
+ "time" | |
+ | |
+ "github.com/google/uuid" | |
+ | |
+ "github.com/ipfs/go-cid" | |
+ bstore "github.com/ipfs/go-ipfs-blockstore" | |
+ wl "github.com/ipfs/go-libipfs/bitswap/client/wantlist" | |
+ "github.com/ipfs/go-libipfs/bitswap/internal/defaults" | |
+ bsmsg "github.com/ipfs/go-libipfs/bitswap/message" | |
+ pb "github.com/ipfs/go-libipfs/bitswap/message/pb" | |
+ bmetrics "github.com/ipfs/go-libipfs/bitswap/metrics" | |
+ blocks "github.com/ipfs/go-libipfs/blocks" | |
+ logging "github.com/ipfs/go-log" | |
+ "github.com/ipfs/go-metrics-interface" | |
+ "github.com/ipfs/go-peertaskqueue" | |
+ "github.com/ipfs/go-peertaskqueue/peertask" | |
+ "github.com/ipfs/go-peertaskqueue/peertracker" | |
+ process "github.com/jbenet/goprocess" | |
+ "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+// TODO consider taking responsibility for other types of requests. For | |
+// example, there could be a |cancelQueue| for all of the cancellation | |
+// messages that need to go out. There could also be a |wantlistQueue| for | |
+// the local peer's wantlists. Alternatively, these could all be bundled | |
+// into a single, intelligent global queue that efficiently | |
+// batches/combines and takes all of these into consideration. | |
+// | |
+// Right now, messages go onto the network for four reasons: | |
+// 1. an initial `sendwantlist` message to a provider of the first key in a | |
+// request | |
+// 2. a periodic full sweep of `sendwantlist` messages to all providers | |
+// 3. upon receipt of blocks, a `cancel` message to all peers | |
+// 4. draining the priority queue of `blockrequests` from peers | |
+// | |
+// Presently, only `blockrequests` are handled by the decision engine. | |
+// However, there is an opportunity to give it more responsibility! If the | |
+// decision engine is given responsibility for all of the others, it can | |
+// intelligently decide how to combine requests efficiently. | |
+// | |
+// Some examples of what would be possible: | |
+// | |
+// * when sending out the wantlists, include `cancel` requests | |
+// * when handling `blockrequests`, include `sendwantlist` and `cancel` as | |
+// appropriate | |
+// * when handling `cancel`, if we recently received a wanted block from a | |
+// peer, include a partial wantlist that contains a few other high priority | |
+// blocks | |
+// | |
+// In a sense, if we treat the decision engine as a black box, it could do | |
+// whatever it sees fit to produce desired outcomes (get wanted keys | |
+// quickly, maintain good relationships with peers, etc). | |
+ | |
+var log = logging.Logger("engine") | |
+ | |
+const ( | |
+ // outboxChanBuffer must be 0 to prevent stale messages from being sent | |
+ outboxChanBuffer = 0 | |
+	// defaultTargetMessageSize is the ideal size of the batched payload. We try to | |
+ // pop this much data off the request queue, but it may be a little more | |
+ // or less depending on what's in the queue. | |
+ defaultTargetMessageSize = 16 * 1024 | |
+	// tagFormat is the tag given to peers associated with an engine | |
+ tagFormat = "bs-engine-%s-%s" | |
+ | |
+ // queuedTagWeight is the default weight for peers that have work queued | |
+ // on their behalf. | |
+ queuedTagWeight = 10 | |
+ | |
+ // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in | |
+ // bytes up to which we will replace a want-have with a want-block | |
+ maxBlockSizeReplaceHasWithBlock = 1024 | |
+) | |
+ | |
+// Envelope contains a message for a Peer. | |
+type Envelope struct { | |
+ // Peer is the intended recipient. | |
+ Peer peer.ID | |
+ | |
+ // Message is the payload. | |
+ Message bsmsg.BitSwapMessage | |
+ | |
+ // A callback to notify the decision queue that the task is complete | |
+ Sent func() | |
+} | |
+ | |
+// PeerTagger covers the methods on the connection manager used by the decision | |
+// engine to tag peers | |
+type PeerTagger interface { | |
+ TagPeer(peer.ID, string, int) | |
+ UntagPeer(p peer.ID, tag string) | |
+} | |
+ | |
+// Assigns a specific score to a peer | |
+type ScorePeerFunc func(peer.ID, int) | |
+ | |
+// ScoreLedger is an external ledger dealing with peer scores. | |
+type ScoreLedger interface { | |
+	// Returns aggregated data about communication with a given peer. | |
+ GetReceipt(p peer.ID) *Receipt | |
+ // Increments the sent counter for the given peer. | |
+ AddToSentBytes(p peer.ID, n int) | |
+ // Increments the received counter for the given peer. | |
+ AddToReceivedBytes(p peer.ID, n int) | |
+ // PeerConnected should be called when a new peer connects, | |
+ // meaning the ledger should open accounting. | |
+ PeerConnected(p peer.ID) | |
+ // PeerDisconnected should be called when a peer disconnects to | |
+ // clean up the accounting. | |
+ PeerDisconnected(p peer.ID) | |
+ // Starts the ledger sampling process. | |
+ Start(scorePeer ScorePeerFunc) | |
+ // Stops the sampling process. | |
+ Stop() | |
+} | |
+ | |
+// Engine manages sending requested blocks to peers. | |
+type Engine struct { | |
+ // peerRequestQueue is a priority queue of requests received from peers. | |
+ // Requests are popped from the queue, packaged up, and placed in the | |
+ // outbox. | |
+ peerRequestQueue *peertaskqueue.PeerTaskQueue | |
+ | |
+ // FIXME it's a bit odd for the client and the worker to both share memory | |
+ // (both modify the peerRequestQueue) and also to communicate over the | |
+ // workSignal channel. consider sending requests over the channel and | |
+ // allowing the worker to have exclusive access to the peerRequestQueue. In | |
+ // that case, no lock would be required. | |
+ workSignal chan struct{} | |
+ | |
+ // outbox contains outgoing messages to peers. This is owned by the | |
+ // taskWorker goroutine | |
+ outbox chan (<-chan *Envelope) | |
+ | |
+ bsm *blockstoreManager | |
+ | |
+ peerTagger PeerTagger | |
+ | |
+ tagQueued, tagUseful string | |
+ | |
+ lock sync.RWMutex // protects the fields immediately below | |
+ | |
+ // ledgerMap lists block-related Ledgers by their Partner key. | |
+ ledgerMap map[peer.ID]*ledger | |
+ | |
+ // peerLedger saves which peers are waiting for a Cid | |
+ peerLedger *peerLedger | |
+ | |
+ // an external ledger dealing with peer scores | |
+ scoreLedger ScoreLedger | |
+ | |
+ ticker *time.Ticker | |
+ | |
+ taskWorkerLock sync.Mutex | |
+ taskWorkerCount int | |
+ | |
+ targetMessageSize int | |
+ | |
+ // maxBlockSizeReplaceHasWithBlock is the maximum size of the block in | |
+ // bytes up to which we will replace a want-have with a want-block | |
+ maxBlockSizeReplaceHasWithBlock int | |
+ | |
+ sendDontHaves bool | |
+ | |
+ self peer.ID | |
+ | |
+ // metrics gauge for total pending tasks across all workers | |
+ pendingGauge metrics.Gauge | |
+ | |
+	// metrics gauge for total active tasks across all workers | |
+ activeGauge metrics.Gauge | |
+ | |
+	// used to ensure metrics are reported every fixed number of operations | |
+ metricsLock sync.Mutex | |
+ metricUpdateCounter int | |
+ | |
+ taskComparator TaskComparator | |
+ | |
+ peerBlockRequestFilter PeerBlockRequestFilter | |
+ | |
+ bstoreWorkerCount int | |
+ maxOutstandingBytesPerPeer int | |
+} | |
+ | |
+// TaskInfo represents the details of a request from a peer. | |
+type TaskInfo struct { | |
+ Peer peer.ID | |
+ // The CID of the block | |
+ Cid cid.Cid | |
+ // Tasks can be want-have or want-block | |
+ IsWantBlock bool | |
+ // Whether to immediately send a response if the block is not found | |
+ SendDontHave bool | |
+ // The size of the block corresponding to the task | |
+ BlockSize int | |
+ // Whether the block was found | |
+ HaveBlock bool | |
+} | |
+ | |
+// TaskComparator is used for task prioritization. | |
+// It should return true if task 'ta' has higher priority than task 'tb' | |
+type TaskComparator func(ta, tb *TaskInfo) bool | |
+ | |
+// PeerBlockRequestFilter is used to accept / deny requests for a CID coming from a PeerID | |
+// It should return true if the request should be fulfilled. | |
+type PeerBlockRequestFilter func(p peer.ID, c cid.Cid) bool | |
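+ | |
+// Illustrative sketch (not from the original source): a filter that only | |
+// serves an allow-listed set of peers could be installed via | |
+// WithPeerBlockRequestFilter, e.g. | |
+// | |
+//	WithPeerBlockRequestFilter(func(p peer.ID, c cid.Cid) bool { | |
+//		_, ok := allowed[p] // allowed is a hypothetical map[peer.ID]struct{} | |
+//		return ok | |
+//	}) | |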
+ | |
+type Option func(*Engine) | |
+ | |
+func WithTaskComparator(comparator TaskComparator) Option { | |
+ return func(e *Engine) { | |
+ e.taskComparator = comparator | |
+ } | |
+} | |
+ | |
+func WithPeerBlockRequestFilter(pbrf PeerBlockRequestFilter) Option { | |
+ return func(e *Engine) { | |
+ e.peerBlockRequestFilter = pbrf | |
+ } | |
+} | |
+ | |
+func WithTargetMessageSize(size int) Option { | |
+ return func(e *Engine) { | |
+ e.targetMessageSize = size | |
+ } | |
+} | |
+ | |
+func WithScoreLedger(scoreledger ScoreLedger) Option { | |
+ return func(e *Engine) { | |
+ e.scoreLedger = scoreledger | |
+ } | |
+} | |
+ | |
+// WithBlockstoreWorkerCount sets the number of worker threads used for | |
+// blockstore operations in the decision engine | |
+func WithBlockstoreWorkerCount(count int) Option { | |
+ if count <= 0 { | |
+ panic(fmt.Sprintf("Engine blockstore worker count is %d but must be > 0", count)) | |
+ } | |
+ return func(e *Engine) { | |
+ e.bstoreWorkerCount = count | |
+ } | |
+} | |
+ | |
+// WithTaskWorkerCount sets the number of worker threads used inside the engine | |
+func WithTaskWorkerCount(count int) Option { | |
+ if count <= 0 { | |
+ panic(fmt.Sprintf("Engine task worker count is %d but must be > 0", count)) | |
+ } | |
+ return func(e *Engine) { | |
+ e.taskWorkerCount = count | |
+ } | |
+} | |
+ | |
+// WithMaxOutstandingBytesPerPeer describes approximately how much work we are willing to have outstanding to a peer at any | |
+// given time. Setting it to 0 will disable any limiting. | |
+func WithMaxOutstandingBytesPerPeer(count int) Option { | |
+ if count < 0 { | |
+ panic(fmt.Sprintf("max outstanding bytes per peer is %d but must be >= 0", count)) | |
+ } | |
+ return func(e *Engine) { | |
+ e.maxOutstandingBytesPerPeer = count | |
+ } | |
+} | |
+ | |
+func WithSetSendDontHave(send bool) Option { | |
+ return func(e *Engine) { | |
+ e.sendDontHaves = send | |
+ } | |
+} | |
+ | |
+// wrapTaskComparator wraps a TaskComparator so it can be used as a QueueTaskComparator | |
+func wrapTaskComparator(tc TaskComparator) peertask.QueueTaskComparator { | |
+ return func(a, b *peertask.QueueTask) bool { | |
+ taskDataA := a.Task.Data.(*taskData) | |
+ taskInfoA := &TaskInfo{ | |
+ Peer: a.Target, | |
+ Cid: a.Task.Topic.(cid.Cid), | |
+ IsWantBlock: taskDataA.IsWantBlock, | |
+ SendDontHave: taskDataA.SendDontHave, | |
+ BlockSize: taskDataA.BlockSize, | |
+ HaveBlock: taskDataA.HaveBlock, | |
+ } | |
+ taskDataB := b.Task.Data.(*taskData) | |
+ taskInfoB := &TaskInfo{ | |
+ Peer: b.Target, | |
+ Cid: b.Task.Topic.(cid.Cid), | |
+ IsWantBlock: taskDataB.IsWantBlock, | |
+ SendDontHave: taskDataB.SendDontHave, | |
+ BlockSize: taskDataB.BlockSize, | |
+ HaveBlock: taskDataB.HaveBlock, | |
+ } | |
+ return tc(taskInfoA, taskInfoB) | |
+ } | |
+} | |
+ | |
+// NewEngine creates a new block sending engine for the given block store. | |
+// The WithMaxOutstandingBytesPerPeer option hints to the peer task queue not to give a peer more tasks if it has some maximum | |
+// work already outstanding. | |
+func NewEngine( | |
+ ctx context.Context, | |
+ bs bstore.Blockstore, | |
+ peerTagger PeerTagger, | |
+ self peer.ID, | |
+ opts ...Option, | |
+) *Engine { | |
+ return newEngine( | |
+ ctx, | |
+ bs, | |
+ peerTagger, | |
+ self, | |
+ maxBlockSizeReplaceHasWithBlock, | |
+ opts..., | |
+ ) | |
+} | |
+ | |
+func newEngine( | |
+ ctx context.Context, | |
+ bs bstore.Blockstore, | |
+ peerTagger PeerTagger, | |
+ self peer.ID, | |
+ maxReplaceSize int, | |
+ opts ...Option, | |
+) *Engine { | |
+ e := &Engine{ | |
+ ledgerMap: make(map[peer.ID]*ledger), | |
+ scoreLedger: NewDefaultScoreLedger(), | |
+ bstoreWorkerCount: defaults.BitswapEngineBlockstoreWorkerCount, | |
+ maxOutstandingBytesPerPeer: defaults.BitswapMaxOutstandingBytesPerPeer, | |
+ peerTagger: peerTagger, | |
+ outbox: make(chan (<-chan *Envelope), outboxChanBuffer), | |
+ workSignal: make(chan struct{}, 1), | |
+ ticker: time.NewTicker(time.Millisecond * 100), | |
+ maxBlockSizeReplaceHasWithBlock: maxReplaceSize, | |
+ taskWorkerCount: defaults.BitswapEngineTaskWorkerCount, | |
+ sendDontHaves: true, | |
+ self: self, | |
+ peerLedger: newPeerLedger(), | |
+ pendingGauge: bmetrics.PendingEngineGauge(ctx), | |
+ activeGauge: bmetrics.ActiveEngineGauge(ctx), | |
+ targetMessageSize: defaultTargetMessageSize, | |
+ tagQueued: fmt.Sprintf(tagFormat, "queued", uuid.New().String()), | |
+ tagUseful: fmt.Sprintf(tagFormat, "useful", uuid.New().String()), | |
+ } | |
+ | |
+ for _, opt := range opts { | |
+ opt(e) | |
+ } | |
+ | |
+ e.bsm = newBlockstoreManager(bs, e.bstoreWorkerCount, bmetrics.PendingBlocksGauge(ctx), bmetrics.ActiveBlocksGauge(ctx)) | |
+ | |
+ // default peer task queue options | |
+ peerTaskQueueOpts := []peertaskqueue.Option{ | |
+ peertaskqueue.OnPeerAddedHook(e.onPeerAdded), | |
+ peertaskqueue.OnPeerRemovedHook(e.onPeerRemoved), | |
+ peertaskqueue.TaskMerger(newTaskMerger()), | |
+ peertaskqueue.IgnoreFreezing(true), | |
+ peertaskqueue.MaxOutstandingWorkPerPeer(e.maxOutstandingBytesPerPeer), | |
+ } | |
+ | |
+ if e.taskComparator != nil { | |
+ queueTaskComparator := wrapTaskComparator(e.taskComparator) | |
+ peerTaskQueueOpts = append(peerTaskQueueOpts, peertaskqueue.PeerComparator(peertracker.TaskPriorityPeerComparator(queueTaskComparator))) | |
+ peerTaskQueueOpts = append(peerTaskQueueOpts, peertaskqueue.TaskComparator(queueTaskComparator)) | |
+ } | |
+ | |
+ e.peerRequestQueue = peertaskqueue.New(peerTaskQueueOpts...) | |
+ | |
+ return e | |
+} | |
+ | |
+func (e *Engine) updateMetrics() { | |
+ e.metricsLock.Lock() | |
+ c := e.metricUpdateCounter | |
+ e.metricUpdateCounter++ | |
+ e.metricsLock.Unlock() | |
+ | |
+ if c%100 == 0 { | |
+ stats := e.peerRequestQueue.Stats() | |
+ e.activeGauge.Set(float64(stats.NumActive)) | |
+ e.pendingGauge.Set(float64(stats.NumPending)) | |
+ } | |
+} | |
+ | |
+// SetSendDontHaves indicates what to do when the engine receives a want-block | |
+// for a block that is not in the blockstore. Either | |
+// - Send a DONT_HAVE message | |
+// - Simply don't respond | |
+// Older versions of Bitswap did not respond, so this allows us to simulate | |
+// those older versions for testing. | |
+func (e *Engine) SetSendDontHaves(send bool) { | |
+ e.sendDontHaves = send | |
+} | |
+ | |
+// Starts the score ledger. Before starting, it checks whether the scoreLedger | |
+// is set and, if it is unset, initializes it with the default implementation. | |
+func (e *Engine) startScoreLedger(px process.Process) { | |
+ e.scoreLedger.Start(func(p peer.ID, score int) { | |
+ if score == 0 { | |
+ e.peerTagger.UntagPeer(p, e.tagUseful) | |
+ } else { | |
+ e.peerTagger.TagPeer(p, e.tagUseful, score) | |
+ } | |
+ }) | |
+ px.Go(func(ppx process.Process) { | |
+ <-ppx.Closing() | |
+ e.scoreLedger.Stop() | |
+ }) | |
+} | |
+ | |
+func (e *Engine) startBlockstoreManager(px process.Process) { | |
+ e.bsm.start() | |
+ px.Go(func(ppx process.Process) { | |
+ <-ppx.Closing() | |
+ e.bsm.stop() | |
+ }) | |
+} | |
+ | |
+// Start up workers to handle requests from other nodes for the data on this node | |
+func (e *Engine) StartWorkers(ctx context.Context, px process.Process) { | |
+ e.startBlockstoreManager(px) | |
+ e.startScoreLedger(px) | |
+ | |
+ e.taskWorkerLock.Lock() | |
+ defer e.taskWorkerLock.Unlock() | |
+ | |
+ for i := 0; i < e.taskWorkerCount; i++ { | |
+ px.Go(func(_ process.Process) { | |
+ e.taskWorker(ctx) | |
+ }) | |
+ } | |
+ | |
+} | |
+ | |
+func (e *Engine) onPeerAdded(p peer.ID) { | |
+ e.peerTagger.TagPeer(p, e.tagQueued, queuedTagWeight) | |
+} | |
+ | |
+func (e *Engine) onPeerRemoved(p peer.ID) { | |
+ e.peerTagger.UntagPeer(p, e.tagQueued) | |
+} | |
+ | |
+// WantlistForPeer returns the list of keys that the given peer has asked for | |
+func (e *Engine) WantlistForPeer(p peer.ID) []wl.Entry { | |
+ partner := e.findOrCreate(p) | |
+ | |
+ partner.lk.Lock() | |
+ entries := partner.wantList.Entries() | |
+ partner.lk.Unlock() | |
+ | |
+ return entries | |
+} | |
+ | |
+// LedgerForPeer returns aggregated data about communication with a given peer. | |
+func (e *Engine) LedgerForPeer(p peer.ID) *Receipt { | |
+ return e.scoreLedger.GetReceipt(p) | |
+} | |
+ | |
+// Each taskWorker pulls items off the request queue up to the maximum size | |
+// and adds them to an envelope that is passed off to the bitswap workers, | |
+// which send the message to the network. | |
+func (e *Engine) taskWorker(ctx context.Context) { | |
+ defer e.taskWorkerExit() | |
+ for { | |
+ oneTimeUse := make(chan *Envelope, 1) // buffer to prevent blocking | |
+ select { | |
+ case <-ctx.Done(): | |
+ return | |
+ case e.outbox <- oneTimeUse: | |
+ } | |
+		// receiver is ready for an outgoing envelope. let's prepare one. first, | |
+ // we must acquire a task from the PQ... | |
+ envelope, err := e.nextEnvelope(ctx) | |
+ if err != nil { | |
+ close(oneTimeUse) | |
+ return // ctx cancelled | |
+ } | |
+ oneTimeUse <- envelope // buffered. won't block | |
+ close(oneTimeUse) | |
+ } | |
+} | |
+ | |
+// taskWorkerExit handles cleanup of task workers | |
+func (e *Engine) taskWorkerExit() { | |
+ e.taskWorkerLock.Lock() | |
+ defer e.taskWorkerLock.Unlock() | |
+ | |
+ e.taskWorkerCount-- | |
+ if e.taskWorkerCount == 0 { | |
+ close(e.outbox) | |
+ } | |
+} | |
+ | |
+// nextEnvelope runs in the taskWorker goroutine. Returns an error if the | |
+// context is cancelled before the next Envelope can be created. | |
+func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) { | |
+ for { | |
+ // Pop some tasks off the request queue | |
+ p, nextTasks, pendingBytes := e.peerRequestQueue.PopTasks(e.targetMessageSize) | |
+ e.updateMetrics() | |
+ for len(nextTasks) == 0 { | |
+ select { | |
+ case <-ctx.Done(): | |
+ return nil, ctx.Err() | |
+ case <-e.workSignal: | |
+ p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(e.targetMessageSize) | |
+ e.updateMetrics() | |
+ case <-e.ticker.C: | |
+ // When a task is cancelled, the queue may be "frozen" for a | |
+ // period of time. We periodically "thaw" the queue to make | |
+ // sure it doesn't get stuck in a frozen state. | |
+ e.peerRequestQueue.ThawRound() | |
+ p, nextTasks, pendingBytes = e.peerRequestQueue.PopTasks(e.targetMessageSize) | |
+ e.updateMetrics() | |
+ } | |
+ } | |
+ | |
+ // Create a new message | |
+ msg := bsmsg.New(false) | |
+ | |
+ log.Debugw("Bitswap process tasks", "local", e.self, "taskCount", len(nextTasks)) | |
+ | |
+ // Amount of data in the request queue still waiting to be popped | |
+ msg.SetPendingBytes(int32(pendingBytes)) | |
+ | |
+ // Split out want-blocks, want-haves and DONT_HAVEs | |
+ blockCids := make([]cid.Cid, 0, len(nextTasks)) | |
+ blockTasks := make(map[cid.Cid]*taskData, len(nextTasks)) | |
+ for _, t := range nextTasks { | |
+ c := t.Topic.(cid.Cid) | |
+ td := t.Data.(*taskData) | |
+ if td.HaveBlock { | |
+ if td.IsWantBlock { | |
+ blockCids = append(blockCids, c) | |
+ blockTasks[c] = td | |
+ } else { | |
+ // Add HAVES to the message | |
+ msg.AddHave(c) | |
+ } | |
+ } else { | |
+ // Add DONT_HAVEs to the message | |
+ msg.AddDontHave(c) | |
+ } | |
+ } | |
+ | |
+ // Fetch blocks from datastore | |
+ blks, err := e.bsm.getBlocks(ctx, blockCids) | |
+ if err != nil { | |
+ // we're dropping the envelope but that's not an issue in practice. | |
+ return nil, err | |
+ } | |
+ | |
+ for c, t := range blockTasks { | |
+ blk := blks[c] | |
+ // If the block was not found (it has been removed) | |
+ if blk == nil { | |
+ // If the client requested DONT_HAVE, add DONT_HAVE to the message | |
+ if t.SendDontHave { | |
+ msg.AddDontHave(c) | |
+ } | |
+ } else { | |
+ // Add the block to the message | |
+ // log.Debugf(" make evlp %s->%s block: %s (%d bytes)", e.self, p, c, len(blk.RawData())) | |
+ msg.AddBlock(blk) | |
+ } | |
+ } | |
+ | |
+ // If there's nothing in the message, bail out | |
+ if msg.Empty() { | |
+ e.peerRequestQueue.TasksDone(p, nextTasks...) | |
+ continue | |
+ } | |
+ | |
+ log.Debugw("Bitswap engine -> msg", "local", e.self, "to", p, "blockCount", len(msg.Blocks()), "presenceCount", len(msg.BlockPresences()), "size", msg.Size()) | |
+ return &Envelope{ | |
+ Peer: p, | |
+ Message: msg, | |
+ Sent: func() { | |
+ // Once the message has been sent, signal the request queue so | |
+ // it can be cleared from the queue | |
+ e.peerRequestQueue.TasksDone(p, nextTasks...) | |
+ | |
+ // Signal the worker to check for more work | |
+ e.signalNewWork() | |
+ }, | |
+ }, nil | |
+ } | |
+} | |
+ | |
+// Outbox returns a channel of one-time use Envelope channels. | |
+func (e *Engine) Outbox() <-chan (<-chan *Envelope) { | |
+ return e.outbox | |
+} | |
+ | |
+// Peers returns a slice of Peers with whom the local node has active sessions. | |
+func (e *Engine) Peers() []peer.ID { | |
+ e.lock.RLock() | |
+ defer e.lock.RUnlock() | |
+ | |
+ response := make([]peer.ID, 0, len(e.ledgerMap)) | |
+ | |
+ for _, ledger := range e.ledgerMap { | |
+ response = append(response, ledger.Partner) | |
+ } | |
+ return response | |
+} | |
+ | |
+// MessageReceived is called when a message is received from a remote peer. | |
+// For each item in the wantlist, add a want-have or want-block entry to the | |
+// request queue (these are later popped off by the task workers) | |
+func (e *Engine) MessageReceived(ctx context.Context, p peer.ID, m bsmsg.BitSwapMessage) { | |
+ entries := m.Wantlist() | |
+ | |
+ if len(entries) > 0 { | |
+ log.Debugw("Bitswap engine <- msg", "local", e.self, "from", p, "entryCount", len(entries)) | |
+ for _, et := range entries { | |
+ if !et.Cancel { | |
+ if et.WantType == pb.Message_Wantlist_Have { | |
+ log.Debugw("Bitswap engine <- want-have", "local", e.self, "from", p, "cid", et.Cid) | |
+ } else { | |
+ log.Debugw("Bitswap engine <- want-block", "local", e.self, "from", p, "cid", et.Cid) | |
+ } | |
+ } | |
+ } | |
+ } | |
+ | |
+ if m.Empty() { | |
+ log.Infof("received empty message from %s", p) | |
+ } | |
+ | |
+ newWorkExists := false | |
+ defer func() { | |
+ if newWorkExists { | |
+ e.signalNewWork() | |
+ } | |
+ }() | |
+ | |
+ // Dispatch entries | |
+ wants, cancels := e.splitWantsCancels(entries) | |
+ wants, denials := e.splitWantsDenials(p, wants) | |
+ | |
+ // Get block sizes | |
+ wantKs := cid.NewSet() | |
+ for _, entry := range wants { | |
+ wantKs.Add(entry.Cid) | |
+ } | |
+ blockSizes, err := e.bsm.getBlockSizes(ctx, wantKs.Keys()) | |
+ if err != nil { | |
+ log.Info("aborting message processing", err) | |
+ return | |
+ } | |
+ | |
+ e.lock.Lock() | |
+ for _, entry := range wants { | |
+ e.peerLedger.Wants(p, entry.Cid) | |
+ } | |
+ for _, entry := range cancels { | |
+ e.peerLedger.CancelWant(p, entry.Cid) | |
+ } | |
+ e.lock.Unlock() | |
+ | |
+ // Get the ledger for the peer | |
+ l := e.findOrCreate(p) | |
+ l.lk.Lock() | |
+ defer l.lk.Unlock() | |
+ | |
+ // If the peer sent a full wantlist, replace the ledger's wantlist | |
+ if m.Full() { | |
+ l.wantList = wl.New() | |
+ } | |
+ | |
+ var activeEntries []peertask.Task | |
+ | |
+ // Remove cancelled blocks from the queue | |
+ for _, entry := range cancels { | |
+ log.Debugw("Bitswap engine <- cancel", "local", e.self, "from", p, "cid", entry.Cid) | |
+ if l.CancelWant(entry.Cid) { | |
+ e.peerRequestQueue.Remove(entry.Cid, p) | |
+ } | |
+ } | |
+ | |
+	// sendDontHave queues a DONT_HAVE response when the requester asked for one | |
+ sendDontHave := func(entry bsmsg.Entry) { | |
+ // Only add the task to the queue if the requester wants a DONT_HAVE | |
+ if e.sendDontHaves && entry.SendDontHave { | |
+ c := entry.Cid | |
+ | |
+ newWorkExists = true | |
+ isWantBlock := false | |
+ if entry.WantType == pb.Message_Wantlist_Block { | |
+ isWantBlock = true | |
+ } | |
+ | |
+ activeEntries = append(activeEntries, peertask.Task{ | |
+ Topic: c, | |
+ Priority: int(entry.Priority), | |
+ Work: bsmsg.BlockPresenceSize(c), | |
+ Data: &taskData{ | |
+ BlockSize: 0, | |
+ HaveBlock: false, | |
+ IsWantBlock: isWantBlock, | |
+ SendDontHave: entry.SendDontHave, | |
+ }, | |
+ }) | |
+ } | |
+ } | |
+ | |
+ // Deny access to blocks | |
+ for _, entry := range denials { | |
+ log.Debugw("Bitswap engine: block denied access", "local", e.self, "from", p, "cid", entry.Cid, "sendDontHave", entry.SendDontHave) | |
+ sendDontHave(entry) | |
+ } | |
+ | |
+ // For each want-have / want-block | |
+ for _, entry := range wants { | |
+ c := entry.Cid | |
+ blockSize, found := blockSizes[entry.Cid] | |
+ | |
+ // Add each want-have / want-block to the ledger | |
+ l.Wants(c, entry.Priority, entry.WantType) | |
+ | |
+ // If the block was not found | |
+ if !found { | |
+ log.Debugw("Bitswap engine: block not found", "local", e.self, "from", p, "cid", entry.Cid, "sendDontHave", entry.SendDontHave) | |
+ sendDontHave(entry) | |
+ } else { | |
+ // The block was found, add it to the queue | |
+ newWorkExists = true | |
+ | |
+ isWantBlock := e.sendAsBlock(entry.WantType, blockSize) | |
+ | |
+ log.Debugw("Bitswap engine: block found", "local", e.self, "from", p, "cid", entry.Cid, "isWantBlock", isWantBlock) | |
+ | |
+ // entrySize is the amount of space the entry takes up in the | |
+ // message we send to the recipient. If we're sending a block, the | |
+ // entrySize is the size of the block. Otherwise it's the size of | |
+ // a block presence entry. | |
+ entrySize := blockSize | |
+ if !isWantBlock { | |
+ entrySize = bsmsg.BlockPresenceSize(c) | |
+ } | |
+ activeEntries = append(activeEntries, peertask.Task{ | |
+ Topic: c, | |
+ Priority: int(entry.Priority), | |
+ Work: entrySize, | |
+ Data: &taskData{ | |
+ BlockSize: blockSize, | |
+ HaveBlock: true, | |
+ IsWantBlock: isWantBlock, | |
+ SendDontHave: entry.SendDontHave, | |
+ }, | |
+ }) | |
+ } | |
+ } | |
+ | |
+ // Push entries onto the request queue | |
+ if len(activeEntries) > 0 { | |
+ e.peerRequestQueue.PushTasks(p, activeEntries...) | |
+ e.updateMetrics() | |
+ } | |
+} | |
+ | |
+// Split the want-have / want-block entries from the cancel entries | |
+func (e *Engine) splitWantsCancels(es []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Entry) { | |
+ wants := make([]bsmsg.Entry, 0, len(es)) | |
+ cancels := make([]bsmsg.Entry, 0, len(es)) | |
+ for _, et := range es { | |
+ if et.Cancel { | |
+ cancels = append(cancels, et) | |
+ } else { | |
+ wants = append(wants, et) | |
+ } | |
+ } | |
+ return wants, cancels | |
+} | |
+ | |
+// Split the want-have / want-block entries from the block that will be denied access | |
+func (e *Engine) splitWantsDenials(p peer.ID, allWants []bsmsg.Entry) ([]bsmsg.Entry, []bsmsg.Entry) { | |
+ if e.peerBlockRequestFilter == nil { | |
+ return allWants, nil | |
+ } | |
+ | |
+ wants := make([]bsmsg.Entry, 0, len(allWants)) | |
+ denied := make([]bsmsg.Entry, 0, len(allWants)) | |
+ | |
+ for _, et := range allWants { | |
+ if e.peerBlockRequestFilter(p, et.Cid) { | |
+ wants = append(wants, et) | |
+ } else { | |
+ denied = append(denied, et) | |
+ } | |
+ } | |
+ | |
+ return wants, denied | |
+} | |
+ | |
+// ReceivedBlocks is called when new blocks are received from the network. | |
+// This function also updates the receive side of the ledger. | |
+func (e *Engine) ReceivedBlocks(from peer.ID, blks []blocks.Block) { | |
+ if len(blks) == 0 { | |
+ return | |
+ } | |
+ | |
+ l := e.findOrCreate(from) | |
+ | |
+ // Record how many bytes were received in the ledger | |
+ l.lk.Lock() | |
+ defer l.lk.Unlock() | |
+ for _, blk := range blks { | |
+ log.Debugw("Bitswap engine <- block", "local", e.self, "from", from, "cid", blk.Cid(), "size", len(blk.RawData())) | |
+ e.scoreLedger.AddToReceivedBytes(l.Partner, len(blk.RawData())) | |
+ } | |
+} | |
+ | |
+// NotifyNewBlocks is called when new blocks become available locally, and in particular when the caller of bitswap | |
+// decides to store those blocks and make them available on the network. | |
+func (e *Engine) NotifyNewBlocks(blks []blocks.Block) { | |
+ if len(blks) == 0 { | |
+ return | |
+ } | |
+ | |
+ // Get the size of each block | |
+ blockSizes := make(map[cid.Cid]int, len(blks)) | |
+ for _, blk := range blks { | |
+ blockSizes[blk.Cid()] = len(blk.RawData()) | |
+ } | |
+ | |
+ // Check each peer to see if it wants one of the blocks we received | |
+ var work bool | |
+ missingWants := make(map[peer.ID][]cid.Cid) | |
+ for _, b := range blks { | |
+ k := b.Cid() | |
+ | |
+ e.lock.RLock() | |
+ peers := e.peerLedger.Peers(k) | |
+ e.lock.RUnlock() | |
+ | |
+ for _, p := range peers { | |
+ e.lock.RLock() | |
+ ledger, ok := e.ledgerMap[p] | |
+ e.lock.RUnlock() | |
+ | |
+ if !ok { | |
+ // This can happen if the peer has disconnected while we're processing this list. | |
+ log.Debugw("failed to find peer in ledger", "peer", p) | |
+ missingWants[p] = append(missingWants[p], k) | |
+ continue | |
+ } | |
+ ledger.lk.RLock() | |
+ entry, ok := ledger.WantListContains(k) | |
+ ledger.lk.RUnlock() | |
+ if !ok { | |
+ // This can happen if the peer has canceled their want while we're processing this message. | |
+ log.Debugw("wantlist index doesn't match peer's wantlist", "peer", p) | |
+ missingWants[p] = append(missingWants[p], k) | |
+ continue | |
+ } | |
+ work = true | |
+ | |
+ blockSize := blockSizes[k] | |
+ isWantBlock := e.sendAsBlock(entry.WantType, blockSize) | |
+ | |
+ entrySize := blockSize | |
+ if !isWantBlock { | |
+ entrySize = bsmsg.BlockPresenceSize(k) | |
+ } | |
+ | |
+ e.peerRequestQueue.PushTasks(p, peertask.Task{ | |
+ Topic: entry.Cid, | |
+ Priority: int(entry.Priority), | |
+ Work: entrySize, | |
+ Data: &taskData{ | |
+ BlockSize: blockSize, | |
+ HaveBlock: true, | |
+ IsWantBlock: isWantBlock, | |
+ SendDontHave: false, | |
+ }, | |
+ }) | |
+ e.updateMetrics() | |
+ } | |
+ } | |
+ | |
+	// If we found missing wants (e.g., because the peer disconnected; there are some races here), | |
+	// remove them from the list. Unfortunately, we still have to re-check because the peer | |
+	// could have re-connected in the meantime. | |
+ if len(missingWants) > 0 { | |
+ e.lock.Lock() | |
+ for p, wl := range missingWants { | |
+ if ledger, ok := e.ledgerMap[p]; ok { | |
+ ledger.lk.RLock() | |
+ for _, k := range wl { | |
+ if _, has := ledger.WantListContains(k); has { | |
+ continue | |
+ } | |
+ e.peerLedger.CancelWant(p, k) | |
+ } | |
+ ledger.lk.RUnlock() | |
+ } else { | |
+ for _, k := range wl { | |
+ e.peerLedger.CancelWant(p, k) | |
+ } | |
+ } | |
+ } | |
+ e.lock.Unlock() | |
+ } | |
+ | |
+ if work { | |
+ e.signalNewWork() | |
+ } | |
+} | |
+ | |
+// TODO add contents of m.WantList() to my local wantlist? NB: could introduce | |
+// race conditions where I send a message, but MessageSent gets handled after | |
+// MessageReceived. The information in the local wantlist could become | |
+// inconsistent. Would need to ensure that Sends and acknowledgement of the | |
+// send happen atomically | |
+ | |
+// MessageSent is called when a message has successfully been sent out, to record | |
+// changes. | |
+func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) { | |
+ l := e.findOrCreate(p) | |
+ l.lk.Lock() | |
+ defer l.lk.Unlock() | |
+ | |
+ // Remove sent blocks from the want list for the peer | |
+ for _, block := range m.Blocks() { | |
+ e.scoreLedger.AddToSentBytes(l.Partner, len(block.RawData())) | |
+ l.wantList.RemoveType(block.Cid(), pb.Message_Wantlist_Block) | |
+ } | |
+ | |
+ // Remove sent block presences from the want list for the peer | |
+ for _, bp := range m.BlockPresences() { | |
+ // Don't record sent data. We reserve that for data blocks. | |
+ if bp.Type == pb.Message_Have { | |
+ l.wantList.RemoveType(bp.Cid, pb.Message_Wantlist_Have) | |
+ } | |
+ } | |
+} | |
+ | |
+// PeerConnected is called when a new peer connects, meaning we should start | |
+// sending blocks. | |
+func (e *Engine) PeerConnected(p peer.ID) { | |
+ e.lock.Lock() | |
+ defer e.lock.Unlock() | |
+ | |
+ _, ok := e.ledgerMap[p] | |
+ if !ok { | |
+ e.ledgerMap[p] = newLedger(p) | |
+ } | |
+ | |
+ e.scoreLedger.PeerConnected(p) | |
+} | |
+ | |
+// PeerDisconnected is called when a peer disconnects. | |
+func (e *Engine) PeerDisconnected(p peer.ID) { | |
+ e.lock.Lock() | |
+ defer e.lock.Unlock() | |
+ | |
+ ledger, ok := e.ledgerMap[p] | |
+ if ok { | |
+ ledger.lk.RLock() | |
+ entries := ledger.Entries() | |
+ ledger.lk.RUnlock() | |
+ | |
+ for _, entry := range entries { | |
+ e.peerLedger.CancelWant(p, entry.Cid) | |
+ } | |
+ } | |
+ delete(e.ledgerMap, p) | |
+ | |
+ e.scoreLedger.PeerDisconnected(p) | |
+} | |
+ | |
+// If the want is a want-have, and it's below a certain size, send the full | |
+// block (instead of sending a HAVE) | |
+func (e *Engine) sendAsBlock(wantType pb.Message_Wantlist_WantType, blockSize int) bool { | |
+ isWantBlock := wantType == pb.Message_Wantlist_Block | |
+ return isWantBlock || blockSize <= e.maxBlockSizeReplaceHasWithBlock | |
+} | |
+ | |
+func (e *Engine) numBytesSentTo(p peer.ID) uint64 { | |
+ return e.LedgerForPeer(p).Sent | |
+} | |
+ | |
+func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 { | |
+ return e.LedgerForPeer(p).Recv | |
+} | |
+ | |
+// findOrCreate lazily instantiates a ledger for the given peer | |
+func (e *Engine) findOrCreate(p peer.ID) *ledger { | |
+ // Take a read lock (as it's less expensive) to check if we have a ledger | |
+ // for the peer | |
+ e.lock.RLock() | |
+ l, ok := e.ledgerMap[p] | |
+ e.lock.RUnlock() | |
+ if ok { | |
+ return l | |
+ } | |
+ | |
+ // There's no ledger, so take a write lock, then check again and create the | |
+ // ledger if necessary | |
+ e.lock.Lock() | |
+ defer e.lock.Unlock() | |
+ l, ok = e.ledgerMap[p] | |
+ if !ok { | |
+ l = newLedger(p) | |
+ e.ledgerMap[p] = l | |
+ } | |
+ return l | |
+} | |
+ | |
+func (e *Engine) signalNewWork() { | |
+ // Signal task generation to restart (if stopped!) | |
+ select { | |
+ case e.workSignal <- struct{}{}: | |
+ default: | |
+ } | |
+} | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/ewma.go a/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/ewma.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/ewma.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/ewma.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,5 @@ | |
+package decision | |
+ | |
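+// ewma computes an exponentially weighted moving average. For example, with | |
+// alpha = 0.5, folding a new sample of 0 into an old average of 10 yields 5. | |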
+func ewma(old, new, alpha float64) float64 { | |
+ return new*alpha + (1-alpha)*old | |
+} | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/ledger.go a/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/ledger.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/ledger.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/ledger.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,46 @@ | |
+package decision | |
+ | |
+import ( | |
+ "sync" | |
+ | |
+ wl "github.com/ipfs/go-libipfs/bitswap/client/wantlist" | |
+ pb "github.com/ipfs/go-libipfs/bitswap/message/pb" | |
+ | |
+ "github.com/ipfs/go-cid" | |
+ "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+func newLedger(p peer.ID) *ledger { | |
+ return &ledger{ | |
+ wantList: wl.New(), | |
+ Partner: p, | |
+ } | |
+} | |
+ | |
+// Keeps the wantlist for the partner. NOT threadsafe! | |
+type ledger struct { | |
+ // Partner is the remote Peer. | |
+ Partner peer.ID | |
+ | |
+ // wantList is a (bounded, small) set of keys that Partner desires. | |
+ wantList *wl.Wantlist | |
+ | |
+ lk sync.RWMutex | |
+} | |
+ | |
+func (l *ledger) Wants(k cid.Cid, priority int32, wantType pb.Message_Wantlist_WantType) { | |
+ log.Debugf("peer %s wants %s", l.Partner, k) | |
+ l.wantList.Add(k, priority, wantType) | |
+} | |
+ | |
+func (l *ledger) CancelWant(k cid.Cid) bool { | |
+ return l.wantList.Remove(k) | |
+} | |
+ | |
+func (l *ledger) WantListContains(k cid.Cid) (wl.Entry, bool) { | |
+ return l.wantList.Contains(k) | |
+} | |
+ | |
+func (l *ledger) Entries() []wl.Entry { | |
+ return l.wantList.Entries() | |
+} | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/peer_ledger.go a/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/peer_ledger.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/peer_ledger.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/peer_ledger.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,46 @@ | |
+package decision | |
+ | |
+import ( | |
+ "github.com/ipfs/go-cid" | |
+ "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+type peerLedger struct { | |
+ cids map[cid.Cid]map[peer.ID]struct{} | |
+} | |
+ | |
+func newPeerLedger() *peerLedger { | |
+ return &peerLedger{cids: make(map[cid.Cid]map[peer.ID]struct{})} | |
+} | |
+ | |
+func (l *peerLedger) Wants(p peer.ID, k cid.Cid) { | |
+ m, ok := l.cids[k] | |
+ if !ok { | |
+ m = make(map[peer.ID]struct{}) | |
+ l.cids[k] = m | |
+ } | |
+ m[p] = struct{}{} | |
+} | |
+ | |
+func (l *peerLedger) CancelWant(p peer.ID, k cid.Cid) { | |
+ m, ok := l.cids[k] | |
+ if !ok { | |
+ return | |
+ } | |
+ delete(m, p) | |
+ if len(m) == 0 { | |
+ delete(l.cids, k) | |
+ } | |
+} | |
+ | |
+func (l *peerLedger) Peers(k cid.Cid) []peer.ID { | |
+ m, ok := l.cids[k] | |
+ if !ok { | |
+ return nil | |
+ } | |
+ peers := make([]peer.ID, 0, len(m)) | |
+ for p := range m { | |
+ peers = append(peers, p) | |
+ } | |
+ return peers | |
+} | |
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/scoreledger.go a/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/scoreledger.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/scoreledger.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/scoreledger.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,353 @@ | |
+package decision | |
+ | |
+import ( | |
+ "sync" | |
+ "time" | |
+ | |
+ "github.com/benbjohnson/clock" | |
+ peer "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+const ( | |
+ // the alpha for the EWMA used to track short term usefulness | |
+ shortTermAlpha = 0.5 | |
+ | |
+ // the alpha for the EWMA used to track long term usefulness | |
+ longTermAlpha = 0.05 | |
+ | |
+ // how frequently the engine should sample usefulness. Peers that | |
+ // interact every shortTerm time period are considered "active". | |
+ shortTerm = 10 * time.Second | |
+ | |
+ // long term ratio defines what "long term" means in terms of the | |
+ // shortTerm duration. Peers that interact once every longTermRatio are | |
+ // considered useful over the long term. | |
+ longTermRatio = 10 | |
+ | |
+ // long/short term scores for tagging peers | |
+ longTermScore = 10 // this is a high tag but it grows _very_ slowly. | |
+ shortTermScore = 10 // this is a high tag but it'll go away quickly if we aren't using the peer. | |
+) | |
+ | |
+// Stores the data exchange relationship between two peers. | |
+type scoreledger struct { | |
+ // Partner is the remote Peer. | |
+ partner peer.ID | |
+ | |
+ // tracks bytes sent... | |
+ bytesSent uint64 | |
+ | |
+ // ...and received. | |
+ bytesRecv uint64 | |
+ | |
+ // lastExchange is the time of the last data exchange. | |
+ lastExchange time.Time | |
+ | |
+ // These scores keep track of how useful we think this peer is. Short | |
+ // tracks short-term usefulness and long tracks long-term usefulness. | |
+ shortScore, longScore float64 | |
+ | |
+ // Score keeps track of the score used in the peer tagger. We track it | |
+ // here to avoid unnecessarily updating the tags in the connection manager. | |
+ score int | |
+ | |
+ // exchangeCount is the number of exchanges with this peer | |
+ exchangeCount uint64 | |
+ | |
+ // the record lock | |
+ lock sync.RWMutex | |
+ | |
+ clock clock.Clock | |
+} | |
+ | |
+// Receipt is a summary of the ledger for a given peer | |
+// collecting various pieces of aggregated data for external | |
+// reporting purposes. | |
+type Receipt struct { | |
+ Peer string | |
+ Value float64 | |
+ Sent uint64 | |
+ Recv uint64 | |
+ Exchanged uint64 | |
+} | |
+ | |
+// Increments the sent counter. | |
+func (l *scoreledger) AddToSentBytes(n int) { | |
+ l.lock.Lock() | |
+ defer l.lock.Unlock() | |
+ l.exchangeCount++ | |
+ l.lastExchange = l.clock.Now() | |
+ l.bytesSent += uint64(n) | |
+} | |
+ | |
+// Increments the received counter. | |
+func (l *scoreledger) AddToReceivedBytes(n int) { | |
+ l.lock.Lock() | |
+ defer l.lock.Unlock() | |
+ l.exchangeCount++ | |
+ l.lastExchange = l.clock.Now() | |
+ l.bytesRecv += uint64(n) | |
+} | |
+ | |
+// Returns the Receipt for this ledger record. | |
+func (l *scoreledger) Receipt() *Receipt { | |
+ l.lock.RLock() | |
+ defer l.lock.RUnlock() | |
+ | |
+ return &Receipt{ | |
+ Peer: l.partner.String(), | |
+ Value: float64(l.bytesSent) / float64(l.bytesRecv+1), | |
+ Sent: l.bytesSent, | |
+ Recv: l.bytesRecv, | |
+ Exchanged: l.exchangeCount, | |
+ } | |
+} | |
+ | |
+// DefaultScoreLedger is used by Engine as the default ScoreLedger. | |
+type DefaultScoreLedger struct { | |
+ // the score func | |
+ scorePeer ScorePeerFunc | |
+ // is closed on Close | |
+ closing chan struct{} | |
+	// protects the fields immediately below | |
+ lock sync.RWMutex | |
+ // ledgerMap lists score ledgers by their partner key. | |
+ ledgerMap map[peer.ID]*scoreledger | |
+ // how frequently the engine should sample peer usefulness | |
+ peerSampleInterval time.Duration | |
+ // used by the tests to detect when a sample is taken | |
+ sampleCh chan struct{} | |
+ clock clock.Clock | |
+} | |
+ | |
+// scoreWorker keeps track of how "useful" our peers are, updating scores in the | |
+// connection manager. | |
+// | |
+// It does this by tracking two scores: short-term usefulness and long-term | |
+// usefulness. Short-term usefulness is sampled frequently and weights new | |
+// observations heavily. Long-term usefulness is sampled less frequently and | |
+// weights long-term trends heavily. | |
+// | |
+// In practice, we do this by keeping two EWMAs. If we see an interaction | |
+// within the sampling period, we record the score; otherwise, we record a 0. | |
+// The short-term one has a high alpha and is sampled every shortTerm period. | |
+// The long-term one has a low alpha and is sampled every | |
+// longTermRatio*shortTerm period. | |
+// | |
+// To calculate the final score, we sum the short-term and long-term scores then | |
+// adjust it ±25% based on our debt ratio. Peers that have historically been | |
+// more useful to us than we are to them get the highest score. | |
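+// | |
+// As a worked example of that adjustment: if we have received as many bytes | |
+// from a peer as we have sent it, lscore is 0.5 and the multiplier is | |
+// 0.5*0.5+0.75 = 1.0; a peer we only receive from gets 1.25x, while a peer we | |
+// only send to gets 0.75x. | |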
+func (dsl *DefaultScoreLedger) scoreWorker() { | |
+ ticker := dsl.clock.Ticker(dsl.peerSampleInterval) | |
+ defer ticker.Stop() | |
+ | |
+ type update struct { | |
+ peer peer.ID | |
+ score int | |
+ } | |
+ var ( | |
+ lastShortUpdate, lastLongUpdate time.Time | |
+ updates []update | |
+ ) | |
+ | |
+ for i := 0; ; i = (i + 1) % longTermRatio { | |
+ var now time.Time | |
+ select { | |
+ case now = <-ticker.C: | |
+ case <-dsl.closing: | |
+ return | |
+ } | |
+ | |
+ // The long term update ticks every `longTermRatio` short | |
+ // intervals. | |
+ updateLong := i == 0 | |
+ | |
+ dsl.lock.Lock() | |
+ for _, l := range dsl.ledgerMap { | |
+ l.lock.Lock() | |
+ | |
+ // Update the short-term score. | |
+ if l.lastExchange.After(lastShortUpdate) { | |
+ l.shortScore = ewma(l.shortScore, shortTermScore, shortTermAlpha) | |
+ } else { | |
+ l.shortScore = ewma(l.shortScore, 0, shortTermAlpha) | |
+ } | |
+ | |
+ // Update the long-term score. | |
+ if updateLong { | |
+ if l.lastExchange.After(lastLongUpdate) { | |
+ l.longScore = ewma(l.longScore, longTermScore, longTermAlpha) | |
+ } else { | |
+ l.longScore = ewma(l.longScore, 0, longTermAlpha) | |
+ } | |
+ } | |
+ | |
+ // Calculate the new score. | |
+ // | |
+ // The accounting score adjustment prefers peers _we_ | |
+ // need over peers that need us. This doesn't help with | |
+ // leeching. | |
+ var lscore float64 | |
+ if l.bytesRecv == 0 { | |
+ lscore = 0 | |
+ } else { | |
+ lscore = float64(l.bytesRecv) / float64(l.bytesRecv+l.bytesSent) | |
+ } | |
+ score := int((l.shortScore + l.longScore) * (lscore*.5 + .75)) | |
+ | |
+ // Avoid updating the connection manager unless there's a change. This can be expensive. | |
+ if l.score != score { | |
+				// put these in a list so we can perform the updates outside the _global_ lock. | |
+ updates = append(updates, update{l.partner, score}) | |
+ l.score = score | |
+ } | |
+ l.lock.Unlock() | |
+ } | |
+ dsl.lock.Unlock() | |
+ | |
+ // record the times. | |
+ lastShortUpdate = now | |
+ if updateLong { | |
+ lastLongUpdate = now | |
+ } | |
+ | |
+ // apply the updates | |
+ for _, update := range updates { | |
+ dsl.scorePeer(update.peer, update.score) | |
+ } | |
+ // Keep the memory. It's not much and it saves us from having to allocate. | |
+ updates = updates[:0] | |
+ | |
+ // Used by the tests | |
+ if dsl.sampleCh != nil { | |
+ dsl.sampleCh <- struct{}{} | |
+ } | |
+ } | |
+} | |
+ | |
+// Returns the score ledger for the given peer or nil if that peer | |
+// is not on the ledger. | |
+func (dsl *DefaultScoreLedger) find(p peer.ID) *scoreledger { | |
+ // Take a read lock (as it's less expensive) to check if we have | |
+ // a ledger for the peer. | |
+ dsl.lock.RLock() | |
+ l, ok := dsl.ledgerMap[p] | |
+ dsl.lock.RUnlock() | |
+ if ok { | |
+ return l | |
+ } | |
+ return nil | |
+} | |
+ | |
+// Returns a new scoreledger. | |
+func newScoreLedger(p peer.ID, clock clock.Clock) *scoreledger { | |
+ return &scoreledger{ | |
+ partner: p, | |
+ clock: clock, | |
+ } | |
+} | |
+ | |
+// Lazily instantiates a ledger. | |
+func (dsl *DefaultScoreLedger) findOrCreate(p peer.ID) *scoreledger { | |
+ l := dsl.find(p) | |
+ if l != nil { | |
+ return l | |
+ } | |
+ | |
+ // There's no ledger, so take a write lock, then check again and | |
+ // create the ledger if necessary. | |
+ dsl.lock.Lock() | |
+ defer dsl.lock.Unlock() | |
+ l, ok := dsl.ledgerMap[p] | |
+ if !ok { | |
+ l = newScoreLedger(p, dsl.clock) | |
+ dsl.ledgerMap[p] = l | |
+ } | |
+ return l | |
+} | |
+ | |
+// GetReceipt returns aggregated data about communication with a given peer. | |
+func (dsl *DefaultScoreLedger) GetReceipt(p peer.ID) *Receipt { | |
+ l := dsl.find(p) | |
+ if l != nil { | |
+ return l.Receipt() | |
+ } | |
+ | |
+ // Return a blank receipt otherwise. | |
+ return &Receipt{ | |
+ Peer: p.String(), | |
+ Value: 0, | |
+ Sent: 0, | |
+ Recv: 0, | |
+ Exchanged: 0, | |
+ } | |
+} | |
+ | |
+// Starts the default ledger sampling process. | |
+func (dsl *DefaultScoreLedger) Start(scorePeer ScorePeerFunc) { | |
+ dsl.init(scorePeer) | |
+ go dsl.scoreWorker() | |
+} | |
+ | |
+// Stops the sampling process. | |
+func (dsl *DefaultScoreLedger) Stop() { | |
+ close(dsl.closing) | |
+} | |
+ | |
+// Initializes the score ledger. | |
+func (dsl *DefaultScoreLedger) init(scorePeer ScorePeerFunc) { | |
+ dsl.lock.Lock() | |
+ defer dsl.lock.Unlock() | |
+ dsl.scorePeer = scorePeer | |
+} | |
+ | |
+// Increments the sent counter for the given peer. | |
+func (dsl *DefaultScoreLedger) AddToSentBytes(p peer.ID, n int) { | |
+ l := dsl.findOrCreate(p) | |
+ l.AddToSentBytes(n) | |
+} | |
+ | |
+// Increments the received counter for the given peer. | |
+func (dsl *DefaultScoreLedger) AddToReceivedBytes(p peer.ID, n int) { | |
+ l := dsl.findOrCreate(p) | |
+ l.AddToReceivedBytes(n) | |
+} | |
+ | |
+// PeerConnected should be called when a new peer connects, meaning | |
+// we should open accounting. | |
+func (dsl *DefaultScoreLedger) PeerConnected(p peer.ID) { | |
+ dsl.lock.Lock() | |
+ defer dsl.lock.Unlock() | |
+ _, ok := dsl.ledgerMap[p] | |
+ if !ok { | |
+ dsl.ledgerMap[p] = newScoreLedger(p, dsl.clock) | |
+ } | |
+} | |
+ | |
+// PeerDisconnected should be called when a peer disconnects to | |
+// clean up the accounting. | |
+func (dsl *DefaultScoreLedger) PeerDisconnected(p peer.ID) { | |
+ dsl.lock.Lock() | |
+ defer dsl.lock.Unlock() | |
+ delete(dsl.ledgerMap, p) | |
+} | |
+ | |
+// Creates a new instance of the default score ledger. | |
+func NewDefaultScoreLedger() *DefaultScoreLedger { | |
+ return &DefaultScoreLedger{ | |
+ ledgerMap: make(map[peer.ID]*scoreledger), | |
+ closing: make(chan struct{}), | |
+ peerSampleInterval: shortTerm, | |
+ clock: clock.New(), | |
+ } | |
+} | |
+ | |
+// Creates a new instance of the default score ledger with testing | |
+// parameters. | |
+func NewTestScoreLedger(peerSampleInterval time.Duration, sampleCh chan struct{}, clock clock.Clock) *DefaultScoreLedger { | |
+ dsl := NewDefaultScoreLedger() | |
+ dsl.peerSampleInterval = peerSampleInterval | |
+ dsl.sampleCh = sampleCh | |
+ dsl.clock = clock | |
+ return dsl | |
+} | |
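To make the ledger API above easier to follow, here is a minimal usage sketch, written as if it lived inside the same decision package. The helper name, byte counts and printed output are illustrative only and are not part of the vendored code.

package decision

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
)

// exampleScoreLedgerUsage sketches how the DefaultScoreLedger methods above
// fit together (hypothetical helper, for illustration only).
func exampleScoreLedgerUsage(p peer.ID) {
	dsl := NewDefaultScoreLedger()

	// Start launches the background scoreWorker; the callback receives the
	// periodically recomputed score for each tracked peer.
	dsl.Start(func(p peer.ID, score int) {
		fmt.Println("peer", p, "score is now", score)
	})
	defer dsl.Stop()

	dsl.PeerConnected(p)
	dsl.AddToSentBytes(p, 1<<20)    // we served 1 MiB to this peer
	dsl.AddToReceivedBytes(p, 4096) // the peer sent us 4 KiB

	// Sent/Recv come straight from the byte counters; Value is refreshed by
	// the score worker on its sampling interval (shortTerm by default).
	r := dsl.GetReceipt(p)
	fmt.Printf("sent=%d recv=%d value=%.2f\n", r.Sent, r.Recv, r.Value)

	dsl.PeerDisconnected(p)
}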
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/taskmerger.go a/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/taskmerger.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/taskmerger.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/server/internal/decision/taskmerger.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,87 @@ | |
+package decision | |
+ | |
+import ( | |
+ "github.com/ipfs/go-peertaskqueue/peertask" | |
+) | |
+ | |
+// taskData is extra data associated with each task in the request queue | |
+type taskData struct { | |
+ // Tasks can be want-have or want-block | |
+ IsWantBlock bool | |
+ // Whether to immediately send a response if the block is not found | |
+ SendDontHave bool | |
+ // The size of the block corresponding to the task | |
+ BlockSize int | |
+ // Whether the block was found | |
+ HaveBlock bool | |
+} | |
+ | |
+type taskMerger struct{} | |
+ | |
+func newTaskMerger() *taskMerger { | |
+ return &taskMerger{} | |
+} | |
+ | |
+// The request queue uses this method to decide if a newly pushed task has any | |
+// new information beyond the tasks with the same Topic (CID) in the queue. | |
+func (*taskMerger) HasNewInfo(task peertask.Task, existing []*peertask.Task) bool { | |
+ haveSize := false | |
+ isWantBlock := false | |
+ for _, et := range existing { | |
+ etd := et.Data.(*taskData) | |
+ if etd.HaveBlock { | |
+ haveSize = true | |
+ } | |
+ | |
+ if etd.IsWantBlock { | |
+ isWantBlock = true | |
+ } | |
+ } | |
+ | |
+ // If there is no active want-block and the new task is a want-block, | |
+ // the new task is better | |
+ newTaskData := task.Data.(*taskData) | |
+ if !isWantBlock && newTaskData.IsWantBlock { | |
+ return true | |
+ } | |
+ | |
+ // If there is no size information for the CID and the new task has | |
+ // size information, the new task is better | |
+ if !haveSize && newTaskData.HaveBlock { | |
+ return true | |
+ } | |
+ | |
+ return false | |
+} | |
+ | |
+// The request queue uses Merge to merge a newly pushed task with an existing | |
+// task with the same Topic (CID) | |
+func (*taskMerger) Merge(task peertask.Task, existing *peertask.Task) { | |
+ newTask := task.Data.(*taskData) | |
+ existingTask := existing.Data.(*taskData) | |
+ | |
+ // If we now have block size information, update the task with | |
+ // the new block size | |
+ if !existingTask.HaveBlock && newTask.HaveBlock { | |
+ existingTask.HaveBlock = newTask.HaveBlock | |
+ existingTask.BlockSize = newTask.BlockSize | |
+ } | |
+ | |
+ // If replacing a want-have with a want-block | |
+ if !existingTask.IsWantBlock && newTask.IsWantBlock { | |
+ // Change the type from want-have to want-block | |
+ existingTask.IsWantBlock = true | |
+ // If the want-have was a DONT_HAVE, or the want-block has a size | |
+ if !existingTask.HaveBlock || newTask.HaveBlock { | |
+ // Update the entry size | |
+ existingTask.HaveBlock = newTask.HaveBlock | |
+ existing.Work = task.Work | |
+ } | |
+ } | |
+ | |
+ // If the task is a want-block, make sure the entry size is equal | |
+ // to the block size (because we will send the whole block) | |
+ if existingTask.IsWantBlock && existingTask.HaveBlock { | |
+ existing.Work = existingTask.BlockSize | |
+ } | |
+} | |
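A short sketch of the merge rules above, again written as if inside the decision package: a want-block with a known block size upgrades an existing want-have entry and resets its Work to the block size. The helper name and the 256 KiB figure are invented for illustration.

package decision

import (
	"fmt"

	"github.com/ipfs/go-cid"
	"github.com/ipfs/go-peertaskqueue/peertask"
)

// exampleTaskMerge demonstrates HasNewInfo and Merge on two tasks that share
// the same Topic (CID). Hypothetical helper, for illustration only.
func exampleTaskMerge(c cid.Cid) {
	tm := newTaskMerger()

	// Already queued: a want-have for which we do not yet know the block size.
	existing := &peertask.Task{
		Topic: c,
		Work:  1, // placeholder work for a presence response
		Data:  &taskData{IsWantBlock: false, HaveBlock: false, SendDontHave: true},
	}

	// Newly pushed: a want-block, and we have the block, so the size is known.
	incoming := peertask.Task{
		Topic: c,
		Work:  256 * 1024,
		Data:  &taskData{IsWantBlock: true, HaveBlock: true, BlockSize: 256 * 1024, SendDontHave: true},
	}

	fmt.Println(tm.HasNewInfo(incoming, []*peertask.Task{existing})) // true

	tm.Merge(incoming, existing)
	merged := existing.Data.(*taskData)
	fmt.Println(merged.IsWantBlock, merged.BlockSize, existing.Work) // true 262144 262144
}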
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/server/server.go a/vendor/github.com/ipfs/go-libipfs/bitswap/server/server.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/server/server.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/server/server.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,536 @@ | |
+package server | |
+ | |
+import ( | |
+ "context" | |
+ "errors" | |
+ "fmt" | |
+ "sort" | |
+ "sync" | |
+ "time" | |
+ | |
+ "github.com/ipfs/go-cid" | |
+ blockstore "github.com/ipfs/go-ipfs-blockstore" | |
+ "github.com/ipfs/go-libipfs/bitswap/internal/defaults" | |
+ "github.com/ipfs/go-libipfs/bitswap/message" | |
+ pb "github.com/ipfs/go-libipfs/bitswap/message/pb" | |
+ bmetrics "github.com/ipfs/go-libipfs/bitswap/metrics" | |
+ bsnet "github.com/ipfs/go-libipfs/bitswap/network" | |
+ "github.com/ipfs/go-libipfs/bitswap/server/internal/decision" | |
+ "github.com/ipfs/go-libipfs/bitswap/tracer" | |
+ blocks "github.com/ipfs/go-libipfs/blocks" | |
+ logging "github.com/ipfs/go-log" | |
+ "github.com/ipfs/go-metrics-interface" | |
+ process "github.com/jbenet/goprocess" | |
+ procctx "github.com/jbenet/goprocess/context" | |
+ "github.com/libp2p/go-libp2p/core/peer" | |
+ "go.uber.org/zap" | |
+) | |
+ | |
+var provideKeysBufferSize = 2048 | |
+ | |
+var log = logging.Logger("bitswap-server") | |
+var sflog = log.Desugar() | |
+ | |
+const provideWorkerMax = 6 | |
+ | |
+type Option func(*Server) | |
+ | |
+type Server struct { | |
+ sentHistogram metrics.Histogram | |
+ sendTimeHistogram metrics.Histogram | |
+ | |
+ // the engine is the bit of logic that decides who to send which blocks to | |
+ engine *decision.Engine | |
+ | |
+ // network delivers messages on behalf of the session | |
+ network bsnet.BitSwapNetwork | |
+ | |
+ // External statistics interface | |
+ tracer tracer.Tracer | |
+ | |
+ // Counters for various statistics | |
+ counterLk sync.Mutex | |
+ counters Stat | |
+ | |
+ // the total number of simultaneous threads sending outgoing messages | |
+ taskWorkerCount int | |
+ | |
+ process process.Process | |
+ | |
+ // newBlocks is a channel for newly added blocks to be provided to the | |
+ // network. blocks pushed down this channel get buffered and fed to the | |
+ // provideKeys channel later on to avoid too much network activity | |
+ newBlocks chan cid.Cid | |
+ // provideKeys directly feeds provide workers | |
+ provideKeys chan cid.Cid | |
+ | |
+ // Extra options to pass to the decision manager | |
+ engineOptions []decision.Option | |
+ | |
+ // the size of channel buffer to use | |
+ hasBlockBufferSize int | |
+ // whether or not to make provide announcements | |
+ provideEnabled bool | |
+} | |
+ | |
+func New(ctx context.Context, network bsnet.BitSwapNetwork, bstore blockstore.Blockstore, options ...Option) *Server { | |
+ ctx, cancel := context.WithCancel(ctx) | |
+ | |
+ px := process.WithTeardown(func() error { | |
+ return nil | |
+ }) | |
+ go func() { | |
+ <-px.Closing() // process closes first | |
+ cancel() | |
+ }() | |
+ | |
+ s := &Server{ | |
+ sentHistogram: bmetrics.SentHist(ctx), | |
+ sendTimeHistogram: bmetrics.SendTimeHist(ctx), | |
+ taskWorkerCount: defaults.BitswapTaskWorkerCount, | |
+ network: network, | |
+ process: px, | |
+ provideEnabled: true, | |
+ hasBlockBufferSize: defaults.HasBlockBufferSize, | |
+ provideKeys: make(chan cid.Cid, provideKeysBufferSize), | |
+ } | |
+ s.newBlocks = make(chan cid.Cid, s.hasBlockBufferSize) | |
+ | |
+ for _, o := range options { | |
+ o(s) | |
+ } | |
+ | |
+ s.engine = decision.NewEngine( | |
+ ctx, | |
+ bstore, | |
+ network.ConnectionManager(), | |
+ network.Self(), | |
+ s.engineOptions..., | |
+ ) | |
+ s.engineOptions = nil | |
+ | |
+ s.startWorkers(ctx, px) | |
+ | |
+ return s | |
+} | |
+ | |
+func TaskWorkerCount(count int) Option { | |
+ if count <= 0 { | |
+ panic(fmt.Sprintf("task worker count is %d but must be > 0", count)) | |
+ } | |
+ return func(bs *Server) { | |
+ bs.taskWorkerCount = count | |
+ } | |
+} | |
+ | |
+func WithTracer(tap tracer.Tracer) Option { | |
+ return func(bs *Server) { | |
+ bs.tracer = tap | |
+ } | |
+} | |
+ | |
+// ProvideEnabled is an option for enabling/disabling provide announcements | |
+func ProvideEnabled(enabled bool) Option { | |
+ return func(bs *Server) { | |
+ bs.provideEnabled = enabled | |
+ } | |
+} | |
+ | |
+func WithPeerBlockRequestFilter(pbrf decision.PeerBlockRequestFilter) Option { | |
+ o := decision.WithPeerBlockRequestFilter(pbrf) | |
+ return func(bs *Server) { | |
+ bs.engineOptions = append(bs.engineOptions, o) | |
+ } | |
+} | |
+ | |
+// WithTaskComparator configures custom task prioritization logic. | |
+func WithTaskComparator(comparator decision.TaskComparator) Option { | |
+ o := decision.WithTaskComparator(comparator) | |
+ return func(bs *Server) { | |
+ bs.engineOptions = append(bs.engineOptions, o) | |
+ } | |
+} | |
+ | |
+// WithScoreLedger configures the engine to use the given score decision logic. | |
+func WithScoreLedger(scoreLedger decision.ScoreLedger) Option { | |
+ o := decision.WithScoreLedger(scoreLedger) | |
+ return func(bs *Server) { | |
+ bs.engineOptions = append(bs.engineOptions, o) | |
+ } | |
+} | |
+ | |
+// LedgerForPeer returns aggregated data about blocks swapped and communication | |
+// with a given peer. | |
+func (bs *Server) LedgerForPeer(p peer.ID) *decision.Receipt { | |
+ return bs.engine.LedgerForPeer(p) | |
+} | |
+ | |
+// EngineTaskWorkerCount sets the number of worker threads used inside the engine | |
+func EngineTaskWorkerCount(count int) Option { | |
+ o := decision.WithTaskWorkerCount(count) | |
+ return func(bs *Server) { | |
+ bs.engineOptions = append(bs.engineOptions, o) | |
+ } | |
+} | |
+ | |
+// SetSendDontHaves indicates what to do when the engine receives a want-block | |
+// for a block that is not in the blockstore. Either | |
+// - Send a DONT_HAVE message | |
+// - Simply don't respond | |
+// This option is only used for testing. | |
+func SetSendDontHaves(send bool) Option { | |
+ o := decision.WithSetSendDontHave(send) | |
+ return func(bs *Server) { | |
+ bs.engineOptions = append(bs.engineOptions, o) | |
+ } | |
+} | |
+ | |
+// EngineBlockstoreWorkerCount sets the number of worker threads used for | |
+// blockstore operations in the decision engine | |
+func EngineBlockstoreWorkerCount(count int) Option { | |
+ o := decision.WithBlockstoreWorkerCount(count) | |
+ return func(bs *Server) { | |
+ bs.engineOptions = append(bs.engineOptions, o) | |
+ } | |
+} | |
+ | |
+func WithTargetMessageSize(tms int) Option { | |
+ o := decision.WithTargetMessageSize(tms) | |
+ return func(bs *Server) { | |
+ bs.engineOptions = append(bs.engineOptions, o) | |
+ } | |
+} | |
+ | |
+// MaxOutstandingBytesPerPeer describes approximately how much work we are willing to have outstanding to a peer at any | |
+// given time. Setting it to 0 will disable any limiting. | |
+func MaxOutstandingBytesPerPeer(count int) Option { | |
+ o := decision.WithMaxOutstandingBytesPerPeer(count) | |
+ return func(bs *Server) { | |
+ bs.engineOptions = append(bs.engineOptions, o) | |
+ } | |
+} | |
+ | |
+// HasBlockBufferSize configures how big the new blocks buffer should be. | |
+func HasBlockBufferSize(count int) Option { | |
+ if count < 0 { | |
+ panic("cannot have negative buffer size") | |
+ } | |
+ return func(bs *Server) { | |
+ bs.hasBlockBufferSize = count | |
+ } | |
+} | |
+ | |
+// WantlistForPeer returns the currently understood list of blocks requested by a | |
+// given peer. | |
+func (bs *Server) WantlistForPeer(p peer.ID) []cid.Cid { | |
+ var out []cid.Cid | |
+ for _, e := range bs.engine.WantlistForPeer(p) { | |
+ out = append(out, e.Cid) | |
+ } | |
+ return out | |
+} | |
+ | |
+func (bs *Server) startWorkers(ctx context.Context, px process.Process) { | |
+ bs.engine.StartWorkers(ctx, px) | |
+ | |
+ // Start up workers to handle requests from other nodes for the data on this node | |
+ for i := 0; i < bs.taskWorkerCount; i++ { | |
+ i := i | |
+ px.Go(func(px process.Process) { | |
+ bs.taskWorker(ctx, i) | |
+ }) | |
+ } | |
+ | |
+ if bs.provideEnabled { | |
+ // Start up a worker to manage sending out provides messages | |
+ px.Go(func(px process.Process) { | |
+ bs.provideCollector(ctx) | |
+ }) | |
+ | |
+		// Spawn multiple workers to handle incoming blocks; | |
+		// consider increasing the number if providing blocks bottlenecks | |
+		// file transfers | |
+ px.Go(bs.provideWorker) | |
+ } | |
+} | |
+ | |
+func (bs *Server) taskWorker(ctx context.Context, id int) { | |
+ defer log.Debug("bitswap task worker shutting down...") | |
+ log := log.With("ID", id) | |
+ for { | |
+ log.Debug("Bitswap.TaskWorker.Loop") | |
+ select { | |
+ case nextEnvelope := <-bs.engine.Outbox(): | |
+ select { | |
+ case envelope, ok := <-nextEnvelope: | |
+ if !ok { | |
+ continue | |
+ } | |
+ | |
+ start := time.Now() | |
+ | |
+ // TODO: Only record message as sent if there was no error? | |
+ // Ideally, yes. But we'd need some way to trigger a retry and/or drop | |
+ // the peer. | |
+ bs.engine.MessageSent(envelope.Peer, envelope.Message) | |
+ if bs.tracer != nil { | |
+ bs.tracer.MessageSent(envelope.Peer, envelope.Message) | |
+ } | |
+ bs.sendBlocks(ctx, envelope) | |
+ | |
+ dur := time.Since(start) | |
+ bs.sendTimeHistogram.Observe(dur.Seconds()) | |
+ | |
+ case <-ctx.Done(): | |
+ return | |
+ } | |
+ case <-ctx.Done(): | |
+ return | |
+ } | |
+ } | |
+} | |
+ | |
+func (bs *Server) logOutgoingBlocks(env *decision.Envelope) { | |
+ if ce := sflog.Check(zap.DebugLevel, "sent message"); ce == nil { | |
+ return | |
+ } | |
+ | |
+ self := bs.network.Self() | |
+ | |
+ for _, blockPresence := range env.Message.BlockPresences() { | |
+ c := blockPresence.Cid | |
+ switch blockPresence.Type { | |
+ case pb.Message_Have: | |
+ log.Debugw("sent message", | |
+ "type", "HAVE", | |
+ "cid", c, | |
+ "local", self, | |
+ "to", env.Peer, | |
+ ) | |
+ case pb.Message_DontHave: | |
+ log.Debugw("sent message", | |
+ "type", "DONT_HAVE", | |
+ "cid", c, | |
+ "local", self, | |
+ "to", env.Peer, | |
+ ) | |
+ default: | |
+ panic(fmt.Sprintf("unrecognized BlockPresence type %v", blockPresence.Type)) | |
+ } | |
+ | |
+ } | |
+ for _, block := range env.Message.Blocks() { | |
+ log.Debugw("sent message", | |
+ "type", "BLOCK", | |
+ "cid", block.Cid(), | |
+ "local", self, | |
+ "to", env.Peer, | |
+ ) | |
+ } | |
+} | |
+ | |
+func (bs *Server) sendBlocks(ctx context.Context, env *decision.Envelope) { | |
+ // Blocks need to be sent synchronously to maintain proper backpressure | |
+ // throughout the network stack | |
+ defer env.Sent() | |
+ | |
+ err := bs.network.SendMessage(ctx, env.Peer, env.Message) | |
+ if err != nil { | |
+ log.Debugw("failed to send blocks message", | |
+ "peer", env.Peer, | |
+ "error", err, | |
+ ) | |
+ return | |
+ } | |
+ | |
+ bs.logOutgoingBlocks(env) | |
+ | |
+ dataSent := 0 | |
+ blocks := env.Message.Blocks() | |
+ for _, b := range blocks { | |
+ dataSent += len(b.RawData()) | |
+ } | |
+ bs.counterLk.Lock() | |
+ bs.counters.BlocksSent += uint64(len(blocks)) | |
+ bs.counters.DataSent += uint64(dataSent) | |
+ bs.counterLk.Unlock() | |
+ bs.sentHistogram.Observe(float64(env.Message.Size())) | |
+ log.Debugw("sent message", "peer", env.Peer) | |
+} | |
+ | |
+type Stat struct { | |
+ Peers []string | |
+ ProvideBufLen int | |
+ BlocksSent uint64 | |
+ DataSent uint64 | |
+} | |
+ | |
+// Stat returns aggregated statistics about bitswap operations | |
+func (bs *Server) Stat() (Stat, error) { | |
+ bs.counterLk.Lock() | |
+ s := bs.counters | |
+ bs.counterLk.Unlock() | |
+ s.ProvideBufLen = len(bs.newBlocks) | |
+ | |
+ peers := bs.engine.Peers() | |
+ peersStr := make([]string, len(peers)) | |
+ for i, p := range peers { | |
+ peersStr[i] = p.Pretty() | |
+ } | |
+ sort.Strings(peersStr) | |
+ s.Peers = peersStr | |
+ | |
+ return s, nil | |
+} | |
+ | |
+// NotifyNewBlocks announces the existence of blocks to this bitswap service. The | |
+// service will potentially notify its peers. | |
+// Bitswap itself doesn't store new blocks. It is the caller's responsibility to ensure | |
+// that those blocks are available in the blockstore before calling this function. | |
+func (bs *Server) NotifyNewBlocks(ctx context.Context, blks ...blocks.Block) error { | |
+ select { | |
+ case <-bs.process.Closing(): | |
+ return errors.New("bitswap is closed") | |
+ default: | |
+ } | |
+ | |
+ // Send wanted blocks to decision engine | |
+ bs.engine.NotifyNewBlocks(blks) | |
+ | |
+ // If the reprovider is enabled, send block to reprovider | |
+ if bs.provideEnabled { | |
+ for _, blk := range blks { | |
+ select { | |
+ case bs.newBlocks <- blk.Cid(): | |
+ // send block off to be reprovided | |
+ case <-bs.process.Closing(): | |
+ return bs.process.Close() | |
+ } | |
+ } | |
+ } | |
+ | |
+ return nil | |
+} | |
+ | |
+func (bs *Server) provideCollector(ctx context.Context) { | |
+ defer close(bs.provideKeys) | |
+ var toProvide []cid.Cid | |
+ var nextKey cid.Cid | |
+ var keysOut chan cid.Cid | |
+ | |
+ for { | |
+ select { | |
+ case blkey, ok := <-bs.newBlocks: | |
+ if !ok { | |
+ log.Debug("newBlocks channel closed") | |
+ return | |
+ } | |
+ | |
+ if keysOut == nil { | |
+ nextKey = blkey | |
+ keysOut = bs.provideKeys | |
+ } else { | |
+ toProvide = append(toProvide, blkey) | |
+ } | |
+ case keysOut <- nextKey: | |
+ if len(toProvide) > 0 { | |
+ nextKey = toProvide[0] | |
+ toProvide = toProvide[1:] | |
+ } else { | |
+ keysOut = nil | |
+ } | |
+ case <-ctx.Done(): | |
+ return | |
+ } | |
+ } | |
+} | |
+ | |
+func (bs *Server) provideWorker(px process.Process) { | |
+ // FIXME: OnClosingContext returns a _custom_ context type. | |
+ // Unfortunately, deriving a new cancelable context from this custom | |
+ // type fires off a goroutine. To work around this, we create a single | |
+ // cancelable context up-front and derive all sub-contexts from that. | |
+ // | |
+ // See: https://github.com/ipfs/go-ipfs/issues/5810 | |
+ ctx := procctx.OnClosingContext(px) | |
+ ctx, cancel := context.WithCancel(ctx) | |
+ defer cancel() | |
+ | |
+ limit := make(chan struct{}, provideWorkerMax) | |
+ | |
+ limitedGoProvide := func(k cid.Cid, wid int) { | |
+ defer func() { | |
+ // replace token when done | |
+ <-limit | |
+ }() | |
+ | |
+ log.Debugw("Bitswap.ProvideWorker.Start", "ID", wid, "cid", k) | |
+ defer log.Debugw("Bitswap.ProvideWorker.End", "ID", wid, "cid", k) | |
+ | |
+ ctx, cancel := context.WithTimeout(ctx, defaults.ProvideTimeout) // timeout ctx | |
+ defer cancel() | |
+ | |
+ if err := bs.network.Provide(ctx, k); err != nil { | |
+ log.Warn(err) | |
+ } | |
+ } | |
+ | |
+ // worker spawner, reads from bs.provideKeys until it closes, spawning a | |
+ // _ratelimited_ number of workers to handle each key. | |
+ for wid := 2; ; wid++ { | |
+ log.Debug("Bitswap.ProvideWorker.Loop") | |
+ | |
+ select { | |
+ case <-px.Closing(): | |
+ return | |
+ case k, ok := <-bs.provideKeys: | |
+ if !ok { | |
+ log.Debug("provideKeys channel closed") | |
+ return | |
+ } | |
+ select { | |
+ case <-px.Closing(): | |
+ return | |
+ case limit <- struct{}{}: | |
+ go limitedGoProvide(k, wid) | |
+ } | |
+ } | |
+ } | |
+} | |
+ | |
+func (bs *Server) ReceiveMessage(ctx context.Context, p peer.ID, incoming message.BitSwapMessage) { | |
+ // This call records changes to wantlists, blocks received, | |
+	// and number of bytes transferred. | |
+ bs.engine.MessageReceived(ctx, p, incoming) | |
+ // TODO: this is bad, and could be easily abused. | |
+ // Should only track *useful* messages in ledger | |
+ | |
+ if bs.tracer != nil { | |
+ bs.tracer.MessageReceived(p, incoming) | |
+ } | |
+} | |
+ | |
+// ReceivedBlocks notifies the decision engine that a peer behaved well and | |
+// gave us useful data, potentially increasing its score and making us send | |
+// them more data in exchange. | |
+func (bs *Server) ReceivedBlocks(from peer.ID, blks []blocks.Block) { | |
+ bs.engine.ReceivedBlocks(from, blks) | |
+} | |
+ | |
+func (*Server) ReceiveError(err error) { | |
+ log.Infof("Bitswap Client ReceiveError: %s", err) | |
+ // TODO log the network error | |
+ // TODO bubble the network error up to the parent context/error logger | |
+ | |
+} | |
+func (bs *Server) PeerConnected(p peer.ID) { | |
+ bs.engine.PeerConnected(p) | |
+} | |
+func (bs *Server) PeerDisconnected(p peer.ID) { | |
+ bs.engine.PeerDisconnected(p) | |
+} | |
+ | |
+// Close is called to shut down the Server. | |
+func (bs *Server) Close() error { | |
+ return bs.process.Close() | |
+} | |
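As a usage sketch for the options defined above, a caller might wire up the server roughly as follows. The package name, helper name and option values are illustrative; the network and blockstore are assumed to be built elsewhere (for example from a libp2p host and a datastore).

package bitswapexample

import (
	"context"

	blockstore "github.com/ipfs/go-ipfs-blockstore"
	bsnet "github.com/ipfs/go-libipfs/bitswap/network"
	"github.com/ipfs/go-libipfs/bitswap/server"
)

// newTunedServer builds a bitswap server with a few non-default options.
func newTunedServer(ctx context.Context, net bsnet.BitSwapNetwork, bstore blockstore.Blockstore) *server.Server {
	return server.New(ctx, net, bstore,
		server.TaskWorkerCount(8),                // more concurrent senders
		server.MaxOutstandingBytesPerPeer(1<<20), // cap in-flight data per peer
		server.ProvideEnabled(false),             // skip provide announcements
	)
}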
diff -Naur --color b/vendor/github.com/ipfs/go-libipfs/bitswap/tracer/tracer.go a/vendor/github.com/ipfs/go-libipfs/bitswap/tracer/tracer.go | |
--- b/vendor/github.com/ipfs/go-libipfs/bitswap/tracer/tracer.go 1970-01-01 01:00:00.000000000 +0100 | |
+++ a/vendor/github.com/ipfs/go-libipfs/bitswap/tracer/tracer.go 2023-01-30 20:34:49.308797731 +0100 | |
@@ -0,0 +1,13 @@ | |
+package tracer | |
+ | |
+import ( | |
+ bsmsg "github.com/ipfs/go-libipfs/bitswap/message" | |
+ peer "github.com/libp2p/go-libp2p/core/peer" | |
+) | |
+ | |
+// Tracer provides methods to access all messages sent and received by Bitswap. | |
+// This interface can be used to implement various statistics (this is the original intent). | |
+type Tracer interface { | |
+ MessageReceived(peer.ID, bsmsg.BitSwapMessage) | |
+ MessageSent(peer.ID, bsmsg.BitSwapMessage) | |
+} | |
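Since Tracer is the extension point above, a sketch of a trivial implementation may help; this counting tracer is hypothetical and could be handed to the server via the WithTracer option shown earlier in this patch.

package tracerexample

import (
	"sync/atomic"

	bsmsg "github.com/ipfs/go-libipfs/bitswap/message"
	"github.com/ipfs/go-libipfs/bitswap/tracer"
	"github.com/libp2p/go-libp2p/core/peer"
)

// countingTracer counts every Bitswap message seen in each direction.
type countingTracer struct {
	received, sent atomic.Uint64
}

// Compile-time check that countingTracer satisfies the Tracer interface.
var _ tracer.Tracer = (*countingTracer)(nil)

func (t *countingTracer) MessageReceived(p peer.ID, msg bsmsg.BitSwapMessage) {
	t.received.Add(1)
}

func (t *countingTracer) MessageSent(p peer.ID, msg bsmsg.BitSwapMessage) {
	t.sent.Add(1)
}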
diff -Naur --color b/vendor/modules.txt a/vendor/modules.txt | |
--- b/vendor/modules.txt 2023-01-30 20:34:51.008813442 +0100 | |
+++ a/vendor/modules.txt 2023-01-30 20:34:49.928803463 +0100 | |
@@ -825,32 +825,6 @@ | |
# github.com/ipfs/go-bitfield v1.0.0 | |
## explicit; go 1.12 | |
github.com/ipfs/go-bitfield | |
-# github.com/ipfs/go-bitswap v0.10.2 | |
-## explicit; go 1.18 | |
-github.com/ipfs/go-bitswap | |
-github.com/ipfs/go-bitswap/client | |
-github.com/ipfs/go-bitswap/client/internal | |
-github.com/ipfs/go-bitswap/client/internal/blockpresencemanager | |
-github.com/ipfs/go-bitswap/client/internal/getter | |
-github.com/ipfs/go-bitswap/client/internal/messagequeue | |
-github.com/ipfs/go-bitswap/client/internal/notifications | |
-github.com/ipfs/go-bitswap/client/internal/peermanager | |
-github.com/ipfs/go-bitswap/client/internal/providerquerymanager | |
-github.com/ipfs/go-bitswap/client/internal/session | |
-github.com/ipfs/go-bitswap/client/internal/sessioninterestmanager | |
-github.com/ipfs/go-bitswap/client/internal/sessionmanager | |
-github.com/ipfs/go-bitswap/client/internal/sessionpeermanager | |
-github.com/ipfs/go-bitswap/client/wantlist | |
-github.com/ipfs/go-bitswap/internal | |
-github.com/ipfs/go-bitswap/internal/defaults | |
-github.com/ipfs/go-bitswap/message | |
-github.com/ipfs/go-bitswap/message/pb | |
-github.com/ipfs/go-bitswap/metrics | |
-github.com/ipfs/go-bitswap/network | |
-github.com/ipfs/go-bitswap/network/internal | |
-github.com/ipfs/go-bitswap/server | |
-github.com/ipfs/go-bitswap/server/internal/decision | |
-github.com/ipfs/go-bitswap/tracer | |
# github.com/ipfs/go-block-format v0.1.1 | |
## explicit; go 1.18 | |
github.com/ipfs/go-block-format | |
@@ -965,8 +939,8 @@ | |
# github.com/ipfs/go-ipfs-pq v0.0.2 | |
## explicit | |
github.com/ipfs/go-ipfs-pq | |
-# github.com/ipfs/go-ipfs-routing v0.2.1 | |
-## explicit; go 1.16 | |
+# github.com/ipfs/go-ipfs-routing v0.3.0 | |
+## explicit; go 1.18 | |
github.com/ipfs/go-ipfs-routing/none | |
# github.com/ipfs/go-ipfs-util v0.0.2 | |
## explicit; go 1.14 | |
@@ -985,8 +959,32 @@ | |
## explicit; go 1.18 | |
github.com/ipfs/go-ipns | |
github.com/ipfs/go-ipns/pb | |
-# github.com/ipfs/go-libipfs v0.3.0 | |
+# github.com/ipfs/go-libipfs v0.4.0 | |
## explicit; go 1.19 | |
+github.com/ipfs/go-libipfs/bitswap | |
+github.com/ipfs/go-libipfs/bitswap/client | |
+github.com/ipfs/go-libipfs/bitswap/client/internal | |
+github.com/ipfs/go-libipfs/bitswap/client/internal/blockpresencemanager | |
+github.com/ipfs/go-libipfs/bitswap/client/internal/getter | |
+github.com/ipfs/go-libipfs/bitswap/client/internal/messagequeue | |
+github.com/ipfs/go-libipfs/bitswap/client/internal/notifications | |
+github.com/ipfs/go-libipfs/bitswap/client/internal/peermanager | |
+github.com/ipfs/go-libipfs/bitswap/client/internal/providerquerymanager | |
+github.com/ipfs/go-libipfs/bitswap/client/internal/session | |
+github.com/ipfs/go-libipfs/bitswap/client/internal/sessioninterestmanager | |
+github.com/ipfs/go-libipfs/bitswap/client/internal/sessionmanager | |
+github.com/ipfs/go-libipfs/bitswap/client/internal/sessionpeermanager | |
+github.com/ipfs/go-libipfs/bitswap/client/wantlist | |
+github.com/ipfs/go-libipfs/bitswap/internal | |
+github.com/ipfs/go-libipfs/bitswap/internal/defaults | |
+github.com/ipfs/go-libipfs/bitswap/message | |
+github.com/ipfs/go-libipfs/bitswap/message/pb | |
+github.com/ipfs/go-libipfs/bitswap/metrics | |
+github.com/ipfs/go-libipfs/bitswap/network | |
+github.com/ipfs/go-libipfs/bitswap/network/internal | |
+github.com/ipfs/go-libipfs/bitswap/server | |
+github.com/ipfs/go-libipfs/bitswap/server/internal/decision | |
+github.com/ipfs/go-libipfs/bitswap/tracer | |
github.com/ipfs/go-libipfs/blocks | |
# github.com/ipfs/go-log v1.0.5 | |
## explicit; go 1.12 | |
Saved to /tmp/tmp.LRvzpLRhCc/mod.diff |