-
-
Save msoxzw/8ae5c488edbc2985d41563c4d9c9cc04 to your computer and use it in GitHub Desktop.
import argparse | |
import asyncio | |
import struct | |
# Pre-compiled struct layout: one network-order (big-endian) unsigned 32-bit
# integer, used to encode the packet-count request sent to the server.
_I = struct.Struct('!I')
class UDPClient(asyncio.DatagramProtocol):
    """Receives the benchmark payload and records the completion time.

    Counts down ``size`` (the total number of payload bytes expected) as
    datagrams arrive, and resolves the ``time`` future with the event
    loop's clock once everything has been received.
    """

    def __init__(self, size, time):
        # Must be constructed while an event loop is running
        # (create_datagram_endpoint guarantees this).
        self.loop = asyncio.get_running_loop()
        self.size = size  # bytes still expected from the server
        self.time = time  # future resolved with the finish timestamp

    def datagram_received(self, data, addr):
        self.size -= len(data)
        # Use <= 0 rather than the original `not self.size`: if delivery ever
        # overshoots the expected total (a datagram larger than the remaining
        # count), an exact-zero test never fires and the future is left
        # unresolved forever.  The done() guard keeps a late datagram from
        # calling set_result() twice (InvalidStateError).
        if self.size <= 0 and not self.time.done():
            self.time.set_result(self.loop.time())
class UDPServer(asyncio.DatagramProtocol):
    """Answers a request datagram with a burst of fixed-size packets.

    The client sends a single datagram containing a packet count as a
    network-order unsigned int; the server fires that many copies of its
    pre-built payload straight back at the sender.
    """

    def __init__(self, packet):
        self.packet = packet   # payload blasted back for every request
        self.transport = None  # populated once the endpoint is up

    def connection_made(self, transport):
        self.transport = transport

    def datagram_received(self, data, addr):
        (requested,) = struct.unpack('!I', data)
        # Hoist the bound method and payload out of the (potentially
        # 65535-iteration) send loop.
        send, payload = self.transport.sendto, self.packet
        for _ in range(requested):
            send(payload, addr)
async def benchmark(count=65535, packet_size=1024, host='127.0.0.1', port=9999,
                    timeout=None):
    """Measure UDP throughput over loopback and print it in Mbps.

    Starts a ``UDPServer`` and a ``UDPClient`` on the running loop, asks
    the server for *count* packets of *packet_size* bytes each, waits until
    the client has received them all, and prints the transfer speed.

    Args:
        count: number of packets the server should send back.
        packet_size: size of each packet in bytes.
        host, port: address the server binds to (loopback by default).
        timeout: optional number of seconds to wait for completion;
            ``None`` (the default, matching the historical behavior) waits
            forever.  With lossy datagram delivery the benchmark can
            otherwise hang indefinitely.

    Returns:
        The measured speed in Mbps (also printed), for programmatic use.

    Raises:
        asyncio.TimeoutError: if *timeout* elapses before all data arrives.
    """
    loop = asyncio.get_running_loop()
    finish_time = loop.create_future()
    size = count * packet_size  # total payload bytes expected back
    server, _ = await loop.create_datagram_endpoint(
        lambda: UDPServer(bytes(packet_size)),
        local_addr=(host, port),
    )
    client, _ = await loop.create_datagram_endpoint(
        lambda: UDPClient(size, finish_time),
        remote_addr=(host, port),
    )
    try:
        # Take the start timestamp before the request leaves, and keep the
        # send inside the try so the transports are closed even if it raises
        # (the original leaked both transports on a sendto failure).
        start_time = loop.time()
        client.sendto(_I.pack(count), (host, port))
        end_time = await asyncio.wait_for(finish_time, timeout)
        speed = size * 8 / 1000 / 1000 / (end_time - start_time)
        print('Transfer speed: {:.3f} Mbps'.format(speed))
        return speed
    finally:
        client.close()
        server.close()
if __name__ == '__main__':
    # CLI entry point: optionally override the number of packets requested.
    parser = argparse.ArgumentParser(
        description='Loopback UDP throughput benchmark.')
    # Default aligned with benchmark()'s own default: the original script
    # used 65536 here, inconsistent with the function's 65535.
    parser.add_argument('count', type=int, nargs='?', default=65535,
                        help='number of packets the server sends back')
    args = parser.parse_args()
    asyncio.run(benchmark(args.count))
I'm getting a nagging feeling that buffering does not work at all on Linux SelectorLoop with UDP 🤔
Well, I'm just running it locally, like this:
python test.py
And then it sends packets on loopback. It does not try to connect anywhere. And I definitely use no firewall.
When I stop it with the debugger, it is always stuck in `EpollSelector._selector.poll(timeout, max_ev)`. Either this behaviour results from differences between the socket usage semantics of Windows and Linux, or there is a horrible bug in the SelectorLoop implementation.
A quick Wireshark session shows that all the packets leave the "server side", but only 93 (in the case of 1024-byte packets) ever reach the "client side". Decreasing the packet size to 512 increases the number of received packets to 167; with 64000-byte packets, only 4 arrive. So it seems like a receive-buffer problem.
The test does not work on Ubuntu (and Debian) if trying to send over 93 packets: it gets stuck in the select loop or something like that... Am I doing something wrong?