-
-
Save tpisto/5bd1ca71631289609fdfa89054ff0ee1 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <iostream>

#include <uv.h>
#include <uvw.hpp>
uv_loop_t *loop; | |
uv_pipe_t queue; | |
typedef struct { | |
uv_write_t req; | |
uv_buf_t buf; | |
} write_req_t; | |
/**
 * Release a completed write request together with the data buffer it owns.
 *
 * Only valid for requests allocated as write_req_t: the cast below relies on
 * `req` being the first member of write_req_t.
 *
 * @param req completed write request, originally allocated as write_req_t.
 */
void free_write_req(uv_write_t *req) {
    write_req_t *wr = (write_req_t *) req;
    free(wr->buf.base);
    free(wr);
}
/**
 * libuv allocation callback: hand libuv a heap buffer for an incoming read.
 *
 * Ownership of buf->base passes to the read callback, which must free it.
 *
 * @param handle         handle the read is happening on (unused here).
 * @param suggested_size size hint from libuv.
 * @param buf            out-parameter describing the allocated buffer.
 */
void alloc_buffer(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
    (void) handle;  // not needed for a plain malloc'd buffer
    buf->base = static_cast<char *>(malloc(suggested_size));
    // BUG FIX: the original did not check malloc. Report an empty buffer on
    // failure; libuv then delivers UV_ENOBUFS to the read callback instead of
    // the read path dereferencing a NULL base.
    buf->len = (buf->base != NULL) ? suggested_size : 0;
}
/**
 * Write-completion callback: log any error, then always release the request
 * and the buffer it owns.
 *
 * @param req    completed write request (actually a write_req_t, see free_write_req).
 * @param status 0 on success, a negative libuv error code otherwise.
 */
void echo_write(uv_write_t *req, int status) {
    if (status) {
        fprintf(stderr, "Write error %s\n", uv_err_name(status));
    }
    free_write_req(req);
}
void echo_read(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) { | |
if (nread > 0) { | |
write_req_t *req = (write_req_t*) malloc(sizeof(write_req_t)); | |
req->buf = uv_buf_init(buf->base, nread); | |
uv_write((uv_write_t*) req, client, &req->buf, 1, echo_write); | |
return; | |
} | |
if (nread < 0) { | |
if (nread != UV_EOF) | |
fprintf(stderr, "Read error %s\n", uv_err_name(nread)); | |
uv_close((uv_handle_t*) client, NULL); | |
} | |
free(buf->base); | |
} | |
void on_new_connection(uv_stream_t *q, ssize_t nread, const uv_buf_t *buf) { | |
if (nread < 0) { | |
if (nread != UV_EOF) | |
fprintf(stderr, "Read error %s\n", uv_err_name(nread)); | |
uv_close((uv_handle_t*) q, NULL); | |
return; | |
} | |
uv_pipe_t *pipe = (uv_pipe_t*) q; | |
if (!uv_pipe_pending_count(pipe)) { | |
fprintf(stderr, "No pending count\n"); | |
return; | |
} | |
uv_handle_type pending = uv_pipe_pending_type(pipe); | |
assert(pending == UV_TCP); | |
uv_tcp_t *client = (uv_tcp_t*) malloc(sizeof(uv_tcp_t)); | |
uv_tcp_init(loop, client); | |
if (uv_accept(q, (uv_stream_t*) client) == 0) { | |
uv_os_fd_t fd; | |
uv_fileno((const uv_handle_t*) client, &fd); | |
fprintf(stderr, "Worker %d: Accepted fd %d\n", getpid(), fd); | |
uv_read_start((uv_stream_t*) client, alloc_buffer, echo_read); | |
} | |
else { | |
uv_close((uv_handle_t*) client, NULL); | |
} | |
} | |
// ************* | |
// | |
// This is the new UVW based socket listener that should get the handle from IPC | |
// | |
// ************* | |
/**
 * UVW-based replacement for the C listener above: open the IPC pipe on
 * stdin (fd 0) and, for each message from the master process, accept the
 * pending TCP handle and start reading from it.
 *
 * @param loop the uvw loop the pipe and clients are bound to.
 *
 * NOTE(review): the accept below is marked as failing by the original
 * author. In the raw-libuv version, uv_pipe_pending_count/type are checked
 * before uv_accept; presumably uvw requires the same precondition via
 * socket.pending() before accept() is valid — TODO confirm against the uvw
 * StreamHandle/PipeHandle documentation.
 */
void listen(uvw::Loop &loop) {
    // true => open the pipe in IPC mode so handles can travel over it.
    auto server = loop.resource<uvw::PipeHandle>(true);

    server->on<uvw::ErrorEvent>([](const uvw::ErrorEvent &errorEvent, uvw::PipeHandle &) { std::cerr << errorEvent.what(); });
    server->on<uvw::CloseEvent>([](const uvw::CloseEvent &, uvw::PipeHandle &socket) { std::cerr << "close"; });
    server->on<uvw::EndEvent>([](const uvw::EndEvent &, uvw::PipeHandle &socket) { std::cerr << "end"; socket.close(); });

    // A DataEvent on an IPC pipe means the master sent us a payload byte,
    // possibly with a TCP handle attached (see socket.pending()).
    server->on<uvw::DataEvent>([](const uvw::DataEvent &, uvw::PipeHandle &socket) {
        std::cerr << "\n\nPENDING" << socket.pending() << "\n";
        // TCP client resource on the same loop as the pipe.
        std::shared_ptr<uvw::TCPHandle> tcpClient = socket.loop().resource<uvw::TCPHandle>();
        tcpClient->once<uvw::EndEvent>([](const uvw::EndEvent &, uvw::TCPHandle &client) { client.close(); });
        tcpClient->on<uvw::DataEvent>([](const uvw::DataEvent &, uvw::TCPHandle &) {
            std::cerr << "Data received!!!";
        });
        // THIS FAILS! (original author's note — see function comment above)
        socket.accept(*tcpClient);
        tcpClient->read();
    });

    server->open(0);  // attach the pipe to stdin, inherited from the master
    server->read();
}
int main() { | |
// Original C -based system | |
// loop = uv_default_loop(); | |
// uv_pipe_init(loop, &queue, 1 /* ipc */); | |
// uv_pipe_open(&queue, 0); | |
// uv_read_start((uv_stream_t*)&queue, alloc_buffer, on_new_connection); | |
// return uv_run(loop, UV_RUN_DEFAULT); | |
// Let's try to do the same with UVW | |
auto loop = uvw::Loop::getDefault(); | |
listen(*loop); | |
loop->run(); | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment