Skip to content

Instantly share code, notes, and snippets.

@vinipsmaker
Last active August 29, 2015 14:27
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save vinipsmaker/c2455df44e53bb305ca1 to your computer and use it in GitHub Desktop.
Save vinipsmaker/c2455df44e53bb305ca1 to your computer and use it in GitHub Desktop.
weighttp -n 20000 -c 1000 -k http://localhost:8080/
#include <iostream>
#include <algorithm>
#include <boost/utility/string_ref.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/http/buffered_socket.hpp>
#include <boost/http/algorithm.hpp>
using namespace std;
using namespace boost;
// One HTTP connection. The object keeps itself alive through
// shared_from_this() captures in every asynchronous handler and drives a
// read-request -> (100-continue) -> drain-body -> write-response loop.
class connection: public std::enable_shared_from_this<connection>
{
public:
    typedef boost::system::error_code ec_t;

    // Report a completion error. EOF only means the peer closed the
    // connection, so it is logged and the connection is dropped; any other
    // error aborts the whole process (this is a benchmark toy, not a
    // production server).
    void handle_ec(ec_t ec)
    {
        auto self = shared_from_this();
        if (ec != system::error_code{asio::error::eof}) {
            cerr << '[' << self->counter << "] Aborting on error: "
                 << ec << endl;
            std::exit(1);
        }
        cout << '[' << self->counter << "] Error: " << ec << endl;
    }

    // Entry point of the per-connection state machine: read the next
    // request head (method, path, headers + whatever body fits) and
    // continue with schedule_continued().
    void operator()()
    {
        auto self = shared_from_this();
        cout << "--\n[" << self->counter << "] About to receive a new message"
             << endl;
        self->socket.async_read_request(self->method, self->path, self->message,
                                        [self](ec_t ec) {
            if (ec) {
                self->handle_ec(ec);
                return;
            }
            self->message.body().clear(); // freeing not used resources
            self->schedule_continued();
        });
    }

    // If the client sent "Expect: 100-continue", reply with the interim
    // "100 Continue" response before draining the body; otherwise go
    // straight to schedule_empty_read_state().
    void schedule_continued()
    {
        auto self = shared_from_this();
        if (http::request_continue_required(self->message)) {
            cout << '[' << self->counter
                 << "] Continue required. About to send"
                    " \"100-continue\""
                 << std::endl;
            self->socket.async_write_response_continue([self](ec_t ec) {
                if (ec) {
                    self->handle_ec(ec);
                    return;
                }
                self->schedule_empty_read_state();
            });
        } else {
            self->schedule_empty_read_state();
        }
    }

    // Pump the read side until the whole message (body + trailers) has
    // been consumed, then hand off to schedule_write_message().
    void schedule_empty_read_state()
    {
        auto self = shared_from_this();
        cout << '[' << self->counter << "] Message not fully received" << endl;
        switch (self->socket.read_state()) {
        case http::read_state::message_ready:
            // Head parsed, body (or more of it) still pending.
            cout << '[' << self->counter << "] About to receive some body"
                 << endl;
            self->socket.async_read_some(self->message, [self](ec_t ec) {
                if (ec) {
                    self->handle_ec(ec);
                    return;
                }
                self->schedule_empty_read_state();
            });
            break;
        case http::read_state::body_ready:
            // Body done; chunked messages may still carry trailers.
            cout << '[' << self->counter << "] About to receive trailers"
                 << endl;
            self->socket.async_read_trailers(self->message, [self](ec_t ec) {
                if (ec) {
                    self->handle_ec(ec);
                    return;
                }
                self->schedule_empty_read_state();
            });
            break;
        default:
            // empty/finished: request fully consumed, time to reply.
            self->schedule_write_message();
        }
    }

    // Build and send the reply: "200 OK" whose body echoes
    // "<Host header><request path>".
    void schedule_write_message()
    {
        auto self = shared_from_this();
        cout << '[' << self->counter << "] Message received. State = "
             << int(self->socket.read_state()) << endl;
        cout << '[' << self->counter << "] Method: " << self->method
             << endl;
        cout << '[' << self->counter << "] Path: " << self->path
             << endl;
        std::string request_uri;
        {
            auto host = self->message.headers().find("host");
            if (host != self->message.headers().end()) {
                cout << '[' << self->counter << "] Host header: "
                     << host->second << endl;
                request_uri = host->second;
            }
        }
        request_uri += self->path;
        std::cout << '[' << self->counter << "] Write state = "
                  << int(self->socket.write_state()) << std::endl;
        cout << '[' << self->counter << "] About to send a reply" << endl;
        http::message reply;
        std::copy(request_uri.begin(), request_uri.end(),
                  std::back_inserter(reply.body()));
        self->socket.async_write_response(200, string_ref("OK"), reply,
                                          [self](ec_t ec) {
            // BUGFIX: the write completion error was previously ignored;
            // check it like every other handler in this class does.
            if (ec) {
                self->handle_ec(ec);
                return;
            }
            self->on_write_response();
        });
    }

    // After a successful reply, loop back and read the next request on
    // the same (keep-alive) connection while it remains open.
    void on_write_response()
    {
        auto self = shared_from_this();
        if (self->socket.is_open())
            (*self)();
    }

    // Expose the raw TCP socket so the acceptor can fill it in.
    asio::ip::tcp::socket &tcp_layer()
    {
        return socket.next_layer();
    }

    // Factory: the constructor is private because enable_shared_from_this
    // requires the object to be owned by a shared_ptr from the start.
    static std::shared_ptr<connection> make_connection(asio::io_service &ios,
                                                       int counter)
    {
        return std::shared_ptr<connection>{new connection{ios, counter}};
    }

private:
    connection(asio::io_service &ios, int counter)
        : socket(ios)
        , counter(counter)
    {}

    http::buffered_socket socket; // HTTP state machine over the TCP socket
    int counter;                  // connection id, used as log prefix
    std::string method;
    std::string path;
    http::message message;        // reused for every request on this socket
};
int main()
{
asio::io_service ios;
asio::ip::tcp::acceptor acceptor(ios,
asio::ip::tcp
::endpoint(asio::ip::tcp::v6(), 8080));
auto work = [&acceptor](asio::yield_context yield) {
int counter = 0;
for ( ; true ; ++counter ) {
try {
auto connection
= connection::make_connection(acceptor.get_io_service(),
counter);
cout << "About to accept a new connection" << endl;
acceptor.async_accept(connection->tcp_layer(), yield);
auto handle_connection
= [connection](asio::yield_context yield) mutable {
(*connection)();
};
spawn(acceptor.get_io_service(), handle_connection);
} catch (std::exception &e) {
cerr << "Aborting on exception: " << e.what() << endl;
std::exit(1);
}
}
};
cout << "About to spawn" << endl;
spawn(ios, work);
cout << "About to run" << endl;
ios.run();
return 0;
}
weighttp - a lightweight and simple webserver benchmarking tool
starting benchmark...
spawning thread #1: 1000 concurrent requests, 20000 total requests
progress: 10% done
progress: 20% done
progress: 30% done
progress: 40% done
progress: 50% done
progress: 60% done
progress: 70% done
progress: 80% done
progress: 90% done
progress: 100% done
finished in 8 sec, 890 millisec and 479 microsec, 2249 req/s, 118 kbyte/s
requests: 20000 total, 20000 started, 20000 done, 20000 succeeded, 0 failed, 0 errored
status codes: 20000 2xx, 0 3xx, 0 4xx, 0 5xx
traffic: 1080000 bytes total, 780000 bytes http, 300000 bytes data
#include <pion/http/server.hpp>
#include <pion/http/response_writer.hpp>
#include <boost/make_shared.hpp>
using namespace pion;
// Thin wrapper around a pion HTTP server: owns the server instance and
// registers a single request handler for the root resource.
class WebServer
{
public:
/**
* Start the web server: creates the pion server, registers the
* handler for "/" and begins accepting connections.
* @param _port the TCP port to listen on.
*/
void start(unsigned int _port);
/**
* Stop the web server. Safe to call even if start() was never called.
*/
void stop();
private:
/**
* Handle http requests: replies with "<host header><resource path>".
* @param _httpRequest the request
* @param _tcpConn the connection
*/
void requestHandler(http::request_ptr& _httpRequest, tcp::connection_ptr& _tcpConn);
// Owning pointer to the pion server; null until start() is called.
http::server_ptr m_httpServer;
};
/**
* Start the web server: create it, register the handler, begin listening.
* @param _port the TCP port to listen on.
*/
void WebServer::start(unsigned int _port)
{
// Create a web server and specify the port on which it will listen.
m_httpServer = boost::make_shared<http::server>(_port);
// Register the handler for the root resource; pion dispatches matching
// requests to WebServer::requestHandler with (request, connection).
m_httpServer->add_resource("/",
boost::bind(&WebServer::requestHandler, this, _1, _2));
// Start the web server (begins accepting connections on its own threads).
m_httpServer->start();
}
/**
* Stop the web server. Safe to call even if start() was never invoked.
*/
void WebServer::stop()
{
// shared_ptr converts contextually to bool; this replaces the C-style
// "get() != NULL" comparison with the idiomatic null check.
if (m_httpServer)
{
m_httpServer->stop();
}
}
/**
* Handle http requests: replies with "<host header><resource path>" as
* the response body.
* @param _httpRequest the request
* @param _tcpConn the connection
*/
void WebServer::requestHandler(http::request_ptr& _httpRequest, tcp::connection_ptr& _tcpConn)
{
// The writer keeps the connection alive and calls finish() once the
// response has been sent.
http::response_writer_ptr writer(
http::response_writer::create(
_tcpConn,
*_httpRequest,
boost::bind(&tcp::connection::finish, _tcpConn)));
http::response& r = writer->get_response();
// (An unused "params = _httpRequest->get_queries()" local was removed;
// the query string is never inspected by this handler.)
std::string request_uri = _httpRequest->get_header("host");
request_uri += _httpRequest->get_resource();
// Set the status line first, then queue the body and send everything.
r.set_status_code(http::types::RESPONSE_CODE_OK);
r.set_status_message(http::types::RESPONSE_MESSAGE_OK);
writer->write(request_uri);
writer->send();
}
int main()
{
WebServer server;
server.start(8080);
// Block the main thread forever; the pion server runs on its own threads.
// NOTE(review): POSIX sleep() takes SECONDS, so sleep(5000) parks for
// ~83 minutes per iteration — presumably milliseconds were intended.
// Harmless here since the loop never exits, but sleep() is declared in
// <unistd.h>, which is not included directly — confirm it arrives via an
// indirect include on this platform.
while (1)
{
sleep(5000);
}
return 0;
}
weighttp - a lightweight and simple webserver benchmarking tool
starting benchmark...
spawning thread #1: 1000 concurrent requests, 20000 total requests
progress: 10% done
progress: 20% done
progress: 30% done
progress: 40% done
progress: 50% done
progress: 60% done
progress: 70% done
progress: 80% done
progress: 90% done
progress: 100% done
finished in 7 sec, 285 millisec and 887 microsec, 2745 req/s, 209 kbyte/s
requests: 20000 total, 20000 started, 20000 done, 20000 succeeded, 0 failed, 0 errored
status codes: 20000 2xx, 0 3xx, 0 4xx, 0 5xx
traffic: 1560000 bytes total, 1260000 bytes http, 300000 bytes data
#include <Poco/Net/HTTPServer.h>
#include <Poco/Net/HTTPRequestHandler.h>
#include <Poco/Net/HTTPRequestHandlerFactory.h>
#include <Poco/Net/HTTPServerParams.h>
#include <Poco/Net/HTTPServerRequest.h>
#include <Poco/Net/HTTPServerResponse.h>
#include <Poco/Net/HTTPServerParams.h>
#include <Poco/Net/ServerSocket.h>
#include <Poco/Exception.h>
#include <Poco/ThreadPool.h>
#include <Poco/Util/ServerApplication.h>
#include <Poco/Util/Option.h>
#include <Poco/Util/OptionSet.h>
#include <Poco/Util/HelpFormatter.h>
#include <iostream>
using Poco::Net::ServerSocket;
using Poco::Net::HTTPRequestHandler;
using Poco::Net::HTTPRequestHandlerFactory;
using Poco::Net::HTTPServer;
using Poco::Net::HTTPServerRequest;
using Poco::Net::HTTPServerResponse;
using Poco::Net::HTTPServerParams;
using Poco::ThreadPool;
using Poco::Util::ServerApplication;
using Poco::Util::Application;
using Poco::Util::Option;
using Poco::Util::OptionSet;
using Poco::Util::OptionCallback;
using Poco::Util::HelpFormatter;
class TimeRequestHandler: public HTTPRequestHandler
{
public:
TimeRequestHandler() {}
void handleRequest(HTTPServerRequest& request,
HTTPServerResponse& response)
{
Application& app = Application::instance();
app.logger().information("Request from "
+ request.clientAddress().toString());
//Timestamp now;
//std::string dt(DateTimeFormatter::format(now, _format));
//response.setChunkedTransferEncoding(true);
//response.setContentType("text/html");
std::ostream& ostr = response.send();
ostr << request.get("host", "") + request.getURI();
}
};
// Factory invoked by HTTPServer for every incoming request.
class TimeRequestHandlerFactory: public HTTPRequestHandlerFactory
{
public:
TimeRequestHandlerFactory() {}

// Returns a fresh handler per request; the HTTPServer takes ownership
// of the pointer and deletes it once the request has been served.
// The request itself is not inspected here, hence the unnamed parameter.
HTTPRequestHandler* createRequestHandler(
const HTTPServerRequest& /*request*/)
{
return new TimeRequestHandler();
}
};
// Poco ServerApplication driving the HTTP server: handles configuration
// loading, command-line options (--help) and the daemon lifecycle.
class HTTPTimeServer: public Poco::Util::ServerApplication
{
public:
HTTPTimeServer(): _helpRequested(false)
{
}
~HTTPTimeServer()
{
}
protected:
// Called by the framework before main(); loads the default config file
// (if any) and then runs the base-class initialization.
void initialize(Application& self)
{
loadConfiguration();
ServerApplication::initialize(self);
}
void uninitialize()
{
ServerApplication::uninitialize();
}
// Registers the --help/-h option on top of the base-class options.
void defineOptions(OptionSet& options)
{
ServerApplication::defineOptions(options);
options.addOption(
Option("help", "h", "display argument help information")
.required(false)
.repeatable(false)
.callback(OptionCallback<HTTPTimeServer>(
this, &HTTPTimeServer::handleHelp)));
}
// Option callback for --help: print usage and flag main() to skip the
// server startup.
void handleHelp(const std::string& name,
const std::string& value)
{
HelpFormatter helpFormatter(options());
helpFormatter.setCommand(commandName());
helpFormatter.setUsage("OPTIONS");
helpFormatter.setHeader(
"A web server that serves the current date and time.");
helpFormatter.format(std::cout);
stopOptionsProcessing();
_helpRequested = true;
}
// Application entry point (invoked by run()). Starts the HTTP server on
// the configured port (default 8080) and blocks until a termination
// request (signal / service stop) arrives.
int main(const std::vector<std::string>& args)
{
if (!_helpRequested)
{
unsigned short port = (unsigned short)
config().getInt("HTTPTimeServer.port", 8080);
ServerSocket svs(port);
// HTTPServer takes ownership of the factory and params pointers.
HTTPServer srv(new TimeRequestHandlerFactory,
svs, new HTTPServerParams);
srv.start();
waitForTerminationRequest();
srv.stop();
}
return Application::EXIT_OK;
}
private:
bool _helpRequested; // set by handleHelp(); suppresses server startup
};
int main(int argc, char** argv)
{
HTTPTimeServer app;
return app.run(argc, argv);
}
weighttp - a lightweight and simple webserver benchmarking tool
starting benchmark...
spawning thread #1: 1000 concurrent requests, 20000 total requests
progress: 10% done
progress: 20% done
progress: 30% done
progress: 40% done
progress: 50% done
progress: 60% done
progress: 70% done
progress: 80% done
progress: 90% done
progress: 100% done
finished in 9 sec, 457 millisec and 530 microsec, 2114 req/s, 164 kbyte/s
requests: 20000 total, 20000 started, 20000 done, 0 succeeded, 20000 failed, 0 errored
status codes: 17691 2xx, 0 3xx, 0 4xx, 0 5xx
traffic: 1592190 bytes total, 1592190 bytes http, 0 bytes data
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment