Skip to content

Instantly share code, notes, and snippets.

@ashiqopu
Last active June 17, 2020 05:21
Show Gist options
  • Save ashiqopu/3b7d070119bee4c0f8d6cb51eb084a69 to your computer and use it in GitHub Desktop.
Save ashiqopu/3b7d070119bee4c0f8d6cb51eb084a69 to your computer and use it in GitHub Desktop.
TCP-buffer-effect
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2010 Georgia Institute of Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: George F. Riley <riley@ece.gatech.edu>
*/
#include "ns3/log.h"
#include "ns3/address.h"
#include "ns3/node.h"
#include "ns3/nstime.h"
#include "ns3/socket.h"
#include "ns3/simulator.h"
#include "ns3/socket-factory.h"
#include "ns3/packet.h"
#include "ns3/uinteger.h"
#include "ns3/trace-source-accessor.h"
#include "ns3/tcp-socket-factory.h"
#include "bulk-send-application.h"
namespace ns3 {
NS_LOG_COMPONENT_DEFINE ("BulkSendApplication");
NS_OBJECT_ENSURE_REGISTERED (BulkSendApplication);
TypeId
BulkSendApplication::GetTypeId (void)
{
  // Registered once; every later call returns the cached TypeId.
  static TypeId tid =
    TypeId ("ns3::BulkSendApplication")
      .SetParent<Application> ()
      .SetGroupName ("Applications")
      .AddConstructor<BulkSendApplication> ()
      // Chunk size handed to the socket on each Send() call.
      .AddAttribute ("SendSize", "The amount of data to send each time.",
                     UintegerValue (512),
                     MakeUintegerAccessor (&BulkSendApplication::m_sendSize),
                     MakeUintegerChecker<uint32_t> (1))
      // Destination address of the flow.
      .AddAttribute ("Remote", "The address of the destination",
                     AddressValue (),
                     MakeAddressAccessor (&BulkSendApplication::m_peer),
                     MakeAddressChecker ())
      // Total byte budget; zero disables the limit.
      .AddAttribute ("MaxBytes",
                     "The total number of bytes to send. "
                     "Once these bytes are sent, "
                     "no data is sent again. The value zero means "
                     "that there is no limit.",
                     UintegerValue (0),
                     MakeUintegerAccessor (&BulkSendApplication::m_maxBytes),
                     MakeUintegerChecker<uint64_t> ())
      // Socket factory TypeId; must yield a stream/seqpacket socket.
      .AddAttribute ("Protocol", "The type of protocol to use.",
                     TypeIdValue (TcpSocketFactory::GetTypeId ()),
                     MakeTypeIdAccessor (&BulkSendApplication::m_tid),
                     MakeTypeIdChecker ())
      // Fired for every packet handed to the socket.
      .AddTraceSource ("Tx", "A new packet is created and is sent",
                       MakeTraceSourceAccessor (&BulkSendApplication::m_txTrace),
                       "ns3::Packet::TracedCallback");
  return tid;
}
BulkSendApplication::BulkSendApplication ()
  : m_socket (0),     // created lazily in StartApplication
    m_connected (false),
    m_totBytes (0)
{
  // Attributes (SendSize, MaxBytes, ...) are set by the helper afterwards.
  NS_LOG_FUNCTION (this);
}
BulkSendApplication::~BulkSendApplication ()
{
  // Nothing to release here; the socket reference is dropped in DoDispose().
  NS_LOG_FUNCTION (this);
}
void
BulkSendApplication::SetMaxBytes (uint64_t maxBytes)
{
  NS_LOG_FUNCTION (this << maxBytes);
  // Zero means "no upper bound" — see the MaxBytes attribute documentation.
  m_maxBytes = maxBytes;
}
Ptr<Socket>
BulkSendApplication::GetSocket (void) const
{
  NS_LOG_FUNCTION (this);
  // Null until StartApplication has created the socket.
  return m_socket;
}
void
BulkSendApplication::DoDispose (void)
{
  NS_LOG_FUNCTION (this);

  // Drop our reference so the socket can be reclaimed, then chain up.
  m_socket = 0;
  Application::DoDispose ();
}
// Application Methods
void BulkSendApplication::StartApplication (void) // Called at time specified by Start
{
NS_LOG_FUNCTION (this);
// Create the socket if not already
if (!m_socket)
{
m_socket = Socket::CreateSocket (GetNode (), m_tid);
// Fatal error if socket type is not NS3_SOCK_STREAM or NS3_SOCK_SEQPACKET
if (m_socket->GetSocketType () != Socket::NS3_SOCK_STREAM &&
m_socket->GetSocketType () != Socket::NS3_SOCK_SEQPACKET)
{
NS_FATAL_ERROR ("Using BulkSend with an incompatible socket type. "
"BulkSend requires SOCK_STREAM or SOCK_SEQPACKET. "
"In other words, use TCP instead of UDP.");
}
// Bind to an ephemeral local endpoint of the address family matching m_peer.
if (Inet6SocketAddress::IsMatchingType (m_peer))
{
if (m_socket->Bind6 () == -1)
{
NS_FATAL_ERROR ("Failed to bind socket");
}
}
else if (InetSocketAddress::IsMatchingType (m_peer))
{
if (m_socket->Bind () == -1)
{
NS_FATAL_ERROR ("Failed to bind socket");
}
}
// Initiate the (asynchronous) connection; this app only sends, so shut
// down the receive side. Registering the callbacks after Connect() is
// safe here because connection completion is a scheduled event, not a
// synchronous call.
m_socket->Connect (m_peer);
m_socket->ShutdownRecv ();
m_socket->SetConnectCallback (
MakeCallback (&BulkSendApplication::ConnectionSucceeded, this),
MakeCallback (&BulkSendApplication::ConnectionFailed, this));
m_socket->SetSendCallback (
MakeCallback (&BulkSendApplication::DataSend, this));
// Hook the TCP congestion window trace so CwndChange records every update.
m_socket->TraceConnectWithoutContext ("CongestionWindow", MakeCallback (&BulkSendApplication::CwndChange, this));
}
// If the app was stopped and restarted, the connection may already be up.
if (m_connected)
{
SendData ();
}
}
void BulkSendApplication::StopApplication (void) // Called at time specified by Stop
{
  NS_LOG_FUNCTION (this);

  // Close the connection if a socket exists; otherwise just warn.
  if (m_socket == 0)
    {
      NS_LOG_WARN ("BulkSendApplication found null socket to close in StopApplication");
    }
  else
    {
      m_socket->Close ();
      m_connected = false;
    }
}
// Private helpers
void BulkSendApplication::SendData (void)
{
  NS_LOG_FUNCTION (this);

  // Keep handing data to the socket until either the byte budget is
  // exhausted or the socket accepts less than a full chunk.
  for (;;)
    {
      if (m_maxBytes != 0 && m_totBytes >= m_maxBytes)
        {
          break; // budget exhausted
        }
      // uint64_t so the min() against the remaining budget cannot wrap;
      // the value always fits in uint32_t because m_sendSize is uint32_t.
      uint64_t chunk = m_sendSize;
      if (m_maxBytes > 0)
        {
          chunk = std::min (chunk, m_maxBytes - m_totBytes);
        }
      NS_LOG_LOGIC ("sending packet at " << Simulator::Now ());
      Ptr<Packet> packet = Create<Packet> (chunk);
      int actual = m_socket->Send (packet);
      if (actual > 0)
        {
          m_totBytes += actual;
          m_txTrace (packet);
        }
      // A short (or failed, actual == -1) send means the transmit buffer is
      // full; the "DataSent" callback resumes us once space frees up.
      if ((unsigned)actual != chunk)
        {
          break;
        }
    }
  // Everything requested has been sent: close the connection.
  if (m_totBytes == m_maxBytes && m_connected)
    {
      m_socket->Close ();
      m_connected = false;
    }
}
void BulkSendApplication::ConnectionSucceeded (Ptr<Socket> socket)
{
  NS_LOG_FUNCTION (this << socket);
  NS_LOG_LOGIC ("BulkSendApplication Connection succeeded");
  // Handshake complete: mark the flow live and start filling the tx buffer.
  m_connected = true;
  SendData ();
}
void BulkSendApplication::ConnectionFailed (Ptr<Socket> socket)
{
  NS_LOG_FUNCTION (this << socket);
  // No retry logic: a failed connect simply leaves the app idle.
  NS_LOG_LOGIC ("BulkSendApplication, Connection Failed");
}
void BulkSendApplication::DataSend (Ptr<Socket>, uint32_t)
{
  NS_LOG_FUNCTION (this);

  // Ignore buffer-space notifications until the connection has completed.
  if (!m_connected)
    {
      return;
    }
  SendData ();
}
void BulkSendApplication::CwndChange (uint32_t oldCwnd, uint32_t newCwnd)
{
  NS_LOG_FUNCTION (this);
  // Record a (time, cwnd-in-bytes) sample. oldCwnd is unused but required
  // by the "CongestionWindow" TracedCallback signature.
  m_cwndTrace.emplace_back (ns3::Simulator::Now (), newCwnd);
}
std::vector< std::pair<ns3::Time, double> >
BulkSendApplication::GetCwndTrace()
{
  // Hand back a copy of the recorded (time, cwnd) samples.
  return m_cwndTrace;
}
} // Namespace ns3
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2010 Georgia Institute of Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: George F. Riley <riley@ece.gatech.edu>
*/
#ifndef BULK_SEND_APPLICATION_H
#define BULK_SEND_APPLICATION_H
#include "ns3/address.h"
#include "ns3/application.h"
#include "ns3/event-id.h"
#include "ns3/ptr.h"
#include "ns3/traced-callback.h"
namespace ns3 {
class Address;
class Socket;
/**
* \ingroup applications
* \defgroup bulksend BulkSendApplication
*
* This traffic generator simply sends data
* as fast as possible up to MaxBytes or until
* the application is stopped (if MaxBytes is
* zero). Once the lower layer send buffer is
* filled, it waits until space is free to
* send more data, essentially keeping a
* constant flow of data. Only SOCK_STREAM
* and SOCK_SEQPACKET sockets are supported.
* For example, TCP sockets can be used, but
* UDP sockets can not be used.
*/
/**
* \ingroup bulksend
*
* \brief Send as much traffic as possible, trying to fill the bandwidth.
*
* This traffic generator simply sends data
* as fast as possible up to MaxBytes or until
* the application is stopped (if MaxBytes is
* zero). Once the lower layer send buffer is
* filled, it waits until space is free to
* send more data, essentially keeping a
* constant flow of data. Only SOCK_STREAM
* and SOCK_SEQPACKET sockets are supported.
* For example, TCP sockets can be used, but
* UDP sockets can not be used.
*
*/
class BulkSendApplication : public Application
{
public:
/**
* \brief Get the type ID.
* \return the object TypeId
*/
static TypeId GetTypeId (void);
BulkSendApplication ();
virtual ~BulkSendApplication ();
/**
* \brief Set the upper bound for the total number of bytes to send.
*
* Once this bound is reached, no more application bytes are sent. If the
* application is stopped during the simulation and restarted, the
* total number of bytes sent is not reset; however, the maxBytes
* bound is still effective and the application will continue sending
* up to maxBytes. The value zero for maxBytes means that
* there is no upper bound; i.e. data is sent until the application
* or simulation is stopped.
*
* \param maxBytes the upper bound of bytes to send
*/
void SetMaxBytes (uint64_t maxBytes);
/**
* \brief Get the socket this application is attached to.
* \return pointer to associated socket
*/
Ptr<Socket> GetSocket (void) const;
/**
* \brief Get the congestion-window samples recorded via the socket's
* "CongestionWindow" trace source.
* \return copy of the (sample time, cwnd in bytes) pairs
*/
std::vector< std::pair<ns3::Time, double> >
GetCwndTrace();
protected:
virtual void DoDispose (void);
private:
// inherited from Application base class.
virtual void StartApplication (void); // Called at time specified by Start
virtual void StopApplication (void); // Called at time specified by Stop
/**
* \brief Send data until the L4 transmission buffer is full.
*/
void SendData ();
Ptr<Socket> m_socket; //!< Associated socket
Address m_peer; //!< Peer address
bool m_connected; //!< True if connected
uint32_t m_sendSize; //!< Size of data to send each time
uint64_t m_maxBytes; //!< Limit total number of bytes sent
uint64_t m_totBytes; //!< Total bytes sent so far
TypeId m_tid; //!< The type of protocol to use.
// Recorded (sample time, cwnd in bytes) pairs; filled by CwndChange.
std::vector< std::pair<ns3::Time, double> > m_cwndTrace;
/// Traced Callback: sent packets
TracedCallback<Ptr<const Packet> > m_txTrace;
private:
/**
* \brief Connection Succeeded (called by Socket through a callback)
* \param socket the connected socket
*/
void ConnectionSucceeded (Ptr<Socket> socket);
/**
* \brief Connection Failed (called by Socket through a callback)
* \param socket the connected socket
*/
void ConnectionFailed (Ptr<Socket> socket);
/**
* \brief Send more data as soon as some has been transmitted.
*/
void DataSend (Ptr<Socket>, uint32_t); // for socket's SetSendCallback
/**
* \brief Record a congestion-window change (hooked to the socket's
* "CongestionWindow" trace source in StartApplication).
* \param oldCwnd previous window size in bytes (unused)
* \param newCwnd new window size in bytes
*/
void
CwndChange (uint32_t oldCwnd, uint32_t newCwnd);
};
} // namespace ns3
#endif /* BULK_SEND_APPLICATION_H */

I am trying to understand the effects of manually setting the TCP Socket's Tx/Rx buffer size on the CWND over time.

// ns3 Tx/Rx buffer default initial = 131072 bytes or roughly 88 packets
Config::SetDefault ("ns3::TcpSocket::SndBufSize", UintegerValue (tcpQ*1500)); // 1500 MSS
Config::SetDefault ("ns3::TcpSocket::RcvBufSize", UintegerValue (tcpQ*1500));

I tested with a simple linear topology, here, 1* means one or more intermediate routers/nodes. By default, each link is 5Mbps, 1ms delay. However, I can also enable at most one bottleneck link of 1Mbps, 10ms delay.

Snd-------(1*)-------Rcv

However, I am observing something interesting when the buffers are set too low, say 10 or 44 packets (half of the default minimum). I am keeping the QueueBase and CoDelQueue at their default values. I am collecting the CwndTrace within the BulkSendApplication and then plotting the CWND in packets (Y-axis) over time (X-axis). Payload size is set to 1460 bytes. Here are two examples:

Example 1:

Fig 1: https://gist.github.com/ashiqopu/3b7d070119bee4c0f8d6cb51eb084a69#gistcomment-3344133

As we can see, for very small buffer capacity, without bottleneck (no-btl), except for the default buffer capacity, CWND goes haywire and has a linear increase over time! However, with the bottleneck enabled (btl), only the 10pkt buffer shows similar linear growth but the growth over time is much smaller compared to no-btl versions.

To verify my sim code, below I'm showing the fully default queue, qdisc and buffer plots with and without bottlenecks which I think are self explanatory.

Fig 2: https://gist.github.com/ashiqopu/3b7d070119bee4c0f8d6cb51eb084a69#gistcomment-3344134

Now, the below plot also shows the effect of the chain length with at-most 1 bottleneck link as before, with socket buffer size set to 44 packets:

Fig 3: https://gist.github.com/ashiqopu/3b7d070119bee4c0f8d6cb51eb084a69#gistcomment-3344135

Here, while the 3 node chain has usual cwnd behavior with btlneck enabled (from previous example), the cwnd for 10 nodes again goes linear.

Now, looking at the Throughput, Sent packets by Sender and Received packets by Receiver, it appears that the sender is sending out much higher number of packets when the socket buffers are set to 44 packets compared to default. (SkyBlue bar for buffer=44pkt, DarkBlue bar for default buffer).

Fig 4: https://gist.github.com/ashiqopu/3b7d070119bee4c0f8d6cb51eb084a69#gistcomment-3344136

This also makes sense, given that the sender will have a severe bottleneck directly at its send buffer, causing many packets to be dropped and subsequently retransmitted.

But what is bugging me is the cwnd size over time. I am wondering why this happens when I set the socket buffer size too low, and I am trying to understand the reason.

I am linking my complete code here: https://gist.github.com/ashiqopu/3b7d070119bee4c0f8d6cb51eb084a69

Any pointer to reading materials would also be really helpful to understand the effect.

/* -*- Mode: C++; c-file-style: "gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2009 The Boeing Company
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
//
// ./waf --run="wired-tcp --totalNodes=10"
//
// topo: more than one hop can have 1 bottleneck if enabled
//
#include "ns3/core-module.h"
#include "ns3/network-module.h"
#include "ns3/internet-module.h"
#include "ns3/point-to-point-module.h"
#include "ns3/applications-module.h"
#include "ns3/flow-monitor-helper.h"
#include "ns3/ipv4-flow-classifier.h"
#include "ns3/traffic-control-module.h"
#include <fstream>
#include <iomanip>
#include <limits>
#include <map>
#include <string>
#include <vector>
using namespace ns3;

NS_LOG_COMPONENT_DEFINE ("wired-tcp");

Ptr<PacketSink> sink; /* Pointer to the packet sink application */

/// Per-chain-length aggregate statistics accumulated over simulation runs.
/// Default member initializers guarantee zeroed counters even when an
/// instance is default-constructed outside the std::map below.
struct D {
  uint32_t rxPkts = 0;       //!< packets received by the sink (summed over runs)
  uint32_t txPkts = 0;       //!< packets transmitted by the source flow (summed over runs)
  double throughput = 0.0;   //!< achieved throughput in Mbps (summed over runs)
};
typedef D Data;

/// Results keyed by totalNodes (the chain length).
std::map<uint32_t, Data> data;
/**
 * \brief Run one simulation of a linear chain with a TCP bulk-send flow.
 *
 * Builds a totalNodes-long point-to-point chain (5Mbps/1ms links, with an
 * optional 1Mbps/10ms bottleneck at the middle link), installs a
 * BulkSendApplication at node 0 and a PacketSink at the last node,
 * accumulates throughput/packet counts into the global `data` map, and
 * writes the recorded cwnd trace to a per-run log file.
 *
 * \param payloadSize TCP segment size in bytes
 * \param maxBytes    total bytes for the application to send (0 = unlimited)
 * \param totalNodes  number of nodes in the chain
 * \param enableBtl   enable the middle bottleneck link
 * \param enableSack  enable TCP SACK
 * \param enableEcn   enable ECN (CoDel marking + ClassicEcn)
 * \param errRate     per-packet receive error rate on the edge devices
 * \param simTime     application run time in seconds
 * \param tcpVariant  TCP congestion-control TypeId suffix (e.g. "TcpNewReno")
 * \param runID       run index, used in the output file name
 * \param baseQ       device queue size in packets (0 = ns-3 default)
 * \param codelQ      CoDel queue-disc size in packets (0 = ns-3 default)
 * \param tcpQ        socket Snd/Rcv buffer size in 1500-byte units (0 = default)
 */
void SimRun (uint32_t payloadSize, uint64_t maxBytes, uint32_t totalNodes,
             bool enableBtl, bool enableSack, bool enableEcn, double errRate,
             double simTime, std::string tcpVariant, uint32_t runID,
             uint32_t baseQ, uint32_t codelQ, uint32_t tcpQ)
{
  if (baseQ > 0)
    {
      Config::SetDefault ("ns3::QueueBase::MaxSize", StringValue (std::to_string (baseQ) + "p"));
    }
  std::string tcpAlg = tcpVariant;
  tcpVariant = std::string ("ns3::") + tcpVariant;
  // Select TCP variant
  if (tcpVariant.compare ("ns3::TcpWestwoodPlus") == 0)
    {
      // TcpWestwoodPlus is not an actual TypeId name; we need TcpWestwood here
      Config::SetDefault ("ns3::TcpL4Protocol::SocketType", TypeIdValue (TcpWestwood::GetTypeId ()));
      // the default protocol type in ns3::TcpWestwood is WESTWOOD
      Config::SetDefault ("ns3::TcpWestwood::ProtocolType", EnumValue (TcpWestwood::WESTWOODPLUS));
    }
  else
    {
      TypeId tcpTid;
      NS_ABORT_MSG_UNLESS (TypeId::LookupByNameFailSafe (tcpVariant, &tcpTid), "TypeId " << tcpVariant << " not found");
      Config::SetDefault ("ns3::TcpL4Protocol::SocketType", TypeIdValue (TypeId::LookupByName (tcpVariant)));
    }
  /* Configure TCP Options */
  Config::SetDefault ("ns3::TcpSocket::SegmentSize", UintegerValue (payloadSize));
  Config::SetDefault ("ns3::TcpSocketBase::Sack", BooleanValue (enableSack));
  if (tcpQ > 0)
    {
      // ns-3 Tx/Rx buffer default initial = 131072 bytes; tcpQ is in 1500-byte MSS units.
      Config::SetDefault ("ns3::TcpSocket::SndBufSize", UintegerValue (tcpQ * 1500));
      Config::SetDefault ("ns3::TcpSocket::RcvBufSize", UintegerValue (tcpQ * 1500));
    }
  /* Enable or disable Congestion Window Limit */
  // Config::SetDefault ("ns3::TcpSocketState::EnableCWL", BooleanValue (enableCWL));
  TrafficControlHelper tchCoDel;
  tchCoDel.SetRootQueueDisc ("ns3::CoDelQueueDisc");
  if (enableEcn)
    {
      Config::SetDefault ("ns3::CoDelQueueDisc::UseEcn", BooleanValue (true));
      Config::SetDefault ("ns3::TcpSocketBase::EcnMode", StringValue ("ClassicEcn"));
    }
  if (codelQ > 0)
    {
      Config::SetDefault ("ns3::CoDelQueueDisc::MaxSize", StringValue (std::to_string (codelQ) + "p"));
    }

  NodeContainer c;
  c.Create (totalNodes);
  InternetStackHelper stack;
  stack.Install (c);

  // totalNodes-1 links and subnets; the bottleneck (if enabled) sits at the middle link.
  uint32_t midNode = (totalNodes / 2.0);
  PointToPointHelper p2p;
  std::vector<NetDeviceContainer> devices (totalNodes - 1);
  for (uint32_t i = 0; i < totalNodes - 1; i++)
    {
      if (i > 0 && i == midNode && enableBtl)
        {
          p2p.SetDeviceAttribute ("DataRate", StringValue ("1Mbps"));
          p2p.SetChannelAttribute ("Delay", StringValue ("10ms"));
        }
      else
        {
          p2p.SetDeviceAttribute ("DataRate", StringValue ("5Mbps"));
          p2p.SetChannelAttribute ("Delay", StringValue ("1ms"));
        }
      devices[i] = p2p.Install (c.Get (i), c.Get (i + 1));
      tchCoDel.Install (devices[i]);
    }

  NS_LOG_INFO ("assigning ip address");
  Ipv4AddressHelper ipv4;
  NS_LOG_INFO ("Assign IP Addresses.");
  std::vector<Ipv4InterfaceContainer> iface (totalNodes - 1);
  // set the n-1 subnets
  for (uint32_t i = 0; i < totalNodes - 1; i++)
    {
      std::string subnetIP = "10.1." + std::to_string (i + 1) + ".0";
      ipv4.SetBase (subnetIP.c_str (), "255.255.255.0");
      iface[i] = ipv4.Assign (devices[i]);
    }

  // Create router nodes, initialize routing database and set up the routing
  // tables in the nodes.
  Ipv4GlobalRoutingHelper::PopulateRoutingTables ();

  // This enables the specified errRate on both edge link endpoints.
  DoubleValue rate (errRate);
  Ptr<RateErrorModel> em1 =
    CreateObjectWithAttributes<RateErrorModel> ("RanVar", StringValue ("ns3::UniformRandomVariable[Min=0.0|Max=1.0]"), "ErrorRate", rate);
  Ptr<RateErrorModel> em2 =
    CreateObjectWithAttributes<RateErrorModel> ("RanVar", StringValue ("ns3::UniformRandomVariable[Min=0.0|Max=1.0]"), "ErrorRate", rate);
  devices[0].Get (0)->SetAttribute ("ReceiveErrorModel", PointerValue (em1));
  devices[totalNodes - 2].Get (1)->SetAttribute ("ReceiveErrorModel", PointerValue (em2));

  NS_LOG_INFO ("Create Applications.");
  //
  // Create a BulkSendApplication and install it on node 0
  //
  uint16_t port = 9; // well-known echo port number
  BulkSendHelper source ("ns3::TcpSocketFactory",
                         InetSocketAddress (iface[totalNodes - 2].GetAddress (1), port));
  // Set the amount of data to send in bytes. Zero is unlimited.
  source.SetAttribute ("MaxBytes", UintegerValue (maxBytes));
  ApplicationContainer sourceApps = source.Install (c.Get (0));
  sourceApps.Start (Seconds (0.0));
  sourceApps.Stop (Seconds (simTime));

  //
  // Create a PacketSinkApplication and install it on the last node
  //
  PacketSinkHelper sinkHelper ("ns3::TcpSocketFactory",
                               InetSocketAddress (Ipv4Address::GetAny (), port));
  ApplicationContainer sinkApps = sinkHelper.Install (c.Get (totalNodes - 1));
  sink = StaticCast<PacketSink> (sinkApps.Get (0));
  sinkApps.Start (Seconds (0.0));
  sinkApps.Stop (Seconds (simTime));

  FlowMonitorHelper flowmon;
  Ptr<FlowMonitor> monitor = flowmon.InstallAll ();

  Simulator::Stop (Seconds (simTime + 2.0));
  Simulator::Run ();

  double throughput = ((sink->GetTotalRx () * 8) / (1e6 * simTime));
  uint64_t totalRxPkts = sink->GetTotalRx () / payloadSize; // approximate packet count
  uint32_t totalTxPkts = 0;

  // Per-flow statistics: find the sender->receiver flow and take its tx count.
  monitor->CheckForLostPackets ();
  Ptr<Ipv4FlowClassifier> classifier = DynamicCast<Ipv4FlowClassifier> (flowmon.GetClassifier ());
  FlowMonitor::FlowStatsContainer stats = monitor->GetFlowStats ();
  for (std::map<FlowId, FlowMonitor::FlowStats>::const_iterator i = stats.begin (); i != stats.end (); ++i)
    {
      Ipv4FlowClassifier::FiveTuple t = classifier->FindFlow (i->first);
      if (t.sourceAddress == iface[0].GetAddress (0) &&
          t.destinationAddress == iface[totalNodes - 2].GetAddress (1))
        {
          totalTxPkts = i->second.txPackets;
          break;
        }
    }

  // Accumulate per-chain-length totals (operator[] value-initializes, but
  // keep the explicit reset for clarity).
  if (data.find (totalNodes) == data.end ())
    {
      data[totalNodes].rxPkts = 0;
      data[totalNodes].txPkts = 0;
      data[totalNodes].throughput = 0.0;
    }
  data[totalNodes].rxPkts += totalRxPkts;
  data[totalNodes].txPkts += totalTxPkts;
  data[totalNodes].throughput += throughput;

  // Dump the cwnd trace: one "<seconds>\t<cwnd in segments>" line per sample.
  std::ofstream cwndWriter;
  std::string str = "logs/linear/tcp-cwnd/n-" +
                    std::to_string (totalNodes) +
                    "-" + tcpAlg +
                    "-btl-" + std::to_string (enableBtl) +
                    "-bq-" + std::to_string (baseQ) +
                    "-cq-" + std::to_string (codelQ) +
                    "-tq-" + std::to_string (tcpQ) +
                    "-sack-" + std::to_string (enableSack) +
                    "-ecn-" + std::to_string (enableEcn) +
                    "-err-" + std::to_string (errRate) +
                    "-run-" + std::to_string (runID) +
                    ".txt";
  auto serverApp = DynamicCast<ns3::BulkSendApplication> (sourceApps.Get (0));
  NS_ABORT_MSG_IF (serverApp == 0, "source application is not a BulkSendApplication");
  // BUG FIX: `cwnd` was assigned without a declaration, and the timestamp
  // used ill-formed Time-minus-double arithmetic; use Time::GetSeconds().
  std::vector< std::pair<ns3::Time, double> > cwnd = serverApp->GetCwndTrace ();
  cwndWriter.open (str.c_str ());
  for (uint32_t i = 0; i < cwnd.size (); i++)
    {
      cwndWriter << cwnd[i].first.GetSeconds () << "\t"
                 << cwnd[i].second / payloadSize << "\n";
    }
  cwndWriter.close ();

  Simulator::Destroy ();
}
/**
 * \brief Parse command-line options, run numRun simulations with distinct
 * RNG seeds, and append the averaged throughput/packet counts to a log file.
 */
int main (int argc, char *argv[])
{
  uint32_t payloadSize = 1460;                                 // bytes
  uint64_t maxBytes = std::numeric_limits<uint64_t>::max () - 10; // effectively unlimited
  double simTime = 10.0;                                       // seconds
  std::string tcpVariant = "TcpNewReno";                       /* TCP variant type. */
  uint32_t totalNodes = 2;
  uint32_t numRun = 1;
  bool enableCWL = true;
  bool enableSack = true;
  bool enableEcn = true;
  bool enableBtl = true;
  double errRate = 0.000001; // 1460 ~ 1%
  uint32_t baseQ = 0;   // ns3 default = 100 packets
  uint32_t codelQ = 0;  // ns3 default = 1000 packets
  uint32_t tcpQ = 0;    // keep 0 for default; ns3 Tx/Rx buffer default initial = 131072 bytes

  CommandLine cmd;
  cmd.AddValue ("payloadSize", "size of application payload sent", payloadSize);
  cmd.AddValue ("maxBytes", "Total number of bytes for application to send", maxBytes);
  cmd.AddValue ("totalNodes", "Total number of nodes in the chain", totalNodes);
  cmd.AddValue ("tcpVariant", "TCP congestion control algorithm", tcpVariant);
  cmd.AddValue ("enableCWL", "Enable or Disable Congestion Window Limit", enableCWL);
  cmd.AddValue ("enableSack", "Enable or Disable TCP SACK", enableSack);
  cmd.AddValue ("enableEcn", "Enable or Disable Explicit Congestion Notification", enableEcn);
  cmd.AddValue ("enableBtl", "Enable or Disable bottleneck link", enableBtl);
  cmd.AddValue ("errRate", "Error rate to apply to link", errRate);
  cmd.AddValue ("simTime", "Set simulation time limit", simTime);
  cmd.AddValue ("baseQ", "Set base queue size", baseQ);
  cmd.AddValue ("codelQ", "Set codel queue size", codelQ);
  cmd.AddValue ("tcpQ", "Set tcp buffer size", tcpQ);
  cmd.AddValue ("numRun", "Set simulation runs", numRun);
  cmd.Parse (argc, argv);

  for (uint32_t i = 1; i <= numRun; i += 1)
    {
      RngSeedManager::SetSeed (i); // distinct seed per run
      // RngSeedManager::SetRun (7); // Changes run number from default of 1 to 7
      SimRun (payloadSize, maxBytes, totalNodes, enableBtl,
              enableSack, enableEcn, errRate, simTime, tcpVariant, i,
              baseQ, codelQ, tcpQ);
    }

  // Append averaged results for this chain length.
  std::ofstream throughputWriter;
  std::string str = "logs/linear/throughput/wired-tcp-n-" +
                    std::to_string (totalNodes) +
                    "-" + tcpVariant +
                    "-btl-" + std::to_string (enableBtl) +
                    "-bq-" + std::to_string (baseQ) +
                    "-cq-" + std::to_string (codelQ) +
                    "-tq-" + std::to_string (tcpQ) +
                    "-sack-" + std::to_string (enableSack) +
                    "-ecn-" + std::to_string (enableEcn) +
                    "-err-" + std::to_string (errRate) +
                    ".txt";
  throughputWriter.open (str.c_str (), std::ios::app);
  if (throughputWriter.is_open ())
    {
      // BUG FIX: divide in floating point BEFORE averaging; the original cast
      // the result of an integer division, truncating the per-run averages.
      throughputWriter << totalNodes << "\t"
                       << std::fixed << std::setprecision (2)
                       << data[totalNodes].throughput / numRun << "\t"
                       << (double) data[totalNodes].rxPkts / numRun << "\t"
                       << (double) data[totalNodes].txPkts / numRun << "\t"
                       << std::endl;
    }
  else
    {
      std::cerr << "Failed file open\n";
    }
  throughputWriter.close ();
  return 0;
}
@ashiqopu
Copy link
Author

sock-buffer-effect

@ashiqopu
Copy link
Author

sock-buffer-effect-btl

@ashiqopu
Copy link
Author

sock-buffer-effect-nodes

@ashiqopu
Copy link
Author

sock-buffer-throughput

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment