catchunks: refactor in preparation for CUBIC pipeline.
Rename the AIMD pipeline to "Adaptive Pipeline".
Remove the "aimd-" prefix from the command-line options and shorten several of the remaining option names.
Change-Id: Ie5689cf3c0b90bedb322fd42dbf289f8a04e56d2
refs: #4861
diff --git a/tests/chunks/pipeline-interests-aimd.t.cpp b/tests/chunks/pipeline-interests-adaptive.t.cpp
similarity index 73%
rename from tests/chunks/pipeline-interests-aimd.t.cpp
rename to tests/chunks/pipeline-interests-adaptive.t.cpp
index 77dc1f0..1cc4f04 100644
--- a/tests/chunks/pipeline-interests-aimd.t.cpp
+++ b/tests/chunks/pipeline-interests-adaptive.t.cpp
@@ -22,24 +22,24 @@
*
* @author Weiwei Liu
* @author Chavoosh Ghasemi
+ * @author Klaus Schneider
*/
-#include "tools/chunks/catchunks/pipeline-interests-aimd.hpp"
+#include "tools/chunks/catchunks/pipeline-interests-adaptive.hpp"
#include "tools/chunks/catchunks/options.hpp"
#include "pipeline-interests-fixture.hpp"
namespace ndn {
namespace chunks {
-namespace aimd {
namespace tests {
using namespace ndn::tests;
-class PipelineInterestAimdFixture : public chunks::tests::PipelineInterestsFixture
+class PipelineInterestAdaptiveFixture : public PipelineInterestsFixture
{
public:
- PipelineInterestAimdFixture()
+ PipelineInterestAdaptiveFixture()
: opt(makePipelineOptions())
, rttEstimator(makeRttEstimatorOptions())
{
@@ -49,16 +49,16 @@
void
createPipeline()
{
- auto pline = make_unique<PipelineInterestsAimd>(face, rttEstimator, opt);
- aimdPipeline = pline.get();
+ auto pline = make_unique<PipelineInterestsAdaptive>(face, rttEstimator, opt);
+ pipeline = pline.get();
setPipeline(std::move(pline));
}
private:
- static PipelineInterestsAimd::Options
+ static PipelineInterestsAdaptive::Options
makePipelineOptions()
{
- PipelineInterestsAimd::Options pipelineOptions;
+ PipelineInterestsAdaptive::Options pipelineOptions;
pipelineOptions.isQuiet = true;
pipelineOptions.isVerbose = false;
pipelineOptions.disableCwa = false;
@@ -84,24 +84,24 @@
}
protected:
- PipelineInterestsAimd::Options opt;
+ PipelineInterestsAdaptive::Options opt;
RttEstimator rttEstimator;
- PipelineInterestsAimd* aimdPipeline;
+ PipelineInterestsAdaptive* pipeline;
static constexpr double MARGIN = 0.01;
};
-constexpr double PipelineInterestAimdFixture::MARGIN;
+constexpr double PipelineInterestAdaptiveFixture::MARGIN;
BOOST_AUTO_TEST_SUITE(Chunks)
-BOOST_FIXTURE_TEST_SUITE(TestPipelineInterestsAimd, PipelineInterestAimdFixture)
+BOOST_FIXTURE_TEST_SUITE(TestPipelineInterestsAdaptive, PipelineInterestAdaptiveFixture)
BOOST_AUTO_TEST_CASE(SlowStart)
{
nDataSegments = 4;
- aimdPipeline->m_ssthresh = 8.0;
- BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);
+ pipeline->m_ssthresh = 8.0;
+ BOOST_REQUIRE_CLOSE(pipeline->m_cwnd, 1, MARGIN);
- double preCwnd = aimdPipeline->m_cwnd;
+ double preCwnd = pipeline->m_cwnd;
run(name);
advanceClocks(io, time::nanoseconds(1));
BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);
@@ -109,8 +109,8 @@
for (uint64_t i = 0; i < nDataSegments - 1; ++i) {
face.receive(*makeDataWithSegment(i));
advanceClocks(io, time::nanoseconds(1));
- BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd - preCwnd, 1, MARGIN);
- preCwnd = aimdPipeline->m_cwnd;
+ BOOST_CHECK_CLOSE(pipeline->m_cwnd - preCwnd, 1, MARGIN);
+ preCwnd = pipeline->m_cwnd;
}
BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments - 1);
@@ -119,27 +119,27 @@
BOOST_AUTO_TEST_CASE(CongestionAvoidance)
{
nDataSegments = 7;
- aimdPipeline->m_ssthresh = 4.0;
- BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);
+ pipeline->m_ssthresh = 4.0;
+ BOOST_REQUIRE_CLOSE(pipeline->m_cwnd, 1, MARGIN);
- double preCwnd = aimdPipeline->m_cwnd;
+ double preCwnd = pipeline->m_cwnd;
run(name);
advanceClocks(io, time::nanoseconds(1));
BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);
- for (uint64_t i = 0; i < aimdPipeline->m_ssthresh; ++i) { // slow start
+ for (uint64_t i = 0; i < pipeline->m_ssthresh; ++i) { // slow start
face.receive(*makeDataWithSegment(i));
advanceClocks(io, time::nanoseconds(1));
- preCwnd = aimdPipeline->m_cwnd;
+ preCwnd = pipeline->m_cwnd;
}
BOOST_CHECK_CLOSE(preCwnd, 4.25, MARGIN);
- for (uint64_t i = aimdPipeline->m_ssthresh; i < nDataSegments - 1; ++i) { // congestion avoidance
+ for (uint64_t i = pipeline->m_ssthresh; i < nDataSegments - 1; ++i) { // congestion avoidance
face.receive(*makeDataWithSegment(i));
advanceClocks(io, time::nanoseconds(1));
- BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd - preCwnd, opt.aiStep / floor(aimdPipeline->m_cwnd), MARGIN);
- preCwnd = aimdPipeline->m_cwnd;
+ BOOST_CHECK_CLOSE(pipeline->m_cwnd - preCwnd, opt.aiStep / floor(pipeline->m_cwnd), MARGIN);
+ preCwnd = pipeline->m_cwnd;
}
BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments - 1);
@@ -148,8 +148,8 @@
BOOST_AUTO_TEST_CASE(Timeout)
{
nDataSegments = 8;
- aimdPipeline->m_ssthresh = 4.0;
- BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);
+ pipeline->m_ssthresh = 4.0;
+ BOOST_REQUIRE_CLOSE(pipeline->m_cwnd, 1, MARGIN);
run(name);
advanceClocks(io, time::nanoseconds(1));
@@ -162,7 +162,7 @@
}
BOOST_CHECK_EQUAL(pipeline->m_nReceived, 3);
- BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4, MARGIN);
+ BOOST_CHECK_CLOSE(pipeline->m_cwnd, 4, MARGIN);
BOOST_CHECK_EQUAL(face.sentInterests.size(), 7); // request for segment 7 has been sent
advanceClocks(io, time::milliseconds(100));
@@ -176,48 +176,48 @@
advanceClocks(io, time::nanoseconds(1));
BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
- BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.5, MARGIN);
+ BOOST_CHECK_CLOSE(pipeline->m_cwnd, 4.5, MARGIN);
BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all the segment requests have been sent
- BOOST_CHECK_EQUAL(aimdPipeline->m_nTimeouts, 0);
- BOOST_CHECK_EQUAL(aimdPipeline->m_nLossDecr, 0);
- BOOST_CHECK_EQUAL(aimdPipeline->m_nMarkDecr, 0);
- BOOST_CHECK_EQUAL(aimdPipeline->m_nRetransmitted, 0);
- BOOST_CHECK_EQUAL(aimdPipeline->m_nSkippedRetx, 0);
- BOOST_CHECK_EQUAL(aimdPipeline->m_nCongMarks, 0);
+ BOOST_CHECK_EQUAL(pipeline->m_nTimeouts, 0);
+ BOOST_CHECK_EQUAL(pipeline->m_nLossDecr, 0);
+ BOOST_CHECK_EQUAL(pipeline->m_nMarkDecr, 0);
+ BOOST_CHECK_EQUAL(pipeline->m_nRetransmitted, 0);
+ BOOST_CHECK_EQUAL(pipeline->m_nSkippedRetx, 0);
+ BOOST_CHECK_EQUAL(pipeline->m_nCongMarks, 0);
// timeout segment 3 & 6
advanceClocks(io, time::milliseconds(150));
- BOOST_CHECK_EQUAL(aimdPipeline->m_nTimeouts, 2);
- BOOST_CHECK_EQUAL(aimdPipeline->m_nRetransmitted, 1);
- BOOST_CHECK_EQUAL(aimdPipeline->m_nLossDecr, 1);
- BOOST_CHECK_EQUAL(aimdPipeline->m_nSkippedRetx, 0);
+ BOOST_CHECK_EQUAL(pipeline->m_nTimeouts, 2);
+ BOOST_CHECK_EQUAL(pipeline->m_nRetransmitted, 1);
+ BOOST_CHECK_EQUAL(pipeline->m_nLossDecr, 1);
+ BOOST_CHECK_EQUAL(pipeline->m_nSkippedRetx, 0);
BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
- BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.25, MARGIN); // window size drops to 1/2 of previous size
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 1);
+ BOOST_CHECK_CLOSE(pipeline->m_cwnd, 2.25, MARGIN); // window size drops to 1/2 of previous size
+ BOOST_CHECK_EQUAL(pipeline->m_retxQueue.size(), 1);
// receive segment 6, retransmit 3
face.receive(*makeDataWithSegment(6));
advanceClocks(io, time::nanoseconds(1));
BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
- BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.75, MARGIN); // congestion avoidance
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[3], 1);
+ BOOST_CHECK_CLOSE(pipeline->m_cwnd, 2.75, MARGIN); // congestion avoidance
+ BOOST_CHECK_EQUAL(pipeline->m_retxQueue.size(), 0);
+ BOOST_CHECK_EQUAL(pipeline->m_retxCount[3], 1);
- BOOST_CHECK_EQUAL(aimdPipeline->m_nTimeouts, 2);
- BOOST_CHECK_EQUAL(aimdPipeline->m_nRetransmitted, 2);
- BOOST_CHECK_EQUAL(aimdPipeline->m_nTimeouts,
- aimdPipeline->m_nRetransmitted + aimdPipeline->m_nSkippedRetx);
+ BOOST_CHECK_EQUAL(pipeline->m_nTimeouts, 2);
+ BOOST_CHECK_EQUAL(pipeline->m_nRetransmitted, 2);
+ BOOST_CHECK_EQUAL(pipeline->m_nTimeouts,
+ pipeline->m_nRetransmitted + pipeline->m_nSkippedRetx);
}
BOOST_AUTO_TEST_CASE(CongestionMarksWithCwa)
{
nDataSegments = 7;
- aimdPipeline->m_ssthresh = 4.0;
- BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);
+ pipeline->m_ssthresh = 4.0;
+ BOOST_REQUIRE_CLOSE(pipeline->m_cwnd, 1, MARGIN);
run(name);
advanceClocks(io, time::nanoseconds(1));
@@ -230,14 +230,14 @@
}
BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
- BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.5, MARGIN);
+ BOOST_CHECK_CLOSE(pipeline->m_cwnd, 4.5, MARGIN);
// receive segment 5 with congestion mark
face.receive(*makeDataWithSegmentAndCongMark(5));
advanceClocks(io, time::nanoseconds(1));
BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
- BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.25, MARGIN); // window size drops to 1/2 of previous size
+ BOOST_CHECK_CLOSE(pipeline->m_cwnd, 2.25, MARGIN); // window size drops to 1/2 of previous size
BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all interests have been sent
// receive the last segment with congestion mark
@@ -245,15 +245,15 @@
advanceClocks(io, time::nanoseconds(1));
BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
- BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.25, MARGIN); // conservative window adaptation (window size should not decrease)
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);
+ BOOST_CHECK_CLOSE(pipeline->m_cwnd, 2.25, MARGIN); // conservative window adaptation (window size should not decrease)
+ BOOST_CHECK_EQUAL(pipeline->m_retxQueue.size(), 0);
// make sure no interest is retransmitted for marked data packets
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[5], 0);
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[nDataSegments - 1], 0);
+ BOOST_CHECK_EQUAL(pipeline->m_retxCount[5], 0);
+ BOOST_CHECK_EQUAL(pipeline->m_retxCount[nDataSegments - 1], 0);
// check number of received marked data packets
- BOOST_CHECK_EQUAL(aimdPipeline->m_nCongMarks, 2);
+ BOOST_CHECK_EQUAL(pipeline->m_nCongMarks, 2);
}
BOOST_AUTO_TEST_CASE(CongestionMarksWithoutCwa)
@@ -262,8 +262,8 @@
createPipeline();
nDataSegments = 7;
- aimdPipeline->m_ssthresh = 4.0;
- BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);
+ pipeline->m_ssthresh = 4.0;
+ BOOST_REQUIRE_CLOSE(pipeline->m_cwnd, 1, MARGIN);
run(name);
advanceClocks(io, time::nanoseconds(1));
@@ -276,14 +276,14 @@
}
BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
- BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.5, MARGIN);
+ BOOST_CHECK_CLOSE(pipeline->m_cwnd, 4.5, MARGIN);
// receive segment 5 with congestion mark
face.receive(*makeDataWithSegmentAndCongMark(5));
advanceClocks(io, time::nanoseconds(1));
BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
- BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.25, MARGIN); // window size drops to 1/2 of previous size
+ BOOST_CHECK_CLOSE(pipeline->m_cwnd, 2.25, MARGIN); // window size drops to 1/2 of previous size
BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all interests have been sent
// receive the last segment with congestion mark
@@ -291,16 +291,16 @@
advanceClocks(io, time::nanoseconds(1));
BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
- BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, PipelineInterestsAimd::MIN_SSTHRESH,
+ BOOST_CHECK_CLOSE(pipeline->m_cwnd, PipelineInterestsAdaptive::MIN_SSTHRESH,
MARGIN); // window size should decrease, as cwa is disabled
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);
+ BOOST_CHECK_EQUAL(pipeline->m_retxQueue.size(), 0);
// make sure no interest is retransmitted for marked data packets
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[5], 0);
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[nDataSegments - 1], 0);
+ BOOST_CHECK_EQUAL(pipeline->m_retxCount[5], 0);
+ BOOST_CHECK_EQUAL(pipeline->m_retxCount[nDataSegments - 1], 0);
// check number of received marked data packets
- BOOST_CHECK_EQUAL(aimdPipeline->m_nCongMarks, 2);
+ BOOST_CHECK_EQUAL(pipeline->m_nCongMarks, 2);
}
BOOST_AUTO_TEST_CASE(IgnoreCongestionMarks)
@@ -309,8 +309,8 @@
createPipeline();
nDataSegments = 7;
- aimdPipeline->m_ssthresh = 4.0;
- BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);
+ pipeline->m_ssthresh = 4.0;
+ BOOST_REQUIRE_CLOSE(pipeline->m_cwnd, 1, MARGIN);
run(name);
advanceClocks(io, time::nanoseconds(1));
@@ -323,7 +323,7 @@
}
BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
- BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.75, MARGIN);
+ BOOST_CHECK_CLOSE(pipeline->m_cwnd, 4.75, MARGIN);
BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all interests have been sent
// receive the last segment with congestion mark
@@ -331,20 +331,20 @@
advanceClocks(io, time::nanoseconds(1));
BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
- BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 5.0, MARGIN); // window size increases
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);
+ BOOST_CHECK_CLOSE(pipeline->m_cwnd, 5.0, MARGIN); // window size increases
+ BOOST_CHECK_EQUAL(pipeline->m_retxQueue.size(), 0);
// make sure no interest is retransmitted for marked data packet
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[nDataSegments - 1], 0);
+ BOOST_CHECK_EQUAL(pipeline->m_retxCount[nDataSegments - 1], 0);
// check number of received marked data packets
- BOOST_CHECK_EQUAL(aimdPipeline->m_nCongMarks, 1);
+ BOOST_CHECK_EQUAL(pipeline->m_nCongMarks, 1);
}
BOOST_AUTO_TEST_CASE(Nack)
{
nDataSegments = 5;
- aimdPipeline->m_cwnd = 10.0;
+ pipeline->m_cwnd = 10.0;
run(name);
advanceClocks(io, time::nanoseconds(1));
@@ -365,7 +365,7 @@
// nack1 is ignored
BOOST_CHECK_EQUAL(hasFailed, false);
BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);
+ BOOST_CHECK_EQUAL(pipeline->m_retxQueue.size(), 0);
// receive a nack with NackReason::CONGESTION for segment 2
auto nack2 = makeNack(face.sentInterests[2], lp::NackReason::CONGESTION);
@@ -373,7 +373,7 @@
advanceClocks(io, time::nanoseconds(1));
// segment 2 is retransmitted
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[2], 1);
+ BOOST_CHECK_EQUAL(pipeline->m_retxCount[2], 1);
// receive a nack with NackReason::NONE for segment 3
auto nack3 = makeNack(face.sentInterests[3], lp::NackReason::NONE);
@@ -388,7 +388,7 @@
BOOST_AUTO_TEST_CASE(FinalBlockIdNotSetAtBeginning)
{
nDataSegments = 4;
- aimdPipeline->m_cwnd = 4;
+ pipeline->m_cwnd = 4;
run(name);
advanceClocks(io, time::nanoseconds(1));
@@ -399,7 +399,7 @@
// interests for segment 0 - 5 have been sent
BOOST_CHECK_EQUAL(face.sentInterests.size(), 6);
BOOST_CHECK_EQUAL(pipeline->m_nReceived, 1);
- BOOST_CHECK_EQUAL(aimdPipeline->m_hasFinalBlockId, false);
+ BOOST_CHECK_EQUAL(pipeline->m_hasFinalBlockId, false);
// pending interests: segment 1, 2, 3, 4, 5
BOOST_CHECK_EQUAL(face.getNPendingInterests(), 5);
@@ -407,7 +407,7 @@
face.receive(*makeDataWithSegment(1));
advanceClocks(io, time::nanoseconds(1));
BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
- BOOST_CHECK_EQUAL(aimdPipeline->m_hasFinalBlockId, true);
+ BOOST_CHECK_EQUAL(pipeline->m_hasFinalBlockId, true);
// pending interests for segments 1, 4, 5 have been removed
BOOST_CHECK_EQUAL(face.getNPendingInterests(), 2);
@@ -420,7 +420,7 @@
// part of the content.
nDataSegments = 4;
- aimdPipeline->m_cwnd = 4;
+ pipeline->m_cwnd = 4;
run(name);
advanceClocks(io, time::nanoseconds(1));
@@ -460,7 +460,7 @@
// not part of the content, and it was actually a spurious failure
nDataSegments = 4;
- aimdPipeline->m_cwnd = 4;
+ pipeline->m_cwnd = 4;
run(name);
advanceClocks(io, time::nanoseconds(1));
@@ -493,7 +493,7 @@
advanceClocks(io, time::seconds(1));
// segment 3 is retransmitted
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[3], 1);
+ BOOST_CHECK_EQUAL(pipeline->m_retxCount[3], 1);
// receive segment 3
face.receive(*makeDataWithSegment(3));
@@ -523,8 +523,8 @@
BOOST_CHECK_EQUAL(face.sentInterests.size(), 3);
// check if segment 2's state is FirstTimeSent
- auto it = aimdPipeline->m_segmentInfo.find(2);
- BOOST_REQUIRE(it != aimdPipeline->m_segmentInfo.end());
+ auto it = pipeline->m_segmentInfo.find(2);
+ BOOST_REQUIRE(it != pipeline->m_segmentInfo.end());
BOOST_CHECK(it->second.state == SegmentState::FirstTimeSent);
// timeout segment 2 twice
@@ -533,20 +533,20 @@
BOOST_CHECK_EQUAL(face.sentInterests.size(), 5);
// check if segment 2's state is Retransmitted
- it = aimdPipeline->m_segmentInfo.find(2);
- BOOST_REQUIRE(it != aimdPipeline->m_segmentInfo.end());
+ it = pipeline->m_segmentInfo.find(2);
+ BOOST_REQUIRE(it != pipeline->m_segmentInfo.end());
BOOST_CHECK(it->second.state == SegmentState::Retransmitted);
// check if segment 2 was retransmitted twice
- BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount.at(2), 2);
+ BOOST_CHECK_EQUAL(pipeline->m_retxCount.at(2), 2);
// receive segment 2 the first time
face.receive(*makeDataWithSegment(2));
advanceClocks(io, time::nanoseconds(1));
// check if segment 2 was erased from m_segmentInfo
- it = aimdPipeline->m_segmentInfo.find(2);
- BOOST_CHECK(it == aimdPipeline->m_segmentInfo.end());
+ it = pipeline->m_segmentInfo.find(2);
+ BOOST_CHECK(it == pipeline->m_segmentInfo.end());
auto prevRtt = rttEstimator.getAvgRtt();
auto prevRto = rttEstimator.getEstimatedRto();
@@ -556,8 +556,8 @@
advanceClocks(io, time::nanoseconds(1));
// nothing changed
- it = aimdPipeline->m_segmentInfo.find(2);
- BOOST_CHECK(it == aimdPipeline->m_segmentInfo.end());
+ it = pipeline->m_segmentInfo.find(2);
+ BOOST_CHECK(it == pipeline->m_segmentInfo.end());
BOOST_CHECK_EQUAL(face.sentInterests.size(), 5);
BOOST_CHECK_EQUAL(rttEstimator.getAvgRtt(), prevRtt);
BOOST_CHECK_EQUAL(rttEstimator.getEstimatedRto(), prevRto);
@@ -573,7 +573,7 @@
// change the underlying buffer and save the old buffer
auto oldBuf = std::cerr.rdbuf(ss.rdbuf());
- aimdPipeline->printSummary();
+ pipeline->printSummary();
std::string line;
bool found = false;
@@ -602,16 +602,15 @@
face.receive(*makeDataWithSegment(0));
advanceClocks(io, time::nanoseconds(1));
- BOOST_CHECK_EQUAL(aimdPipeline->m_hasFinalBlockId, true);
- BOOST_CHECK_EQUAL(aimdPipeline->m_segmentInfo.size(), 0);
+ BOOST_CHECK_EQUAL(pipeline->m_hasFinalBlockId, true);
+ BOOST_CHECK_EQUAL(pipeline->m_segmentInfo.size(), 0);
BOOST_CHECK_EQUAL(face.getNPendingInterests(), 0);
}
-BOOST_AUTO_TEST_SUITE_END() // TestPipelineInterestsAimd
+BOOST_AUTO_TEST_SUITE_END() // TestPipelineInterestsAdaptive
BOOST_AUTO_TEST_SUITE_END() // Chunks
} // namespace tests
-} // namespace aimd
} // namespace chunks
} // namespace ndn
diff --git a/tests/chunks/pipeline-interests-fixed-window.t.cpp b/tests/chunks/pipeline-interests-fixed.t.cpp
similarity index 94%
rename from tests/chunks/pipeline-interests-fixed-window.t.cpp
rename to tests/chunks/pipeline-interests-fixed.t.cpp
index e75b4a2..d7f17d6 100644
--- a/tests/chunks/pipeline-interests-fixed-window.t.cpp
+++ b/tests/chunks/pipeline-interests-fixed.t.cpp
@@ -24,7 +24,7 @@
* @author Chavoosh Ghasemi
*/
-#include "tools/chunks/catchunks/pipeline-interests-fixed-window.hpp"
+#include "tools/chunks/catchunks/pipeline-interests-fixed.hpp"
#include "tools/chunks/catchunks/data-fetcher.hpp"
#include "pipeline-interests-fixture.hpp"
@@ -33,20 +33,28 @@
namespace chunks {
namespace tests {
-class PipelineInterestFixedWindowFixture : public PipelineInterestsFixture
+class PipelineInterestFixedFixture : public PipelineInterestsFixture
{
public:
- PipelineInterestFixedWindowFixture()
+ PipelineInterestFixedFixture()
: opt(makeOptions())
{
- setPipeline(make_unique<PipelineInterestsFixedWindow>(face, PipelineInterestsFixedWindow::Options(opt)));
+ createPipeline();
+ }
+
+ void
+ createPipeline()
+ {
+ auto pline = make_unique<PipelineInterestsFixed>(face, opt);
+ pipeline = pline.get();
+ setPipeline(std::move(pline));
}
private:
- static PipelineInterestsFixedWindow::Options
+ static PipelineInterestsFixed::Options
makeOptions()
{
- PipelineInterestsFixedWindow::Options options;
+ PipelineInterestsFixed::Options options;
options.isQuiet = true;
options.isVerbose = false;
options.interestLifetime = time::seconds(1);
@@ -56,11 +64,12 @@
}
protected:
- PipelineInterestsFixedWindow::Options opt;
+ PipelineInterestsFixed::Options opt;
+ PipelineInterestsFixed* pipeline;
};
BOOST_AUTO_TEST_SUITE(Chunks)
-BOOST_FIXTURE_TEST_SUITE(TestPipelineInterestsFixedWindow, PipelineInterestFixedWindowFixture)
+BOOST_FIXTURE_TEST_SUITE(TestPipelineInterestsFixed, PipelineInterestFixedFixture)
BOOST_AUTO_TEST_CASE(FullPipeline)
{
diff --git a/tests/chunks/pipeline-interests-fixture.hpp b/tests/chunks/pipeline-interests-fixture.hpp
index 0d456c3..dc7eabd 100644
--- a/tests/chunks/pipeline-interests-fixture.hpp
+++ b/tests/chunks/pipeline-interests-fixture.hpp
@@ -47,7 +47,7 @@
void
setPipeline(unique_ptr<PipelineInterests> pline)
{
- pipeline = std::move(pline);
+ m_pipeline = std::move(pline);
}
shared_ptr<Data>
@@ -72,18 +72,20 @@
void
run(const Name& name, uint64_t version = 0)
{
- pipeline->run(Name(name).appendVersion(version),
- [] (const Data&) {},
- [this] (const std::string&) { hasFailed = true; });
+ m_pipeline->run(Name(name).appendVersion(version),
+ [] (const Data&) {},
+ [this] (const std::string&) { hasFailed = true; });
}
protected:
boost::asio::io_service io;
util::DummyClientFace face{io};
- unique_ptr<PipelineInterests> pipeline;
Name name{"/ndn/chunks/test"};
uint64_t nDataSegments = 0;
bool hasFailed = false;
+
+private:
+ unique_ptr<PipelineInterests> m_pipeline;
};
} // namespace tests
diff --git a/tests/chunks/aimd-rtt-estimator.t.cpp b/tests/chunks/rtt-estimator.t.cpp
similarity index 95%
rename from tests/chunks/aimd-rtt-estimator.t.cpp
rename to tests/chunks/rtt-estimator.t.cpp
index d254985..e2ff3fb 100644
--- a/tests/chunks/aimd-rtt-estimator.t.cpp
+++ b/tests/chunks/rtt-estimator.t.cpp
@@ -1,6 +1,6 @@
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
- * Copyright (c) 2016-2018, Regents of the University of California,
+ * Copyright (c) 2016-2019, Regents of the University of California,
* Colorado State University,
* University Pierre & Marie Curie, Sorbonne University.
*
@@ -24,13 +24,11 @@
* @author Chavoosh Ghasemi
*/
-#include "tools/chunks/catchunks/aimd-rtt-estimator.hpp"
-
+#include "tools/chunks/catchunks/rtt-estimator.hpp"
#include "tests/test-common.hpp"
namespace ndn {
namespace chunks {
-namespace aimd {
namespace tests {
class RttEstimatorFixture
@@ -61,7 +59,7 @@
};
BOOST_AUTO_TEST_SUITE(Chunks)
-BOOST_FIXTURE_TEST_SUITE(TestAimdRttEstimator, RttEstimatorFixture)
+BOOST_FIXTURE_TEST_SUITE(TestRttEstimator, RttEstimatorFixture)
BOOST_AUTO_TEST_CASE(MinAvgMaxRtt)
{
@@ -170,10 +168,9 @@
BOOST_CHECK_CLOSE(rttEstimator.m_rto.count(), 4000, 0.1);
}
-BOOST_AUTO_TEST_SUITE_END() // TestAimdRttEstimator
+BOOST_AUTO_TEST_SUITE_END() // TestRttEstimator
BOOST_AUTO_TEST_SUITE_END() // Chunks
} // namespace tests
-} // namespace aimd
} // namespace chunks
} // namespace ndn
diff --git a/tools/chunks/catchunks/ndncatchunks.cpp b/tools/chunks/catchunks/ndncatchunks.cpp
index 92d5401..1405b23 100644
--- a/tools/chunks/catchunks/ndncatchunks.cpp
+++ b/tools/chunks/catchunks/ndncatchunks.cpp
@@ -29,14 +29,14 @@
* @author Chavoosh Ghasemi
*/
-#include "aimd-statistics-collector.hpp"
-#include "aimd-rtt-estimator.hpp"
#include "consumer.hpp"
#include "discover-version-fixed.hpp"
#include "discover-version-realtime.hpp"
-#include "pipeline-interests-aimd.hpp"
-#include "pipeline-interests-fixed-window.hpp"
#include "options.hpp"
+#include "pipeline-interests-adaptive.hpp"
+#include "pipeline-interests-fixed.hpp"
+#include "rtt-estimator.hpp"
+#include "statistics-collector.hpp"
#include "core/version.hpp"
#include <fstream>
@@ -93,44 +93,41 @@
"size of the Interest pipeline")
;
- po::options_description aimdPipeDesc("AIMD pipeline options");
- aimdPipeDesc.add_options()
- ("aimd-debug-cwnd", po::value<std::string>(&cwndPath),
- "log file for AIMD cwnd statistics")
- ("aimd-debug-rtt", po::value<std::string>(&rttPath),
- "log file for AIMD rtt statistics")
- ("aimd-disable-cwa", po::bool_switch(&disableCwa),
- "disable Conservative Window Adaptation, "
- "i.e. reduce window on each congestion event (timeout or congestion mark) "
- "instead of at most once per RTT")
- ("aimd-ignore-cong-marks", po::bool_switch(&ignoreCongMarks),
- "disable reaction to congestion marks, "
- "the default is to decrease the window after receiving a congestion mark")
- ("aimd-reset-cwnd-to-init", po::bool_switch(&resetCwndToInit),
- "reset cwnd to initial cwnd when loss event occurs, default is "
- "resetting to ssthresh")
- ("aimd-initial-cwnd", po::value<int>(&initCwnd)->default_value(initCwnd),
- "initial cwnd")
- ("aimd-initial-ssthresh", po::value<int>(&initSsthresh),
- "initial slow start threshold (defaults to infinity)")
- ("aimd-aistep", po::value<double>(&aiStep)->default_value(aiStep),
- "additive-increase step")
- ("aimd-mdcoef", po::value<double>(&mdCoef)->default_value(mdCoef),
- "multiplicative-decrease coefficient")
- ("aimd-rto-alpha", po::value<double>(&alpha)->default_value(alpha),
- "alpha value for rto calculation")
- ("aimd-rto-beta", po::value<double>(&beta)->default_value(beta),
- "beta value for rto calculation")
- ("aimd-rto-k", po::value<int>(&k)->default_value(k),
- "k value for rto calculation")
- ("aimd-rto-min", po::value<double>(&minRto)->default_value(minRto),
- "min rto value in milliseconds")
- ("aimd-rto-max", po::value<double>(&maxRto)->default_value(maxRto),
- "max rto value in milliseconds")
+ po::options_description adaptivePipeDesc("Adaptive pipeline options (AIMD)");
+ adaptivePipeDesc.add_options()
+ ("log-cwnd", po::value<std::string>(&cwndPath), "log file for cwnd statistics")
+ ("log-rtt", po::value<std::string>(&rttPath), "log file for rtt statistics")
+ ("disable-cwa", po::bool_switch(&disableCwa),
+ "disable Conservative Window Adaptation, "
+ "i.e. reduce window on each congestion event (timeout or congestion mark) "
+ "instead of at most once per RTT")
+ ("ignore-marks", po::bool_switch(&ignoreCongMarks),
+ "ignore congestion marks, "
+ "the default is to decrease the window after receiving a congestion mark")
+ ("reset-cwnd-to-init", po::bool_switch(&resetCwndToInit),
+ "reset cwnd to initial value after loss/mark, default is "
+ "resetting to ssthresh")
+ ("init-cwnd", po::value<int>(&initCwnd)->default_value(initCwnd), "initial cwnd")
+ ("init-ssthresh", po::value<int>(&initSsthresh),
+ "initial slow start threshold (defaults to infinity)")
+ ("aistep", po::value<double>(&aiStep)->default_value(aiStep),
+ "additive-increase step")
+ ("mdcoef", po::value<double>(&mdCoef)->default_value(mdCoef),
+ "multiplicative-decrease coefficient")
+ ("rto-alpha", po::value<double>(&alpha)->default_value(alpha),
+ "alpha value for rto calculation")
+ ("rto-beta", po::value<double>(&beta)->default_value(beta),
+ "beta value for rto calculation")
+ ("rto-k", po::value<int>(&k)->default_value(k),
+ "k value for rto calculation")
+ ("min-rto", po::value<double>(&minRto)->default_value(minRto),
+ "minimum rto value in milliseconds")
+ ("max-rto", po::value<double>(&maxRto)->default_value(maxRto),
+ "maximum rto value in milliseconds")
;
po::options_description visibleDesc;
- visibleDesc.add(basicDesc).add(realDiscoveryDesc).add(fixedPipeDesc).add(aimdPipeDesc);
+ visibleDesc.add(basicDesc).add(realDiscoveryDesc).add(fixedPipeDesc).add(adaptivePipeDesc);
po::options_description hiddenDesc;
hiddenDesc.add_options()
@@ -224,28 +221,28 @@
}
unique_ptr<PipelineInterests> pipeline;
- unique_ptr<aimd::StatisticsCollector> statsCollector;
- unique_ptr<aimd::RttEstimator> rttEstimator;
+ unique_ptr<StatisticsCollector> statsCollector;
+ unique_ptr<RttEstimator> rttEstimator;
std::ofstream statsFileCwnd;
std::ofstream statsFileRtt;
if (pipelineType == "fixed") {
- PipelineInterestsFixedWindow::Options optionsPipeline(options);
+ PipelineInterestsFixed::Options optionsPipeline(options);
optionsPipeline.maxPipelineSize = maxPipelineSize;
- pipeline = make_unique<PipelineInterestsFixedWindow>(face, optionsPipeline);
+ pipeline = make_unique<PipelineInterestsFixed>(face, optionsPipeline);
}
else if (pipelineType == "aimd") {
- aimd::RttEstimator::Options optionsRttEst;
+ RttEstimator::Options optionsRttEst;
optionsRttEst.isVerbose = options.isVerbose;
optionsRttEst.alpha = alpha;
optionsRttEst.beta = beta;
optionsRttEst.k = k;
- optionsRttEst.minRto = aimd::Milliseconds(minRto);
- optionsRttEst.maxRto = aimd::Milliseconds(maxRto);
+ optionsRttEst.minRto = Milliseconds(minRto);
+ optionsRttEst.maxRto = Milliseconds(maxRto);
- rttEstimator = make_unique<aimd::RttEstimator>(optionsRttEst);
+ rttEstimator = make_unique<RttEstimator>(optionsRttEst);
- PipelineInterestsAimd::Options optionsPipeline(options);
+ PipelineInterestsAdaptive::Options optionsPipeline(options);
optionsPipeline.disableCwa = disableCwa;
optionsPipeline.resetCwndToInit = resetCwndToInit;
optionsPipeline.initCwnd = static_cast<double>(initCwnd);
@@ -254,7 +251,7 @@
optionsPipeline.mdCoef = mdCoef;
optionsPipeline.ignoreCongMarks = ignoreCongMarks;
- auto aimdPipeline = make_unique<PipelineInterestsAimd>(face, *rttEstimator, optionsPipeline);
+ auto adaptivePipeline = make_unique<PipelineInterestsAdaptive>(face, *rttEstimator, optionsPipeline);
if (!cwndPath.empty() || !rttPath.empty()) {
if (!cwndPath.empty()) {
@@ -271,11 +268,11 @@
return 4;
}
}
- statsCollector = make_unique<aimd::StatisticsCollector>(*aimdPipeline, *rttEstimator,
- statsFileCwnd, statsFileRtt);
+ statsCollector = make_unique<StatisticsCollector>(*adaptivePipeline, *rttEstimator,
+ statsFileCwnd, statsFileRtt);
}
- pipeline = std::move(aimdPipeline);
+ pipeline = std::move(adaptivePipeline);
}
else {
std::cerr << "ERROR: Interest pipeline type not valid" << std::endl;
diff --git a/tools/chunks/catchunks/pipeline-interests-aimd.cpp b/tools/chunks/catchunks/pipeline-interests-adaptive.cpp
similarity index 89%
rename from tools/chunks/catchunks/pipeline-interests-aimd.cpp
rename to tools/chunks/catchunks/pipeline-interests-adaptive.cpp
index b802e34..7e394ef 100644
--- a/tools/chunks/catchunks/pipeline-interests-aimd.cpp
+++ b/tools/chunks/catchunks/pipeline-interests-adaptive.cpp
@@ -23,22 +23,23 @@
* @author Shuo Yang
* @author Weiwei Liu
* @author Chavoosh Ghasemi
+ * @author Klaus Schneider
*/
-#include "pipeline-interests-aimd.hpp"
+#include "pipeline-interests-adaptive.hpp"
#include "data-fetcher.hpp"
#include <cmath>
#include <iomanip>
+
namespace ndn {
namespace chunks {
-namespace aimd {
-constexpr double PipelineInterestsAimd::MIN_SSTHRESH;
+constexpr double PipelineInterestsAdaptive::MIN_SSTHRESH;
-PipelineInterestsAimd::PipelineInterestsAimd(Face& face, RttEstimator& rttEstimator,
- const Options& options)
+PipelineInterestsAdaptive::PipelineInterestsAdaptive(Face& face, RttEstimator& rttEstimator,
+ const Options& options)
: PipelineInterests(face)
, m_options(options)
, m_rttEstimator(rttEstimator)
@@ -64,13 +65,13 @@
}
}
-PipelineInterestsAimd::~PipelineInterestsAimd()
+PipelineInterestsAdaptive::~PipelineInterestsAdaptive()
{
cancel();
}
void
-PipelineInterestsAimd::doRun()
+PipelineInterestsAdaptive::doRun()
{
if (allSegmentsReceived()) {
cancel();
@@ -87,14 +88,14 @@
}
void
-PipelineInterestsAimd::doCancel()
+PipelineInterestsAdaptive::doCancel()
{
m_checkRtoEvent.cancel();
m_segmentInfo.clear();
}
void
-PipelineInterestsAimd::checkRto()
+PipelineInterestsAdaptive::checkRto()
{
if (isStopping())
return;
@@ -123,7 +124,7 @@
}
void
-PipelineInterestsAimd::sendInterest(uint64_t segNo, bool isRetransmission)
+PipelineInterestsAdaptive::sendInterest(uint64_t segNo, bool isRetransmission)
{
if (isStopping())
return;
@@ -165,9 +166,9 @@
SegmentInfo& segInfo = m_segmentInfo[segNo];
segInfo.interestHdl = m_face.expressInterest(interest,
- bind(&PipelineInterestsAimd::handleData, this, _1, _2),
- bind(&PipelineInterestsAimd::handleNack, this, _1, _2),
- bind(&PipelineInterestsAimd::handleLifetimeExpiration, this, _1));
+ bind(&PipelineInterestsAdaptive::handleData, this, _1, _2),
+ bind(&PipelineInterestsAdaptive::handleNack, this, _1, _2),
+ bind(&PipelineInterestsAdaptive::handleLifetimeExpiration, this, _1));
segInfo.timeSent = time::steady_clock::now();
segInfo.rto = m_rttEstimator.getEstimatedRto();
@@ -185,7 +186,7 @@
}
void
-PipelineInterestsAimd::schedulePackets()
+PipelineInterestsAdaptive::schedulePackets()
{
BOOST_ASSERT(m_nInFlight >= 0);
auto availableWindowSize = static_cast<int64_t>(m_cwnd) - m_nInFlight;
@@ -209,7 +210,7 @@
}
void
-PipelineInterestsAimd::handleData(const Interest& interest, const Data& data)
+PipelineInterestsAdaptive::handleData(const Interest& interest, const Data& data)
{
if (isStopping())
return;
@@ -305,7 +306,7 @@
}
void
-PipelineInterestsAimd::handleNack(const Interest& interest, const lp::Nack& nack)
+PipelineInterestsAdaptive::handleNack(const Interest& interest, const lp::Nack& nack)
{
if (isStopping())
return;
@@ -334,7 +335,7 @@
}
void
-PipelineInterestsAimd::handleLifetimeExpiration(const Interest& interest)
+PipelineInterestsAdaptive::handleLifetimeExpiration(const Interest& interest)
{
if (isStopping())
return;
@@ -346,7 +347,7 @@
}
void
-PipelineInterestsAimd::recordTimeout()
+PipelineInterestsAdaptive::recordTimeout()
{
if (m_options.disableCwa || m_highData > m_recPoint) {
// react to only one timeout per RTT (conservative window adaptation)
@@ -364,7 +365,7 @@
}
void
-PipelineInterestsAimd::enqueueForRetransmission(uint64_t segNo)
+PipelineInterestsAdaptive::enqueueForRetransmission(uint64_t segNo)
{
BOOST_ASSERT(m_nInFlight > 0);
m_nInFlight--;
@@ -373,7 +374,7 @@
}
void
-PipelineInterestsAimd::handleFail(uint64_t segNo, const std::string& reason)
+PipelineInterestsAdaptive::handleFail(uint64_t segNo, const std::string& reason)
{
if (isStopping())
return;
@@ -399,7 +400,7 @@
}
void
-PipelineInterestsAimd::increaseWindow()
+PipelineInterestsAdaptive::increaseWindow()
{
if (m_cwnd < m_ssthresh) {
m_cwnd += m_options.aiStep; // additive increase
@@ -412,7 +413,7 @@
}
void
-PipelineInterestsAimd::decreaseWindow()
+PipelineInterestsAdaptive::decreaseWindow()
{
// please refer to RFC 5681, Section 3.1 for the rationale behind it
m_ssthresh = std::max(MIN_SSTHRESH, m_cwnd * m_options.mdCoef); // multiplicative decrease
@@ -422,7 +423,7 @@
}
void
-PipelineInterestsAimd::cancelInFlightSegmentsGreaterThan(uint64_t segNo)
+PipelineInterestsAdaptive::cancelInFlightSegmentsGreaterThan(uint64_t segNo)
{
for (auto it = m_segmentInfo.begin(); it != m_segmentInfo.end();) {
// cancel fetching all segments that follow
@@ -437,7 +438,7 @@
}
void
-PipelineInterestsAimd::printSummary() const
+PipelineInterestsAdaptive::printSummary() const
{
PipelineInterests::printSummary();
std::cerr << "Congestion marks: " << m_nCongMarks << " (caused " << m_nMarkDecr << " window decreases)\n"
@@ -477,9 +478,9 @@
}
std::ostream&
-operator<<(std::ostream& os, const PipelineInterestsAimdOptions& options)
+operator<<(std::ostream& os, const PipelineInterestsAdaptiveOptions& options)
{
- os << "AIMD pipeline parameters:\n"
+ os << "Adaptive pipeline parameters:\n"
<< "\tInitial congestion window size = " << options.initCwnd << "\n"
<< "\tInitial slow start threshold = " << options.initSsthresh << "\n"
<< "\tAdditive increase step = " << options.aiStep << "\n"
@@ -493,6 +494,5 @@
return os;
}
-} // namespace aimd
} // namespace chunks
} // namespace ndn
diff --git a/tools/chunks/catchunks/pipeline-interests-aimd.hpp b/tools/chunks/catchunks/pipeline-interests-adaptive.hpp
similarity index 84%
rename from tools/chunks/catchunks/pipeline-interests-aimd.hpp
rename to tools/chunks/catchunks/pipeline-interests-adaptive.hpp
index 4ef21ac..b65a2c3 100644
--- a/tools/chunks/catchunks/pipeline-interests-aimd.hpp
+++ b/tools/chunks/catchunks/pipeline-interests-adaptive.hpp
@@ -23,13 +23,14 @@
* @author Shuo Yang
* @author Weiwei Liu
* @author Chavoosh Ghasemi
+ * @author Klaus Schneider
*/
-#ifndef NDN_TOOLS_CHUNKS_CATCHUNKS_PIPELINE_INTERESTS_AIMD_HPP
-#define NDN_TOOLS_CHUNKS_CATCHUNKS_PIPELINE_INTERESTS_AIMD_HPP
+#ifndef NDN_TOOLS_CHUNKS_CATCHUNKS_PIPELINE_INTERESTS_ADAPTIVE_HPP
+#define NDN_TOOLS_CHUNKS_CATCHUNKS_PIPELINE_INTERESTS_ADAPTIVE_HPP
#include "options.hpp"
-#include "aimd-rtt-estimator.hpp"
+#include "rtt-estimator.hpp"
#include "pipeline-interests.hpp"
#include <queue>
@@ -37,13 +38,12 @@
namespace ndn {
namespace chunks {
-namespace aimd {
-class PipelineInterestsAimdOptions : public Options
+class PipelineInterestsAdaptiveOptions : public Options
{
public:
explicit
- PipelineInterestsAimdOptions(const Options& options = Options())
+ PipelineInterestsAdaptiveOptions(const Options& options = Options())
: Options(options)
{
}
@@ -85,7 +85,7 @@
/**
* @brief Service for retrieving Data via an Interest pipeline
*
- * Retrieves all segmented Data under the specified prefix by maintaining a dynamic AIMD
+ * Retrieves all segmented Data under the specified prefix by maintaining a dynamic
* congestion window combined with a Conservative Loss Adaptation algorithm. For details,
* please refer to the description in section "Interest pipeline types in ndncatchunks" of
* tools/chunks/README.md
@@ -93,22 +93,22 @@
* Provides retrieved Data on arrival with no ordering guarantees. Data is delivered to the
* PipelineInterests' user via callback immediately upon arrival.
*/
-class PipelineInterestsAimd : public PipelineInterests
+class PipelineInterestsAdaptive : public PipelineInterests
{
public:
- typedef PipelineInterestsAimdOptions Options;
+ typedef PipelineInterestsAdaptiveOptions Options;
public:
/**
- * @brief create a PipelineInterestsAimd service
+ * @brief create a PipelineInterestsAdaptive service
*
* Configures the pipelining service without specifying the retrieval namespace. After this
* configuration the method run must be called to start the Pipeline.
*/
- PipelineInterestsAimd(Face& face, RttEstimator& rttEstimator,
- const Options& options = Options());
+ PipelineInterestsAdaptive(Face& face, RttEstimator& rttEstimator,
+ const Options& options = Options());
- ~PipelineInterestsAimd() final;
+ ~PipelineInterestsAdaptive() final;
/**
* @brief Signals when cwnd changes
@@ -116,14 +116,15 @@
* The callback function should be: void(Milliseconds age, double cwnd) where age is the
* duration since pipeline starts, and cwnd is the new congestion window size (in segments).
*/
- signal::Signal<PipelineInterestsAimd, Milliseconds, double> afterCwndChange;
+ signal::Signal<PipelineInterestsAdaptive, Milliseconds, double> afterCwndChange;
private:
/**
* @brief fetch all the segments between 0 and lastSegment of the specified prefix
*
- * Starts the pipeline with an AIMD algorithm to control the window size. The pipeline will
- * fetch every segment until the last segment is successfully received or an error occurs.
+ * Starts the pipeline with an adaptive window algorithm to control the window size.
+ * The pipeline will fetch every segment until the last segment is successfully received
+ * or an error occurs.
*/
void
doRun() final;
@@ -169,13 +170,13 @@
handleFail(uint64_t segNo, const std::string& reason);
/**
- * @brief increase congestion window size based on AIMD scheme
+ * @brief increase congestion window size
*/
void
increaseWindow();
/**
- * @brief decrease congestion window size based on AIMD scheme
+ * @brief decrease congestion window size
*/
void
decreaseWindow();
@@ -225,13 +226,10 @@
};
std::ostream&
-operator<<(std::ostream& os, const PipelineInterestsAimdOptions& options);
+operator<<(std::ostream& os, const PipelineInterestsAdaptiveOptions& options);
-} // namespace aimd
-
-using aimd::PipelineInterestsAimd;
} // namespace chunks
} // namespace ndn
-#endif // NDN_TOOLS_CHUNKS_CATCHUNKS_PIPELINE_INTERESTS_AIMD_HPP
+#endif // NDN_TOOLS_CHUNKS_CATCHUNKS_PIPELINE_INTERESTS_ADAPTIVE_HPP
diff --git a/tools/chunks/catchunks/pipeline-interests-fixed-window.cpp b/tools/chunks/catchunks/pipeline-interests-fixed.cpp
similarity index 87%
rename from tools/chunks/catchunks/pipeline-interests-fixed-window.cpp
rename to tools/chunks/catchunks/pipeline-interests-fixed.cpp
index 1794310..6df6d27 100644
--- a/tools/chunks/catchunks/pipeline-interests-fixed-window.cpp
+++ b/tools/chunks/catchunks/pipeline-interests-fixed.cpp
@@ -1,6 +1,6 @@
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
- * Copyright (c) 2016-2018, Regents of the University of California,
+ * Copyright (c) 2016-2019, Regents of the University of California,
* Colorado State University,
* University Pierre & Marie Curie, Sorbonne University.
*
@@ -27,13 +27,13 @@
* @author Chavoosh Ghasemi
*/
-#include "pipeline-interests-fixed-window.hpp"
+#include "pipeline-interests-fixed.hpp"
#include "data-fetcher.hpp"
namespace ndn {
namespace chunks {
-PipelineInterestsFixedWindow::PipelineInterestsFixedWindow(Face& face, const Options& options)
+PipelineInterestsFixed::PipelineInterestsFixed(Face& face, const Options& options)
: PipelineInterests(face)
, m_options(options)
, m_hasFailure(false)
@@ -41,13 +41,13 @@
m_segmentFetchers.resize(m_options.maxPipelineSize);
}
-PipelineInterestsFixedWindow::~PipelineInterestsFixedWindow()
+PipelineInterestsFixed::~PipelineInterestsFixed()
{
cancel();
}
void
-PipelineInterestsFixedWindow::doRun()
+PipelineInterestsFixed::doRun()
{
// if the FinalBlockId is unknown, this could potentially request non-existent segments
for (size_t nRequestedSegments = 0;
@@ -60,7 +60,7 @@
}
bool
-PipelineInterestsFixedWindow::fetchNextSegment(std::size_t pipeNo)
+PipelineInterestsFixed::fetchNextSegment(std::size_t pipeNo)
{
if (isStopping())
return false;
@@ -86,9 +86,9 @@
auto fetcher = DataFetcher::fetch(m_face, interest,
m_options.maxRetriesOnTimeoutOrNack,
m_options.maxRetriesOnTimeoutOrNack,
- bind(&PipelineInterestsFixedWindow::handleData, this, _1, _2, pipeNo),
- bind(&PipelineInterestsFixedWindow::handleFail, this, _2, pipeNo),
- bind(&PipelineInterestsFixedWindow::handleFail, this, _2, pipeNo),
+ bind(&PipelineInterestsFixed::handleData, this, _1, _2, pipeNo),
+ bind(&PipelineInterestsFixed::handleFail, this, _2, pipeNo),
+ bind(&PipelineInterestsFixed::handleFail, this, _2, pipeNo),
m_options.isVerbose);
BOOST_ASSERT(!m_segmentFetchers[pipeNo].first || !m_segmentFetchers[pipeNo].first->isRunning());
@@ -98,7 +98,7 @@
}
void
-PipelineInterestsFixedWindow::doCancel()
+PipelineInterestsFixed::doCancel()
{
for (auto& fetcher : m_segmentFetchers) {
if (fetcher.first)
@@ -109,7 +109,7 @@
}
void
-PipelineInterestsFixedWindow::handleData(const Interest& interest, const Data& data, size_t pipeNo)
+PipelineInterestsFixed::handleData(const Interest& interest, const Data& data, size_t pipeNo)
{
if (isStopping())
return;
@@ -150,7 +150,7 @@
}
}
-void PipelineInterestsFixedWindow::handleFail(const std::string& reason, std::size_t pipeNo)
+void PipelineInterestsFixed::handleFail(const std::string& reason, std::size_t pipeNo)
{
if (isStopping())
return;
diff --git a/tools/chunks/catchunks/pipeline-interests-fixed-window.hpp b/tools/chunks/catchunks/pipeline-interests-fixed.hpp
similarity index 81%
rename from tools/chunks/catchunks/pipeline-interests-fixed-window.hpp
rename to tools/chunks/catchunks/pipeline-interests-fixed.hpp
index 22f1a4b..d7fd95d 100644
--- a/tools/chunks/catchunks/pipeline-interests-fixed-window.hpp
+++ b/tools/chunks/catchunks/pipeline-interests-fixed.hpp
@@ -1,6 +1,6 @@
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
- * Copyright (c) 2016-2017, Regents of the University of California,
+ * Copyright (c) 2016-2019, Regents of the University of California,
* Colorado State University,
* University Pierre & Marie Curie, Sorbonne University.
*
@@ -27,8 +27,8 @@
* @author Chavoosh Ghasemi
*/
-#ifndef NDN_TOOLS_CHUNKS_CATCHUNKS_PIPELINE_INTERESTS_FIXED_WINDOW_HPP
-#define NDN_TOOLS_CHUNKS_CATCHUNKS_PIPELINE_INTERESTS_FIXED_WINDOW_HPP
+#ifndef NDN_TOOLS_CHUNKS_CATCHUNKS_PIPELINE_INTERESTS_FIXED_HPP
+#define NDN_TOOLS_CHUNKS_CATCHUNKS_PIPELINE_INTERESTS_FIXED_HPP
#include "options.hpp"
#include "pipeline-interests.hpp"
@@ -38,11 +38,11 @@
class DataFetcher;
-class PipelineInterestsFixedWindowOptions : public Options
+class PipelineInterestsFixedOptions : public Options
{
public:
explicit
- PipelineInterestsFixedWindowOptions(const Options& options = Options())
+ PipelineInterestsFixedOptions(const Options& options = Options())
: Options(options)
, maxPipelineSize(1)
{
@@ -62,22 +62,22 @@
* No guarantees are made as to the order in which segments are fetched or callbacks are invoked,
* i.e. out-of-order delivery is possible.
*/
-class PipelineInterestsFixedWindow : public PipelineInterests
+class PipelineInterestsFixed : public PipelineInterests
{
public:
- typedef PipelineInterestsFixedWindowOptions Options;
+ typedef PipelineInterestsFixedOptions Options;
public:
/**
- * @brief create a PipelineInterestsFixedWindow service
+ * @brief create a PipelineInterestsFixed service
*
* Configures the pipelining service without specifying the retrieval namespace. After this
* configuration the method run must be called to start the Pipeline.
*/
explicit
- PipelineInterestsFixedWindow(Face& face, const Options& options = Options());
+ PipelineInterestsFixed(Face& face, const Options& options = Options());
- ~PipelineInterestsFixedWindow() final;
+ ~PipelineInterestsFixed() final;
private:
/**
@@ -120,4 +120,4 @@
} // namespace chunks
} // namespace ndn
-#endif // NDN_TOOLS_CHUNKS_CATCHUNKS_PIPELINE_INTERESTS_FIXED_WINDOW_HPP
+#endif // NDN_TOOLS_CHUNKS_CATCHUNKS_PIPELINE_INTERESTS_FIXED_HPP
diff --git a/tools/chunks/catchunks/aimd-rtt-estimator.cpp b/tools/chunks/catchunks/rtt-estimator.cpp
similarity index 95%
rename from tools/chunks/catchunks/aimd-rtt-estimator.cpp
rename to tools/chunks/catchunks/rtt-estimator.cpp
index 2e1852f..fd3a592 100644
--- a/tools/chunks/catchunks/aimd-rtt-estimator.cpp
+++ b/tools/chunks/catchunks/rtt-estimator.cpp
@@ -1,6 +1,6 @@
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
- * Copyright (c) 2016-2018, Arizona Board of Regents.
+ * Copyright (c) 2016-2019, Arizona Board of Regents.
*
* This file is part of ndn-tools (Named Data Networking Essential Tools).
* See AUTHORS.md for complete list of ndn-tools authors and contributors.
@@ -23,13 +23,12 @@
* @author Chavoosh Ghasemi
*/
-#include "aimd-rtt-estimator.hpp"
+#include "rtt-estimator.hpp"
#include <cmath>
namespace ndn {
namespace chunks {
-namespace aimd {
RttEstimator::RttEstimator(const Options& options)
: m_options(options)
@@ -93,6 +92,5 @@
return os;
}
-} // namespace aimd
} // namespace chunks
} // namespace ndn
diff --git a/tools/chunks/catchunks/aimd-rtt-estimator.hpp b/tools/chunks/catchunks/rtt-estimator.hpp
similarity index 93%
rename from tools/chunks/catchunks/aimd-rtt-estimator.hpp
rename to tools/chunks/catchunks/rtt-estimator.hpp
index d7d1aca..6f10326 100644
--- a/tools/chunks/catchunks/aimd-rtt-estimator.hpp
+++ b/tools/chunks/catchunks/rtt-estimator.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018, Arizona Board of Regents.
+ * Copyright (c) 2016-2019, Arizona Board of Regents.
*
* This file is part of ndn-tools (Named Data Networking Essential Tools).
* See AUTHORS.md for complete list of ndn-tools authors and contributors.
@@ -22,14 +22,13 @@
* @author Chavoosh Ghasemi
*/
-#ifndef NDN_TOOLS_CHUNKS_CATCHUNKS_AIMD_RTT_ESTIMATOR_HPP
-#define NDN_TOOLS_CHUNKS_CATCHUNKS_AIMD_RTT_ESTIMATOR_HPP
+#ifndef NDN_TOOLS_CHUNKS_CATCHUNKS_RTT_ESTIMATOR_HPP
+#define NDN_TOOLS_CHUNKS_CATCHUNKS_RTT_ESTIMATOR_HPP
#include "core/common.hpp"
namespace ndn {
namespace chunks {
-namespace aimd {
typedef time::duration<double, time::milliseconds::period> Milliseconds;
@@ -161,8 +160,7 @@
std::ostream&
operator<<(std::ostream& os, const RttEstimator::Options& options);
-} // namespace aimd
} // namespace chunks
} // namespace ndn
-#endif // NDN_TOOLS_CHUNKS_CATCHUNKS_AIMD_RTT_ESTIMATOR_HPP
+#endif // NDN_TOOLS_CHUNKS_CATCHUNKS_RTT_ESTIMATOR_HPP
diff --git a/tools/chunks/catchunks/aimd-statistics-collector.cpp b/tools/chunks/catchunks/statistics-collector.cpp
similarity index 81%
rename from tools/chunks/catchunks/aimd-statistics-collector.cpp
rename to tools/chunks/catchunks/statistics-collector.cpp
index 9462b4f..44c342c 100644
--- a/tools/chunks/catchunks/aimd-statistics-collector.cpp
+++ b/tools/chunks/catchunks/statistics-collector.cpp
@@ -1,7 +1,7 @@
-/**
- * Copyright (c) 2016, Regents of the University of California,
- * Colorado State University,
- * University Pierre & Marie Curie, Sorbonne University.
+/*
+ * Copyright (c) 2016-2019, Regents of the University of California,
+ * Colorado State University,
+ * University Pierre & Marie Curie, Sorbonne University.
*
* This file is part of ndn-tools (Named Data Networking Essential Tools).
* See AUTHORS.md for complete list of ndn-tools authors and contributors.
@@ -22,13 +22,12 @@
* @author Weiwei Liu
*/
-#include "aimd-statistics-collector.hpp"
+#include "statistics-collector.hpp"
namespace ndn {
namespace chunks {
-namespace aimd {
-StatisticsCollector::StatisticsCollector(PipelineInterestsAimd& pipeline, RttEstimator& rttEstimator,
+StatisticsCollector::StatisticsCollector(PipelineInterestsAdaptive& pipeline, RttEstimator& rttEstimator,
std::ostream& osCwnd, std::ostream& osRtt)
: m_osCwnd(osCwnd)
, m_osRtt(osRtt)
@@ -49,6 +48,5 @@
});
}
-} // namespace aimd
} // namespace chunks
} // namespace ndn
\ No newline at end of file
diff --git a/tools/chunks/catchunks/aimd-statistics-collector.hpp b/tools/chunks/catchunks/statistics-collector.hpp
similarity index 65%
rename from tools/chunks/catchunks/aimd-statistics-collector.hpp
rename to tools/chunks/catchunks/statistics-collector.hpp
index 3e11a38..cd71018 100644
--- a/tools/chunks/catchunks/aimd-statistics-collector.hpp
+++ b/tools/chunks/catchunks/statistics-collector.hpp
@@ -1,7 +1,7 @@
-/**
- * Copyright (c) 2016, Regents of the University of California,
- * Colorado State University,
- * University Pierre & Marie Curie, Sorbonne University.
+/*
+ * Copyright (c) 2016-2019, Regents of the University of California,
+ * Colorado State University,
+ * University Pierre & Marie Curie, Sorbonne University.
*
* This file is part of ndn-tools (Named Data Networking Essential Tools).
* See AUTHORS.md for complete list of ndn-tools authors and contributors.
@@ -22,23 +22,22 @@
* @author Weiwei Liu
*/
-#ifndef NDN_TOOLS_CHUNKS_CATCHUNKS_AIMD_STATISTICS_COLLECTOR_HPP
-#define NDN_TOOLS_CHUNKS_CATCHUNKS_AIMD_STATISTICS_COLLECTOR_HPP
+#ifndef NDN_TOOLS_CHUNKS_CATCHUNKS_STATISTICS_COLLECTOR_HPP
+#define NDN_TOOLS_CHUNKS_CATCHUNKS_STATISTICS_COLLECTOR_HPP
-#include "pipeline-interests-aimd.hpp"
-#include "aimd-rtt-estimator.hpp"
+#include "pipeline-interests-adaptive.hpp"
+#include "rtt-estimator.hpp"
namespace ndn {
namespace chunks {
-namespace aimd {
/**
- * @brief Statistics collector for AIMD pipeline
+ * @brief Statistics collector for Adaptive pipelines
*/
class StatisticsCollector : noncopyable
{
public:
- StatisticsCollector(PipelineInterestsAimd& pipeline, RttEstimator& rttEstimator,
+ StatisticsCollector(PipelineInterestsAdaptive& pipeline, RttEstimator& rttEstimator,
std::ostream& osCwnd, std::ostream& osRtt);
private:
@@ -46,8 +45,7 @@
std::ostream& m_osRtt;
};
-} // namespace aimd
} // namespace chunks
} // namespace ndn
-#endif // NDN_TOOLS_CHUNKS_CATCHUNKS_AIMD_STATISTICS_COLLECTOR_HPP
+#endif // NDN_TOOLS_CHUNKS_CATCHUNKS_STATISTICS_COLLECTOR_HPP