/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
 * Copyright (c) 2016-2019, Regents of the University of California,
 *                          Colorado State University,
 *                          University Pierre & Marie Curie, Sorbonne University.
 *
 * This file is part of ndn-tools (Named Data Networking Essential Tools).
 * See AUTHORS.md for complete list of ndn-tools authors and contributors.
 *
 * ndn-tools is free software: you can redistribute it and/or modify it under the terms
 * of the GNU General Public License as published by the Free Software Foundation,
 * either version 3 of the License, or (at your option) any later version.
 *
 * ndn-tools is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * ndn-tools, e.g., in COPYING.md file. If not, see <http://www.gnu.org/licenses/>.
 *
 * See AUTHORS.md for complete list of ndn-cxx authors and contributors.
 *
 * @author Weiwei Liu
 * @author Chavoosh Ghasemi
 * @author Klaus Schneider
 */

#include "tools/chunks/catchunks/pipeline-interests-aimd.hpp"
#include "tools/chunks/catchunks/options.hpp"

#include "pipeline-interests-fixture.hpp"

namespace ndn {
namespace chunks {
namespace tests {

using namespace ndn::tests;

class PipelineInterestAimdFixture : public PipelineInterestsFixture
{
public:
  PipelineInterestAimdFixture()
    : opt(makePipelineOptions())
    , rttEstimator(makeRttEstimatorOptions())
  {
    createPipeline();
  }

  void
  createPipeline()
  {
    auto pline = make_unique<PipelineInterestsAimd>(face, rttEstimator, opt);
    pipeline = pline.get();
    setPipeline(std::move(pline));
  }

private:
  static PipelineInterestsAdaptive::Options
  makePipelineOptions()
  {
    PipelineInterestsAdaptive::Options pipelineOptions;
    pipelineOptions.isQuiet = true;
    pipelineOptions.isVerbose = false;
    pipelineOptions.disableCwa = false;
    pipelineOptions.ignoreCongMarks = false;
    pipelineOptions.resetCwndToInit = false;
    pipelineOptions.initCwnd = 1.0;
    pipelineOptions.aiStep = 1.0;
    pipelineOptions.mdCoef = 0.5;
    pipelineOptions.initSsthresh = std::numeric_limits<int>::max();
    return pipelineOptions;
  }

  static shared_ptr<RttEstimatorWithStats::Options>
  makeRttEstimatorOptions()
  {
    auto rttOptions = make_shared<RttEstimatorWithStats::Options>();
    rttOptions->alpha = 0.125;
    rttOptions->beta = 0.25;
    rttOptions->k = 4;
    rttOptions->initialRto = 1_s;
    rttOptions->minRto = 200_ms;
    rttOptions->maxRto = 4_s;
    rttOptions->rtoBackoffMultiplier = 2;
    return rttOptions;
  }

protected:
  PipelineInterestsAdaptive::Options opt;
  RttEstimatorWithStats rttEstimator;
  PipelineInterestsAdaptive* pipeline;
  static constexpr double MARGIN = 0.001;
};

constexpr double PipelineInterestAimdFixture::MARGIN;

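// A rough sketch of the window arithmetic exercised by the test cases below,
// based on the option values in makePipelineOptions() (initCwnd = 1.0,
// aiStep = 1.0, mdCoef = 0.5): while cwnd < ssthresh every received Data
// increases cwnd by aiStep (slow start); afterwards each Data adds
// aiStep / floor(cwnd) (congestion avoidance); a timeout or congestion mark
// roughly halves the window (cwnd * mdCoef, subject to a lower bound).
// The PipelineInterestsAimd implementation remains the authoritative reference.
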
BOOST_AUTO_TEST_SUITE(Chunks)
BOOST_FIXTURE_TEST_SUITE(TestPipelineInterestsAimd, PipelineInterestAimdFixture)

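// With ssthresh raised to 8 and only 4 segments to fetch, the transfer below
// is expected to stay in slow start throughout, so each received Data should
// grow the window by exactly aiStep (i.e., by 1).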
BOOST_AUTO_TEST_CASE(SlowStart)
{
  nDataSegments = 4;
  pipeline->m_ssthresh = 8.0;
  BOOST_REQUIRE_CLOSE(pipeline->m_cwnd, 1, MARGIN);

  double preCwnd = pipeline->m_cwnd;
  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  for (uint64_t i = 0; i < nDataSegments - 1; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
    BOOST_CHECK_CLOSE(pipeline->m_cwnd - preCwnd, 1, MARGIN);
    preCwnd = pipeline->m_cwnd;
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments - 1);
}

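// Illustrative trajectory for the checks below: starting from cwnd = 1 with
// ssthresh = 4, the first three Data packets arrive in slow start
// (1 -> 2 -> 3 -> 4); the fourth arrives with cwnd >= ssthresh, so congestion
// avoidance adds aiStep / floor(4) = 0.25, giving the expected 4.25.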
BOOST_AUTO_TEST_CASE(CongestionAvoidance)
{
  nDataSegments = 7;
  pipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(pipeline->m_cwnd, 1, MARGIN);

  double preCwnd = pipeline->m_cwnd;
  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  for (uint64_t i = 0; i < pipeline->m_ssthresh; ++i) { // slow start
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
    preCwnd = pipeline->m_cwnd;
  }

  BOOST_CHECK_CLOSE(preCwnd, 4.25, MARGIN);

  for (uint64_t i = pipeline->m_ssthresh; i < nDataSegments - 1; ++i) { // congestion avoidance
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
    BOOST_CHECK_CLOSE(pipeline->m_cwnd - preCwnd, opt.aiStep / floor(pipeline->m_cwnd), MARGIN);
    preCwnd = pipeline->m_cwnd;
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments - 1);
}

BOOST_AUTO_TEST_CASE(Timeout)
{
  nDataSegments = 8;
  pipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(pipeline->m_cwnd, 1, MARGIN);

  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segments 0, 1, and 2
  for (uint64_t i = 0; i < 3; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 3);
  BOOST_CHECK_CLOSE(pipeline->m_cwnd, 4, MARGIN);
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 7); // interests for segments 0 to 6 have been sent

  advanceClocks(io, time::milliseconds(100));

  // receive segment 4
  face.receive(*makeDataWithSegment(4));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 5
  face.receive(*makeDataWithSegment(5));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(pipeline->m_cwnd, 4.5, MARGIN);
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all the segment requests have been sent

  BOOST_CHECK_EQUAL(pipeline->m_nTimeouts, 0);
  BOOST_CHECK_EQUAL(pipeline->m_nLossDecr, 0);
  BOOST_CHECK_EQUAL(pipeline->m_nMarkDecr, 0);
  BOOST_CHECK_EQUAL(pipeline->m_nRetransmitted, 0);
  BOOST_CHECK_EQUAL(pipeline->m_nSkippedRetx, 0);
  BOOST_CHECK_EQUAL(pipeline->m_nCongMarks, 0);

  // timeout segments 3 and 6
  advanceClocks(io, time::milliseconds(150));
  BOOST_CHECK_EQUAL(pipeline->m_nTimeouts, 2);
  BOOST_CHECK_EQUAL(pipeline->m_nRetransmitted, 1);
  BOOST_CHECK_EQUAL(pipeline->m_nLossDecr, 1);
  BOOST_CHECK_EQUAL(pipeline->m_nSkippedRetx, 0);

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(pipeline->m_cwnd, 2.25, MARGIN); // window size drops to 1/2 of previous value
  BOOST_CHECK_EQUAL(pipeline->m_retxQueue.size(), 1);

  // receive segment 6, retransmit 3
  face.receive(*makeDataWithSegment(6));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(pipeline->m_cwnd, 2.75, MARGIN); // congestion avoidance
  BOOST_CHECK_EQUAL(pipeline->m_retxQueue.size(), 0);
  BOOST_CHECK_EQUAL(pipeline->m_retxCount[3], 1);

  BOOST_CHECK_EQUAL(pipeline->m_nTimeouts, 2);
  BOOST_CHECK_EQUAL(pipeline->m_nRetransmitted, 2);
  BOOST_CHECK_EQUAL(pipeline->m_nTimeouts,
                    pipeline->m_nRetransmitted + pipeline->m_nSkippedRetx);
}

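// Conservative window adaptation (CWA): roughly, the window reacts to at most
// one congestion event per RTT. Below, the mark on segment 5 should halve the
// window (4.5 * mdCoef = 2.25), while the mark on the last segment, arriving
// within the same adaptation period, is only counted and must not shrink the
// window again.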
BOOST_AUTO_TEST_CASE(CongestionMarksWithCwa)
{
  nDataSegments = 7;
  pipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(pipeline->m_cwnd, 1, MARGIN);

  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segments 0 to 4
  for (uint64_t i = 0; i < 5; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(pipeline->m_cwnd, 4.5, MARGIN);

  // receive segment 5 with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(5));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(pipeline->m_cwnd, 2.25, MARGIN); // window size drops to 1/2 of previous value
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all interests have been sent

  // receive the last segment with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(nDataSegments - 1));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
  BOOST_CHECK_CLOSE(pipeline->m_cwnd, 2.25, MARGIN); // conservative window adaptation (window size should not decrease)
  BOOST_CHECK_EQUAL(pipeline->m_retxQueue.size(), 0);

  // make sure no interest is retransmitted for marked data packets
  BOOST_CHECK_EQUAL(pipeline->m_retxCount[5], 0);
  BOOST_CHECK_EQUAL(pipeline->m_retxCount[nDataSegments - 1], 0);

  // check number of received marked data packets
  BOOST_CHECK_EQUAL(pipeline->m_nCongMarks, 2);
}

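// Same scenario as above, but with CWA disabled the second congestion mark
// should trigger another decrease; since 2.25 * mdCoef would fall below
// PipelineInterestsAdaptive::MIN_SSTHRESH, the window is expected to be
// clamped to that lower bound, as asserted below.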
BOOST_AUTO_TEST_CASE(CongestionMarksWithoutCwa)
{
  opt.disableCwa = true;
  createPipeline();

  nDataSegments = 7;
  pipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(pipeline->m_cwnd, 1, MARGIN);

  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segments 0 to 4
  for (uint64_t i = 0; i < 5; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(pipeline->m_cwnd, 4.5, MARGIN);

  // receive segment 5 with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(5));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(pipeline->m_cwnd, 2.25, MARGIN); // window size drops to 1/2 of previous value
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all interests have been sent

  // receive the last segment with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(nDataSegments - 1));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
  BOOST_CHECK_CLOSE(pipeline->m_cwnd, PipelineInterestsAdaptive::MIN_SSTHRESH,
                    MARGIN); // window size should decrease, as CWA is disabled
  BOOST_CHECK_EQUAL(pipeline->m_retxQueue.size(), 0);

  // make sure no interest is retransmitted for marked data packets
  BOOST_CHECK_EQUAL(pipeline->m_retxCount[5], 0);
  BOOST_CHECK_EQUAL(pipeline->m_retxCount[nDataSegments - 1], 0);

  // check number of received marked data packets
  BOOST_CHECK_EQUAL(pipeline->m_nCongMarks, 2);
}

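// With ignoreCongMarks set, marks are merely counted: the window keeps growing
// through 4.75 after six segments (1 -> 2 -> 3 -> 4 -> 4.25 -> 4.5 -> 4.75),
// and the marked final segment still adds aiStep / floor(4.75) = 0.25,
// reaching 5.0 instead of being cut.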
BOOST_AUTO_TEST_CASE(IgnoreCongestionMarks)
{
  opt.ignoreCongMarks = true;
  createPipeline();

  nDataSegments = 7;
  pipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(pipeline->m_cwnd, 1, MARGIN);

  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segments 0 to 5
  for (uint64_t i = 0; i < 6; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(pipeline->m_cwnd, 4.75, MARGIN);
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all interests have been sent

  // receive the last segment with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(nDataSegments - 1));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
  BOOST_CHECK_CLOSE(pipeline->m_cwnd, 5.0, MARGIN); // window size increases
  BOOST_CHECK_EQUAL(pipeline->m_retxQueue.size(), 0);

  // make sure no interest is retransmitted for the marked data packet
  BOOST_CHECK_EQUAL(pipeline->m_retxCount[nDataSegments - 1], 0);

  // check number of received marked data packets
  BOOST_CHECK_EQUAL(pipeline->m_nCongMarks, 1);
}

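// The window is widened to 10 before the run, so interests for segments 0-9
// go out immediately (the FinalBlockId is not yet known), which is why 10
// sent interests are required below. A DUPLICATE nack should be ignored, a
// CONGESTION nack should only cause a retransmission, and any other reason
// should fail the pipeline.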
BOOST_AUTO_TEST_CASE(Nack)
{
  nDataSegments = 5;
  pipeline->m_cwnd = 10.0;
  run(name);
  advanceClocks(io, time::nanoseconds(1));

  face.receive(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));

  face.receive(*makeDataWithSegment(1));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
  BOOST_REQUIRE_EQUAL(face.sentInterests.size(), 10);

  // receive a nack with NackReason::DUPLICATE for segment 1
  auto nack1 = makeNack(face.sentInterests[1], lp::NackReason::DUPLICATE);
  face.receive(nack1);
  advanceClocks(io, time::nanoseconds(1));

  // nack1 is ignored
  BOOST_CHECK_EQUAL(hasFailed, false);
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
  BOOST_CHECK_EQUAL(pipeline->m_retxQueue.size(), 0);

  // receive a nack with NackReason::CONGESTION for segment 2
  auto nack2 = makeNack(face.sentInterests[2], lp::NackReason::CONGESTION);
  face.receive(nack2);
  advanceClocks(io, time::nanoseconds(1));

  // segment 2 is retransmitted
  BOOST_CHECK_EQUAL(pipeline->m_retxCount[2], 1);

  // receive a nack with NackReason::NONE for segment 3
  auto nack3 = makeNack(face.sentInterests[3], lp::NackReason::NONE);
  face.receive(nack3);
  advanceClocks(io, time::nanoseconds(1));

  // other types of Nack will trigger a failure
  BOOST_CHECK_EQUAL(hasFailed, true);
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
}

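// With cwnd = 4 the run should send interests for segments 0-3 right away;
// receiving segment 0 (which carries no FinalBlockId) widens the window and
// triggers two more interests, for a total of 6. Once segment 1 arrives
// carrying FinalBlockId = 3, pending interests beyond the final segment are
// expected to be cancelled.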
BOOST_AUTO_TEST_CASE(FinalBlockIdNotSetAtBeginning)
{
  nDataSegments = 4;
  pipeline->m_cwnd = 4;
  run(name);
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 0 without FinalBlockId
  face.receive(*makeDataWithSegment(0, false));
  advanceClocks(io, time::nanoseconds(1));

  // interests for segments 0 to 5 have been sent
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 6);
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 1);
  BOOST_CHECK_EQUAL(pipeline->m_hasFinalBlockId, false);
  // pending interests: segments 1, 2, 3, 4, 5
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 5);

  // receive segment 1 with FinalBlockId
  face.receive(*makeDataWithSegment(1));
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
  BOOST_CHECK_EQUAL(pipeline->m_hasFinalBlockId, true);

  // pending interests for segments 1, 4, and 5 have been removed
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 2);
}

BOOST_AUTO_TEST_CASE(FailureBeforeFinalBlockIdReceived)
{
  // failed to retrieve segNo while the FinalBlockId has not yet been
  // set, and later received a FinalBlockId >= segNo, i.e. segNo is
  // part of the content

  nDataSegments = 4;
  pipeline->m_cwnd = 4;
  run(name);
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 0 without FinalBlockId
  face.receive(*makeDataWithSegment(0, false));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 1 without FinalBlockId
  face.receive(*makeDataWithSegment(1, false));
  advanceClocks(io, time::nanoseconds(1));

  // interests for segments 0 to 7 have been sent
  BOOST_REQUIRE_EQUAL(face.sentInterests.size(), 8);

  // receive nack with NackReason::NONE for segment 3
  auto nack = makeNack(face.sentInterests[3], lp::NackReason::NONE);
  face.receive(nack);
  advanceClocks(io, time::nanoseconds(1));

  // error not triggered
  // pending interests for segments > 3 have been removed
  BOOST_CHECK_EQUAL(hasFailed, false);
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 1);

  // receive segment 2 with FinalBlockId
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));

  // error triggered since segment 3 is part of the content
  BOOST_CHECK_EQUAL(hasFailed, true);
}

BOOST_AUTO_TEST_CASE(SpuriousFailureBeforeFinalBlockIdReceived)
{
  // failed to retrieve segNo while the FinalBlockId has not yet been
  // set, and later received a FinalBlockId < segNo, i.e. segNo is
  // not part of the content, and it was actually a spurious failure

  nDataSegments = 4;
  pipeline->m_cwnd = 4;
  run(name);
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 0 without FinalBlockId
  face.receive(*makeDataWithSegment(0, false));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 1 without FinalBlockId
  face.receive(*makeDataWithSegment(1, false));
  advanceClocks(io, time::nanoseconds(1));

  // interests for segments 0 to 7 have been sent
  BOOST_REQUIRE_EQUAL(face.sentInterests.size(), 8);

  // receive nack with NackReason::NONE for segment 4
  auto nack = makeNack(face.sentInterests[4], lp::NackReason::NONE);
  face.receive(nack);
  advanceClocks(io, time::nanoseconds(1));

  // error not triggered
  // pending interests for segments > 3 have been removed
  BOOST_CHECK_EQUAL(hasFailed, false);
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 2);

  // receive segment 2 with FinalBlockId
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));

  // timeout segment 3
  advanceClocks(io, time::seconds(1));

  // segment 3 is retransmitted
  BOOST_CHECK_EQUAL(pipeline->m_retxCount[3], 1);

  // receive segment 3
  face.receive(*makeDataWithSegment(3));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(hasFailed, false);
}

BOOST_AUTO_TEST_CASE(SegmentInfoMaintenance)
{
  // test that m_segmentInfo is properly maintained when
  // a segment is received after two consecutive timeouts

  nDataSegments = 3;

  run(name);
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 0
  face.receive(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 1
  face.receive(*makeDataWithSegment(1));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(face.sentInterests.size(), 3);

  // check if segment 2's state is FirstTimeSent
  auto it = pipeline->m_segmentInfo.find(2);
  BOOST_REQUIRE(it != pipeline->m_segmentInfo.end());
  BOOST_CHECK(it->second.state == SegmentState::FirstTimeSent);

  // timeout segment 2 twice
  advanceClocks(io, time::milliseconds(400), 3);

  BOOST_CHECK_EQUAL(face.sentInterests.size(), 5);

  // check if segment 2's state is Retransmitted
  it = pipeline->m_segmentInfo.find(2);
  BOOST_REQUIRE(it != pipeline->m_segmentInfo.end());
  BOOST_CHECK(it->second.state == SegmentState::Retransmitted);

  // check if segment 2 was retransmitted twice
  BOOST_CHECK_EQUAL(pipeline->m_retxCount.at(2), 2);

  // receive segment 2 the first time
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));

  // check if segment 2 was erased from m_segmentInfo
  it = pipeline->m_segmentInfo.find(2);
  BOOST_CHECK(it == pipeline->m_segmentInfo.end());

  auto prevRtt = rttEstimator.getAvgRtt();
  auto prevRto = rttEstimator.getEstimatedRto();

  // receive segment 2 the second time
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));

  // nothing changed
  it = pipeline->m_segmentInfo.find(2);
  BOOST_CHECK(it == pipeline->m_segmentInfo.end());
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 5);
  BOOST_CHECK_EQUAL(rttEstimator.getAvgRtt(), prevRtt);
  BOOST_CHECK_EQUAL(rttEstimator.getEstimatedRto(), prevRto);
}

BOOST_AUTO_TEST_CASE(PrintSummaryWithNoRttMeasurements)
{
  // test the console output when no RTT measurement is available,
  // to make sure a proper message will be printed out

  std::stringstream ss;

  // change the underlying buffer and save the old buffer
  auto oldBuf = std::cerr.rdbuf(ss.rdbuf());

  pipeline->printSummary();
  std::string line;

  bool found = false;
  while (std::getline(ss, line)) {
    if (line == "RTT stats unavailable") {
      found = true;
      break;
    }
  }
  BOOST_CHECK(found);
  std::cerr.rdbuf(oldBuf); // reset
}

BOOST_AUTO_TEST_CASE(StopsWhenFileSizeLessThanChunkSize)
{
  // test that the pipeline does not hang when the transfer completes
  // for a file smaller than the chunk size
  // (i.e., when only one segment is sent and received)

  createPipeline();
  nDataSegments = 1;

  run(name);
  advanceClocks(io, time::nanoseconds(1));

  face.receive(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_hasFinalBlockId, true);
  BOOST_CHECK_EQUAL(pipeline->m_segmentInfo.size(), 0);
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 0);
}

BOOST_AUTO_TEST_SUITE_END() // TestPipelineInterestsAimd
BOOST_AUTO_TEST_SUITE_END() // Chunks

} // namespace tests
} // namespace chunks
} // namespace ndn