/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
 * Copyright (c) 2016-2019, Regents of the University of California,
 *                          Colorado State University,
 *                          University Pierre & Marie Curie, Sorbonne University.
 *
 * This file is part of ndn-tools (Named Data Networking Essential Tools).
 * See AUTHORS.md for complete list of ndn-tools authors and contributors.
 *
 * ndn-tools is free software: you can redistribute it and/or modify it under the terms
 * of the GNU General Public License as published by the Free Software Foundation,
 * either version 3 of the License, or (at your option) any later version.
 *
 * ndn-tools is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * ndn-tools, e.g., in COPYING.md file. If not, see <http://www.gnu.org/licenses/>.
 *
 * See AUTHORS.md for complete list of ndn-cxx authors and contributors.
 *
 * @author Weiwei Liu
 * @author Chavoosh Ghasemi
 */

#include "tools/chunks/catchunks/pipeline-interests-aimd.hpp"
#include "tools/chunks/catchunks/options.hpp"

#include "pipeline-interests-fixture.hpp"

namespace ndn {
namespace chunks {
namespace aimd {
namespace tests {

using namespace ndn::tests;

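// fixture that connects a PipelineInterestsAimd to the shared PipelineInterestsFixture,
// using fixed pipeline and RTT estimator options so that the window arithmetic checked
// by the test cases below is deterministic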
class PipelineInterestAimdFixture : public chunks::tests::PipelineInterestsFixture
{
public:
  PipelineInterestAimdFixture()
    : opt(makePipelineOptions())
    , rttEstimator(makeRttEstimatorOptions())
  {
    createPipeline();
  }

  void
  createPipeline()
  {
    auto pline = make_unique<PipelineInterestsAimd>(face, rttEstimator, opt);
    aimdPipeline = pline.get();
    setPipeline(std::move(pline));
  }

private:
  static PipelineInterestsAimd::Options
  makePipelineOptions()
  {
    PipelineInterestsAimd::Options pipelineOptions;
    pipelineOptions.isQuiet = true;
    pipelineOptions.isVerbose = false;
    pipelineOptions.disableCwa = false;
    pipelineOptions.ignoreCongMarks = false;
    pipelineOptions.resetCwndToInit = false;
    pipelineOptions.initCwnd = 1.0;
    pipelineOptions.aiStep = 1.0;
    pipelineOptions.mdCoef = 0.5;
    pipelineOptions.initSsthresh = std::numeric_limits<int>::max();
    return pipelineOptions;
  }

  static RttEstimator::Options
  makeRttEstimatorOptions()
  {
    RttEstimator::Options rttOptions;
    rttOptions.alpha = 0.125;
    rttOptions.beta = 0.25;
    rttOptions.k = 4;
    rttOptions.minRto = Milliseconds(200);
    rttOptions.maxRto = Milliseconds(4000);
    return rttOptions;
  }

protected:
  PipelineInterestsAimd::Options opt;
  RttEstimator rttEstimator;
  PipelineInterestsAimd* aimdPipeline;
  static constexpr double MARGIN = 0.01;
};

constexpr double PipelineInterestAimdFixture::MARGIN;

BOOST_AUTO_TEST_SUITE(Chunks)
BOOST_FIXTURE_TEST_SUITE(TestPipelineInterestsAimd, PipelineInterestAimdFixture)

BOOST_AUTO_TEST_CASE(SlowStart)
{
  nDataSegments = 4;
  aimdPipeline->m_ssthresh = 8.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  double preCwnd = aimdPipeline->m_cwnd;
  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

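  // while cwnd is below ssthresh, each received segment should grow the window by 1 (slow start)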
  for (uint64_t i = 0; i < nDataSegments - 1; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
    BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd - preCwnd, 1, MARGIN);
    preCwnd = aimdPipeline->m_cwnd;
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments - 1);
}

BOOST_AUTO_TEST_CASE(CongestionAvoidance)
{
  nDataSegments = 7;
  aimdPipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  double preCwnd = aimdPipeline->m_cwnd;
  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  for (uint64_t i = 0; i < aimdPipeline->m_ssthresh; ++i) { // slow start
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
    preCwnd = aimdPipeline->m_cwnd;
  }

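  // slow start took cwnd from 1 to 4; the fourth segment arrived with cwnd >= ssthresh,
  // so congestion avoidance added aiStep / floor(cwnd) = 0.25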
  BOOST_CHECK_CLOSE(preCwnd, 4.25, MARGIN);

  for (uint64_t i = aimdPipeline->m_ssthresh; i < nDataSegments - 1; ++i) { // congestion avoidance
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
    BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd - preCwnd, opt.aiStep / floor(aimdPipeline->m_cwnd), MARGIN);
    preCwnd = aimdPipeline->m_cwnd;
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments - 1);
}

BOOST_AUTO_TEST_CASE(Timeout)
{
  nDataSegments = 8;
  aimdPipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segment 0, 1, and 2
  for (uint64_t i = 0; i < 3; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

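  // three segments received in slow start: cwnd went 1 -> 2 -> 3 -> 4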
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 3);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4, MARGIN);
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 7); // request for segment 7 has been sent

  advanceClocks(io, time::milliseconds(100));

  // receive segment 4
  face.receive(*makeDataWithSegment(4));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 5
  face.receive(*makeDataWithSegment(5));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.5, MARGIN);
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all the segment requests have been sent

  BOOST_CHECK_EQUAL(aimdPipeline->m_nTimeouts, 0);
  BOOST_CHECK_EQUAL(aimdPipeline->m_nLossDecr, 0);
  BOOST_CHECK_EQUAL(aimdPipeline->m_nMarkDecr, 0);
  BOOST_CHECK_EQUAL(aimdPipeline->m_nRetransmitted, 0);
  BOOST_CHECK_EQUAL(aimdPipeline->m_nSkippedRetx, 0);
  BOOST_CHECK_EQUAL(aimdPipeline->m_nCongMarks, 0);

  // timeout segment 3 & 6
  advanceClocks(io, time::milliseconds(150));
  BOOST_CHECK_EQUAL(aimdPipeline->m_nTimeouts, 2);
  BOOST_CHECK_EQUAL(aimdPipeline->m_nRetransmitted, 1);
  BOOST_CHECK_EQUAL(aimdPipeline->m_nLossDecr, 1);
  BOOST_CHECK_EQUAL(aimdPipeline->m_nSkippedRetx, 0);

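  // the two timeouts cause a single multiplicative decrease: cwnd = 4.5 * mdCoef = 2.25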
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.25, MARGIN); // window size drops to 1/2 of previous size
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 1);

  // receive segment 6, retransmit 3
  face.receive(*makeDataWithSegment(6));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.75, MARGIN); // congestion avoidance
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[3], 1);

  BOOST_CHECK_EQUAL(aimdPipeline->m_nTimeouts, 2);
  BOOST_CHECK_EQUAL(aimdPipeline->m_nRetransmitted, 2);
  BOOST_CHECK_EQUAL(aimdPipeline->m_nTimeouts,
                    aimdPipeline->m_nRetransmitted + aimdPipeline->m_nSkippedRetx);
}

BOOST_AUTO_TEST_CASE(CongestionMarksWithCwa)
{
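  // with conservative window adaptation enabled (disableCwa == false), a second congestion
  // mark received in the same window of data should not shrink cwnd again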
  nDataSegments = 7;
  aimdPipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segments 0 to 4
  for (uint64_t i = 0; i < 5; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.5, MARGIN);

  // receive segment 5 with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(5));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.25, MARGIN); // window size drops to 1/2 of previous size
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all interests have been sent

  // receive the last segment with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(nDataSegments - 1));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.25, MARGIN); // conservative window adaptation (window size should not decrease)
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);

  // make sure no interest is retransmitted for marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[5], 0);
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[nDataSegments - 1], 0);

  // check number of received marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_nCongMarks, 2);
}

BOOST_AUTO_TEST_CASE(CongestionMarksWithoutCwa)
{
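  // with CWA disabled, the second congestion mark should shrink the window again
  // (down to MIN_SSTHRESH here)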
  opt.disableCwa = true;
  createPipeline();

  nDataSegments = 7;
  aimdPipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segments 0 to 4
  for (uint64_t i = 0; i < 5; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.5, MARGIN);

  // receive segment 5 with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(5));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.25, MARGIN); // window size drops to 1/2 of previous size
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all interests have been sent

  // receive the last segment with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(nDataSegments - 1));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, PipelineInterestsAimd::MIN_SSTHRESH,
                    MARGIN); // window size should decrease, as cwa is disabled
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);

  // make sure no interest is retransmitted for marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[5], 0);
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[nDataSegments - 1], 0);

  // check number of received marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_nCongMarks, 2);
}

BOOST_AUTO_TEST_CASE(IgnoreCongestionMarks)
{
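  // with ignoreCongMarks set, marked Data packets are counted but should not shrink the window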
  opt.ignoreCongMarks = true;
  createPipeline();

  nDataSegments = 7;
  aimdPipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segments 0 to 5
  for (uint64_t i = 0; i < 6; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.75, MARGIN);
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all interests have been sent

  // receive the last segment with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(nDataSegments - 1));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 5.0, MARGIN); // window size increases
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);

  // make sure no interest is retransmitted for marked data packet
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[nDataSegments - 1], 0);

  // check number of received marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_nCongMarks, 1);
}

BOOST_AUTO_TEST_CASE(Nack)
{
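  // exercise the three Nack handling paths: DUPLICATE is ignored, CONGESTION triggers
  // a retransmission, and any other reason aborts the transfer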
  nDataSegments = 5;
  aimdPipeline->m_cwnd = 10.0;
  run(name);
  advanceClocks(io, time::nanoseconds(1));

  face.receive(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));

  face.receive(*makeDataWithSegment(1));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
  BOOST_REQUIRE_EQUAL(face.sentInterests.size(), 10);

  // receive a nack with NackReason::DUPLICATE for segment 1
  auto nack1 = makeNack(face.sentInterests[1], lp::NackReason::DUPLICATE);
  face.receive(nack1);
  advanceClocks(io, time::nanoseconds(1));

  // nack1 is ignored
  BOOST_CHECK_EQUAL(hasFailed, false);
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);

  // receive a nack with NackReason::CONGESTION for segment 2
  auto nack2 = makeNack(face.sentInterests[2], lp::NackReason::CONGESTION);
  face.receive(nack2);
  advanceClocks(io, time::nanoseconds(1));

  // segment 2 is retransmitted
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[2], 1);

  // receive a nack with NackReason::NONE for segment 3
  auto nack3 = makeNack(face.sentInterests[3], lp::NackReason::NONE);
  face.receive(nack3);
  advanceClocks(io, time::nanoseconds(1));

  // Other types of Nack will trigger a failure
  BOOST_CHECK_EQUAL(hasFailed, true);
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
}

BOOST_AUTO_TEST_CASE(FinalBlockIdNotSetAtBeginning)
{
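  // until a segment carrying a FinalBlockId arrives, the pipeline keeps requesting
  // segments past the end of the content; the excess interests are cancelled once
  // the last segment number becomes known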
  nDataSegments = 4;
  aimdPipeline->m_cwnd = 4;
  run(name);
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 0 without FinalBlockId
  face.receive(*makeDataWithSegment(0, false));
  advanceClocks(io, time::nanoseconds(1));

  // interests for segment 0 - 5 have been sent
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 6);
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 1);
  BOOST_CHECK_EQUAL(aimdPipeline->m_hasFinalBlockId, false);
  // pending interests: segment 1, 2, 3, 4, 5
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 5);

  // receive segment 1 with FinalBlockId
  face.receive(*makeDataWithSegment(1));
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
  BOOST_CHECK_EQUAL(aimdPipeline->m_hasFinalBlockId, true);

  // pending interests for segments 1, 4, 5 have been removed
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 2);
}

BOOST_AUTO_TEST_CASE(FailureBeforeFinalBlockIdReceived)
{
  // failed to retrieve segNo while the FinalBlockId has not yet been
  // set, and later received a FinalBlockId >= segNo, i.e. segNo is
  // part of the content.

  nDataSegments = 4;
  aimdPipeline->m_cwnd = 4;
  run(name);
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 0 without FinalBlockId
  face.receive(*makeDataWithSegment(0, false));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 1 without FinalBlockId
  face.receive(*makeDataWithSegment(1, false));
  advanceClocks(io, time::nanoseconds(1));

  // interests for segment 0 - 7 have been sent
  BOOST_REQUIRE_EQUAL(face.sentInterests.size(), 8);

  // receive nack with NackReason::NONE for segment 3
  auto nack = makeNack(face.sentInterests[3], lp::NackReason::NONE);
  face.receive(nack);
  advanceClocks(io, time::nanoseconds(1));

  // error not triggered
  // pending interests for segment > 3 have been removed
  BOOST_CHECK_EQUAL(hasFailed, false);
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 1);

  // receive segment 2 with FinalBlockId
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));

  // error triggered since segment 3 is part of the content
  BOOST_CHECK_EQUAL(hasFailed, true);
}

BOOST_AUTO_TEST_CASE(SpuriousFailureBeforeFinalBlockIdReceived)
{
  // failed to retrieve segNo while the FinalBlockId has not yet been
  // set, and later received a FinalBlockId < segNo, i.e. segNo is
  // not part of the content, and it was actually a spurious failure

  nDataSegments = 4;
  aimdPipeline->m_cwnd = 4;
  run(name);
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 0 without FinalBlockId
  face.receive(*makeDataWithSegment(0, false));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 1 without FinalBlockId
  face.receive(*makeDataWithSegment(1, false));
  advanceClocks(io, time::nanoseconds(1));

  // interests for segment 0 - 7 have been sent
  BOOST_REQUIRE_EQUAL(face.sentInterests.size(), 8);

  // receive nack with NackReason::NONE for segment 4
  auto nack = makeNack(face.sentInterests[4], lp::NackReason::NONE);
  face.receive(nack);
  advanceClocks(io, time::nanoseconds(1));

  // error not triggered
  // pending interests for segment > 3 have been removed
  BOOST_CHECK_EQUAL(hasFailed, false);
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 2);

  // receive segment 2 with FinalBlockId
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));

  // timeout segment 3
  advanceClocks(io, time::seconds(1));

  // segment 3 is retransmitted
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[3], 1);

  // receive segment 3
  face.receive(*makeDataWithSegment(3));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(hasFailed, false);
}

BOOST_AUTO_TEST_CASE(SegmentInfoMaintenance)
{
  // test that m_segmentInfo is properly maintained when
  // a segment is received after two consecutive timeouts

  nDataSegments = 3;

  run(name);
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 0
  face.receive(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 1
  face.receive(*makeDataWithSegment(1));
  advanceClocks(io, time::nanoseconds(1));

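  // interests for segments 0, 1, and 2 have been sent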
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 3);

  // check if segment 2's state is FirstTimeSent
  auto it = aimdPipeline->m_segmentInfo.find(2);
  BOOST_REQUIRE(it != aimdPipeline->m_segmentInfo.end());
  BOOST_CHECK(it->second.state == SegmentState::FirstTimeSent);

  // timeout segment 2 twice
  advanceClocks(io, time::milliseconds(400), 3);

  BOOST_CHECK_EQUAL(face.sentInterests.size(), 5);

  // check if segment 2's state is Retransmitted
  it = aimdPipeline->m_segmentInfo.find(2);
  BOOST_REQUIRE(it != aimdPipeline->m_segmentInfo.end());
  BOOST_CHECK(it->second.state == SegmentState::Retransmitted);

  // check if segment 2 was retransmitted twice
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount.at(2), 2);

  // receive segment 2 the first time
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));

  // check if segment 2 was erased from m_segmentInfo
  it = aimdPipeline->m_segmentInfo.find(2);
  BOOST_CHECK(it == aimdPipeline->m_segmentInfo.end());

  auto prevRtt = rttEstimator.getAvgRtt();
  auto prevRto = rttEstimator.getEstimatedRto();

  // receive segment 2 the second time
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));

  // nothing changed
  it = aimdPipeline->m_segmentInfo.find(2);
  BOOST_CHECK(it == aimdPipeline->m_segmentInfo.end());
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 5);
  BOOST_CHECK_EQUAL(rttEstimator.getAvgRtt(), prevRtt);
  BOOST_CHECK_EQUAL(rttEstimator.getEstimatedRto(), prevRto);
}

BOOST_AUTO_TEST_CASE(PrintSummaryWithNoRttMeasurements)
{
  // test the console output when no RTT measurement is available,
  // to make sure a proper message will be printed out

  std::stringstream ss;

  // change the underlying buffer and save the old buffer
  auto oldBuf = std::cerr.rdbuf(ss.rdbuf());

  aimdPipeline->printSummary();
  std::string line;

  bool found = false;
  while (std::getline(ss, line)) {
    if (line == "RTT stats unavailable") {
      found = true;
      break;
    }
  }
  BOOST_CHECK(found);
  std::cerr.rdbuf(oldBuf); // reset
}

BOOST_AUTO_TEST_CASE(StopsWhenFileSizeLessThanChunkSize)
{
  // test to see if the program doesn't hang,
  // when transfer is complete, for files less than the chunk size
  // (i.e. when only one segment is sent/received)

  createPipeline();
  nDataSegments = 1;

  run(name);
  advanceClocks(io, time::nanoseconds(1));

  face.receive(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(aimdPipeline->m_hasFinalBlockId, true);
  BOOST_CHECK_EQUAL(aimdPipeline->m_segmentInfo.size(), 0);
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 0);
}

BOOST_AUTO_TEST_SUITE_END() // TestPipelineInterestsAimd
BOOST_AUTO_TEST_SUITE_END() // Chunks

} // namespace tests
} // namespace aimd
} // namespace chunks
} // namespace ndn