blob: 0e6341e91b307c38f7b287bee8d45137f8c604aa [file] [log] [blame]
Weiwei Liu245d7912016-07-28 00:04:25 -07001/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
Davide Pesaventoe9c69852017-11-04 18:08:37 -04002/*
Chavoosh Ghasemi5cb67012019-02-15 09:56:57 -08003 * Copyright (c) 2016-2019, Regents of the University of California,
Davide Pesavento958896e2017-01-19 00:52:04 -05004 * Colorado State University,
5 * University Pierre & Marie Curie, Sorbonne University.
Weiwei Liu245d7912016-07-28 00:04:25 -07006 *
7 * This file is part of ndn-tools (Named Data Networking Essential Tools).
8 * See AUTHORS.md for complete list of ndn-tools authors and contributors.
9 *
10 * ndn-tools is free software: you can redistribute it and/or modify it under the terms
11 * of the GNU General Public License as published by the Free Software Foundation,
12 * either version 3 of the License, or (at your option) any later version.
13 *
14 * ndn-tools is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
15 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
16 * PURPOSE. See the GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * ndn-tools, e.g., in COPYING.md file. If not, see <http://www.gnu.org/licenses/>.
20 *
21 * See AUTHORS.md for complete list of ndn-cxx authors and contributors.
22 *
23 * @author Weiwei Liu
Chavoosh Ghasemi641f5932017-11-06 22:45:11 +000024 * @author Chavoosh Ghasemi
Weiwei Liu245d7912016-07-28 00:04:25 -070025 */
26
27#include "tools/chunks/catchunks/pipeline-interests-aimd.hpp"
28#include "tools/chunks/catchunks/options.hpp"
29
30#include "pipeline-interests-fixture.hpp"
31
32namespace ndn {
33namespace chunks {
34namespace aimd {
35namespace tests {
36
37using namespace ndn::tests;
38
// Fixture that wires a PipelineInterestsAimd into the generic
// PipelineInterestsFixture, keeping a typed pointer to the pipeline so the
// tests can inspect its internals (m_cwnd, m_ssthresh, m_retxQueue, ...).
class PipelineInterestAimdFixture : public chunks::tests::PipelineInterestsFixture
{
public:
  PipelineInterestAimdFixture()
    : opt(makePipelineOptions())
    , rttEstimator(makeRttEstimatorOptions())
  {
    createPipeline();
  }

  // (Re)creates the AIMD pipeline from the current contents of `opt`.
  // Called again by tests that tweak options first (e.g. disableCwa,
  // ignoreCongMarks) before running the transfer.
  void
  createPipeline()
  {
    auto pline = make_unique<PipelineInterestsAimd>(face, rttEstimator, opt);
    aimdPipeline = pline.get(); // non-owning observer; ownership moves to the base fixture
    setPipeline(std::move(pline));
  }

private:
  // Default pipeline options shared by all tests in this suite:
  // CWA enabled, congestion marks honored, cwnd starts at 1 and the
  // slow-start threshold is effectively unbounded unless a test overrides it.
  static PipelineInterestsAimd::Options
  makePipelineOptions()
  {
    PipelineInterestsAimd::Options pipelineOptions;
    pipelineOptions.isQuiet = true;
    pipelineOptions.isVerbose = false;
    pipelineOptions.disableCwa = false;
    pipelineOptions.ignoreCongMarks = false;
    pipelineOptions.resetCwndToInit = false;
    pipelineOptions.initCwnd = 1.0;
    pipelineOptions.aiStep = 1.0;
    pipelineOptions.mdCoef = 0.5;
    pipelineOptions.initSsthresh = std::numeric_limits<int>::max();
    return pipelineOptions;
  }

  // RTT estimator options; minRto=200ms governs the timeout intervals the
  // Timeout/SegmentInfoMaintenance tests rely on.
  static RttEstimator::Options
  makeRttEstimatorOptions()
  {
    RttEstimator::Options rttOptions;
    rttOptions.alpha = 0.125;
    rttOptions.beta = 0.25;
    rttOptions.k = 4;
    rttOptions.minRto = Milliseconds(200);
    rttOptions.maxRto = Milliseconds(4000);
    return rttOptions;
  }

protected:
  PipelineInterestsAimd::Options opt; // some tests mutate this, then call createPipeline()
  RttEstimator rttEstimator;
  PipelineInterestsAimd* aimdPipeline; // non-owning; valid while the base fixture holds the pipeline
  static constexpr double MARGIN = 0.01; // percentage tolerance for BOOST_*_CLOSE
};

// out-of-line definition needed when MARGIN is odr-used (pre-C++17 rules)
constexpr double PipelineInterestAimdFixture::MARGIN;
94
Weiwei Liu245d7912016-07-28 00:04:25 -070095BOOST_AUTO_TEST_SUITE(Chunks)
96BOOST_FIXTURE_TEST_SUITE(TestPipelineInterestsAimd, PipelineInterestAimdFixture)
97
98BOOST_AUTO_TEST_CASE(SlowStart)
99{
100 nDataSegments = 4;
101 aimdPipeline->m_ssthresh = 8.0;
Chavoosh Ghasemi641f5932017-11-06 22:45:11 +0000102 BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);
Weiwei Liu245d7912016-07-28 00:04:25 -0700103
104 double preCwnd = aimdPipeline->m_cwnd;
Chavoosh Ghasemi5cb67012019-02-15 09:56:57 -0800105 run(name);
Weiwei Liu245d7912016-07-28 00:04:25 -0700106 advanceClocks(io, time::nanoseconds(1));
Davide Pesaventoe9c69852017-11-04 18:08:37 -0400107 BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);
Weiwei Liu245d7912016-07-28 00:04:25 -0700108
Chavoosh Ghasemi5cb67012019-02-15 09:56:57 -0800109 for (uint64_t i = 0; i < nDataSegments - 1; ++i) {
Weiwei Liu245d7912016-07-28 00:04:25 -0700110 face.receive(*makeDataWithSegment(i));
111 advanceClocks(io, time::nanoseconds(1));
Chavoosh Ghasemi641f5932017-11-06 22:45:11 +0000112 BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd - preCwnd, 1, MARGIN);
Weiwei Liu245d7912016-07-28 00:04:25 -0700113 preCwnd = aimdPipeline->m_cwnd;
114 }
Davide Pesaventoe9c69852017-11-04 18:08:37 -0400115
116 BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments - 1);
Weiwei Liu245d7912016-07-28 00:04:25 -0700117}
118
BOOST_AUTO_TEST_CASE(CongestionAvoidance)
{
  // After cwnd crosses m_ssthresh, growth switches from +1 per segment
  // (slow start) to additive increase of aiStep/floor(cwnd) per segment.
  nDataSegments = 7;
  aimdPipeline->m_ssthresh = 4.0; // leave slow start after ~4 segments
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  double preCwnd = aimdPipeline->m_cwnd;
  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  for (uint64_t i = 0; i < aimdPipeline->m_ssthresh; ++i) { // slow start
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
    preCwnd = aimdPipeline->m_cwnd;
  }

  // 1 -> 2 -> 3 -> 4, then one congestion-avoidance increment of 1/4
  BOOST_CHECK_CLOSE(preCwnd, 4.25, MARGIN);

  for (uint64_t i = aimdPipeline->m_ssthresh; i < nDataSegments - 1; ++i) { // congestion avoidance
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
    BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd - preCwnd, opt.aiStep / floor(aimdPipeline->m_cwnd), MARGIN);
    preCwnd = aimdPipeline->m_cwnd;
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments - 1);
}
147
BOOST_AUTO_TEST_CASE(Timeout)
{
  // A retransmission timeout must halve the window (mdCoef = 0.5), queue the
  // lost segment for retransmission, and then resume congestion avoidance.
  nDataSegments = 8;
  aimdPipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segment 0, 1, and 2
  for (uint64_t i = 0; i < 3; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 3);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4, MARGIN);
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 7); // request for segment 7 has been sent

  // let some time pass, but not enough for segment 3 to time out yet
  advanceClocks(io, time::milliseconds(100));

  // receive segment 4 (segment 3 deliberately withheld)
  face.receive(*makeDataWithSegment(4));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 5
  face.receive(*makeDataWithSegment(5));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.5, MARGIN);
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all the segment requests have been sent

  // timeout segment 3 (total elapsed time now exceeds the RTO)
  advanceClocks(io, time::milliseconds(150));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.25, MARGIN); // window size drop to 1/2 of previous size
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 1);

  // receive segment 6, retransmit 3
  face.receive(*makeDataWithSegment(6));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.75, MARGIN); // congestion avoidance
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[3], 1);
}
198
BOOST_AUTO_TEST_CASE(CongestionMarksWithCwa)
{
  // With conservative window adaptation (CWA) enabled, only the first
  // congestion mark per RTT should shrink the window; a second mark in the
  // same window leaves cwnd untouched, and no retransmissions are triggered.
  nDataSegments = 7;
  aimdPipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segments 0 to 4
  for (uint64_t i = 0; i < 5; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.5, MARGIN);

  // receive segment 5 with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(5));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.25, MARGIN); // window size drops to 1/2 of previous size
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all interests have been sent

  // receive the last segment with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(nDataSegments - 1));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.25, MARGIN); // conservative window adaption (window size should not decrease)
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);

  // make sure no interest is retransmitted for marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[5], 0);
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[nDataSegments - 1], 0);

  // check number of received marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_nCongMarks, 2);
}
241
BOOST_AUTO_TEST_CASE(CongestionMarksWithoutCwa)
{
  // With CWA disabled, every congestion mark shrinks the window, so the
  // second mark pushes cwnd all the way down to MIN_SSTHRESH.
  opt.disableCwa = true;
  createPipeline(); // rebuild the pipeline with the modified options

  nDataSegments = 7;
  aimdPipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segments 0 to 4
  for (uint64_t i = 0; i < 5; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.5, MARGIN);

  // receive segment 5 with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(5));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.25, MARGIN); // window size drops to 1/2 of previous size
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all interests have been sent

  // receive the last segment with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(nDataSegments - 1));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, PipelineInterestsAimd::MIN_SSTHRESH,
                    MARGIN); // window size should decrease, as cwa is disabled
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);

  // make sure no interest is retransmitted for marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[5], 0);
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[nDataSegments - 1], 0);

  // check number of received marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_nCongMarks, 2);
}
288
BOOST_AUTO_TEST_CASE(IgnoreCongestionMarks)
{
  // With ignoreCongMarks set, marked Data packets are counted but must not
  // shrink the window: cwnd keeps growing as if no congestion occurred.
  opt.ignoreCongMarks = true;
  createPipeline(); // rebuild the pipeline with the modified options

  nDataSegments = 7;
  aimdPipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  run(name);
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segments 0 to 5
  for (uint64_t i = 0; i < 6; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.75, MARGIN);
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments); // all interests have been sent

  // receive the last segment with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(nDataSegments - 1));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 5.0, MARGIN); // window size increases
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);

  // make sure no interest is retransmitted for marked data packet
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[nDataSegments - 1], 0);

  // check number of received marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_nCongMarks, 1);
}
326
BOOST_AUTO_TEST_CASE(Nack)
{
  // Nack handling: DUPLICATE is ignored, CONGESTION causes a retransmission,
  // and any other reason (e.g. NONE) fails the whole transfer.
  nDataSegments = 5;
  aimdPipeline->m_cwnd = 10.0; // large window so all interests go out immediately
  run(name);
  advanceClocks(io, time::nanoseconds(1));

  face.receive(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));

  face.receive(*makeDataWithSegment(1));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
  BOOST_REQUIRE_EQUAL(face.sentInterests.size(), 10);

  // receive a nack with NackReason::DUPLICATE for segment 1
  auto nack1 = makeNack(face.sentInterests[1], lp::NackReason::DUPLICATE);
  face.receive(nack1);
  advanceClocks(io, time::nanoseconds(1));

  // nack1 is ignored
  BOOST_CHECK_EQUAL(hasFailed, false);
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);

  // receive a nack with NackReason::CONGESTION for segment 2
  auto nack2 = makeNack(face.sentInterests[2], lp::NackReason::CONGESTION);
  face.receive(nack2);
  advanceClocks(io, time::nanoseconds(1));

  // segment 2 is retransmitted
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[2], 1);

  // receive a nack with NackReason::NONE for segment 3
  auto nack3 = makeNack(face.sentInterests[3], lp::NackReason::NONE);
  face.receive(nack3);
  advanceClocks(io, time::nanoseconds(1));

  // Other types of Nack will trigger a failure
  BOOST_CHECK_EQUAL(hasFailed, true);
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
}
370
BOOST_AUTO_TEST_CASE(FinalBlockIdNotSetAtBeginning)
{
  // While the FinalBlockId is unknown, the pipeline keeps requesting segments
  // past the (eventual) end; once a segment carrying the FinalBlockId arrives,
  // interests beyond the final segment must be cancelled.
  nDataSegments = 4;
  aimdPipeline->m_cwnd = 4;
  run(name);
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 0 without FinalBlockId
  face.receive(*makeDataWithSegment(0, false));
  advanceClocks(io, time::nanoseconds(1));

  // interests for segment 0 - 5 have been sent
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 6);
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 1);
  BOOST_CHECK_EQUAL(aimdPipeline->m_hasFinalBlockId, false);
  // pending interests: segment 1, 2, 3, 4, 5
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 5);

  // receive segment 1 with FinalBlockId
  face.receive(*makeDataWithSegment(1));
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
  BOOST_CHECK_EQUAL(aimdPipeline->m_hasFinalBlockId, true);

  // pending interests for segment 1, 4, 5 have been removed
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 2);
}
398
BOOST_AUTO_TEST_CASE(FailureBeforeFinalBlockIdReceived)
{
  // failed to retrieve segNo while the FinalBlockId has not yet been
  // set, and later received a FinalBlockId >= segNo, i.e. segNo is
  // part of the content. The failure must then be reported.

  nDataSegments = 4;
  aimdPipeline->m_cwnd = 4;
  run(name);
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 0 without FinalBlockId
  face.receive(*makeDataWithSegment(0, false));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 1 without FinalBlockId
  face.receive(*makeDataWithSegment(1, false));
  advanceClocks(io, time::nanoseconds(1));

  // interests for segment 0 - 7 have been sent
  BOOST_REQUIRE_EQUAL(face.sentInterests.size(), 8);

  // receive nack with NackReason::NONE for segment 3
  auto nack = makeNack(face.sentInterests[3], lp::NackReason::NONE);
  face.receive(nack);
  advanceClocks(io, time::nanoseconds(1));

  // error not triggered yet, since the FinalBlockId is still unknown;
  // pending interests for segment > 3 have been removed
  BOOST_CHECK_EQUAL(hasFailed, false);
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 1);

  // receive segment 2 with FinalBlockId
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));

  // error triggered since segment 3 is part of the content
  BOOST_CHECK_EQUAL(hasFailed, true);
}
438
BOOST_AUTO_TEST_CASE(SpuriousFailureBeforeFinalBlockIdReceived)
{
  // failed to retrieve segNo while the FinalBlockId has not yet been
  // set, and later received a FinalBlockId < segNo, i.e. segNo is
  // not part of the content, and it was actually a spurious failure
  // that must NOT abort the transfer.

  nDataSegments = 4;
  aimdPipeline->m_cwnd = 4;
  run(name);
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 0 without FinalBlockId
  face.receive(*makeDataWithSegment(0, false));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 1 without FinalBlockId
  face.receive(*makeDataWithSegment(1, false));
  advanceClocks(io, time::nanoseconds(1));

  // interests for segment 0 - 7 have been sent
  BOOST_REQUIRE_EQUAL(face.sentInterests.size(), 8);

  // receive nack with NackReason::NONE for segment 4 (beyond the final segment)
  auto nack = makeNack(face.sentInterests[4], lp::NackReason::NONE);
  face.receive(nack);
  advanceClocks(io, time::nanoseconds(1));

  // error not triggered
  // pending interests for segment > 3 have been removed
  BOOST_CHECK_EQUAL(hasFailed, false);
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 2);

  // receive segment 2 with FinalBlockId
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));

  // timeout segment 3
  advanceClocks(io, time::seconds(1));

  // segment 3 is retransmitted
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[3], 1);

  // receive segment 3
  face.receive(*makeDataWithSegment(3));
  advanceClocks(io, time::nanoseconds(1));

  // the transfer completes without error
  BOOST_CHECK_EQUAL(hasFailed, false);
}
487
BOOST_AUTO_TEST_CASE(SegmentInfoMaintenance)
{
  // test that m_segmentInfo is properly maintained when
  // a segment is received after two consecutive timeouts:
  // the entry must be erased on first receipt, and a duplicate receipt
  // must not perturb the interest counters or the RTT estimator

  nDataSegments = 3;

  run(name);
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 0
  face.receive(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 1
  face.receive(*makeDataWithSegment(1));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(face.sentInterests.size(), 3);

  // check if segment 2's state is FirstTimeSent
  auto it = aimdPipeline->m_segmentInfo.find(2);
  BOOST_REQUIRE(it != aimdPipeline->m_segmentInfo.end());
  BOOST_CHECK(it->second.state == SegmentState::FirstTimeSent);

  // timeout segment 2 twice
  advanceClocks(io, time::milliseconds(400), 3);

  BOOST_CHECK_EQUAL(face.sentInterests.size(), 5); // 3 originals + 2 retransmissions

  // check if segment 2's state is Retransmitted
  it = aimdPipeline->m_segmentInfo.find(2);
  BOOST_REQUIRE(it != aimdPipeline->m_segmentInfo.end());
  BOOST_CHECK(it->second.state == SegmentState::Retransmitted);

  // check if segment 2 was retransmitted twice
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount.at(2), 2);

  // receive segment 2 the first time
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));

  // check if segment 2 was erased from m_segmentInfo
  it = aimdPipeline->m_segmentInfo.find(2);
  BOOST_CHECK(it == aimdPipeline->m_segmentInfo.end());

  auto prevRtt = rttEstimator.getAvgRtt();
  auto prevRto = rttEstimator.getEstimatedRto();

  // receive segment 2 the second time (duplicate)
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));

  // nothing changed
  it = aimdPipeline->m_segmentInfo.find(2);
  BOOST_CHECK(it == aimdPipeline->m_segmentInfo.end());
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 5);
  BOOST_CHECK_EQUAL(rttEstimator.getAvgRtt(), prevRtt);
  BOOST_CHECK_EQUAL(rttEstimator.getEstimatedRto(), prevRto);
}
548
Chavoosh Ghasemi75309ae2018-03-26 14:46:24 -0400549BOOST_AUTO_TEST_CASE(PrintSummaryWithNoRttMeasurements)
550{
551 // test the console ouptut when no RTT measurement is available,
552 // to make sure a proper message will be printed out
553
554 std::stringstream ss;
555
556 // change the underlying buffer and save the old buffer
557 auto oldBuf = std::cerr.rdbuf(ss.rdbuf());
558
559 aimdPipeline->printSummary();
560 std::string line;
561
562 bool found = false;
563 while (std::getline(ss, line)) {
564 if (line == "RTT stats unavailable") {
565 found = true;
566 break;
567 }
568 }
569 BOOST_CHECK(found);
570 std::cerr.rdbuf(oldBuf); // reset
571}
572
BOOST_AUTO_TEST_CASE(StopsWhenFileSizeLessThanChunkSize)
{
  // test to see if the program doesn't hang,
  // when transfer is complete, for files less than the chunk size
  // (i.e. when only one segment is sent/received)

  // NOTE(review): the fixture constructor already called createPipeline();
  // presumably repeated here for explicitness — confirm it is intentional
  createPipeline();
  nDataSegments = 1;

  run(name);
  advanceClocks(io, time::nanoseconds(1));

  // the single segment carries the FinalBlockId, completing the transfer
  face.receive(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(aimdPipeline->m_hasFinalBlockId, true);
  BOOST_CHECK_EQUAL(aimdPipeline->m_segmentInfo.size(), 0); // no in-flight segments remain
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 0);
}
592
593
Weiwei Liu245d7912016-07-28 00:04:25 -0700594BOOST_AUTO_TEST_SUITE_END() // TestPipelineInterestsAimd
595BOOST_AUTO_TEST_SUITE_END() // Chunks
596
597} // namespace tests
598} // namespace aimd
599} // namespace chunks
600} // namespace ndn