/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
 * Copyright (c) 2016-2018, Regents of the University of California,
 *                          Colorado State University,
 *                          University Pierre & Marie Curie, Sorbonne University.
 *
 * This file is part of ndn-tools (Named Data Networking Essential Tools).
 * See AUTHORS.md for complete list of ndn-tools authors and contributors.
 *
 * ndn-tools is free software: you can redistribute it and/or modify it under the terms
 * of the GNU General Public License as published by the Free Software Foundation,
 * either version 3 of the License, or (at your option) any later version.
 *
 * ndn-tools is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * ndn-tools, e.g., in COPYING.md file. If not, see <http://www.gnu.org/licenses/>.
 *
 * See AUTHORS.md for complete list of ndn-cxx authors and contributors.
 *
 * @author Weiwei Liu
 * @author Chavoosh Ghasemi
 */

#include "tools/chunks/catchunks/pipeline-interests-aimd.hpp"
#include "tools/chunks/catchunks/options.hpp"

#include "pipeline-interests-fixture.hpp"

namespace ndn {
namespace chunks {
namespace aimd {
namespace tests {

using namespace ndn::tests;

class PipelineInterestAimdFixture : public chunks::tests::PipelineInterestsFixture
{
public:
  PipelineInterestAimdFixture()
    : opt(makePipelineOptions())
    , rttEstimator(makeRttEstimatorOptions())
  {
    createPipeline();
  }

  void
  createPipeline()
  {
    auto pline = make_unique<PipelineInterestsAimd>(face, rttEstimator, opt);
    aimdPipeline = pline.get();
    setPipeline(std::move(pline));
  }

private:
  static PipelineInterestsAimd::Options
  makePipelineOptions()
  {
    PipelineInterestsAimd::Options pipelineOptions;
    pipelineOptions.isQuiet = true;
    pipelineOptions.isVerbose = false;
    pipelineOptions.disableCwa = false;
    pipelineOptions.ignoreCongMarks = false;
    pipelineOptions.resetCwndToInit = false;
    pipelineOptions.initCwnd = 1.0;
    pipelineOptions.aiStep = 1.0;
    pipelineOptions.mdCoef = 0.5;
    pipelineOptions.initSsthresh = std::numeric_limits<int>::max();
    return pipelineOptions;
  }

  static RttEstimator::Options
  makeRttEstimatorOptions()
  {
    RttEstimator::Options rttOptions;
    rttOptions.alpha = 0.125;
    rttOptions.beta = 0.25;
    rttOptions.k = 4;
    rttOptions.minRto = Milliseconds(200);
    rttOptions.maxRto = Milliseconds(4000);
    return rttOptions;
  }

protected:
  PipelineInterestsAimd::Options opt;
  RttEstimator rttEstimator;
  PipelineInterestsAimd* aimdPipeline;
  static constexpr double MARGIN = 0.01;
};

constexpr double PipelineInterestAimdFixture::MARGIN;

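// Expected window dynamics, derived from makePipelineOptions() above (initCwnd = 1.0,
// aiStep = 1.0, mdCoef = 0.5): while cwnd < ssthresh (slow start) every received segment
// increases cwnd by 1; afterwards (congestion avoidance) each received segment adds
// aiStep / floor(cwnd); a timeout or congestion mark multiplies cwnd by mdCoef (subject
// to a lower bound, see CongestionMarksWithoutCwa). The numeric values asserted in the
// test cases below follow from these rules.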
BOOST_AUTO_TEST_SUITE(Chunks)
BOOST_FIXTURE_TEST_SUITE(TestPipelineInterestsAimd, PipelineInterestAimdFixture)

BOOST_AUTO_TEST_CASE(SlowStart)
{
  nDataSegments = 4;
  aimdPipeline->m_ssthresh = 8.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  double preCwnd = aimdPipeline->m_cwnd;
  runWithData(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

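  // in slow start (cwnd < ssthresh), each received segment is expected to grow cwnd by aiStep (1.0)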
  for (uint64_t i = 1; i < nDataSegments - 1; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
    BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd - preCwnd, 1, MARGIN);
    preCwnd = aimdPipeline->m_cwnd;
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments - 1);
}

BOOST_AUTO_TEST_CASE(CongestionAvoidance)
{
  nDataSegments = 8;
  aimdPipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  double preCwnd = aimdPipeline->m_cwnd;
  runWithData(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  for (uint64_t i = 1; i < aimdPipeline->m_ssthresh; ++i) { // slow start
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
    preCwnd = aimdPipeline->m_cwnd;
  }

  BOOST_CHECK_CLOSE(preCwnd, aimdPipeline->m_ssthresh, MARGIN);

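  // beyond ssthresh, each received segment is expected to add aiStep / floor(cwnd) to the window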
  for (uint64_t i = aimdPipeline->m_ssthresh; i < nDataSegments - 1; ++i) { // congestion avoidance
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
    BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd - preCwnd, opt.aiStep / floor(aimdPipeline->m_cwnd), MARGIN);
    preCwnd = aimdPipeline->m_cwnd;
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments - 1);
}

BOOST_AUTO_TEST_CASE(Timeout)
{
  nDataSegments = 8;
  aimdPipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  runWithData(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segment 1 and segment 2
  for (uint64_t i = 1; i < 3; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 3);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 3, MARGIN);
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 5); // request for segment 5 has been sent

  advanceClocks(io, time::milliseconds(100));

  // receive segment 4
  face.receive(*makeDataWithSegment(4));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 5
  face.receive(*makeDataWithSegment(5));
  advanceClocks(io, time::nanoseconds(1));

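  // slow start brought cwnd to ssthresh (4.0) upon receiving segment 4;
  // segment 5 was then handled in congestion avoidance: 4 + 1/4 = 4.25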
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.25, MARGIN);
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments - 1); // all the segment requests have been sent

  // timeout segment 3
  advanceClocks(io, time::milliseconds(150));

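  // the timeout triggers a multiplicative decrease: 4.25 * mdCoef (0.5) = 2.125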
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.125, MARGIN); // window size drops to 1/2 of previous size
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 1);

  // receive segment 6, retransmit 3
  face.receive(*makeDataWithSegment(6));
  advanceClocks(io, time::nanoseconds(1));

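  // congestion avoidance resumes: 2.125 + aiStep / floor(2.125) = 2.625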
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.625, MARGIN); // congestion avoidance
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[3], 1);
}

BOOST_AUTO_TEST_CASE(CongestionMarksWithCwa)
{
  nDataSegments = 7;
  aimdPipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  runWithData(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segments 1 to 4
  for (uint64_t i = 1; i < 5; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

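  // slow start on segments 1-3 brings cwnd to ssthresh (4.0); segment 4 adds 1/4 in congestion avoidance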
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.25, MARGIN);

  // receive segment 5 with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(5));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.125, MARGIN); // window size drops to 1/2 of previous size
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments - 1); // all interests have been sent

  // receive the last segment with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(nDataSegments - 1));
  advanceClocks(io, time::nanoseconds(1));

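  // with conservative window adaptation the pipeline reacts to at most one congestion event
  // per window of data, so this second mark (falling in the same window) should not halve cwnd again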
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.125, MARGIN); // conservative window adaptation (window size should not decrease)
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);

  // make sure no interest is retransmitted for marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[5], 0);
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[nDataSegments - 1], 0);

  // check number of received marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_nCongMarks, 2);
}

BOOST_AUTO_TEST_CASE(CongestionMarksWithoutCwa)
{
  opt.disableCwa = true;
  createPipeline();

  nDataSegments = 7;
  aimdPipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  runWithData(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segments 1 to 4
  for (uint64_t i = 1; i < 5; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 5);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.25, MARGIN);

  // receive segment 5 with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(5));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 2.125, MARGIN); // window size drops to 1/2 of previous size
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments - 1); // all interests have been sent

  // receive the last segment with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(nDataSegments - 1));
  advanceClocks(io, time::nanoseconds(1));

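  // with CWA disabled, the second mark triggers another decrease; halving 2.125 would fall
  // below the minimum, so the window is presumably clamped to PipelineInterestsAimd::MIN_SSTHRESH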
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, PipelineInterestsAimd::MIN_SSTHRESH,
                    MARGIN); // window size should decrease, as cwa is disabled
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);

  // make sure no interest is retransmitted for marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[5], 0);
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[nDataSegments - 1], 0);

  // check number of received marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_nCongMarks, 2);
}

BOOST_AUTO_TEST_CASE(IgnoreCongestionMarks)
{
  opt.ignoreCongMarks = true;
  createPipeline();

  nDataSegments = 7;
  aimdPipeline->m_ssthresh = 4.0;
  BOOST_REQUIRE_CLOSE(aimdPipeline->m_cwnd, 1, MARGIN);

  runWithData(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 1);

  // receive segments 1 to 5
  for (uint64_t i = 1; i < 6; ++i) {
    face.receive(*makeDataWithSegment(i));
    advanceClocks(io, time::nanoseconds(1));
  }

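  // slow start on segments 1-3 reaches ssthresh (4.0); congestion avoidance on segments 4 and 5
  // then adds 1/4 twice: 4 + 0.25 + 0.25 = 4.5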
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 6);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.5, MARGIN);
  BOOST_CHECK_EQUAL(face.sentInterests.size(), nDataSegments - 1); // all interests have been sent

  // receive the last segment with congestion mark
  face.receive(*makeDataWithSegmentAndCongMark(nDataSegments - 1));
  advanceClocks(io, time::nanoseconds(1));

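  // the congestion mark is ignored, so the window keeps growing: 4.5 + 1/floor(4.5) = 4.75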
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, nDataSegments);
  BOOST_CHECK_CLOSE(aimdPipeline->m_cwnd, 4.75, MARGIN); // window size increases
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);

  // make sure no interest is retransmitted for the marked data packet
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[nDataSegments - 1], 0);

  // check number of received marked data packets
  BOOST_CHECK_EQUAL(aimdPipeline->m_nCongMarks, 1);
}

BOOST_AUTO_TEST_CASE(Nack)
{
  nDataSegments = 5;
  aimdPipeline->m_cwnd = 10.0;
  runWithData(*makeDataWithSegment(0));
  advanceClocks(io, time::nanoseconds(1));

  face.receive(*makeDataWithSegment(1));
  advanceClocks(io, time::nanoseconds(1));

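  // cwnd (10) exceeds the number of remaining segments, so interests for segments 1-4 are all in flight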
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
  BOOST_REQUIRE_EQUAL(face.sentInterests.size(), 4);

  // receive a nack with NackReason::DUPLICATE for segment 2
  auto nack1 = makeNack(face.sentInterests[1], lp::NackReason::DUPLICATE);
  face.receive(nack1);
  advanceClocks(io, time::nanoseconds(1));

  // nack1 is ignored
  BOOST_CHECK_EQUAL(hasFailed, false);
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxQueue.size(), 0);

  // receive a nack with NackReason::CONGESTION for segment 3
  auto nack2 = makeNack(face.sentInterests[2], lp::NackReason::CONGESTION);
  face.receive(nack2);
  advanceClocks(io, time::nanoseconds(1));

  // segment 3 is retransmitted
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[3], 1);

  // receive a nack with NackReason::NONE for segment 4
  auto nack3 = makeNack(face.sentInterests[3], lp::NackReason::NONE);
  face.receive(nack3);
  advanceClocks(io, time::nanoseconds(1));

  // Other types of Nack will trigger a failure
  BOOST_CHECK_EQUAL(hasFailed, true);
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
}

BOOST_AUTO_TEST_CASE(FinalBlockIdNotSetAtBeginning)
{
  nDataSegments = 4;
  aimdPipeline->m_cwnd = 4;
  runWithData(*makeDataWithSegment(0, false));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 1 without FinalBlockId
  face.receive(*makeDataWithSegment(1, false));
  advanceClocks(io, time::nanoseconds(1));

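  // with cwnd = 4, segments 1-4 are requested up front; receiving segment 1 grows the window
  // (slow start, since ssthresh is effectively unlimited) and triggers requests for segments 5 and 6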
  // interests for segments 1 - 6 have been sent
  BOOST_CHECK_EQUAL(face.sentInterests.size(), 6);
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 2);
  BOOST_CHECK_EQUAL(aimdPipeline->m_hasFinalBlockId, false);
  // pending interests: segment 2, 3, 4, 5, 6
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 5);

  // receive segment 2 with FinalBlockId
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));
  BOOST_CHECK_EQUAL(pipeline->m_nReceived, 3);
  BOOST_CHECK_EQUAL(aimdPipeline->m_hasFinalBlockId, true);

  // pending interests for segments 2, 4, 5, 6 have been removed
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 1);
}

BOOST_AUTO_TEST_CASE(FailureBeforeFinalBlockIdReceived)
{
  // retrieval of segment segNo fails while the FinalBlockId has not yet been
  // set, and later a FinalBlockId >= segNo is received, i.e. segNo is
  // part of the content

  nDataSegments = 4;
  aimdPipeline->m_cwnd = 4;
  runWithData(*makeDataWithSegment(0, false));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 1 without FinalBlockId
  face.receive(*makeDataWithSegment(1, false));
  advanceClocks(io, time::nanoseconds(1));
  // interests for segments 1 - 6 have been sent
  BOOST_REQUIRE_EQUAL(face.sentInterests.size(), 6);

  // receive nack with NackReason::NONE for segment 3
  auto nack = makeNack(face.sentInterests[2], lp::NackReason::NONE);
  face.receive(nack);
  advanceClocks(io, time::nanoseconds(1));

  // error not triggered
  // pending interests for segments > 3 have been removed
  BOOST_CHECK_EQUAL(hasFailed, false);
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 1);

  // receive segment 2 with FinalBlockId
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));

  // error triggered since segment 3 is part of the content
  BOOST_CHECK_EQUAL(hasFailed, true);
}

BOOST_AUTO_TEST_CASE(SpuriousFailureBeforeFinalBlockIdReceived)
{
  // retrieval of segment segNo fails while the FinalBlockId has not yet been
  // set, and later a FinalBlockId < segNo is received, i.e. segNo is
  // not part of the content and the failure was actually spurious

  nDataSegments = 4;
  aimdPipeline->m_cwnd = 4;
  runWithData(*makeDataWithSegment(0, false));
  advanceClocks(io, time::nanoseconds(1));

  // receive segment 1 without FinalBlockId
  face.receive(*makeDataWithSegment(1, false));
  advanceClocks(io, time::nanoseconds(1));
  // interests for segments 1 - 6 have been sent
  BOOST_REQUIRE_EQUAL(face.sentInterests.size(), 6);

  // receive nack with NackReason::NONE for segment 4
  auto nack = makeNack(face.sentInterests[3], lp::NackReason::NONE);
  face.receive(nack);
  advanceClocks(io, time::nanoseconds(1));

  // error not triggered
  // pending interests for segments > 4 have been removed
  BOOST_CHECK_EQUAL(hasFailed, false);
  BOOST_CHECK_EQUAL(face.getNPendingInterests(), 2);

  // receive segment 2 with FinalBlockId
  face.receive(*makeDataWithSegment(2));
  advanceClocks(io, time::nanoseconds(1));

  // timeout segment 3
  advanceClocks(io, time::seconds(1));

  // segment 3 is retransmitted
  BOOST_CHECK_EQUAL(aimdPipeline->m_retxCount[3], 1);

  // receive segment 3
  face.receive(*makeDataWithSegment(3));
  advanceClocks(io, time::nanoseconds(1));

  BOOST_CHECK_EQUAL(hasFailed, false);
}

BOOST_AUTO_TEST_SUITE_END() // TestPipelineInterestsAimd
BOOST_AUTO_TEST_SUITE_END() // Chunks

} // namespace tests
} // namespace aimd
} // namespace chunks
} // namespace ndn