Remove dependency on Selectors and refactor codebase

WatchHandle is removed entirely, since its prefix-watching workflow is
built around Selectors. Segmented insertion in WriteHandle now uses
ndn::util::SegmentFetcher instead of hand-rolled Interest pipelining,
and std::cerr output in the handles is replaced with the ndn-cxx
logging facility.

Refs: #4522
Change-Id: Ic3024b76ba0eea61f790c91c36090b4aa68702a3

diff --git a/src/handles/command-base-handle.cpp b/src/handles/command-base-handle.cpp
index 1717839..8f2c6d0 100644
--- a/src/handles/command-base-handle.cpp
+++ b/src/handles/command-base-handle.cpp
@@ -32,7 +32,7 @@
static ndn::optional<std::string>
getSignerFromTag(const ndn::Interest& interest)
{
- shared_ptr<SignerTag> signerTag = interest.getTag<SignerTag>();
+ std::shared_ptr<SignerTag> signerTag = interest.getTag<SignerTag>();
if (signerTag == nullptr) {
return ndn::nullopt;
}
@@ -62,13 +62,10 @@
auto signer1 = getSignerFromTag(request);
std::string signer = signer1.value_or("*");
- //_LOG_DEBUG("accept " << request->getName() << " signer=" << signer);
accept(signer);
},
[reject] (const ndn::Interest& request,
const ndn::security::v2::ValidationError& error) {
- //_LOG_DEBUG("reject " << request->getName() << " signer=" <<
- // getSignerFromTag(*request).value_or("?") << ' ' << failureInfo);
reject(ndn::mgmt::RejectReply::STATUS403);
});
};
diff --git a/src/handles/delete-handle.cpp b/src/handles/delete-handle.cpp
index dc0f5f1..c892583 100644
--- a/src/handles/delete-handle.cpp
+++ b/src/handles/delete-handle.cpp
@@ -19,8 +19,12 @@
#include "delete-handle.hpp"
+#include <ndn-cxx/util/logger.hpp>
+
namespace repo {
+NDN_LOG_INIT(repo.DeleteHandle);
+
DeleteHandle::DeleteHandle(Face& face, RepoStorage& storageHandle,
ndn::mgmt::Dispatcher& dispatcher, Scheduler& scheduler,
Validator& validator)
@@ -40,12 +44,6 @@
{
const RepoCommandParameter& repoParameter = dynamic_cast<const RepoCommandParameter&>(parameter);
- if (repoParameter.hasSelectors()) {
- //choose data with selector and delete it
- processSelectorDeleteCommand(interest, repoParameter, done);
- return;
- }
-
if (!repoParameter.hasStartBlockId() && !repoParameter.hasEndBlockId()) {
processSingleDeleteCommand(interest, repoParameter, done);
return;
@@ -87,7 +85,7 @@
{
int64_t nDeletedData = storageHandle.deleteData(parameter.getName());
if (nDeletedData == -1) {
- std::cerr << "Deletion Failed!" <<std::endl;
+ NDN_LOG_DEBUG("Deletion Failed");
done(negativeReply(interest, 405, "Deletion Failed"));
}
else
@@ -95,20 +93,6 @@
}
void
-DeleteHandle::processSelectorDeleteCommand(const Interest& interest, const RepoCommandParameter& parameter,
- const ndn::mgmt::CommandContinuation& done) const
-{
- int64_t nDeletedData = storageHandle.deleteData(Interest(parameter.getName())
- .setSelectors(parameter.getSelectors()));
- if (nDeletedData == -1) {
- std::cerr << "Deletion Failed!" <<std::endl;
- done(negativeReply(interest, 405, "Deletion Failed"));
- }
- else
- done(positiveReply(interest, parameter, 200, nDeletedData));
-}
-
-void
DeleteHandle::processSegmentDeleteCommand(const Interest& interest, const RepoCommandParameter& parameter,
const ndn::mgmt::CommandContinuation& done) const
{
diff --git a/src/handles/delete-handle.hpp b/src/handles/delete-handle.hpp
index fbb25d3..ca32709 100644
--- a/src/handles/delete-handle.hpp
+++ b/src/handles/delete-handle.hpp
@@ -62,10 +62,6 @@
const ndn::mgmt::CommandContinuation& done) const;
void
- processSelectorDeleteCommand(const Interest& interest, const RepoCommandParameter& parameter,
- const ndn::mgmt::CommandContinuation& done) const;
-
- void
processSegmentDeleteCommand(const Interest& interest, const RepoCommandParameter& parameter,
const ndn::mgmt::CommandContinuation& done) const;
};
diff --git a/src/handles/read-handle.cpp b/src/handles/read-handle.cpp
index 44a9d18..dbdfef7 100644
--- a/src/handles/read-handle.cpp
+++ b/src/handles/read-handle.cpp
@@ -20,8 +20,12 @@
#include "read-handle.hpp"
#include "repo.hpp"
+#include <ndn-cxx/util/logger.hpp>
+
namespace repo {
+NDN_LOG_INIT(repo.ReadHandle);
+
ReadHandle::ReadHandle(Face& face, RepoStorage& storageHandle, size_t prefixSubsetLength)
: m_prefixSubsetLength(prefixSubsetLength)
, m_face(face)
@@ -35,11 +39,11 @@
{
// Connect a RepoStorage's signals to the read handle
if (m_prefixSubsetLength != RepoConfig::DISABLED_SUBSET_LENGTH) {
- afterDataDeletionConnection = m_storageHandle.afterDataInsertion.connect(
+ afterDataInsertionConnection = m_storageHandle.afterDataInsertion.connect(
[this] (const Name& prefix) {
onDataInserted(prefix);
});
- afterDataInsertionConnection = m_storageHandle.afterDataDeletion.connect(
+ afterDataDeletionConnection = m_storageHandle.afterDataDeletion.connect(
[this] (const Name& prefix) {
onDataDeleted(prefix);
});
@@ -49,16 +53,21 @@
void
ReadHandle::onInterest(const Name& prefix, const Interest& interest)
{
- shared_ptr<ndn::Data> data = m_storageHandle.readData(interest);
+ NDN_LOG_DEBUG("Received Interest " << interest.getName());
+ std::shared_ptr<ndn::Data> data = m_storageHandle.readData(interest);
if (data != nullptr) {
- m_face.put(*data);
+ NDN_LOG_DEBUG("Put Data: " << *data);
+ m_face.put(*data);
+ }
+ else {
+ NDN_LOG_DEBUG("No data for " << interest.getName());
}
}
void
ReadHandle::onRegisterFailed(const Name& prefix, const std::string& reason)
{
- std::cerr << "ERROR: Failed to register prefix in local hub's daemon" << std::endl;
+ NDN_LOG_ERROR("ERROR: Failed to register prefix in local hub's daemon");
m_face.shutdown();
}
@@ -67,8 +76,8 @@
{
ndn::InterestFilter filter(prefix);
m_face.setInterestFilter(filter,
- bind(&ReadHandle::onInterest, this, _1, _2),
- bind(&ReadHandle::onRegisterFailed, this, _1, _2));
+ std::bind(&ReadHandle::onInterest, this, _1, _2),
+ std::bind(&ReadHandle::onRegisterFailed, this, _1, _2));
}
void
diff --git a/src/handles/read-handle.hpp b/src/handles/read-handle.hpp
index a243abd..bdd6b6f 100644
--- a/src/handles/read-handle.hpp
+++ b/src/handles/read-handle.hpp
@@ -54,7 +54,7 @@
}
/**
- * @param after Do something after actually removing a prefix
+ * @param name Full name of the deleted Data
*/
void
onDataDeleted(const Name& name);
diff --git a/src/handles/tcp-bulk-insert-handle.cpp b/src/handles/tcp-bulk-insert-handle.cpp
index 53a60d6..f27d173 100644
--- a/src/handles/tcp-bulk-insert-handle.cpp
+++ b/src/handles/tcp-bulk-insert-handle.cpp
@@ -1,6 +1,6 @@
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/**
- * Copyright (c) 2014-2017, Regents of the University of California.
+ * Copyright (c) 2014-2018, Regents of the University of California.
*
* This file is part of NDN repo-ng (Next generation of NDN repository).
* See AUTHORS.md for complete list of repo-ng authors and contributors.
@@ -19,8 +19,12 @@
#include "tcp-bulk-insert-handle.hpp"
+#include <ndn-cxx/util/logger.hpp>
+
namespace repo {
+NDN_LOG_INIT(repo.tcpHandle);
+
const size_t MAX_NDN_PACKET_SIZE = 8800;
namespace detail {
@@ -29,7 +33,7 @@
{
public:
TcpBulkInsertClient(TcpBulkInsertHandle& writer,
- const shared_ptr<boost::asio::ip::tcp::socket>& socket)
+ const std::shared_ptr<boost::asio::ip::tcp::socket>& socket)
: m_writer(writer)
, m_socket(socket)
, m_hasStarted(false)
@@ -38,13 +42,13 @@
}
static void
- startReceive(const shared_ptr<TcpBulkInsertClient>& client)
+ startReceive(const std::shared_ptr<TcpBulkInsertClient>& client)
{
BOOST_ASSERT(!client->m_hasStarted);
client->m_socket->async_receive(
boost::asio::buffer(client->m_inputBuffer, MAX_NDN_PACKET_SIZE), 0,
- bind(&TcpBulkInsertClient::handleReceive, client, _1, _2, client));
+ std::bind(&TcpBulkInsertClient::handleReceive, client, _1, _2, client));
client->m_hasStarted = true;
}
@@ -53,11 +57,11 @@
void
handleReceive(const boost::system::error_code& error,
std::size_t nBytesReceived,
- const shared_ptr<TcpBulkInsertClient>& client);
+ const std::shared_ptr<TcpBulkInsertClient>& client);
private:
TcpBulkInsertHandle& m_writer;
- shared_ptr<boost::asio::ip::tcp::socket> m_socket;
+ std::shared_ptr<boost::asio::ip::tcp::socket> m_socket;
bool m_hasStarted;
uint8_t m_inputBuffer[MAX_NDN_PACKET_SIZE];
std::size_t m_inputBufferSize;
@@ -87,22 +91,19 @@
BOOST_THROW_EXCEPTION(Error("Cannot listen on [" + host + ":" + port + "]"));
m_localEndpoint = *endpoint;
- std::cerr << "Start listening on " << m_localEndpoint << std::endl;
+ NDN_LOG_DEBUG("Start listening on " << m_localEndpoint);
m_acceptor.open(m_localEndpoint .protocol());
m_acceptor.set_option(ip::tcp::acceptor::reuse_address(true));
- if (m_localEndpoint.address().is_v6())
- {
- m_acceptor.set_option(ip::v6_only(true));
- }
+ if (m_localEndpoint.address().is_v6()) {
+ m_acceptor.set_option(ip::v6_only(true));
+ }
m_acceptor.bind(m_localEndpoint);
m_acceptor.listen(255);
- shared_ptr<ip::tcp::socket> clientSocket =
- make_shared<ip::tcp::socket>(std::ref(m_acceptor.get_io_service()));
+ auto clientSocket = std::make_shared<ip::tcp::socket>(m_acceptor.get_io_service());
m_acceptor.async_accept(*clientSocket,
- bind(&TcpBulkInsertHandle::handleAccept, this, _1,
- clientSocket));
+ std::bind(&TcpBulkInsertHandle::handleAccept, this, _1, clientSocket));
}
void
@@ -114,45 +115,40 @@
void
TcpBulkInsertHandle::handleAccept(const boost::system::error_code& error,
- const shared_ptr<boost::asio::ip::tcp::socket>& socket)
+ const std::shared_ptr<boost::asio::ip::tcp::socket>& socket)
{
using namespace boost::asio;
if (error) {
- // if (error == boost::system::errc::operation_canceled) // when socket is closed by someone
- // return;
return;
}
- std::cerr << "New connection from " << socket->remote_endpoint() << std::endl;
+ NDN_LOG_DEBUG("New connection from " << socket->remote_endpoint());
- shared_ptr<detail::TcpBulkInsertClient> client =
- make_shared<detail::TcpBulkInsertClient>(std::ref(*this), socket);
+ std::shared_ptr<detail::TcpBulkInsertClient> client =
+ std::make_shared<detail::TcpBulkInsertClient>(*this, socket);
detail::TcpBulkInsertClient::startReceive(client);
// prepare accepting the next connection
- shared_ptr<ip::tcp::socket> clientSocket =
- make_shared<ip::tcp::socket>(std::ref(m_acceptor.get_io_service()));
+ auto clientSocket = std::make_shared<ip::tcp::socket>(m_acceptor.get_io_service());
m_acceptor.async_accept(*clientSocket,
- bind(&TcpBulkInsertHandle::handleAccept, this, _1,
- clientSocket));
+ std::bind(&TcpBulkInsertHandle::handleAccept, this, _1, clientSocket));
}
void
detail::TcpBulkInsertClient::handleReceive(const boost::system::error_code& error,
std::size_t nBytesReceived,
- const shared_ptr<detail::TcpBulkInsertClient>& client)
+ const std::shared_ptr<detail::TcpBulkInsertClient>& client)
{
- if (error)
- {
- if (error == boost::system::errc::operation_canceled) // when socket is closed by someone
- return;
-
- boost::system::error_code error;
- m_socket->shutdown(boost::asio::ip::tcp::socket::shutdown_both, error);
- m_socket->close(error);
+ if (error) {
+ if (error == boost::system::errc::operation_canceled) // when socket is closed by someone
return;
- }
+
+ boost::system::error_code error;
+ m_socket->shutdown(boost::asio::ip::tcp::socket::shutdown_both, error);
+ m_socket->close(error);
+ return;
+ }
m_inputBufferSize += nBytesReceived;
@@ -175,42 +171,37 @@
Data data(element);
bool isInserted = m_writer.getStorageHandle().insertData(data);
if (isInserted)
- std::cerr << "Successfully injected " << data.getName() << std::endl;
+ NDN_LOG_DEBUG("Successfully injected " << data.getName());
else
- std::cerr << "FAILED to inject " << data.getName() << std::endl;
+ NDN_LOG_DEBUG("FAILED to inject " << data.getName());
}
catch (const std::runtime_error& error) {
/// \todo Catch specific error after determining what wireDecode() can throw
- std::cerr << "Error decoding received Data packet" << std::endl;
+ NDN_LOG_ERROR("Error decoding received Data packet");
}
}
}
- if (!isOk && m_inputBufferSize == MAX_NDN_PACKET_SIZE && offset == 0)
- {
- boost::system::error_code error;
- m_socket->shutdown(boost::asio::ip::tcp::socket::shutdown_both, error);
- m_socket->close(error);
- return;
- }
+ if (!isOk && m_inputBufferSize == MAX_NDN_PACKET_SIZE && offset == 0) {
+ boost::system::error_code error;
+ m_socket->shutdown(boost::asio::ip::tcp::socket::shutdown_both, error);
+ m_socket->close(error);
+ return;
+ }
- if (offset > 0)
- {
- if (offset != m_inputBufferSize)
- {
- std::copy(m_inputBuffer + offset, m_inputBuffer + m_inputBufferSize,
- m_inputBuffer);
- m_inputBufferSize -= offset;
- }
- else
- {
- m_inputBufferSize = 0;
- }
+ if (offset > 0) {
+ if (offset != m_inputBufferSize) {
+ std::copy(m_inputBuffer + offset, m_inputBuffer + m_inputBufferSize, m_inputBuffer);
+ m_inputBufferSize -= offset;
}
+ else {
+ m_inputBufferSize = 0;
+ }
+ }
m_socket->async_receive(boost::asio::buffer(m_inputBuffer + m_inputBufferSize,
MAX_NDN_PACKET_SIZE - m_inputBufferSize), 0,
- bind(&TcpBulkInsertClient::handleReceive, this, _1, _2, client));
+ std::bind(&TcpBulkInsertClient::handleReceive, this, _1, _2, client));
}
diff --git a/src/handles/watch-handle.cpp b/src/handles/watch-handle.cpp
deleted file mode 100644
index 105e802..0000000
--- a/src/handles/watch-handle.cpp
+++ /dev/null
@@ -1,298 +0,0 @@
-/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
-/*
- * Copyright (c) 2014-2018, Regents of the University of California.
- *
- * This file is part of NDN repo-ng (Next generation of NDN repository).
- * See AUTHORS.md for complete list of repo-ng authors and contributors.
- *
- * repo-ng is free software: you can redistribute it and/or modify it under the terms
- * of the GNU General Public License as published by the Free Software Foundation,
- * either version 3 of the License, or (at your option) any later version.
- *
- * repo-ng is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
- * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * repo-ng, e.g., in COPYING.md file. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "watch-handle.hpp"
-
-namespace repo {
-
-static const milliseconds PROCESS_DELETE_TIME(10000_ms);
-static const milliseconds DEFAULT_INTEREST_LIFETIME(4000_ms);
-
-WatchHandle::WatchHandle(Face& face, RepoStorage& storageHandle,
- ndn::mgmt::Dispatcher& dispatcher, Scheduler& scheduler, Validator& validator)
- : CommandBaseHandle(face, storageHandle, scheduler, validator)
- , m_validator(validator)
- , m_interestNum(0)
- , m_maxInterestNum(0)
- , m_interestLifetime(DEFAULT_INTEREST_LIFETIME)
- , m_watchTimeout(0)
- , m_startTime(steady_clock::now())
- , m_size(0)
-{
- dispatcher.addControlCommand<RepoCommandParameter>(ndn::PartialName("watch").append("start"),
- makeAuthorization(),
- std::bind(&WatchHandle::validateParameters<WatchStartCommand>, this, _1),
- std::bind(&WatchHandle::handleStartCommand, this, _1, _2, _3, _4));
-
- dispatcher.addControlCommand<RepoCommandParameter>(ndn::PartialName("watch").append("check"),
- makeAuthorization(),
- std::bind(&WatchHandle::validateParameters<WatchCheckCommand>, this, _1),
- std::bind(&WatchHandle::handleCheckCommand, this, _1, _2, _3, _4));
-
- dispatcher.addControlCommand<RepoCommandParameter>(ndn::PartialName("watch").append("stop"),
- makeAuthorization(),
- std::bind(&WatchHandle::validateParameters<WatchStopCommand>, this, _1),
- std::bind(&WatchHandle::handleStopCommand, this, _1, _2, _3, _4));
-}
-
-void
-WatchHandle::deleteProcess(const Name& name)
-{
- m_processes.erase(name);
-}
-
-void
-WatchHandle::handleStartCommand(const Name& prefix, const Interest& interest,
- const ndn::mgmt::ControlParameters& parameter,
- const ndn::mgmt::CommandContinuation& done)
-{
- const RepoCommandParameter& repoParameter = dynamic_cast<const RepoCommandParameter&>(parameter);
- processWatchCommand(interest, repoParameter, done);
-}
-
-void WatchHandle::watchStop(const Name& name)
-{
- m_processes[name].second = false;
- m_maxInterestNum = 0;
- m_interestNum = 0;
- m_startTime = steady_clock::now();
- m_watchTimeout = 0_ms;
- m_interestLifetime = DEFAULT_INTEREST_LIFETIME;
- m_size = 0;
-}
-
-
-void
-WatchHandle::onData(const Interest& interest, const ndn::Data& data, const Name& name)
-{
- m_validator.validate(data,
- bind(&WatchHandle::onDataValidated, this, interest, _1, name),
- bind(&WatchHandle::onDataValidationFailed, this, interest, _1, _2, name));
-}
-
-void
-WatchHandle::onDataValidated(const Interest& interest, const Data& data, const Name& name)
-{
- if (!m_processes[name].second) {
- return;
- }
- if (storageHandle.insertData(data)) {
- m_size++;
- if (!onRunning(name))
- return;
-
- Interest fetchInterest(interest.getName());
- fetchInterest.setSelectors(interest.getSelectors());
- fetchInterest.setInterestLifetime(m_interestLifetime);
- fetchInterest.setChildSelector(1);
-
- // update selectors
- // if data name is equal to interest name, use MinSuffixComponents selecor to exclude this data
- if (data.getName().size() == interest.getName().size()) {
- fetchInterest.setMinSuffixComponents(2);
- }
- else {
- Exclude exclude;
- if (!interest.getExclude().empty()) {
- exclude = interest.getExclude();
- }
-
- exclude.excludeBefore(data.getName()[interest.getName().size()]);
- fetchInterest.setExclude(exclude);
- }
-
- ++m_interestNum;
- face.expressInterest(fetchInterest,
- bind(&WatchHandle::onData, this, _1, _2, name),
- bind(&WatchHandle::onTimeout, this, _1, name), // Nack
- bind(&WatchHandle::onTimeout, this, _1, name));
- }
- else {
- BOOST_THROW_EXCEPTION(Error("Insert into Repo Failed"));
- }
- m_processes[name].first.setInsertNum(m_size);
-}
-
-void
-WatchHandle::onDataValidationFailed(const Interest& interest, const Data& data,
- const ValidationError& error, const Name& name)
-{
- std::cerr << error << std::endl;
- if (!m_processes[name].second) {
- return;
- }
- if (!onRunning(name))
- return;
-
- Interest fetchInterest(interest.getName());
- fetchInterest.setSelectors(interest.getSelectors());
- fetchInterest.setInterestLifetime(m_interestLifetime);
- fetchInterest.setChildSelector(1);
-
- // update selectors
- // if data name is equal to interest name, use MinSuffixComponents selecor to exclude this data
- if (data.getName().size() == interest.getName().size()) {
- fetchInterest.setMinSuffixComponents(2);
- }
- else {
- Exclude exclude;
- if (!interest.getExclude().empty()) {
- exclude = interest.getExclude();
- }
- // Only exclude this data since other data whose names are smaller may be validated and satisfied
- exclude.excludeBefore(data.getName()[interest.getName().size()]);
- fetchInterest.setExclude(exclude);
- }
-
- ++m_interestNum;
- face.expressInterest(fetchInterest,
- bind(&WatchHandle::onData, this, _1, _2, name),
- bind(&WatchHandle::onTimeout, this, _1, name), // Nack
- bind(&WatchHandle::onTimeout, this, _1, name));
-}
-
-void
-WatchHandle::onTimeout(const ndn::Interest& interest, const Name& name)
-{
- std::cerr << "Timeout" << std::endl;
- if (!m_processes[name].second) {
- return;
- }
- if (!onRunning(name))
- return;
- // selectors do not need to be updated
- Interest fetchInterest(interest.getName());
- fetchInterest.setSelectors(interest.getSelectors());
- fetchInterest.setInterestLifetime(m_interestLifetime);
- fetchInterest.setChildSelector(1);
-
- ++m_interestNum;
- face.expressInterest(fetchInterest,
- bind(&WatchHandle::onData, this, _1, _2, name),
- bind(&WatchHandle::onTimeout, this, _1, name), // Nack
- bind(&WatchHandle::onTimeout, this, _1, name));
-
-}
-
-void
-WatchHandle::handleStopCommand(const Name& prefix, const Interest& interest,
- const ndn::mgmt::ControlParameters& parameter,
- const ndn::mgmt::CommandContinuation& done)
-{
- const RepoCommandParameter& repoParameter = dynamic_cast<const RepoCommandParameter&>(parameter);
-
- watchStop(repoParameter.getName());
- std::string text = "Watched Prefix Insertion for prefix (" + prefix.toUri() + ") is stop.";
- return done(RepoCommandResponse(101, text));
-}
-
-void
-WatchHandle::handleCheckCommand(const Name& prefix, const Interest& interest,
- const ndn::mgmt::ControlParameters& parameter,
- const ndn::mgmt::CommandContinuation& done)
-{
- const RepoCommandParameter& repoParameter = dynamic_cast<const RepoCommandParameter&>(parameter);
-
- //check whether this process exists
- Name name = repoParameter.getName();
- if (m_processes.count(name) == 0) {
- std::cerr << "no such process name: " << name << std::endl;
- RepoCommandResponse response(404, "No such process is in progress");
- response.setBody(response.wireEncode());
- return done(response);
- }
-
- RepoCommandResponse& response = m_processes[name].first;
-
- if (!m_processes[name].second) {
- response.setCode(101);
- }
-
- return done(response);
-}
-
-void
-WatchHandle::deferredDeleteProcess(const Name& name)
-{
- scheduler.scheduleEvent(PROCESS_DELETE_TIME,
- bind(&WatchHandle::deleteProcess, this, name));
-}
-
-void
-WatchHandle::processWatchCommand(const Interest& interest,
- const RepoCommandParameter& parameter,
- const ndn::mgmt::CommandContinuation& done)
-{
- // if there is no watchTimeout specified, m_watchTimeout will be set as 0 and this handle will run forever
- if (parameter.hasWatchTimeout()) {
- m_watchTimeout = parameter.getWatchTimeout();
- }
- else {
- m_watchTimeout = 0_ms;
- }
-
- // if there is no maxInterestNum specified, m_maxInterestNum will be 0, which means infinity
- if (parameter.hasMaxInterestNum()) {
- m_maxInterestNum = parameter.getMaxInterestNum();
- }
- else {
- m_maxInterestNum = 0;
- }
-
- if (parameter.hasInterestLifetime()) {
- m_interestLifetime = parameter.getInterestLifetime();
- }
-
- RepoCommandResponse response(100, "Watching the prefix started.");
- response.setBody(response.wireEncode());
- done(response);
-
- m_processes[parameter.getName()] =
- std::make_pair(RepoCommandResponse(300, "This watched prefix Insertion is in progress"),
- true);
-
- Interest fetchInterest(parameter.getName());
- if (parameter.hasSelectors()) {
- fetchInterest.setSelectors(parameter.getSelectors());
- }
- fetchInterest.setChildSelector(1);
- fetchInterest.setInterestLifetime(m_interestLifetime);
- m_startTime = steady_clock::now();
- m_interestNum++;
- face.expressInterest(fetchInterest,
- bind(&WatchHandle::onData, this, _1, _2, parameter.getName()),
- bind(&WatchHandle::onTimeout, this, _1, parameter.getName()), // Nack
- bind(&WatchHandle::onTimeout, this, _1, parameter.getName()));
-}
-
-bool
-WatchHandle::onRunning(const Name& name)
-{
- bool isTimeout = (m_watchTimeout != milliseconds::zero() &&
- steady_clock::now() - m_startTime > m_watchTimeout);
- bool isMaxInterest = m_interestNum >= m_maxInterestNum && m_maxInterestNum != 0;
- if (isTimeout || isMaxInterest) {
- deferredDeleteProcess(name);
- watchStop(name);
- return false;
- }
- return true;
-}
-
-} // namespace repo
diff --git a/src/handles/watch-handle.hpp b/src/handles/watch-handle.hpp
deleted file mode 100644
index 221b67d..0000000
--- a/src/handles/watch-handle.hpp
+++ /dev/null
@@ -1,155 +0,0 @@
-/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
-/*
- * Copyright (c) 2014-2018, Regents of the University of California.
- *
- * This file is part of NDN repo-ng (Next generation of NDN repository).
- * See AUTHORS.md for complete list of repo-ng authors and contributors.
- *
- * repo-ng is free software: you can redistribute it and/or modify it under the terms
- * of the GNU General Public License as published by the Free Software Foundation,
- * either version 3 of the License, or (at your option) any later version.
- *
- * repo-ng is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
- * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE. See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * repo-ng, e.g., in COPYING.md file. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef REPO_HANDLES_WATCH_HANDLE_HPP
-#define REPO_HANDLES_WATCH_HANDLE_HPP
-
-#include "command-base-handle.hpp"
-
-#include <ndn-cxx/mgmt/dispatcher.hpp>
-
-#include <queue>
-
-namespace repo {
-
-using std::map;
-using std::pair;
-using std::queue;
-using namespace ndn::time;
-/**
- * @brief WatchHandle provides a different way for repo to insert data.
- *
- * Repo keeps sending interest to request the data with same prefix,
- * but with different exclude selectors(updated every time). Repo will stop
- * watching the prefix until a command interest tell it to stop, the total
- * amount of sent interests reaches a specific number or time out.
- */
-class WatchHandle : public CommandBaseHandle
-{
-
-public:
- class Error : public CommandBaseHandle::Error
- {
- public:
- explicit
- Error(const std::string& what)
- : CommandBaseHandle::Error(what)
- {
- }
- };
-
-
-public:
- WatchHandle(Face& face, RepoStorage& storageHandle,
- ndn::mgmt::Dispatcher& dispatcher, Scheduler& scheduler,
- Validator& validator);
-
-private: // watch-insert command
- /**
- * @brief handle watch commands
- */
-
- void
- handleStartCommand(const Name& prefix, const Interest& interest,
- const ndn::mgmt::ControlParameters& parameters,
- const ndn::mgmt::CommandContinuation& done);
- void
- onValidationFailed(const std::shared_ptr<const Interest>& interest, const std::string& reason);
-
-private: // data fetching
- /**
- * @brief fetch data and send next interest
- */
- void
- onData(const Interest& interest, const Data& data, const Name& name);
-
- /**
- * @brief handle when fetching one data timeout
- */
- void
- onTimeout(const Interest& interest, const Name& name);
-
- void
- onDataValidated(const Interest& interest, const Data& data, const Name& name);
-
- /**
- * @brief failure of validation
- */
- void
- onDataValidationFailed(const Interest& interest, const Data& data,
- const ValidationError& error, const Name& name);
-
-
- void
- processWatchCommand(const Interest& interest, const RepoCommandParameter& parameter,
- const ndn::mgmt::CommandContinuation& done);
-
- void
- watchStop(const Name& name);
-
-private: // watch state check command
- /**
- * @brief handle watch check command
- */
-
- void
- handleCheckCommand(const Name& prefix, const Interest& interest,
- const ndn::mgmt::ControlParameters& parameters,
- const ndn::mgmt::CommandContinuation& done);
-
- void
- onCheckValidationFailed(const Interest& interest, const ValidationError& error);
-
-private: // watch stop command
- /**
- * @brief handle watch stop command
- */
-
- void
- handleStopCommand(const Name& prefix, const Interest& interest,
- const ndn::mgmt::ControlParameters& parameters,
- const ndn::mgmt::CommandContinuation& done);
-
- void
- onStopValidationFailed(const Interest& interest, const ValidationError& error);
-
-private:
- void
- deferredDeleteProcess(const Name& name);
-
- void
- deleteProcess(const Name& name);
-
- bool
- onRunning(const Name& name);
-
-private:
- Validator& m_validator;
- map<Name, std::pair<RepoCommandResponse, bool> > m_processes;
- int64_t m_interestNum;
- int64_t m_maxInterestNum;
- milliseconds m_interestLifetime;
- milliseconds m_watchTimeout;
- ndn::time::steady_clock::TimePoint m_startTime;
- int64_t m_size;
-};
-
-} // namespace repo
-
-#endif // REPO_HANDLES_WATCH_HANDLE_HPP
diff --git a/src/handles/write-handle.cpp b/src/handles/write-handle.cpp
index 6cc9583..675f286 100644
--- a/src/handles/write-handle.cpp
+++ b/src/handles/write-handle.cpp
@@ -19,12 +19,16 @@
#include "write-handle.hpp"
+#include <ndn-cxx/util/logger.hpp>
#include <ndn-cxx/util/random.hpp>
namespace repo {
-static const int RETRY_TIMEOUT = 3;
+NDN_LOG_INIT(repo.WriteHandle);
+
static const int DEFAULT_CREDIT = 12;
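+// defaults for the Selector-free fetch path: CanBePrefix flag for fetch Interests
+// and the maxTimeout handed to SegmentFetcher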
+static const bool DEFAULT_CANBE_PREFIX = false;
+static const milliseconds MAX_TIMEOUT(60_s);
static const milliseconds NOEND_TIMEOUT(10000_ms);
static const milliseconds PROCESS_DELETE_TIME(10000_ms);
static const milliseconds DEFAULT_INTEREST_LIFETIME(4000_ms);
@@ -33,8 +37,9 @@
Scheduler& scheduler, Validator& validator)
: CommandBaseHandle(face, storageHandle, scheduler, validator)
, m_validator(validator)
- , m_retryTime(RETRY_TIMEOUT)
, m_credit(DEFAULT_CREDIT)
+ , m_canBePrefix(DEFAULT_CANBE_PREFIX)
+ , m_maxTimeout(MAX_TIMEOUT)
, m_noEndTimeout(NOEND_TIMEOUT)
, m_interestLifetime(DEFAULT_INTEREST_LIFETIME)
{
@@ -64,10 +69,6 @@
dynamic_cast<RepoCommandParameter*>(const_cast<ndn::mgmt::ControlParameters*>(¶meter));
if (repoParameter->hasStartBlockId() || repoParameter->hasEndBlockId()) {
- if (repoParameter->hasSelectors()) {
- done(negativeReply("BlockId present. BlockId is not supported in this protocol", 402));
- return;
- }
processSegmentedInsertCommand(interest, *repoParameter, done);
}
else {
@@ -81,8 +82,8 @@
WriteHandle::onData(const Interest& interest, const Data& data, ProcessId processId)
{
m_validator.validate(data,
- bind(&WriteHandle::onDataValidated, this, interest, _1, processId),
- bind(&WriteHandle::onDataValidationFailed, this, _1, _2));
+ std::bind(&WriteHandle::onDataValidated, this, interest, _1, processId),
+ [] (const Data& data, const ValidationError& error) { NDN_LOG_ERROR("Error: " << error); });
}
void
@@ -104,296 +105,13 @@
}
void
-WriteHandle::onDataValidationFailed(const Data& data, const ValidationError& error)
-{
- std::cerr << error << std::endl;
-}
-
-void
-WriteHandle::onSegmentData(const Interest& interest, const Data& data, ProcessId processId)
-{
- m_validator.validate(data,
- bind(&WriteHandle::onSegmentDataValidated, this, interest, _1, processId),
- bind(&WriteHandle::onDataValidationFailed, this, _1, _2));
-}
-
-void
-WriteHandle::onSegmentDataValidated(const Interest& interest, const Data& data, ProcessId processId)
-{
- auto it = m_processes.find(processId);
- if (it == m_processes.end()) {
- return;
- }
- RepoCommandResponse& response = it->second.response;
-
- //refresh endBlockId
- auto finalBlock = data.getFinalBlock();
- if (finalBlock && finalBlock->isSegment()) {
- auto finalSeg = finalBlock->toSegment();
- if (response.hasEndBlockId()) {
- if (finalSeg < response.getEndBlockId()) {
- response.setEndBlockId(finalSeg);
- }
- }
- else {
- response.setEndBlockId(finalSeg);
- }
- }
-
- //insert data
- if (storageHandle.insertData(data)) {
- response.setInsertNum(response.getInsertNum() + 1);
- }
-
- onSegmentDataControl(processId, interest);
-}
-
-void
WriteHandle::onTimeout(const Interest& interest, ProcessId processId)
{
- std::cerr << "Timeout" << std::endl;
+ NDN_LOG_DEBUG("Timeout" << std::endl);
m_processes.erase(processId);
}
void
-WriteHandle::onSegmentTimeout(const Interest& interest, ProcessId processId)
-{
- std::cerr << "SegTimeout" << std::endl;
-
- onSegmentTimeoutControl(processId, interest);
-}
-
-void
-WriteHandle::segInit(ProcessId processId, const RepoCommandParameter& parameter)
-{
- ProcessInfo& process = m_processes[processId];
- process.credit = 0;
-
- map<SegmentNo, int>& processRetry = process.retryCounts;
-
- Name name = parameter.getName();
- SegmentNo startBlockId = parameter.getStartBlockId();
-
- uint64_t initialCredit = m_credit;
-
- if (parameter.hasEndBlockId()) {
- initialCredit =
- std::min(initialCredit, parameter.getEndBlockId() - parameter.getStartBlockId() + 1);
- }
- else {
- // set noEndTimeout timer
- process.noEndTime = ndn::time::steady_clock::now() +
- m_noEndTimeout;
- }
- process.credit = initialCredit;
- SegmentNo segment = startBlockId;
-
- for (; segment < startBlockId + initialCredit; ++segment) {
- Name fetchName = name;
- fetchName.appendSegment(segment);
- Interest interest(fetchName);
- interest.setInterestLifetime(m_interestLifetime);
- face.expressInterest(interest,
- bind(&WriteHandle::onSegmentData, this, _1, _2, processId),
- bind(&WriteHandle::onSegmentTimeout, this, _1, processId), // Nack
- bind(&WriteHandle::onSegmentTimeout, this, _1, processId));
- process.credit--;
- processRetry[segment] = 0;
- }
-
- queue<SegmentNo>& nextSegmentQueue = process.nextSegmentQueue;
-
- process.nextSegment = segment;
- nextSegmentQueue.push(segment);
-}
-
-void
-WriteHandle::onSegmentDataControl(ProcessId processId, const Interest& interest)
-{
- if (m_processes.count(processId) == 0) {
- return;
- }
- ProcessInfo& process = m_processes[processId];
- RepoCommandResponse& response = process.response;
- int& processCredit = process.credit;
- //onSegmentDataControl is called when a data returns.
- //When data returns, processCredit++
- processCredit++;
- SegmentNo& nextSegment = process.nextSegment;
- queue<SegmentNo>& nextSegmentQueue = process.nextSegmentQueue;
- map<SegmentNo, int>& retryCounts = process.retryCounts;
-
- //read whether notime timeout
- if (!response.hasEndBlockId()) {
-
- ndn::time::steady_clock::TimePoint& noEndTime = process.noEndTime;
- ndn::time::steady_clock::TimePoint now = ndn::time::steady_clock::now();
-
- if (now > noEndTime) {
- std::cerr << "noEndtimeout: " << processId << std::endl;
- //m_processes.erase(processId);
- //StatusCode should be refreshed as 405
- response.setCode(405);
- //schedule a delete event
- deferredDeleteProcess(processId);
- return;
- }
- }
-
- //read whether this process has total ends, if ends, remove control info from the maps
- if (response.hasEndBlockId()) {
- uint64_t nSegments =
- response.getEndBlockId() - response.getStartBlockId() + 1;
- if (response.getInsertNum() >= nSegments) {
- //m_processes.erase(processId);
- //All the data has been inserted, StatusCode is refreshed as 200
- response.setCode(200);
- deferredDeleteProcess(processId);
- return;
- }
- }
-
- //check whether there is any credit
- if (processCredit == 0)
- return;
-
-
- //check whether sent queue empty
- if (nextSegmentQueue.empty()) {
- //do not do anything
- return;
- }
-
- //pop the queue
- SegmentNo sendingSegment = nextSegmentQueue.front();
- nextSegmentQueue.pop();
-
- //check whether sendingSegment exceeds
- if (response.hasEndBlockId() && sendingSegment > response.getEndBlockId()) {
- //do not do anything
- return;
- }
-
- //read whether this is retransmitted data;
- SegmentNo fetchedSegment =
- interest.getName().get(interest.getName().size() - 1).toSegment();
-
- BOOST_ASSERT(retryCounts.count(fetchedSegment) != 0);
-
- //find this fetched data, remove it from this map
- //rit->second.erase(oit);
- retryCounts.erase(fetchedSegment);
- //express the interest of the top of the queue
- Name fetchName(interest.getName().getPrefix(-1));
- fetchName.appendSegment(sendingSegment);
- Interest fetchInterest(fetchName);
- fetchInterest.setInterestLifetime(m_interestLifetime);
- face.expressInterest(fetchInterest,
- bind(&WriteHandle::onSegmentData, this, _1, _2, processId),
- bind(&WriteHandle::onSegmentTimeout, this, _1, processId), // Nack
- bind(&WriteHandle::onSegmentTimeout, this, _1, processId));
- //When an interest is expressed, processCredit--
- processCredit--;
- if (retryCounts.count(sendingSegment) == 0) {
- //not found
- retryCounts[sendingSegment] = 0;
- }
- else {
- //found
- retryCounts[sendingSegment] = retryCounts[sendingSegment] + 1;
- }
- //increase the next seg and put it into the queue
- if (!response.hasEndBlockId() || (nextSegment + 1) <= response.getEndBlockId()) {
- nextSegment++;
- nextSegmentQueue.push(nextSegment);
- }
-}
-
-void
-WriteHandle::onSegmentTimeoutControl(ProcessId processId, const Interest& interest)
-{
- if (m_processes.count(processId) == 0) {
- return;
- }
- ProcessInfo& process = m_processes[processId];
- // RepoCommandResponse& response = process.response;
- // SegmentNo& nextSegment = process.nextSegment;
- // queue<SegmentNo>& nextSegmentQueue = process.nextSegmentQueue;
- map<SegmentNo, int>& retryCounts = process.retryCounts;
-
- SegmentNo timeoutSegment = interest.getName().get(-1).toSegment();
-
- std::cerr << "timeoutSegment: " << timeoutSegment << std::endl;
-
- BOOST_ASSERT(retryCounts.count(timeoutSegment) != 0);
-
- //read the retry time. If retry out of time, fail the process. if not, plus
- int& retryTime = retryCounts[timeoutSegment];
- if (retryTime >= m_retryTime) {
- //fail this process
- std::cerr << "Retry timeout: " << processId << std::endl;
- m_processes.erase(processId);
- return;
- }
- else {
- //Reput it in the queue, retryTime++
- retryTime++;
- Interest retryInterest(interest.getName());
- retryInterest.setInterestLifetime(m_interestLifetime);
- face.expressInterest(retryInterest,
- bind(&WriteHandle::onSegmentData, this, _1, _2, processId),
- bind(&WriteHandle::onSegmentTimeout, this, _1, processId), // Nack
- bind(&WriteHandle::onSegmentTimeout, this, _1, processId));
- }
-
-}
-
-void
-WriteHandle::handleCheckCommand(const Name& prefix, const Interest& interest,
- const ndn::mgmt::ControlParameters& parameter,
- const ndn::mgmt::CommandContinuation& done)
-{
- const RepoCommandParameter& repoParameter = dynamic_cast<const RepoCommandParameter&>(parameter);
-
- //check whether this process exists
- ProcessId processId = repoParameter.getProcessId();
- if (m_processes.count(processId) == 0) {
- std::cerr << "no such processId: " << processId << std::endl;
- done(negativeReply("No such this process is in progress", 404));
- return;
- }
-
- ProcessInfo& process = m_processes[processId];
-
- RepoCommandResponse& response = process.response;
-
- //Check whether it is single data fetching
- if (!response.hasStartBlockId() &&
- !response.hasEndBlockId()) {
- //reply(interest, response);
- done(response);
- return;
- }
-
- //read if noEndtimeout
- if (!response.hasEndBlockId()) {
- extendNoEndTime(process);
- done(response);
- return;
- }
- else {
- done(response);
- }
-}
-
-void
-WriteHandle::deferredDeleteProcess(ProcessId processId)
-{
- scheduler.scheduleEvent(PROCESS_DELETE_TIME,
- bind(&WriteHandle::deleteProcess, this, processId));
-}
-
-void
WriteHandle::processSingleInsertCommand(const Interest& interest, RepoCommandParameter& parameter,
const ndn::mgmt::CommandContinuation& done)
{
@@ -409,16 +127,108 @@
done(response);
response.setCode(300);
-
Interest fetchInterest(parameter.getName());
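+ // CanBePrefix stays false by default: without Selectors, the fetch Interest
+ // must carry the exact name of the Data to insert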
+ fetchInterest.setCanBePrefix(m_canBePrefix);
fetchInterest.setInterestLifetime(m_interestLifetime);
- if (parameter.hasSelectors()) {
- fetchInterest.setSelectors(parameter.getSelectors());
- }
face.expressInterest(fetchInterest,
- bind(&WriteHandle::onData, this, _1, _2, processId),
- bind(&WriteHandle::onTimeout, this, _1, processId), // Nack
- bind(&WriteHandle::onTimeout, this, _1, processId));
+ std::bind(&WriteHandle::onData, this, _1, _2, processId),
+ std::bind(&WriteHandle::onTimeout, this, _1, processId), // Nack
+ std::bind(&WriteHandle::onTimeout, this, _1, processId));
+}
+
+void
+WriteHandle::segInit(ProcessId processId, const RepoCommandParameter& parameter)
+{
+ // use SegmentFetcher to send fetch interest.
+ ProcessInfo& process = m_processes[processId];
+ Name name = parameter.getName();
+ SegmentNo startBlockId = parameter.getStartBlockId();
+
+ uint64_t initialCredit = m_credit;
+
+ if (parameter.hasEndBlockId()) {
+ initialCredit =
+ std::min(initialCredit, parameter.getEndBlockId() - parameter.getStartBlockId() + 1);
+ }
+ else {
+ // set noEndTimeout timer
+ process.noEndTime = ndn::time::steady_clock::now() +
+ m_noEndTimeout;
+ }
+
+ Name fetchName = name;
+ SegmentNo segment = startBlockId;
+ fetchName.appendSegment(segment);
+ Interest interest(fetchName);
+
+ ndn::util::SegmentFetcher::Options options;
+ options.initCwnd = initialCredit;
+ options.interestLifetime = m_interestLifetime;
+ options.maxTimeout = m_maxTimeout;
+ auto fetcher = ndn::util::SegmentFetcher::start(face, interest, m_validator, options);
+ fetcher->onError.connect([] (uint32_t errorCode, const std::string& errorMsg)
+ {NDN_LOG_ERROR("Error: " << errorMsg);});
+ fetcher->afterSegmentValidated.connect([this, fetcher, processId] (const Data& data)
+                                        {onSegmentData(*fetcher, data, processId);});
+ fetcher->afterSegmentTimedOut.connect([this, fetcher, processId] ()
+                                       {onSegmentTimeout(*fetcher, processId);});
+}
+
+void
+WriteHandle::onSegmentData(ndn::util::SegmentFetcher& fetcher, const Data& data, ProcessId processId)
+{
+ auto it = m_processes.find(processId);
+ if (it == m_processes.end()) {
+ fetcher.stop();
+ return;
+ }
+
+ RepoCommandResponse& response = it->second.response;
+
+ //insert data
+ if (storageHandle.insertData(data)) {
+ response.setInsertNum(response.getInsertNum() + 1);
+ }
+
+ ProcessInfo& process = m_processes[processId];
+ // check whether the noEnd timeout has expired
+ if (!response.hasEndBlockId()) {
+
+ ndn::time::steady_clock::TimePoint& noEndTime = process.noEndTime;
+ ndn::time::steady_clock::TimePoint now = ndn::time::steady_clock::now();
+
+ if (now > noEndTime) {
+ NDN_LOG_DEBUG("noEndtimeout: " << processId);
+ //StatusCode should be refreshed as 405
+ response.setCode(405);
+ //schedule a delete event
+ deferredDeleteProcess(processId);
+ fetcher.stop();
+ return;
+ }
+ }
+
+ // check whether all requested segments have been inserted; if so, finish this process
+ if (response.hasEndBlockId()) {
+ uint64_t nSegments = response.getEndBlockId() - response.getStartBlockId() + 1;
+ if (response.getInsertNum() >= nSegments) {
+ //All the data has been inserted, StatusCode is refreshed as 200
+ response.setCode(200);
+ deferredDeleteProcess(processId);
+ fetcher.stop();
+ return;
+ }
+ }
+}
+
+void
+WriteHandle::onSegmentTimeout(ndn::util::SegmentFetcher& fetcher, ProcessId processId)
+{
+ NDN_LOG_DEBUG("SegTimeout");
+ if (m_processes.count(processId) == 0) {
+ fetcher.stop();
+ return;
+ }
}
void
@@ -470,6 +280,49 @@
}
void
+WriteHandle::handleCheckCommand(const Name& prefix, const Interest& interest,
+ const ndn::mgmt::ControlParameters& parameter,
+ const ndn::mgmt::CommandContinuation& done)
+{
+ const RepoCommandParameter& repoParameter = dynamic_cast<const RepoCommandParameter&>(parameter);
+
+ //check whether this process exists
+ ProcessId processId = repoParameter.getProcessId();
+ if (m_processes.count(processId) == 0) {
+ NDN_LOG_DEBUG("no such processId: " << processId);
+ done(negativeReply("No such this process is in progress", 404));
+ return;
+ }
+
+ ProcessInfo& process = m_processes[processId];
+
+ RepoCommandResponse& response = process.response;
+
+ //Check whether it is single data fetching
+ if (!response.hasStartBlockId() && !response.hasEndBlockId()) {
+ done(response);
+ return;
+ }
+
+ // no EndBlockId yet: extend the noEnd timeout
+ if (!response.hasEndBlockId()) {
+ extendNoEndTime(process);
+ done(response);
+ return;
+ }
+ else {
+ done(response);
+ }
+}
+
+void
+WriteHandle::deferredDeleteProcess(ProcessId processId)
+{
+ scheduler.scheduleEvent(PROCESS_DELETE_TIME,
+ std::bind(&WriteHandle::deleteProcess, this, processId));
+}
+
+void
WriteHandle::extendNoEndTime(ProcessInfo& process)
{
ndn::time::steady_clock::TimePoint& noEndTime = process.noEndTime;
diff --git a/src/handles/write-handle.hpp b/src/handles/write-handle.hpp
index 686d68c..115801e 100644
--- a/src/handles/write-handle.hpp
+++ b/src/handles/write-handle.hpp
@@ -23,15 +23,12 @@
#include "command-base-handle.hpp"
#include <ndn-cxx/mgmt/dispatcher.hpp>
+#include <ndn-cxx/util/segment-fetcher.hpp>
#include <queue>
namespace repo {
-using std::map;
-using std::pair;
-using std::queue;
-
/**
* @brief WriteHandle provides basic credit based congestion control.
*
@@ -80,12 +77,11 @@
*/
struct ProcessInfo
{
- //ProcessId id;
RepoCommandResponse response;
- queue<SegmentNo> nextSegmentQueue; ///< queue of waiting segment
+ std::queue<SegmentNo> nextSegmentQueue; ///< queue of waiting segment
/// to be sent when having credits
SegmentNo nextSegment; ///< last segment put into the nextSegmentQueue
- map<SegmentNo, int> retryCounts; ///< to store retrying times of timeout segment
+ std::map<SegmentNo, int> retryCounts; ///< to store retrying times of timeout segment
int credit; ///< congestion control credits of process
/**
@@ -136,16 +132,13 @@
* @brief fetch segmented data
*/
void
- onSegmentData(const Interest& interest, const Data& data, ProcessId processId);
-
- void
- onSegmentDataValidated(const Interest& interest, const Data& data, ProcessId processId);
+ onSegmentData(ndn::util::SegmentFetcher& fetcher, const Data& data, ProcessId processId);
/**
- * @brief Timeout when fetching segmented data. Data can be fetched RETRY_TIMEOUT times.
+ * @brief handle a timeout while fetching segmented data
*/
void
- onSegmentTimeout(const Interest& interest, ProcessId processId);
+ onSegmentTimeout(ndn::util::SegmentFetcher& fetcher, ProcessId processId);
/**
* @brief initiate fetching segmented data
@@ -153,30 +146,12 @@
void
segInit(ProcessId processId, const RepoCommandParameter& parameter);
- /**
- * @brief control for sending interests in function onSegmentData()
- */
- void
- onSegmentDataControl(ProcessId processId, const Interest& interest);
-
- /**
- * @brief control for sending interest in function onSegmentTimeout
- */
- void
- onSegmentTimeoutControl(ProcessId processId, const Interest& interest);
-
void
processSegmentedInsertCommand(const Interest& interest, RepoCommandParameter& parameter,
const ndn::mgmt::CommandContinuation& done);
private:
/**
- * @brief failure of validation for both one or segmented data
- */
- void
- onDataValidationFailed(const Data& data, const ValidationError& error);
-
- /**
* @brief extends noEndTime of process if not noEndTimeout, set StatusCode 405
*
* called by onCheckValidated() if there is no EndBlockId. If not noEndTimeout,
@@ -213,9 +188,12 @@
private:
Validator& m_validator;
- map<ProcessId, ProcessInfo> m_processes;
- int m_retryTime;
+
+ std::map<ProcessId, ProcessInfo> m_processes;
+
int m_credit;
+ bool m_canBePrefix;
+ ndn::time::milliseconds m_maxTimeout;
ndn::time::milliseconds m_noEndTimeout;
ndn::time::milliseconds m_interestLifetime;
};