face: UDP face/channel/factory
Change-Id: I4683b45378637133982efd23edd16a0c35148948
refs #1189
diff --git a/daemon/face/datagram-face.hpp b/daemon/face/datagram-face.hpp
new file mode 100644
index 0000000..f4f9a18
--- /dev/null
+++ b/daemon/face/datagram-face.hpp
@@ -0,0 +1,267 @@
+/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
+/**
+ * Copyright (C) 2014 Named Data Networking Project
+ * See COPYING for copyright and distribution information.
+ */
+
+#ifndef NFD_FACE_DATAGRAM_FACE_HPP
+#define NFD_FACE_DATAGRAM_FACE_HPP
+
+#include "face.hpp"
+
+namespace nfd {
+
+/** \brief Base implementation of Face for datagram-oriented transports
+ *
+ *  \tparam T a boost::asio protocol type (e.g., boost::asio::ip::udp),
+ *            exposed to subclasses via the nested \p protocol typedef
+ */
+template <class T>
+class DatagramFace : public Face
+{
+public:
+  typedef T protocol;
+
+  /** \brief Create the face over an already opened socket
+   *
+   *  The constructor immediately arms an asynchronous receive on \p socket.
+   */
+  explicit
+  DatagramFace(const shared_ptr<typename protocol::socket>& socket);
+
+  virtual
+  ~DatagramFace();
+
+  // from Face
+  virtual void
+  sendInterest(const Interest& interest);
+
+  virtual void
+  sendData(const Data& data);
+
+  virtual void
+  close();
+
+  /** \brief Completion handler for async_send
+   *
+   *  \param wire the encoded packet, bound into the handler so its buffer
+   *              stays valid for the whole duration of the send
+   */
+  void
+  handleSend(const boost::system::error_code& error,
+             const Block& wire);
+
+  /** \brief Completion handler for async_receive; dispatches the datagram
+   *         and re-arms the receive operation
+   */
+  void
+  handleReceive(const boost::system::error_code& error,
+               size_t nBytesReceived);
+
+protected:
+
+  /** \brief Decode one received datagram and dispatch it as Interest/Data
+   *
+   *  On a fatal socket error, closes the socket and fires onFail.
+   */
+  void
+  receiveDatagram(const uint8_t* buffer,
+                  size_t nBytesReceived,
+                  const boost::system::error_code& error);
+
+  /** \brief No-op; posted to the io_service with a shared_ptr to this face
+   *         so the face outlives all handlers pending at close time
+   */
+  void
+  keepFaceAliveUntilAllHandlersExecuted(const shared_ptr<Face>& face);
+
+  void
+  closeSocket();
+
+protected:
+  shared_ptr<typename protocol::socket> m_socket;
+  // receive buffer; one datagram is processed at a time
+  uint8_t m_inputBuffer[MAX_NDN_PACKET_SIZE];
+
+  NFD_LOG_INCLASS_DECLARE();
+
+};
+
+template <class T>
+inline
+DatagramFace<T>::DatagramFace(const shared_ptr<typename DatagramFace::protocol::socket>& socket)
+  : m_socket(socket)
+{
+  // arm the first receive; handleReceive re-arms itself after each datagram
+  m_socket->async_receive(boost::asio::buffer(m_inputBuffer, MAX_NDN_PACKET_SIZE), 0,
+                          bind(&DatagramFace<T>::handleReceive, this, _1, _2));
+}
+
+template <class T>
+inline
+DatagramFace<T>::~DatagramFace()
+{
+  // socket teardown happens in close()/closeSocket(); nothing to release here
+}
+
+/** \brief Send an Interest over the datagram socket
+ *
+ *  The encoded Block is bound into handleSend; since Block shares ownership
+ *  of the underlying buffer, this keeps the bytes alive until the
+ *  asynchronous send completes.
+ */
+template <class T>
+inline void
+DatagramFace<T>::sendInterest(const Interest& interest)
+{
+  // encode once instead of calling wireEncode() three times
+  const Block& payload = interest.wireEncode();
+  m_socket->async_send(boost::asio::buffer(payload.wire(), payload.size()),
+                       bind(&DatagramFace<T>::handleSend, this, _1, payload));
+}
+
+/** \brief Send a Data packet over the datagram socket
+ *
+ *  The encoded Block is bound into handleSend; since Block shares ownership
+ *  of the underlying buffer, this keeps the bytes alive until the
+ *  asynchronous send completes.
+ */
+template <class T>
+inline void
+DatagramFace<T>::sendData(const Data& data)
+{
+  // encode once instead of calling wireEncode() three times
+  const Block& payload = data.wireEncode();
+  m_socket->async_send(boost::asio::buffer(payload.wire(), payload.size()),
+                       bind(&DatagramFace<T>::handleSend, this, _1, payload));
+}
+
+/** \brief Handle completion of an asynchronous send
+ *
+ *  \param error result reported by boost::asio for the send operation
+ *  \param wire  the packet that was sent; bound here so its buffer stayed
+ *               valid for the whole duration of the send
+ *
+ *  On a fatal error the socket is closed and onFail is fired.
+ */
+template <class T>
+inline void
+DatagramFace<T>::handleSend(const boost::system::error_code& error,
+                            const Block& wire)
+{
+  if (error != 0) {
+    if (error == boost::system::errc::operation_canceled) // when socket is closed by someone
+      return;
+
+    // someone else already closed the socket and reported the failure
+    if (!m_socket->is_open())
+    {
+      onFail("Tunnel closed");
+      return;
+    }
+
+    NFD_LOG_WARN("[id:" << this->getId()
+                 << ",endpoint:" << m_socket->local_endpoint()
+                 << "] Send operation failed, closing socket: "
+                 << error.category().message(error.value()));
+
+    closeSocket();
+
+    if (error == boost::asio::error::eof)
+    {
+      onFail("Tunnel closed");
+    }
+    else
+    {
+      onFail("Send operation failed, closing socket: " +
+             error.category().message(error.value()));
+    }
+    return;
+  }
+
+  NFD_LOG_TRACE("[id:" << this->getId()
+                << ",endpoint:" << m_socket->local_endpoint()
+                << "] Successfully sent: " << wire.size() << " bytes");
+  // do nothing else (the handler exists to retain validity of wire memory block)
+}
+
+/** \brief Close the face: shut down the socket and fire onFail
+ *
+ *  Idempotent: calling close() on an already closed socket is a no-op.
+ */
+template <class T>
+inline void
+DatagramFace<T>::close()
+{
+  if (!m_socket->is_open())
+    return;
+
+  NFD_LOG_INFO("[id:" << this->getId()
+               << ",endpoint:" << m_socket->local_endpoint()
+               << "] Close tunnel");
+
+  closeSocket();
+  onFail("Close tunnel");
+}
+
+/** \brief Handle completion of an asynchronous receive
+ *
+ *  Dispatches the datagram via receiveDatagram() and re-arms the receive,
+ *  unless receiveDatagram() closed the socket on a fatal error.
+ */
+template <class T>
+inline void
+DatagramFace<T>::handleReceive(const boost::system::error_code& error,
+                               size_t nBytesReceived)
+{
+  receiveDatagram(m_inputBuffer, nBytesReceived, error);
+
+  // receiveDatagram closes the socket on fatal errors; re-arming a receive
+  // on a closed socket would keep invoking this handler with an error code,
+  // spinning forever
+  if (!m_socket->is_open())
+    return;
+
+  m_socket->async_receive(boost::asio::buffer(m_inputBuffer, MAX_NDN_PACKET_SIZE), 0,
+                          bind(&DatagramFace<T>::handleReceive, this, _1, _2));
+}
+
+/** \brief Process one received datagram
+ *
+ *  \param buffer         start of the received bytes
+ *  \param nBytesReceived number of bytes received (0 is treated as an error)
+ *  \param error          receive status reported by boost::asio
+ *
+ *  A datagram that does not decode to exactly one recognized TLV block is
+ *  dropped; a fatal socket error closes the socket and fires onFail.
+ */
+template <class T>
+inline void
+DatagramFace<T>::receiveDatagram(const uint8_t* buffer,
+                                 size_t nBytesReceived,
+                                 const boost::system::error_code& error)
+{
+  if (error != 0 || nBytesReceived == 0) {
+    if (error == boost::system::errc::operation_canceled) // when socket is closed by someone
+      return;
+
+    // this should be unnecessary, but just in case
+    if (!m_socket->is_open())
+    {
+      onFail("Tunnel closed");
+      return;
+    }
+
+    NFD_LOG_WARN("[id:" << this->getId()
+                 << ",endpoint:" << m_socket->local_endpoint()
+                 << "] Receive operation failed: "
+                 << error.category().message(error.value()));
+
+    closeSocket();
+
+    if (error == boost::asio::error::eof)
+    {
+      onFail("Tunnel closed");
+    }
+    else
+    {
+      onFail("Receive operation failed, closing socket: " +
+             error.category().message(error.value()));
+    }
+    return;
+  }
+
+  NFD_LOG_TRACE("[id:" << this->getId()
+                << ",endpoint:" << m_socket->local_endpoint()
+                << "] Received: " << nBytesReceived << " bytes");
+
+  /// @todo Eliminate reliance on exceptions in this path
+  try {
+    // parse the datagram as a single TLV block
+    Block element(buffer, nBytesReceived);
+
+    // a datagram must contain exactly one TLV element, no trailing bytes
+    if (element.size() != nBytesReceived)
+      {
+        NFD_LOG_WARN("[id:" << this->getId()
+                     << ",endpoint:" << m_socket->local_endpoint()
+                     << "] Received datagram size and decoded "
+                     << "element size don't match");
+        /// @todo this message should not extend the face lifetime
+        return;
+      }
+    if (!this->decodeAndDispatchInput(element))
+      {
+        NFD_LOG_WARN("[id:" << this->getId()
+                     << ",endpoint:" << m_socket->local_endpoint()
+                     << "] Received unrecognized block of type ["
+                     << element.type() << "]");
+        // ignore unknown packet and proceed
+        /// @todo this message should not extend the face lifetime
+        return;
+      }
+  }
+  catch(const tlv::Error& e) {
+    NFD_LOG_WARN("[id:" << this->getId()
+                 << ",endpoint:" << m_socket->local_endpoint()
+                 << "] Received input is invalid");
+    /// @todo this message should not extend the face lifetime
+    return;
+  }
+}
+
+
+/** \brief Intentionally empty
+ *
+ *  closeSocket() posts this method to the io_service with a shared_ptr to
+ *  this face bound as \p face; the bound shared_ptr guarantees the Face is
+ *  not destroyed before all handlers queued at close time have run.
+ */
+template <class T>
+inline void
+DatagramFace<T>::keepFaceAliveUntilAllHandlersExecuted(const shared_ptr<Face>& face)
+{
+}
+
+/** \brief Shut down and close the socket without throwing
+ */
+template <class T>
+inline void
+DatagramFace<T>::closeSocket()
+{
+  // grab the io_service reference before the socket is torn down
+  boost::asio::io_service& io = m_socket->get_io_service();
+
+  // use the non-throwing variants and ignore errors, if any
+  boost::system::error_code error;
+  m_socket->shutdown(protocol::socket::shutdown_both, error);
+  m_socket->close(error);
+  // after this, handlers will be called with an error code
+
+  // ensure that the Face object is alive at least until all pending
+  // handlers are dispatched
+  io.post(bind(&DatagramFace<T>::keepFaceAliveUntilAllHandlersExecuted,
+               this, this->shared_from_this()));
+}
+
+} // namespace nfd
+
+#endif // NFD_FACE_DATAGRAM_FACE_HPP
diff --git a/daemon/face/multicast-udp-face.cpp b/daemon/face/multicast-udp-face.cpp
new file mode 100644
index 0000000..edd2ff4
--- /dev/null
+++ b/daemon/face/multicast-udp-face.cpp
@@ -0,0 +1,58 @@
+/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
+/**
+ * Copyright (C) 2014 Named Data Networking Project
+ * See COPYING for copyright and distribution information.
+ */
+
+#include "multicast-udp-face.hpp"
+
+namespace nfd {
+
+NFD_LOG_INIT("MulticastUdpFace");
+
+
+MulticastUdpFace::MulticastUdpFace(const shared_ptr<MulticastUdpFace::protocol::socket>& socket)
+  : UdpFace(socket)
+{
+  NFD_LOG_DEBUG("Face creation. Multicast group: "
+                << m_socket->local_endpoint());
+  // the socket is bound to the multicast group endpoint (see
+  // UdpFactory::createMulticastFace), so local_endpoint() is the group
+  m_multicastGroup = m_socket->local_endpoint();
+}
+
+/** \brief Return the multicast group endpoint this face sends to
+ */
+const boost::asio::ip::udp::endpoint&
+MulticastUdpFace::getMulticastGroup() const
+{
+  return m_multicastGroup;
+}
+
+/** \brief Send an Interest to the multicast group
+ *
+ *  The encoded Block bound into handleSend keeps the underlying buffer
+ *  alive until the asynchronous send completes.
+ */
+void
+MulticastUdpFace::sendInterest(const Interest& interest)
+{
+  NFD_LOG_DEBUG("Sending interest");
+  // encode once instead of calling wireEncode() three times
+  const Block& payload = interest.wireEncode();
+  m_socket->async_send_to(boost::asio::buffer(payload.wire(), payload.size()),
+                          m_multicastGroup,
+                          bind(&DatagramFace<protocol>::handleSend, this, _1, payload));
+}
+
+/** \brief Send a Data packet to the multicast group
+ *
+ *  The encoded Block bound into handleSend keeps the underlying buffer
+ *  alive until the asynchronous send completes.
+ */
+void
+MulticastUdpFace::sendData(const Data& data)
+{
+  NFD_LOG_DEBUG("Sending data");
+  // encode once instead of calling wireEncode() three times
+  const Block& payload = data.wireEncode();
+  m_socket->async_send_to(boost::asio::buffer(payload.wire(), payload.size()),
+                          m_multicastGroup,
+                          bind(&DatagramFace<protocol>::handleSend, this, _1, payload));
+}
+
+bool
+MulticastUdpFace::isMultiAccess() const
+{
+  // a multicast face reaches multiple remote nodes by definition
+  return true;
+}
+
+} // namespace nfd
diff --git a/daemon/face/multicast-udp-face.hpp b/daemon/face/multicast-udp-face.hpp
new file mode 100644
index 0000000..46404da
--- /dev/null
+++ b/daemon/face/multicast-udp-face.hpp
@@ -0,0 +1,49 @@
+/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
+/**
+ * Copyright (C) 2014 Named Data Networking Project
+ * See COPYING for copyright and distribution information.
+ */
+
+#ifndef NFD_FACE_MULTICAST_UDP_FACE_HPP
+#define NFD_FACE_MULTICAST_UDP_FACE_HPP
+
+#include "udp-face.hpp"
+
+namespace nfd
+{
+
+/**
+ * \brief Implementation of Face abstraction that uses
+ *        multicast UDP as underlying transport mechanism
+ */
+class MulticastUdpFace : public UdpFace
+{
+public:
+  typedef boost::asio::ip::udp protocol;
+
+  /**
+   * \brief Creates a Udp face for multicast communication
+   *
+   * \param socket socket already joined to the multicast group
+   */
+  explicit
+  MulticastUdpFace(const shared_ptr<protocol::socket>& socket);
+
+  /** \brief Return the multicast group endpoint this face sends to */
+  const boost::asio::ip::udp::endpoint&
+  getMulticastGroup() const;
+
+  // from Face
+  virtual void
+  sendInterest(const Interest& interest);
+
+  virtual void
+  sendData(const Data& data);
+
+  virtual bool
+  isMultiAccess() const;
+
+private:
+  // destination group; captured from the socket's local endpoint at creation
+  boost::asio::ip::udp::endpoint m_multicastGroup;
+};
+
+} // namespace nfd
+
+#endif // NFD_FACE_MULTICAST_UDP_FACE_HPP
diff --git a/daemon/face/udp-channel.cpp b/daemon/face/udp-channel.cpp
new file mode 100644
index 0000000..2127cd6
--- /dev/null
+++ b/daemon/face/udp-channel.cpp
@@ -0,0 +1,202 @@
+/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
+/**
+ * Copyright (C) 2014 Named Data Networking Project
+ * See COPYING for copyright and distribution information.
+ */
+
+#include "udp-channel.hpp"
+#include "core/global-io.hpp"
+
+namespace nfd {
+
+NFD_LOG_INIT("UdpChannel");
+
+using namespace boost::asio;
+
+/** \brief Create the channel and bind its "accept" socket to localEndpoint
+ *
+ *  \throws UdpChannel::Error if bind fails
+ *
+ *  NOTE(review): \p timeout is accepted but not stored or used yet
+ *  (face expiration is still a @todo) — confirm before relying on it.
+ */
+UdpChannel::UdpChannel(const udp::Endpoint& localEndpoint,
+                       const time::Duration& timeout)
+  : m_localEndpoint(localEndpoint)
+  , m_isListening(false)
+{
+  /// \todo the reuse_address works as we want in Linux, but in other system could be different.
+  ///       We need to check this
+  ///       (SO_REUSEADDR doesn't behave uniformly in different OS)
+
+  m_socket = make_shared<ip::udp::socket>(boost::ref(getGlobalIoService()));
+  m_socket->open(m_localEndpoint.protocol());
+  m_socket->set_option(boost::asio::ip::udp::socket::reuse_address(true));
+
+  try {
+    m_socket->bind(m_localEndpoint);
+  }
+  catch (boost::system::system_error& e) {
+    // the bind failed, so the socket is useless now
+    m_socket->close();
+    throw Error("Failed to properly configure the socket. "
+                "UdpChannel creation aborted, check the address (" + std::string(e.what()) + ")");
+  }
+}
+
+/** \brief Start accepting datagrams from not-yet-known remote endpoints
+ *
+ *  \throws UdpChannel::Error if called more than once
+ */
+void
+UdpChannel::listen(const FaceCreatedCallback& onFaceCreated,
+                   const ConnectFailedCallback& onListenFailed)
+{
+  if (m_isListening) {
+    throw Error("Listen already called on this channel");
+  }
+  m_isListening = true;
+
+  // stored because async_receive_from's handler signature has no room for
+  // the callbacks; newPeer() reads them when a new remote endpoint appears
+  onFaceCreatedNewPeerCallback = onFaceCreated;
+  onConnectFailedNewPeerCallback = onListenFailed;
+
+  m_socket->async_receive_from(boost::asio::buffer(m_inputBuffer, MAX_NDN_PACKET_SIZE),
+                               m_newRemoteEndpoint,
+                               bind(&UdpChannel::newPeer, this,
+                                    boost::asio::placeholders::error,
+                                    boost::asio::placeholders::bytes_transferred));
+}
+
+
+/** \brief Create (or reuse) a face connected to \p remoteEndpoint
+ *
+ *  A dedicated socket is bound to the channel's local endpoint and
+ *  connected to the remote one, so the kernel dispatches subsequent
+ *  datagrams from that peer directly to the face.
+ *
+ *  \throws UdpChannel::Error if bind or connect fail
+ */
+void
+UdpChannel::connect(const udp::Endpoint& remoteEndpoint,
+                    const FaceCreatedCallback& onFaceCreated)
+{
+  // reuse an existing face for this remote endpoint, if any
+  ChannelFaceMap::iterator i = m_channelFaces.find(remoteEndpoint);
+  if (i != m_channelFaces.end()) {
+    onFaceCreated(i->second);
+    return;
+  }
+
+  //creating a new socket for the face that will be created soon
+  shared_ptr<ip::udp::socket> clientSocket =
+    make_shared<ip::udp::socket>(boost::ref(getGlobalIoService()));
+
+  clientSocket->open(m_localEndpoint.protocol());
+  clientSocket->set_option(ip::udp::socket::reuse_address(true));
+
+  try {
+    clientSocket->bind(m_localEndpoint);
+    clientSocket->connect(remoteEndpoint); //@todo connect or async_connect
+    //(since there is no handshake the connect shouldn't block). If we go for
+    //async_connect, make sure that if in the meantime we receive a UDP pkt from
+    //that endpoint nothing bad happen (it's difficult, but it could happen)
+  }
+  catch (boost::system::system_error& e) {
+    clientSocket->close();
+    throw Error("Failed to properly configure the socket. Check the address ("
+                + std::string(e.what()) + ")");
+  }
+  createFace(clientSocket, onFaceCreated);
+}
+
+/** \brief Asynchronously resolve host/port, then connect to the result
+ *
+ *  Never blocks; resolution failures are reported via \p onConnectFailed
+ *  (from handleEndpointResolution).
+ */
+void
+UdpChannel::connect(const std::string& remoteHost,
+                    const std::string& remotePort,
+                    const FaceCreatedCallback& onFaceCreated,
+                    const ConnectFailedCallback& onConnectFailed)
+{
+  ip::udp::resolver::query query(remoteHost, remotePort);
+  // the resolver is bound into the handler to keep it alive until resolution completes
+  shared_ptr<ip::udp::resolver> resolver =
+    make_shared<ip::udp::resolver>(boost::ref(getGlobalIoService()));
+
+  resolver->async_resolve(query,
+                          bind(&UdpChannel::handleEndpointResolution, this, _1, _2,
+                               onFaceCreated, onConnectFailed,
+                               resolver));
+}
+
+/** \brief Completion handler for async_resolve
+ *
+ *  On success, connects to the first resolved endpoint; on failure,
+ *  reports through \p onConnectFailed.
+ *
+ *  \param resolver kept alive by being bound into this handler
+ */
+void
+UdpChannel::handleEndpointResolution(const boost::system::error_code& error,
+                                     ip::udp::resolver::iterator remoteEndpoint,
+                                     const FaceCreatedCallback& onFaceCreated,
+                                     const ConnectFailedCallback& onConnectFailed,
+                                     const shared_ptr<ip::udp::resolver>& resolver)
+{
+  // an end iterator means resolution produced no usable endpoint
+  if (error != 0 ||
+      remoteEndpoint == ip::udp::resolver::iterator())
+    {
+      if (error == boost::system::errc::operation_canceled) // when socket is closed by someone
+        return;
+
+      NFD_LOG_DEBUG("Remote endpoint hostname or port cannot be resolved: "
+                    << error.category().message(error.value()));
+
+      onConnectFailed("Remote endpoint hostname or port cannot be resolved: " +
+                      error.category().message(error.value()));
+      return;
+    }
+
+  connect(*remoteEndpoint, onFaceCreated);
+}
+
+/** \brief Number of faces currently attached to this channel
+ */
+size_t
+UdpChannel::size() const
+{
+  return m_channelFaces.size();
+}
+
+
+/** \brief Wrap an already connected socket into a UdpFace and register it
+ *
+ *  \param socket socket already bound and connected to the remote endpoint
+ *  \return the newly created face (also stored in m_channelFaces)
+ */
+shared_ptr<UdpFace>
+UdpChannel::createFace(const shared_ptr<ip::udp::socket>& socket,
+                       const FaceCreatedCallback& onFaceCreated)
+{
+  udp::Endpoint remoteEndpoint = socket->remote_endpoint();
+
+  shared_ptr<UdpFace> face = make_shared<UdpFace>(boost::cref(socket));
+  // drop the face from the map when it fails, so the endpoint can reconnect
+  face->onFail += bind(&UdpChannel::afterFaceFailed, this, remoteEndpoint);
+
+  onFaceCreated(face);
+  m_channelFaces[remoteEndpoint] = face;
+  return face;
+}
+
+/** \brief Handle a datagram arriving from a remote endpoint with no face yet
+ *
+ *  Creates (or finds) the face for m_newRemoteEndpoint, hands it the first
+ *  datagram, then re-arms the listening receive.
+ */
+void
+UdpChannel::newPeer(const boost::system::error_code& error,
+                    std::size_t nBytesReceived)
+{
+  if (error) {
+    if (error == boost::system::errc::operation_canceled) // when socket is closed by someone
+      return;
+
+    // the datagram is unusable, but the channel can keep listening
+    NFD_LOG_WARN("UdpChannel::newPeer receive operation failed: "
+                 << error.category().message(error.value()));
+
+    m_socket->async_receive_from(boost::asio::buffer(m_inputBuffer, MAX_NDN_PACKET_SIZE),
+                                 m_newRemoteEndpoint,
+                                 bind(&UdpChannel::newPeer, this,
+                                      boost::asio::placeholders::error,
+                                      boost::asio::placeholders::bytes_transferred));
+    return;
+  }
+
+  NFD_LOG_DEBUG("UdpChannel::newPeer from " << m_newRemoteEndpoint);
+  ChannelFaceMap::iterator i = m_channelFaces.find(m_newRemoteEndpoint);
+  if (i != m_channelFaces.end()) {
+    //The face already exists.
+    //Usually this shouldn't happen, because the channel creates a Udpface
+    //as soon as it receives a pkt from a new endpoint and then the
+    //traffic is dispatched by the kernel directly to the face.
+    //However, if the node receives multiple packets from the same endpoint
+    //"at the same time", while the channel is creating the face the kernel
+    //could dispatch the other pkts to the channel because the face is not yet
+    //ready. In this case, the channel has to pass the pkt to the face
+    NFD_LOG_DEBUG("The creation of the face for the remote endpoint "
+                  << m_newRemoteEndpoint
+                  << " is in progress");
+    //Passing the message to the correspondent face
+    i->second->handleFirstReceive(m_inputBuffer, nBytesReceived, error);
+  }
+  else {
+    // bind/connect can throw; an exception escaping an asio completion
+    // handler would propagate out of io_service::run, so handle it here
+    try {
+      shared_ptr<ip::udp::socket> clientSocket =
+        make_shared<ip::udp::socket>(boost::ref(getGlobalIoService()));
+      clientSocket->open(m_localEndpoint.protocol());
+      clientSocket->set_option(ip::udp::socket::reuse_address(true));
+      clientSocket->bind(m_localEndpoint);
+      clientSocket->connect(m_newRemoteEndpoint);
+
+      shared_ptr<UdpFace> newFace = createFace(clientSocket, onFaceCreatedNewPeerCallback);
+
+      //Passing the message to the correspondent face
+      newFace->handleFirstReceive(m_inputBuffer, nBytesReceived, error);
+    }
+    catch (boost::system::system_error& e) {
+      NFD_LOG_WARN("Failed to create face for remote endpoint "
+                   << m_newRemoteEndpoint << ": " << e.what());
+    }
+  }
+
+  m_socket->async_receive_from(boost::asio::buffer(m_inputBuffer, MAX_NDN_PACKET_SIZE),
+                               m_newRemoteEndpoint,
+                               bind(&UdpChannel::newPeer, this,
+                                    boost::asio::placeholders::error,
+                                    boost::asio::placeholders::bytes_transferred));
+}
+
+
+/** \brief Remove a failed face from the channel's endpoint-to-face map
+ */
+void UdpChannel::afterFaceFailed(udp::Endpoint &endpoint)
+{
+  NFD_LOG_DEBUG("afterFaceFailed: " << endpoint);
+  m_channelFaces.erase(endpoint);
+}
+
+} // namespace nfd
diff --git a/daemon/face/udp-channel.hpp b/daemon/face/udp-channel.hpp
new file mode 100644
index 0000000..b987b5e
--- /dev/null
+++ b/daemon/face/udp-channel.hpp
@@ -0,0 +1,164 @@
+/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
+/**
+ * Copyright (C) 2014 Named Data Networking Project
+ * See COPYING for copyright and distribution information.
+ */
+
+#ifndef NFD_FACE_UDP_CHANNEL_HPP
+#define NFD_FACE_UDP_CHANNEL_HPP
+
+#include "common.hpp"
+#include "core/time.hpp"
+#include "udp-face.hpp"
+
+namespace nfd {
+
+namespace udp {
+ typedef boost::asio::ip::udp::endpoint Endpoint;
+} // namespace udp
+
+/**
+ * \brief Class implementing UDP-based channel to create faces
+ *
+ * The channel owns one "accept" socket bound to the local endpoint; each
+ * created face gets its own connected socket on the same local endpoint.
+ */
+class UdpChannel
+{
+public:
+
+  /**
+   * \brief Exception of UdpChannel
+   */
+  struct Error : public std::runtime_error
+  {
+    Error(const std::string& what) : runtime_error(what) {}
+  };
+
+  /**
+   * \brief Prototype for the callback called when face is created
+   *        (as a response to new incoming communication not managed
+   *        by any faces yet or after the connect function is created)
+   */
+  typedef function<void(const shared_ptr<UdpFace>& newFace)> FaceCreatedCallback;
+
+  /**
+   * \brief Prototype for the callback that is called when face is failed to
+   *        get created
+   */
+  typedef function<void(const std::string& reason)> ConnectFailedCallback;
+
+  /**
+   * \brief Create UDP channel for the local endpoint
+   *
+   * To enable creation of faces upon incoming connections,
+   * one needs to explicitly call UdpChannel::listen method.
+   * The created socket is bound to the localEndpoint.
+   * reuse_address option is set
+   *
+   * \throw UdpChannel::Error if bind on the socket fails
+   */
+  UdpChannel(const udp::Endpoint& localEndpoint,
+             const time::Duration& timeout);
+
+  /**
+   * \brief Enable listening on the local endpoint, accept connections,
+   *        and create faces when remote host makes a connection
+   * \param onFaceCreated  Callback to notify successful creation of the face
+   * \param onAcceptFailed Callback to notify when channel fails
+   *
+   * \throws UdpChannel::Error if called multiple times
+   */
+  void
+  listen(const FaceCreatedCallback& onFaceCreated,
+         const ConnectFailedCallback& onAcceptFailed);
+
+  /**
+   * \brief Create a face by establishing connection to remote endpoint
+   *
+   * \throw UdpChannel::Error if bind or connect on the socket fail
+   */
+  void
+  connect(const udp::Endpoint& remoteEndpoint,
+          const FaceCreatedCallback& onFaceCreated);
+  /**
+   * \brief Create a face by establishing connection to the specified
+   *        remote host and remote port
+   *
+   * This method will never block and will return immediately. All
+   * necessary hostname and port resolution and connection will happen
+   * in asynchronous mode.
+   *
+   * If connection cannot be established within specified timeout, it
+   * will be aborted.
+   */
+  void
+  connect(const std::string& remoteHost, const std::string& remotePort,
+          const FaceCreatedCallback& onFaceCreated,
+          const ConnectFailedCallback& onConnectFailed);
+
+  /**
+   * \brief Get number of faces in the channel
+   */
+  size_t
+  size() const;
+
+private:
+  /// \brief Wrap an already connected socket into a UdpFace and register it
+  shared_ptr<UdpFace>
+  createFace(const shared_ptr<boost::asio::ip::udp::socket>& socket,
+             const FaceCreatedCallback& onFaceCreated);
+  /// \brief Remove a failed face from m_channelFaces
+  void
+  afterFaceFailed(udp::Endpoint& endpoint);
+
+  /**
+   * \brief The UdpChannel has received a new pkt from a remote endpoint not yet
+   *        associated with any UdpFace
+   */
+  void
+  newPeer(const boost::system::error_code& error,
+          std::size_t nBytesReceived);
+
+  /// \brief Completion handler for asynchronous hostname/port resolution
+  void
+  handleEndpointResolution(const boost::system::error_code& error,
+                           boost::asio::ip::udp::resolver::iterator remoteEndpoint,
+                           const FaceCreatedCallback& onFaceCreated,
+                           const ConnectFailedCallback& onConnectFailed,
+                           const shared_ptr<boost::asio::ip::udp::resolver>& resolver);
+
+private:
+  udp::Endpoint m_localEndpoint;
+
+  /**
+   * \brief Endpoint used to store the information about the last new remote endpoint
+   */
+  udp::Endpoint m_newRemoteEndpoint;
+
+  /**
+   * Callbacks for face creation.
+   * New communications are detected using async_receive_from.
+   * Its handler has a fixed signature. No space for the face callback
+   */
+  FaceCreatedCallback onFaceCreatedNewPeerCallback;
+
+  // @todo remove the onConnectFailedNewPeerCallback if it remains unused
+  ConnectFailedCallback onConnectFailedNewPeerCallback;
+
+  /**
+   * \brief Socket used to "accept" new communication
+   **/
+  shared_ptr<boost::asio::ip::udp::socket> m_socket;
+
+  // receive buffer for datagrams arriving on the "accept" socket
+  uint8_t m_inputBuffer[MAX_NDN_PACKET_SIZE];
+
+  typedef std::map< udp::Endpoint, shared_ptr<UdpFace> > ChannelFaceMap;
+  ChannelFaceMap m_channelFaces;
+
+  /**
+   * \brief If true, it means the function listen has already been called
+   */
+  bool m_isListening;
+
+};
+
+} // namespace nfd
+
+#endif // NFD_FACE_UDP_CHANNEL_HPP
diff --git a/daemon/face/udp-face.cpp b/daemon/face/udp-face.cpp
new file mode 100644
index 0000000..0ee40cc
--- /dev/null
+++ b/daemon/face/udp-face.cpp
@@ -0,0 +1,38 @@
+/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
+/**
+ * Copyright (C) 2014 Named Data Networking Project
+ * See COPYING for copyright and distribution information.
+ */
+
+#include "udp-face.hpp"
+
+namespace nfd {
+
+NFD_LOG_INCLASS_TEMPLATE_SPECIALIZATION_DEFINE(DatagramFace, UdpFace::protocol, "UdpFace");
+
+UdpFace::UdpFace(const shared_ptr<UdpFace::protocol::socket>& socket)
+  : DatagramFace<protocol>(socket)
+{
+  // all receive/send machinery is inherited from DatagramFace
+}
+
+/** \brief Process a datagram that was received by the UdpChannel's
+ *         listening socket before this face's own socket was ready
+ *
+ *  \param buffer         bytes received by the channel (channel's buffer)
+ *  \param nBytesReceived number of bytes in the datagram
+ *  \param error          receive status reported to the channel
+ */
+void
+UdpFace::handleFirstReceive(const uint8_t* buffer,
+                            std::size_t nBytesReceived,
+                            const boost::system::error_code& error)
+{
+  //checking if the received message size is too big.
+  //This check is redundant, since in the actual implementation a packet
+  //cannot be bigger than MAX_NDN_PACKET_SIZE (the channel's buffer size)
+  if (!error && (nBytesReceived > MAX_NDN_PACKET_SIZE))
+    {
+      NFD_LOG_WARN("[id:" << this->getId()
+                   << ",endpoint:" << m_socket->local_endpoint()
+                   << "] Received message too big. Maximum size is "
+                   << MAX_NDN_PACKET_SIZE );
+      return;
+    }
+  receiveDatagram(buffer, nBytesReceived, error);
+}
+
+
+} // namespace nfd
diff --git a/daemon/face/udp-face.hpp b/daemon/face/udp-face.hpp
new file mode 100644
index 0000000..126419f
--- /dev/null
+++ b/daemon/face/udp-face.hpp
@@ -0,0 +1,40 @@
+/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
+/**
+ * Copyright (C) 2014 Named Data Networking Project
+ * See COPYING for copyright and distribution information.
+ */
+
+#ifndef NFD_FACE_UDP_FACE_HPP
+#define NFD_FACE_UDP_FACE_HPP
+
+#include "datagram-face.hpp"
+
+namespace nfd
+{
+
+/**
+ * \brief Implementation of Face abstraction that uses UDP
+ *        as underlying transport mechanism
+ */
+class UdpFace : public DatagramFace<boost::asio::ip::udp>
+{
+public:
+  typedef boost::asio::ip::udp protocol;
+
+  /** \brief Create the face over an already connected UDP socket */
+  explicit
+  UdpFace(const shared_ptr<protocol::socket>& socket);
+
+  //@todo if needed by other datagramFaces, it could be moved to datagram-face.hpp
+  /**
+   * \brief Manages the first datagram received by the UdpChannel socket set on listening
+   *
+   * Called by UdpChannel when the kernel delivered a datagram for this
+   * face to the channel's socket before the face's socket was ready.
+   */
+  void
+  handleFirstReceive(const uint8_t* buffer,
+                     std::size_t nBytesReceived,
+                     const boost::system::error_code& error);
+
+};
+
+} // namespace nfd
+
+#endif // NFD_FACE_UDP_FACE_HPP
diff --git a/daemon/face/udp-factory.cpp b/daemon/face/udp-factory.cpp
new file mode 100644
index 0000000..8dafb57
--- /dev/null
+++ b/daemon/face/udp-factory.cpp
@@ -0,0 +1,216 @@
+/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
+/**
+ * Copyright (C) 2014 Named Data Networking Project
+ * See COPYING for copyright and distribution information.
+ */
+
+#include "udp-factory.hpp"
+#include "core/global-io.hpp"
+#include "core/resolver.hpp"
+
+namespace nfd {
+
+using namespace boost::asio;
+
+NFD_LOG_INIT("UdpFactory");
+
+UdpFactory::UdpFactory(const std::string& defaultPort/* = "6363"*/)
+  : m_defaultPort(defaultPort)
+{
+  // m_defaultPort is used by createFace() when the FaceUri carries no port
+}
+
+/** \brief Create (or retrieve) the unicast UDP channel bound to \p endpoint
+ *
+ *  \throws UdpFactory::Error if \p endpoint is a multicast address, or is
+ *          already allocated to a UDP multicast face
+ */
+shared_ptr<UdpChannel>
+UdpFactory::createChannel(const udp::Endpoint& endpoint,
+                          const time::Duration& timeout)
+{
+  NFD_LOG_DEBUG("Creating unicast " << endpoint);
+
+  // reject multicast endpoints up front, so the caller gets the accurate
+  // "use createMulticastFace" diagnostic even when a multicast face is
+  // already bound to this endpoint
+  if (endpoint.address().is_multicast()) {
+    throw Error("This method is only for unicast channel. The provided "
+                "endpoint is multicast. Use createMulticastFace to "
+                "create a multicast face");
+  }
+
+  shared_ptr<UdpChannel> channel = findChannel(endpoint);
+  if (static_cast<bool>(channel))
+    return channel;
+
+  //checking if the endpoint is already in use for multicast face
+  shared_ptr<MulticastUdpFace> multicast = findMulticastFace(endpoint);
+  if (static_cast<bool>(multicast))
+    throw Error("Cannot create the requested UDP unicast channel, local "
+                "endpoint is already allocated for a UDP multicast face");
+
+  channel = make_shared<UdpChannel>(boost::cref(endpoint),
+                                    timeout);
+  m_channels[endpoint] = channel;
+
+  return channel;
+}
+
+/** \brief Create a unicast UDP channel from host/port strings
+ *
+ *  Resolution is synchronous (UdpResolver::syncResolve); prefer the
+ *  endpoint overload when the address is already known.
+ */
+shared_ptr<UdpChannel>
+UdpFactory::createChannel(const std::string& localHost,
+                          const std::string& localPort,
+                          const time::Duration& timeout)
+{
+  return createChannel(UdpResolver::syncResolve(localHost, localPort),
+                       timeout);
+}
+
+/** \brief Create (or retrieve) the multicast UDP face for a group
+ *
+ *  \param localEndpoint     local interface address (IPv4) and port
+ *  \param multicastEndpoint multicast group address and the same port
+ *
+ *  \throws UdpFactory::Error on endpoint conflicts, IPv6 input,
+ *          mismatched ports, non-multicast group, or socket setup failure
+ */
+shared_ptr<MulticastUdpFace>
+UdpFactory::createMulticastFace(const udp::Endpoint& localEndpoint,
+                                const udp::Endpoint& multicastEndpoint)
+{
+  //checking if the local and multicast endpoint are already in use for a multicast face
+  shared_ptr<MulticastUdpFace> multicastFace = findMulticastFace(localEndpoint);
+  if (static_cast<bool>(multicastFace)) {
+    if (multicastFace->getMulticastGroup() == multicastEndpoint)
+      return multicastFace;
+    else
+      throw Error("Cannot create the requested UDP multicast face, local "
+                  "endpoint is already allocated for a UDP multicast face "
+                  "on a different multicast group");
+  }
+
+  //checking if the local endpoint is already in use for an unicast channel
+  shared_ptr<UdpChannel> unicast = findChannel(localEndpoint);
+  if (static_cast<bool>(unicast)) {
+    throw Error("Cannot create the requested UDP multicast face, local "
+                "endpoint is already allocated for a UDP unicast channel");
+  }
+
+  if (localEndpoint.address().is_v6() || multicastEndpoint.address().is_v6()) {
+    throw Error("IPv6 multicast is not supported yet. Please provide an IPv4 address");
+  }
+
+  if (localEndpoint.port() != multicastEndpoint.port()) {
+    throw Error("Cannot create the requested UDP multicast face, "
+                "both endpoints should have the same port number. ");
+  }
+
+  if (!multicastEndpoint.address().is_multicast()) {
+    throw Error("Cannot create the requested UDP multicast face, "
+                "the multicast group given as input is not a multicast address");
+  }
+
+  shared_ptr<ip::udp::socket> clientSocket =
+    make_shared<ip::udp::socket>(boost::ref(getGlobalIoService()));
+
+  clientSocket->open(multicastEndpoint.protocol());
+
+  clientSocket->set_option(ip::udp::socket::reuse_address(true));
+
+  try {
+    // bind to the group endpoint; the face's local_endpoint() then reports the group
+    clientSocket->bind(multicastEndpoint);
+
+    // 0.0.0.0 means "any interface": let the kernel pick the outbound interface
+    if (localEndpoint.address() != ip::address::from_string("0.0.0.0")) {
+      clientSocket->set_option(ip::multicast::outbound_interface(localEndpoint.address().to_v4()));
+    }
+    clientSocket->set_option(ip::multicast::join_group(multicastEndpoint.address().to_v4(),
+                                                       localEndpoint.address().to_v4()));
+  }
+  catch (boost::system::system_error& e) {
+    std::stringstream msg;
+    msg << "Failed to properly configure the socket, check the address (" << e.what() << ")";
+    throw Error(msg.str());
+  }
+
+  // do not receive our own transmissions back
+  clientSocket->set_option(ip::multicast::enable_loopback(false));
+
+  multicastFace = make_shared<MulticastUdpFace>(boost::cref(clientSocket));
+  multicastFace->onFail += bind(&UdpFactory::afterFaceFailed, this, localEndpoint);
+
+  m_multicastFaces[localEndpoint] = multicastFace;
+
+  return multicastFace;
+}
+
+/** \brief Create a multicast UDP face from address/port strings
+ *
+ *  Both endpoints use \p multicastPort; resolution is synchronous.
+ */
+shared_ptr<MulticastUdpFace>
+UdpFactory::createMulticastFace(const std::string& localIp,
+                                const std::string& multicastIp,
+                                const std::string& multicastPort)
+{
+
+  return createMulticastFace(UdpResolver::syncResolve(localIp,
+                                                      multicastPort),
+                             UdpResolver::syncResolve(multicastIp,
+                                                      multicastPort));
+}
+
+/** \brief Create a unicast face for the given FaceUri
+ *
+ *  The hostname/port are resolved asynchronously; the address family is
+ *  constrained by the URI scheme ("udp4"/"udp6", anything else = any).
+ *  Falls back to m_defaultPort when the URI carries no port.
+ */
+void
+UdpFactory::createFace(const FaceUri& uri,
+                       const FaceCreatedCallback& onCreated,
+                       const FaceConnectFailedCallback& onConnectFailed)
+{
+  resolver::AddressSelector addressSelector = resolver::AnyAddress();
+  if (uri.getScheme() == "udp4")
+    addressSelector = resolver::Ipv4Address();
+  else if (uri.getScheme() == "udp6")
+    addressSelector = resolver::Ipv6Address();
+
+  UdpResolver::asyncResolve(uri.getDomain(),
+                            uri.getPort().empty() ? m_defaultPort : uri.getPort(),
+                            bind(&UdpFactory::continueCreateFaceAfterResolve, this, _1,
+                                 onCreated, onConnectFailed),
+                            onConnectFailed,
+                            addressSelector);
+
+}
+
+/** \brief Connect to the resolved endpoint through a matching channel
+ *
+ *  Picks the first channel of the same address family as \p endpoint;
+ *  reports via \p onConnectFailed if the endpoint is multicast or no
+ *  suitable channel exists.
+ */
+void
+UdpFactory::continueCreateFaceAfterResolve(const udp::Endpoint& endpoint,
+                                           const FaceCreatedCallback& onCreated,
+                                           const FaceConnectFailedCallback& onConnectFailed)
+{
+  if (endpoint.address().is_multicast()) {
+    onConnectFailed("The provided address is multicast. Please use createMulticastFace method");
+    return;
+  }
+
+  // very simple logic for now: first channel with a matching address family wins
+
+  for (ChannelMap::iterator channel = m_channels.begin();
+       channel != m_channels.end();
+       ++channel)
+    {
+      if ((channel->first.address().is_v4() && endpoint.address().is_v4()) ||
+          (channel->first.address().is_v6() && endpoint.address().is_v6()))
+        {
+          channel->second->connect(endpoint, onCreated);
+          return;
+        }
+    }
+  onConnectFailed("No channels available to connect to "
+                  + boost::lexical_cast<std::string>(endpoint));
+}
+
+/** \brief Look up the unicast channel bound to \p localEndpoint
+ *  \return the channel, or a null pointer if none exists
+ */
+shared_ptr<UdpChannel>
+UdpFactory::findChannel(const udp::Endpoint& localEndpoint)
+{
+  ChannelMap::iterator i = m_channels.find(localEndpoint);
+  if (i != m_channels.end())
+    return i->second;
+  else
+    return shared_ptr<UdpChannel>();
+}
+
+/** \brief Look up the multicast face registered for \p localEndpoint
+ *  \return the face, or a null pointer if none exists
+ */
+shared_ptr<MulticastUdpFace>
+UdpFactory::findMulticastFace(const udp::Endpoint& localEndpoint)
+{
+  MulticastFaceMap::iterator i = m_multicastFaces.find(localEndpoint);
+  if (i != m_multicastFaces.end())
+    return i->second;
+  else
+    return shared_ptr<MulticastUdpFace>();
+}
+
+/** \brief Remove a failed multicast face from the factory's map
+ */
+void
+UdpFactory::afterFaceFailed(udp::Endpoint& endpoint)
+{
+  NFD_LOG_DEBUG("afterFaceFailed: " << endpoint);
+  m_multicastFaces.erase(endpoint);
+}
+
+
+} // namespace nfd
diff --git a/daemon/face/udp-factory.hpp b/daemon/face/udp-factory.hpp
new file mode 100644
index 0000000..695d1e5
--- /dev/null
+++ b/daemon/face/udp-factory.hpp
@@ -0,0 +1,171 @@
+/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
+/**
+ * Copyright (C) 2014 Named Data Networking Project
+ * See COPYING for copyright and distribution information.
+ */
+
+#ifndef NFD_FACE_UDP_FACTORY_HPP
+#define NFD_FACE_UDP_FACTORY_HPP
+
+#include "protocol-factory.hpp"
+#include "udp-channel.hpp"
+#include "multicast-udp-face.hpp"
+
+
+namespace nfd {
+
+// @todo Multicast support for IPv6 still needs to be implemented
+
+class UdpFactory : public ProtocolFactory
+{
+public:
+  /**
+   * \brief Exception of UdpFactory
+   */
+  struct Error : public ProtocolFactory::Error
+  {
+    Error(const std::string& what) : ProtocolFactory::Error(what) {}
+  };
+
+  explicit
+  UdpFactory(const std::string& defaultPort = "6363");
+
+  /**
+   * \brief Create UDP-based channel using udp::Endpoint
+   *
+   * udp::Endpoint is really an alias for boost::asio::ip::udp::endpoint.
+   *
+   * If this method is called twice with the same endpoint, only one channel
+   * will be created. The second call will just retrieve the existing
+   * channel.
+   *
+   * If a multicast face is already active on the same local endpoint,
+   * the creation fails and an exception is thrown
+   *
+   * Once a face is created, if it doesn't send/receive anything for
+   * a period of time equal to timeout, it will be destroyed
+   * @todo this functionality has to be implemented
+   *
+   * \returns always a valid pointer to a UdpChannel object, an exception
+   *          is thrown if it cannot be created.
+   *
+   * \throws UdpFactory::Error
+   *
+   * \see http://www.boost.org/doc/libs/1_42_0/doc/html/boost_asio/reference/ip__udp/endpoint.html
+   *      for details on ways to create udp::Endpoint
+   */
+  shared_ptr<UdpChannel>
+  createChannel(const udp::Endpoint& localEndpoint,
+                const time::Duration& timeout = time::seconds(600));
+
+  /**
+   * \brief Create UDP-based channel using specified host and port number
+   *
+   * This method will attempt to resolve the provided host and port numbers
+   * and will throw UdpFactory::Error when channel cannot be created.
+   *
+   * Note that this call will **BLOCK** until resolution is done or failed.
+   *
+   * If localHost is an IPv6 address of a specific device, it must be in the form:
+   * ip address%interface name
+   * Example: fe80::5e96:9dff:fe7d:9c8d%en1
+   * Otherwise, you can use ::
+   *
+   * Once a face is created, if it doesn't send/receive anything for
+   * a period of time equal to timeout, it will be destroyed
+   * @todo this functionality has to be implemented
+   *
+   * \throws UdpChannel::Error if the bind on the socket fails
+   * \throws UdpFactory::Error
+   */
+  shared_ptr<UdpChannel>
+  createChannel(const std::string& localHost,
+                const std::string& localPort,
+                const time::Duration& timeout = time::seconds(600));
+
+  /**
+   * \brief Create MulticastUdpFace using udp::Endpoint
+   *
+   * udp::Endpoint is really an alias for boost::asio::ip::udp::endpoint.
+   *
+   * The face will join the multicast group
+   *
+   * If this method is called twice with the same endpoint and group, only one
+   * face will be created. The second call will just retrieve the existing
+   * face.
+   *
+   * If a unicast face is already active on the same local NIC and port, the
+   * creation fails and an exception is thrown
+   *
+   * \returns always a valid pointer to a MulticastUdpFace object, an exception
+   *          is thrown if it cannot be created.
+   *
+   * \throws UdpFactory::Error
+   *
+   * \see http://www.boost.org/doc/libs/1_42_0/doc/html/boost_asio/reference/ip__udp/endpoint.html
+   *      for details on ways to create udp::Endpoint
+   */
+  shared_ptr<MulticastUdpFace>
+  createMulticastFace(const udp::Endpoint& localEndpoint,
+                      const udp::Endpoint& multicastEndpoint);
+
+  /**
+   * \brief Convenience overload: resolves the string arguments and
+   *        delegates to the endpoint-based createMulticastFace above
+   */
+  shared_ptr<MulticastUdpFace>
+  createMulticastFace(const std::string& localIp,
+                      const std::string& multicastIp,
+                      const std::string& multicastPort);
+
+  // from Factory
+  virtual void
+  createFace(const FaceUri& uri,
+             const FaceCreatedCallback& onCreated,
+             const FaceConnectFailedCallback& onConnectFailed);
+
+protected:
+  typedef std::map< udp::Endpoint, shared_ptr<MulticastUdpFace> > MulticastFaceMap;
+
+  /**
+   * \brief Keeps track of the created MulticastUdpFaces
+   */
+  MulticastFaceMap m_multicastFaces;
+
+private:
+  // removes the failed multicast face from m_multicastFaces
+  void
+  afterFaceFailed(udp::Endpoint& endpoint);
+
+  /**
+   * \brief Look up UdpChannel using specified local endpoint
+   *
+   * \returns shared pointer to the existing UdpChannel object
+   *          or empty shared pointer when such channel does not exist
+   *
+   * \throws never
+   */
+  shared_ptr<UdpChannel>
+  findChannel(const udp::Endpoint& localEndpoint);
+
+
+  /**
+   * \brief Look up multicast UdpFace using specified local endpoint
+   *
+   * \returns shared pointer to the existing multicast MulticastUdpFace object
+   *          or empty shared pointer when such face does not exist
+   *
+   * \throws never
+   */
+  shared_ptr<MulticastUdpFace>
+  findMulticastFace(const udp::Endpoint& localEndpoint);
+
+  // second stage of createFace(), invoked once the resolver has produced
+  // a concrete udp::Endpoint
+  void
+  continueCreateFaceAfterResolve(const udp::Endpoint& endpoint,
+                                 const FaceCreatedCallback& onCreated,
+                                 const FaceConnectFailedCallback& onConnectFailed);
+
+  typedef std::map< udp::Endpoint, shared_ptr<UdpChannel> > ChannelMap;
+  ChannelMap m_channels;
+
+  // remote port used when the FaceUri does not carry one
+  std::string m_defaultPort;
+};
+
+} // namespace nfd
+
+#endif // NFD_FACE_UDP_FACTORY_HPP
diff --git a/tests/face/udp.cpp b/tests/face/udp.cpp
new file mode 100644
index 0000000..02c4319
--- /dev/null
+++ b/tests/face/udp.cpp
@@ -0,0 +1,869 @@
+/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
+/**
+ * Copyright (C) 2014 Named Data Networking Project
+ * See COPYING for copyright and distribution information.
+ */
+
+#include "face/udp-factory.hpp"
+#include "core/scheduler.hpp"
+#include "core/face-uri.hpp"
+#include "tests/test-common.hpp"
+
+#include <ndn-cpp-dev/security/key-chain.hpp>
+#include <boost/asio.hpp>
+
+namespace nfd {
+namespace tests {
+
+BOOST_FIXTURE_TEST_SUITE(FaceUdp, BaseFixture)
+
+// Fixture providing exception-message predicates, used with
+// BOOST_CHECK_EXCEPTION to tell the different UdpFactory::Error cases apart.
+class FactoryErrorCheck : protected BaseFixture
+{
+public:
+  // matches: unicast channel requested on an endpoint already used by a multicast face
+  bool isTheSameMulticastEndpoint(const UdpFactory::Error& e) {
+    return strcmp(e.what(),
+                  "Cannot create the requested UDP unicast channel, local "
+                  "endpoint is already allocated for a UDP multicast face") == 0;
+  }
+
+  // matches: multicast face requested with a non-multicast group address
+  bool isNotMulticastAddress(const UdpFactory::Error& e) {
+    return strcmp(e.what(),
+                  "Cannot create the requested UDP multicast face, "
+                  "the multicast group given as input is not a multicast address") == 0;
+  }
+
+  // matches: multicast face requested on an endpoint already used by a unicast channel
+  bool isTheSameUnicastEndpoint(const UdpFactory::Error& e) {
+    return strcmp(e.what(),
+                  "Cannot create the requested UDP multicast face, local "
+                  "endpoint is already allocated for a UDP unicast channel") == 0;
+  }
+
+  // matches: same local endpoint requested for a different multicast group
+  bool isLocalEndpointOnDifferentGroup(const UdpFactory::Error& e) {
+    return strcmp(e.what(),
+                  "Cannot create the requested UDP multicast face, local "
+                  "endpoint is already allocated for a UDP multicast face "
+                  "on a different multicast group") == 0;
+  }
+};
+
+// Verifies channel/multicast-face bookkeeping: idempotent creation,
+// and the four error conditions checked by FactoryErrorCheck.
+BOOST_FIXTURE_TEST_CASE(ChannelMapUdp, FactoryErrorCheck)
+{
+  using boost::asio::ip::udp;
+
+  UdpFactory factory = UdpFactory();
+
+  //to instantiate multicast face on a specific ip address, change interfaceIp
+  std::string interfaceIp = "0.0.0.0";
+
+  // creating a channel twice on the same endpoint must return the same object
+  shared_ptr<UdpChannel> channel1 = factory.createChannel("127.0.0.1", "20070");
+  shared_ptr<UdpChannel> channel1a = factory.createChannel("127.0.0.1", "20070");
+  BOOST_CHECK_EQUAL(channel1, channel1a);
+
+  shared_ptr<UdpChannel> channel2 = factory.createChannel("127.0.0.1", "20071");
+  BOOST_CHECK_NE(channel1, channel2);
+
+  shared_ptr<UdpChannel> channel3 = factory.createChannel(interfaceIp, "20070");
+
+  //same endpoint of a unicast channel
+  BOOST_CHECK_EXCEPTION(factory.createMulticastFace(interfaceIp,
+                                                    "224.0.0.1",
+                                                    "20070"),
+                        UdpFactory::Error,
+                        isTheSameUnicastEndpoint);
+
+
+  // creating a multicast face twice on the same endpoint/group must return the same object
+  shared_ptr<MulticastUdpFace> multicastFace1 = factory.createMulticastFace(interfaceIp,
+                                                                            "224.0.0.1",
+                                                                            "20072");
+  shared_ptr<MulticastUdpFace> multicastFace1a = factory.createMulticastFace(interfaceIp,
+                                                                             "224.0.0.1",
+                                                                             "20072");
+  BOOST_CHECK_EQUAL(multicastFace1, multicastFace1a);
+
+
+  //same endpoint of a multicast face
+  BOOST_CHECK_EXCEPTION(factory.createChannel(interfaceIp, "20072"),
+                        UdpFactory::Error,
+                        isTheSameMulticastEndpoint);
+
+  //same multicast endpoint, different group
+  BOOST_CHECK_EXCEPTION(factory.createMulticastFace(interfaceIp,
+                                                    "224.0.0.42",
+                                                    "20072"),
+                        UdpFactory::Error,
+                        isLocalEndpointOnDifferentGroup);
+
+  // 192.168.10.15 is not a multicast group address
+  BOOST_CHECK_EXCEPTION(factory.createMulticastFace(interfaceIp,
+                                                    "192.168.10.15",
+                                                    "20025"),
+                        UdpFactory::Error,
+                        isNotMulticastAddress);
+
+
+
+// //Test commented because it requires a machine that can resolve IPv6 queries
+// shared_ptr<UdpChannel> channel1v6 = factory.createChannel(//"::1",
+//                                                           "fe80::5e96:9dff:fe7d:9c8d%en1",
+//                                                           //"fe80::aa54:b2ff:fe08:27b8%wlan0",
+//                                                           "20070");
+//
+// //the creation of multicastFace2 works properly. It has been disabled because it needs an IP address of
+// //an available network interface (different from the first one used)
+// shared_ptr<MulticastUdpFace> multicastFace2 = factory.createMulticastFace("192.168.1.17",
+//                                                                           "224.0.0.1",
+//                                                                           "20073");
+// BOOST_CHECK_NE(multicastFace1, multicastFace2);
+//
+//
+// //ipv6 - work in progress
+// shared_ptr<MulticastUdpFace> multicastFace3 = factory.createMulticastFace("fe80::5e96:9dff:fe7d:9c8d%en1",
+//                                                                           "FF01:0:0:0:0:0:0:2",
+//                                                                           "20073");
+//
+// shared_ptr<MulticastUdpFace> multicastFace4 = factory.createMulticastFace("fe80::aa54:b2ff:fe08:27b8%wlan0",
+//                                                                           "FF01:0:0:0:0:0:0:2",
+//                                                                           "20073");
+//
+// BOOST_CHECK_EQUAL(multicastFace3, multicastFace4);
+//
+// shared_ptr<MulticastUdpFace> multicastFace5 = factory.createMulticastFace("::1",
+//                                                                           "FF01:0:0:0:0:0:0:2",
+//                                                                           "20073");
+//
+// BOOST_CHECK_NE(multicastFace3, multicastFace5);
+//
+// //same local ipv6 endpoint for a different multicast group
+// BOOST_CHECK_THROW(factory.createMulticastFace("fe80::aa54:b2ff:fe08:27b8%wlan0",
+//                                               "FE01:0:0:0:0:0:0:2",
+//                                               "20073"),
+//                   UdpFactory::Error);
+//
+// //same local ipv6 (except for the port number) endpoint for a different multicast group
+// BOOST_CHECK_THROW(factory.createMulticastFace("fe80::aa54:b2ff:fe08:27b8%wlan0",
+//                                               "FE01:0:0:0:0:0:0:2",
+//                                               "20075"),
+//                   UdpFactory::Error);
+//
+// BOOST_CHECK_THROW(factory.createMulticastFace("fa80::20a:9dff:fef6:12ff",
+//                                               "FE12:0:0:0:0:0:0:2",
+//                                               "20075"),
+//                   UdpFactory::Error);
+//
+// //not a multicast ipv6
+// BOOST_CHECK_THROW(factory.createMulticastFace("fa80::20a:9dff:fef6:12ff",
+//                                               "A112:0:0:0:0:0:0:2",
+//                                               "20075"),
+//                   UdpFactory::Error);
+
+
+
+}
+
+// Fixture that drives UdpChannels end-to-end: the callbacks record created
+// faces and received packets, and afterIo() stops the global io_service
+// once the expected number of asynchronous events (m_ioRemaining) has fired.
+class EndToEndFixture : protected BaseFixture
+{
+public:
+  // channel1 created its face: remember it and hook up the face1_* handlers
+  void
+  channel1_onFaceCreated(const shared_ptr<UdpFace>& newFace)
+  {
+    BOOST_CHECK(!static_cast<bool>(m_face1));
+    m_face1 = newFace;
+    m_face1->onReceiveInterest +=
+      bind(&EndToEndFixture::face1_onReceiveInterest, this, _1);
+    m_face1->onReceiveData +=
+      bind(&EndToEndFixture::face1_onReceiveData, this, _1);
+    m_face1->onFail +=
+      bind(&EndToEndFixture::face1_onFail, this);
+    BOOST_CHECK_MESSAGE(true, "channel 1 face created");
+
+    m_faces.push_back(m_face1);
+
+    this->afterIo();
+  }
+
+  // a connect failure on channel1 is always a test failure
+  void
+  channel1_onConnectFailed(const std::string& reason)
+  {
+    BOOST_CHECK_MESSAGE(false, reason);
+
+    this->afterIo();
+  }
+
+  void
+  face1_onReceiveInterest(const Interest& interest)
+  {
+    m_face1_receivedInterests.push_back(interest);
+
+    this->afterIo();
+  }
+
+  void
+  face1_onReceiveData(const Data& data)
+  {
+    m_face1_receivedDatas.push_back(data);
+
+    this->afterIo();
+  }
+
+  // face1 failed/closed: reset the pointer so the test can observe it
+  void
+  face1_onFail()
+  {
+    m_face1.reset();
+    this->afterIo();
+  }
+
+  // channel2 created its face: remember it and hook up the face2_* handlers
+  void
+  channel2_onFaceCreated(const shared_ptr<Face>& newFace)
+  {
+    BOOST_CHECK(!static_cast<bool>(m_face2));
+    m_face2 = newFace;
+    m_face2->onReceiveInterest +=
+      bind(&EndToEndFixture::face2_onReceiveInterest, this, _1);
+    m_face2->onReceiveData +=
+      bind(&EndToEndFixture::face2_onReceiveData, this, _1);
+    m_face2->onFail +=
+      bind(&EndToEndFixture::face2_onFail, this);
+
+    m_faces.push_back(m_face2);
+
+    BOOST_CHECK_MESSAGE(true, "channel 2 face created");
+    this->afterIo();
+  }
+
+  // a connect failure on channel2 is always a test failure
+  void
+  channel2_onConnectFailed(const std::string& reason)
+  {
+    BOOST_CHECK_MESSAGE(false, reason);
+
+    this->afterIo();
+  }
+
+  void
+  face2_onReceiveInterest(const Interest& interest)
+  {
+    m_face2_receivedInterests.push_back(interest);
+
+    this->afterIo();
+  }
+
+  void
+  face2_onReceiveData(const Data& data)
+  {
+    m_face2_receivedDatas.push_back(data);
+
+    this->afterIo();
+  }
+
+  // face2 failed/closed: reset the pointer so the test can observe it
+  void
+  face2_onFail()
+  {
+    m_face2.reset();
+    this->afterIo();
+  }
+
+  void
+  channel3_onFaceCreated(const shared_ptr<Face>& newFace)
+  {
+    BOOST_CHECK(!static_cast<bool>(m_face1));
+    m_face3 = newFace;
+    m_faces.push_back(newFace);
+
+    this->afterIo();
+  }
+
+
+  // generic handler: just collect every face a channel creates
+  void
+  channel_onFaceCreated(const shared_ptr<UdpFace>& newFace)
+  {
+    m_faces.push_back(newFace);
+    this->afterIo();
+  }
+
+  void
+  channel_onConnectFailed(const std::string& reason)
+  {
+    BOOST_CHECK_MESSAGE(false, reason);
+
+    this->afterIo();
+  }
+
+  // used where a connect failure is the expected outcome
+  void
+  channel_onConnectFailedOk(const std::string& reason)
+  {
+    //it's ok, it was supposed to fail
+    this->afterIo();
+  }
+
+  void
+  checkFaceList(size_t shouldBe)
+  {
+    BOOST_CHECK_EQUAL(m_faces.size(), shouldBe);
+  }
+
+  // scheduled as a watchdog: stops the io_service and fails the test case
+  void
+  abortTestCase(const std::string& message)
+  {
+    g_io.stop();
+    BOOST_FAIL(message);
+  }
+
+private:
+  // stop the io_service once all expected asynchronous events have fired
+  void
+  afterIo()
+  {
+    if (--m_ioRemaining <= 0) g_io.stop();
+  }
+
+public:
+  int m_ioRemaining;  // asynchronous events still expected before g_io may stop
+
+  shared_ptr<Face> m_face1;
+  std::vector<Interest> m_face1_receivedInterests;
+  std::vector<Data> m_face1_receivedDatas;
+  shared_ptr<Face> m_face2;
+  std::vector<Interest> m_face2_receivedInterests;
+  std::vector<Data> m_face2_receivedDatas;
+
+  shared_ptr<Face> m_face3;
+
+
+  std::list< shared_ptr<Face> > m_faces;  // every face created during the test
+};
+
+
+// Full round trip over IPv4: create a face via createFace(FaceUri),
+// then exchange Interests and Data in both directions and verify the
+// listening channel creates a face for the incoming traffic.
+BOOST_FIXTURE_TEST_CASE(EndToEnd, EndToEndFixture)
+{
+  UdpFactory factory = UdpFactory();
+  Scheduler scheduler(g_io); // to limit the amount of time the test may take
+
+  EventId abortEvent =
+    scheduler.scheduleEvent(time::seconds(1),
+                            bind(&EndToEndFixture::abortTestCase, this,
+                                 "UdpChannel error: cannot connect or cannot accept connection"));
+
+  m_ioRemaining = 1;
+
+  shared_ptr<UdpChannel> channel2 = factory.createChannel("127.0.0.1", "20071");
+
+
+  //channel2->connect("127.0.0.1", "20070",
+  //                  bind(&EndToEndFixture::channel2_onFaceCreated, this, _1),
+  //                  bind(&EndToEndFixture::channel2_onConnectFailed, this, _1));
+
+  // exercise the factory-level entry point instead of connecting directly
+  factory.createFace(FaceUri("udp4://127.0.0.1:20070"),
+                     bind(&EndToEndFixture::channel2_onFaceCreated, this, _1),
+                     bind(&EndToEndFixture::channel2_onConnectFailed, this, _1));
+
+
+  g_io.run();
+  g_io.reset();
+  scheduler.cancelEvent(abortEvent);
+
+  BOOST_REQUIRE(static_cast<bool>(m_face2));
+
+  // now listen on the endpoint that m_face2 sends to, so incoming
+  // datagrams create m_face1 on demand
+  shared_ptr<UdpChannel> channel1 = factory.createChannel("127.0.0.1", "20070");
+  channel1->listen(bind(&EndToEndFixture::channel1_onFaceCreated,   this, _1),
+                   bind(&EndToEndFixture::channel1_onConnectFailed, this, _1));
+
+
+  abortEvent =
+    scheduler.scheduleEvent(time::seconds(1),
+                            bind(&EndToEndFixture::abortTestCase, this,
+                                 "UdpChannel error: cannot send or receive Interest/Data packets"));
+
+  Interest interest1("ndn:/TpnzGvW9R");
+  Data     data1    ("ndn:/KfczhUqVix");
+  data1.setContent(0, 0);
+  Interest interest2("ndn:/QWiIMfj5sL");
+  Data     data2    ("ndn:/XNBV796f");
+  data2.setContent(0, 0);
+  Interest interest3("ndn:/QWiIhjgkj5sL");
+  Data     data3    ("ndn:/XNBV794f");
+  data3.setContent(0, 0);
+
+
+  ndn::SignatureSha256WithRsa fakeSignature;
+  fakeSignature.setValue(ndn::dataBlock(tlv::SignatureValue,
+                                        reinterpret_cast<const uint8_t*>(0),
+                                        0));
+
+  // set fake signature on data1 and data2
+  data1.setSignature(fakeSignature);
+  data2.setSignature(fakeSignature);
+  data3.setSignature(fakeSignature);
+
+  m_face2->sendInterest(interest2);
+  m_face2->sendData    (data2    );
+
+  m_ioRemaining = 3; //2 send + 1 listen return
+  g_io.run();
+  g_io.reset();
+  scheduler.cancelEvent(abortEvent);
+
+  BOOST_REQUIRE(static_cast<bool>(m_face1));
+
+  // reply in the opposite direction over the newly accepted face
+  m_face1->sendInterest(interest1);
+  m_face1->sendData    (data1    );
+
+  abortEvent =
+    scheduler.scheduleEvent(time::seconds(1),
+                            bind(&EndToEndFixture::abortTestCase, this,
+                                 "UdpChannel error: cannot send or receive Interest/Data packets"));
+  m_ioRemaining = 2;
+  g_io.run();
+  g_io.reset();
+  scheduler.cancelEvent(abortEvent);
+
+
+  BOOST_REQUIRE_EQUAL(m_face1_receivedInterests.size(), 1);
+  BOOST_REQUIRE_EQUAL(m_face1_receivedDatas    .size(), 1);
+  BOOST_REQUIRE_EQUAL(m_face2_receivedInterests.size(), 1);
+  BOOST_REQUIRE_EQUAL(m_face2_receivedDatas    .size(), 1);
+
+  BOOST_CHECK_EQUAL(m_face1_receivedInterests[0].getName(), interest2.getName());
+  BOOST_CHECK_EQUAL(m_face1_receivedDatas    [0].getName(), data2.getName());
+  BOOST_CHECK_EQUAL(m_face2_receivedInterests[0].getName(), interest1.getName());
+  BOOST_CHECK_EQUAL(m_face2_receivedDatas    [0].getName(), data1.getName());
+
+
+
+  //checking if the connection accepting mechanism works properly.
+
+  m_face2->sendData    (data3    );
+  m_face2->sendInterest(interest3);
+
+  abortEvent =
+    scheduler.scheduleEvent(time::seconds(1),
+                            bind(&EndToEndFixture::abortTestCase, this,
+                                 "UdpChannel error: cannot send or receive Interest/Data packets"));
+  m_ioRemaining = 2;
+  g_io.run();
+  g_io.reset();
+  scheduler.cancelEvent(abortEvent);
+
+  BOOST_REQUIRE_EQUAL(m_face1_receivedInterests.size(), 2);
+  BOOST_REQUIRE_EQUAL(m_face1_receivedDatas    .size(), 2);
+
+  BOOST_CHECK_EQUAL(m_face1_receivedInterests[1].getName(), interest3.getName());
+  BOOST_CHECK_EQUAL(m_face1_receivedDatas    [1].getName(), data3.getName());
+
+}
+
+// Verifies that a listening channel accepts multiple peers, creating one
+// face per remote endpoint, and that listen() cannot be called twice.
+BOOST_FIXTURE_TEST_CASE(MultipleAccepts, EndToEndFixture)
+{
+  Interest interest1("ndn:/TpnzGvW9R");
+  Interest interest2("ndn:/QWiIMfj5sL");
+  Interest interest3("ndn:/QWiIhjgkj5sL");
+
+  UdpFactory factory = UdpFactory();
+  Scheduler scheduler(g_io); // to limit the amount of time the test may take
+
+  EventId abortEvent =
+    scheduler.scheduleEvent(time::seconds(4),
+                            bind(&EndToEndFixture::abortTestCase, this,
+                                 "UdpChannel error: cannot connect or cannot accept connection"));
+
+  shared_ptr<UdpChannel> channel1 = factory.createChannel("127.0.0.1", "20070");
+  shared_ptr<UdpChannel> channel2 = factory.createChannel("127.0.0.1", "20071");
+
+
+
+  channel1->listen(bind(&EndToEndFixture::channel_onFaceCreated,   this, _1),
+                   bind(&EndToEndFixture::channel_onConnectFailed, this, _1));
+
+  channel2->connect("127.0.0.1", "20070",
+                    bind(&EndToEndFixture::channel2_onFaceCreated, this, _1),
+                    bind(&EndToEndFixture::channel_onConnectFailed, this, _1));
+
+
+  m_ioRemaining = 1;
+  g_io.run();
+  g_io.reset();
+  scheduler.cancelEvent(abortEvent);
+
+  // only channel2's outgoing face exists so far
+  BOOST_CHECK_EQUAL(m_faces.size(), 1);
+
+  // two more clients connect to the listening channel
+  shared_ptr<UdpChannel> channel3 = factory.createChannel("127.0.0.1", "20072");
+  channel3->connect("127.0.0.1", "20070",
+                    bind(&EndToEndFixture::channel3_onFaceCreated, this, _1),
+                    bind(&EndToEndFixture::channel_onConnectFailed, this, _1));
+
+  shared_ptr<UdpChannel> channel4 = factory.createChannel("127.0.0.1", "20073");
+
+  BOOST_CHECK_NE(channel3, channel4);
+
+  scheduler
+    .scheduleEvent
+    (time::seconds(0.5),
+     bind(&UdpChannel::connect, channel4,
+          "127.0.0.1", "20070",
+          // does not work without static_cast
+          static_cast<UdpChannel::FaceCreatedCallback>(bind(&EndToEndFixture::
+                                                            channel_onFaceCreated,
+                                                            this,
+                                                            _1)),
+          static_cast<UdpChannel::ConnectFailedCallback>(bind(&EndToEndFixture::
+                                                              channel_onConnectFailed,
+                                                              this,
+                                                              _1))));
+
+  m_ioRemaining = 2; // 2 connects
+  abortEvent =
+    scheduler.scheduleEvent(time::seconds(4),
+                            bind(&EndToEndFixture::abortTestCase, this,
+                                 "UdpChannel error: cannot connect or cannot accept multiple connections"));
+
+  // checked before channel4's delayed connect runs: only 2 faces then
+  scheduler.scheduleEvent(time::seconds(0.4),
+                          bind(&EndToEndFixture::checkFaceList, this, 2));
+
+  g_io.run();
+  g_io.reset();
+
+  BOOST_CHECK_EQUAL(m_faces.size(), 3);
+
+
+  // sending from m_face2 makes channel1 create a face for that remote endpoint
+  m_face2->sendInterest(interest1);
+  abortEvent =
+    scheduler.scheduleEvent(time::seconds(1),
+                            bind(&EndToEndFixture::abortTestCase, this,
+                                 "UdpChannel error: cannot send or receive Interest/Data packets"));
+  m_ioRemaining = 1;
+  g_io.run();
+  g_io.reset();
+  scheduler.cancelEvent(abortEvent);
+
+  BOOST_CHECK_EQUAL(m_faces.size(), 4);
+
+  m_face3->sendInterest(interest2);
+  abortEvent =
+    scheduler.scheduleEvent(time::seconds(1),
+                            bind(&EndToEndFixture::abortTestCase, this,
+                                 "UdpChannel error: cannot send or receive Interest/Data packets"));
+  m_ioRemaining = 1;
+  g_io.run();
+  g_io.reset();
+  scheduler.cancelEvent(abortEvent);
+
+  //channel1 should have created 2 faces, one when m_face2 sent an interest, one when m_face3 sent an interest
+  BOOST_CHECK_EQUAL(m_faces.size(), 5);
+  // a channel that is already listening must refuse a second listen()
+  BOOST_CHECK_THROW(channel1->listen(bind(&EndToEndFixture::channel_onFaceCreated,
+                                          this,
+                                          _1),
+                                     bind(&EndToEndFixture::channel_onConnectFailedOk,
+                                          this,
+                                          _1)),
+                    UdpChannel::Error);
+}
+
+//Test disabled because it requires a machine that can resolve IPv6 queries
+//BOOST_FIXTURE_TEST_CASE(EndToEndIpv6, EndToEndFixture)
+//{
+// UdpFactory factory = UdpFactory();
+// Scheduler scheduler(g_io); // to limit the amount of time the test may take
+//
+// EventId abortEvent =
+// scheduler.scheduleEvent(time::seconds(1),
+// bind(&EndToEndFixture::abortTestCase, this,
+// "UdpChannel error: cannot connect or cannot accept connection"));
+//
+// m_ioRemaining = 1;
+//
+// shared_ptr<UdpChannel> channel1 = factory.createChannel("::1", "20070");
+// shared_ptr<UdpChannel> channel2 = factory.createChannel("::1", "20071");
+//
+// channel1->listen(bind(&EndToEndFixture::channel1_onFaceCreated, this, _1),
+// bind(&EndToEndFixture::channel1_onConnectFailed, this, _1));
+//
+// channel2->connect("::1", "20070",
+// bind(&EndToEndFixture::channel2_onFaceCreated, this, _1),
+// bind(&EndToEndFixture::channel2_onConnectFailed, this, _1));
+// g_io.run();
+// g_io.reset();
+// scheduler.cancelEvent(abortEvent);
+//
+// BOOST_REQUIRE(static_cast<bool>(m_face2));
+//
+// abortEvent =
+// scheduler.scheduleEvent(time::seconds(1),
+// bind(&EndToEndFixture::abortTestCase, this,
+// "UdpChannel error: cannot send or receive Interest/Data packets"));
+//
+// Interest interest1("ndn:/TpnzGvW9R");
+// Data data1 ("ndn:/KfczhUqVix");
+// data1.setContent(0, 0);
+// Interest interest2("ndn:/QWiIMfj5sL");
+// Data data2 ("ndn:/XNBV796f");
+// data2.setContent(0, 0);
+// Interest interest3("ndn:/QWiIhjgkj5sL");
+// Data data3 ("ndn:/XNBV794f");
+// data3.setContent(0, 0);
+//
+// ndn::SignatureSha256WithRsa fakeSignature;
+// fakeSignature.setValue(ndn::dataBlock(tlv::SignatureValue, reinterpret_cast<const uint8_t*>(0), 0));
+//
+// // set fake signature on data1 and data2
+// data1.setSignature(fakeSignature);
+// data2.setSignature(fakeSignature);
+// data3.setSignature(fakeSignature);
+//
+// m_face2->sendInterest(interest2);
+// m_face2->sendData (data2 );
+//
+// m_ioRemaining = 3; //2 send + 1 listen return
+// g_io.run();
+// g_io.reset();
+// scheduler.cancelEvent(abortEvent);
+//
+// BOOST_REQUIRE(static_cast<bool>(m_face1));
+//
+// m_face1->sendInterest(interest1);
+// m_face1->sendData (data1 );
+//
+// abortEvent =
+// scheduler.scheduleEvent(time::seconds(1),
+// bind(&EndToEndFixture::abortTestCase, this,
+// "UdpChannel error: cannot send or receive Interest/Data packets"));
+// m_ioRemaining = 2;
+// g_io.run();
+// g_io.reset();
+// scheduler.cancelEvent(abortEvent);
+//
+// BOOST_REQUIRE_EQUAL(m_face1_receivedInterests.size(), 1);
+// BOOST_REQUIRE_EQUAL(m_face1_receivedDatas .size(), 1);
+// BOOST_REQUIRE_EQUAL(m_face2_receivedInterests.size(), 1);
+// BOOST_REQUIRE_EQUAL(m_face2_receivedDatas .size(), 1);
+//
+// BOOST_CHECK_EQUAL(m_face1_receivedInterests[0].getName(), interest2.getName());
+// BOOST_CHECK_EQUAL(m_face1_receivedDatas [0].getName(), data2.getName());
+// BOOST_CHECK_EQUAL(m_face2_receivedInterests[0].getName(), interest1.getName());
+// BOOST_CHECK_EQUAL(m_face2_receivedDatas [0].getName(), data1.getName());
+//
+// //checking if the connection accepting mechanism works properly.
+//
+// m_face2->sendData (data3 );
+// m_face2->sendInterest(interest3);
+//
+// abortEvent =
+// scheduler.scheduleEvent(time::seconds(1),
+// bind(&EndToEndFixture::abortTestCase, this,
+// "UdpChannel error: cannot send or receive Interest/Data packets"));
+// m_ioRemaining = 2;
+// g_io.run();
+// g_io.reset();
+// scheduler.cancelEvent(abortEvent);
+//
+// BOOST_REQUIRE_EQUAL(m_face1_receivedInterests.size(), 2);
+// BOOST_REQUIRE_EQUAL(m_face1_receivedDatas .size(), 2);
+//
+// BOOST_CHECK_EQUAL(m_face1_receivedInterests[1].getName(), interest3.getName());
+// BOOST_CHECK_EQUAL(m_face1_receivedDatas [1].getName(), data3.getName());
+//
+//}
+
+
+//Test disabled because it requires a machine that can resolve IPv6 queries
+//BOOST_FIXTURE_TEST_CASE(MultipleAcceptsIpv6, EndToEndFixture)
+//{
+// Interest interest1("ndn:/TpnzGvW9R");
+// Interest interest2("ndn:/QWiIMfj5sL");
+// Interest interest3("ndn:/QWiIhjgkj5sL");
+//
+// UdpFactory factory = UdpFactory();
+// Scheduler scheduler(g_io); // to limit the amount of time the test may take
+//
+// EventId abortEvent =
+// scheduler.scheduleEvent(time::seconds(4),
+// bind(&EndToEndFixture::abortTestCase, this,
+// "UdpChannel error: cannot connect or cannot accept connection"));
+//
+// shared_ptr<UdpChannel> channel1 = factory.createChannel("::1", "20070");
+// shared_ptr<UdpChannel> channel2 = factory.createChannel("::1", "20071");
+//
+// channel1->listen(bind(&EndToEndFixture::channel_onFaceCreated, this, _1),
+// bind(&EndToEndFixture::channel_onConnectFailed, this, _1));
+//
+// channel2->connect("::1", "20070",
+// bind(&EndToEndFixture::channel2_onFaceCreated, this, _1),
+// bind(&EndToEndFixture::channel_onConnectFailed, this, _1));
+//
+// m_ioRemaining = 1;
+// g_io.run();
+// g_io.reset();
+// scheduler.cancelEvent(abortEvent);
+//
+// BOOST_CHECK_EQUAL(m_faces.size(), 1);
+//
+// shared_ptr<UdpChannel> channel3 = factory.createChannel("::1", "20072");
+// channel3->connect("::1", "20070",
+// bind(&EndToEndFixture::channel3_onFaceCreated, this, _1),
+// bind(&EndToEndFixture::channel_onConnectFailed, this, _1));
+//
+// shared_ptr<UdpChannel> channel4 = factory.createChannel("::1", "20073");
+//
+// BOOST_CHECK_NE(channel3, channel4);
+//
+// scheduler
+// .scheduleEvent(time::seconds(0.5),
+// bind(&UdpChannel::connect, channel4,
+// "::1", "20070",
+// static_cast<UdpChannel::FaceCreatedCallback>(bind(&EndToEndFixture::
+// channel_onFaceCreated, this, _1)),
+// static_cast<UdpChannel::ConnectFailedCallback>(bind(&EndToEndFixture::
+// channel_onConnectFailed, this, _1))));
+//
+// m_ioRemaining = 2; // 2 connects
+// abortEvent =
+// scheduler.scheduleEvent(time::seconds(4),
+// bind(&EndToEndFixture::abortTestCase, this,
+// "UdpChannel error: cannot connect or cannot accept multiple connections"));
+//
+// scheduler.scheduleEvent(time::seconds(0.4),
+// bind(&EndToEndFixture::checkFaceList, this, 2));
+//
+// g_io.run();
+// g_io.reset();
+//
+// BOOST_CHECK_EQUAL(m_faces.size(), 3);
+//
+//
+// m_face2->sendInterest(interest1);
+// abortEvent =
+// scheduler.scheduleEvent(time::seconds(1),
+// bind(&EndToEndFixture::abortTestCase, this,
+// "UdpChannel error: cannot send or receive Interest/Data packets"));
+// m_ioRemaining = 1;
+// g_io.run();
+// g_io.reset();
+// scheduler.cancelEvent(abortEvent);
+//
+// BOOST_CHECK_EQUAL(m_faces.size(), 4);
+//
+// m_face3->sendInterest(interest2);
+// abortEvent =
+// scheduler.scheduleEvent(time::seconds(1),
+// bind(&EndToEndFixture::abortTestCase, this,
+// "UdpChannel error: cannot send or receive Interest/Data packets"));
+// m_ioRemaining = 1;
+// g_io.run();
+// g_io.reset();
+// scheduler.cancelEvent(abortEvent);
+//
+// //channel1 should have created 2 faces, one when m_face2 sent an interest, one when m_face3 sent an interest
+// BOOST_CHECK_EQUAL(m_faces.size(), 5);
+// BOOST_CHECK_THROW(channel1->listen(bind(&EndToEndFixture::channel_onFaceCreated,
+// this,
+// _1),
+// bind(&EndToEndFixture::channel_onConnectFailedOk,
+// this,
+// _1)),
+// UdpChannel::Error);
+//}
+
+
+// Verifies that closing a face through the Face base-class interface
+// triggers onFail and removes the face from its channel's table.
+BOOST_FIXTURE_TEST_CASE(FaceClosing, EndToEndFixture)
+{
+  UdpFactory factory = UdpFactory();
+  Scheduler scheduler(g_io); // to limit the amount of time the test may take
+
+  EventId abortEvent =
+    scheduler.scheduleEvent(time::seconds(4),
+                            bind(&EndToEndFixture::abortTestCase, this,
+                                 "UdpChannel error: cannot connect or cannot accept connection"));
+
+  shared_ptr<UdpChannel> channel1 = factory.createChannel("127.0.0.1", "20070");
+  shared_ptr<UdpChannel> channel2 = factory.createChannel("127.0.0.1", "20071");
+
+  channel1->listen(bind(&EndToEndFixture::channel1_onFaceCreated,   this, _1),
+                   bind(&EndToEndFixture::channel1_onConnectFailed, this, _1));
+
+  channel2->connect("127.0.0.1", "20070",
+                    bind(&EndToEndFixture::channel2_onFaceCreated, this, _1),
+                    bind(&EndToEndFixture::channel2_onConnectFailed, this, _1));
+
+  m_ioRemaining = 1;
+  BOOST_REQUIRE_NO_THROW(g_io.run());
+  g_io.reset();
+  scheduler.cancelEvent(abortEvent);
+
+  // channel2 owns exactly the one face it created by connecting
+  BOOST_CHECK_EQUAL(channel2->size(), 1);
+
+  abortEvent =
+    scheduler.scheduleEvent(time::seconds(4),
+                            bind(&EndToEndFixture::abortTestCase, this,
+                                 "FaceClosing error: cannot properly close faces"));
+
+  BOOST_CHECK(static_cast<bool>(m_face2));
+
+  m_ioRemaining = 1;
+  // just double check that we are calling the virtual method
+  static_pointer_cast<Face>(m_face2)->close();
+
+  BOOST_REQUIRE_NO_THROW(g_io.run());
+  g_io.reset();
+  scheduler.cancelEvent(abortEvent);
+
+  // face2_onFail() must have reset the pointer...
+  BOOST_CHECK(!static_cast<bool>(m_face2));
+
+  // ...and the channel must have dropped the face from its table
+  BOOST_CHECK_EQUAL(channel2->size(), 0);
+}
+
+//BOOST_FIXTURE_TEST_CASE(MulticastFace, EndToEndFixture)
+//{
+// //to instantiate multicast face on a specific ip address, change interfaceIp
+// std::string interfaceIp = "0.0.0.0";
+//
+// UdpFactory factory = UdpFactory();
+// Scheduler scheduler(g_io); // to limit the amount of time the test may take
+//
+// shared_ptr<MulticastUdpFace> multicastFace1 =
+// factory.createMulticastFace(interfaceIp, "224.0.0.1", "20072");
+//
+// BOOST_REQUIRE(static_cast<bool>(multicastFace1));
+//
+// channel1_onFaceCreated(multicastFace1);
+//
+//
+// EventId abortEvent =
+// scheduler.scheduleEvent(time::seconds(10),
+// bind(&EndToEndFixture::abortTestCase, this,
+// "MulticastFace error: cannot send or receive Interest/Data packets"));
+//
+// Interest interest1("ndn:/TpnzGvW9R");
+// Data data1 ("ndn:/KfczhUqVix");
+// data1.setContent(0, 0);
+// Interest interest2("ndn:/QWiIMfj5sL");
+// Data data2 ("ndn:/XNBV796f");
+// data2.setContent(0, 0);
+// Interest interest3("ndn:/QWiIhjgkj5sL");
+// Data data3 ("ndn:/XNBV794f");
+// data3.setContent(0, 0);
+//
+//
+// ndn::SignatureSha256WithRsa fakeSignature;
+// fakeSignature.setValue(ndn::dataBlock(tlv::SignatureValue, reinterpret_cast<const uint8_t*>(0), 0));
+//
+// // set fake signature on data1 and data2
+// data1.setSignature(fakeSignature);
+// data2.setSignature(fakeSignature);
+// data3.setSignature(fakeSignature);
+//
+// multicastFace1->sendInterest(interest1);
+// multicastFace1->sendData (data1 );
+//
+// g_io.reset();
+// m_ioRemaining = 2;
+// g_io.run();
+// g_io.reset();
+// scheduler.cancelEvent(abortEvent);
+//
+// BOOST_REQUIRE_EQUAL(m_face1_receivedInterests.size(), 1);
+// BOOST_REQUIRE_EQUAL(m_face1_receivedDatas .size(), 1);
+//
+// BOOST_CHECK_EQUAL(m_face1_receivedInterests[0].getName(), interest1.getName());
+// BOOST_CHECK_EQUAL(m_face1_receivedDatas [0].getName(), data1.getName());
+//}
+
+
+
+BOOST_AUTO_TEST_SUITE_END()
+} // namespace tests
+} // namespace nfd