table: refactor NameTree hashtable

The hand-written hashtable embedded in NameTree is extracted into a
standalone Hashtable class (name-tree-hashtable.hpp/.cpp) that owns the
Node objects and handles hashing, collision chaining, and automatic
expand/shrink resizing. name_tree::Entry now keeps raw Entry* links to
its parent and children instead of shared_ptr, and its emptiness check
is split into hasChildren() and hasTableEntries(). NameTree::insert(),
NameTree::resize(), and NameTree::dump() are removed; lookup(),
findExactMatch(), findLongestPrefixMatch(), and eraseEntryIfEmpty()
are reimplemented on top of the new Hashtable.

refs #3687

Change-Id: I908c1d7e67da861fc86a756fc0bc69b99ee696f7
diff --git a/daemon/table/measurements.cpp b/daemon/table/measurements.cpp
index 60e4f8d..c939b72 100644
--- a/daemon/table/measurements.cpp
+++ b/daemon/table/measurements.cpp
@@ -94,7 +94,7 @@
   }
 
   shared_ptr<name_tree::Entry> nteChild = m_nameTree.lookup(child);
-  shared_ptr<name_tree::Entry> nte = nteChild->getParent();
+  name_tree::Entry* nte = nteChild->getParent();
   BOOST_ASSERT(nte != nullptr);
   return &this->get(*nte);
 }
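
With this change, name_tree::Entry::getParent() returns a raw Entry* (nullptr only at the
root entry) instead of a shared_ptr, so callers such as Measurements::getParent() in the
hunk above simply switch the pointer type. A minimal sketch of the resulting caller-side
pattern, assuming NFD's daemon/ include root; countAncestors() is a hypothetical helper,
not part of this patch:

    #include "table/name-tree-entry.hpp"

    namespace nfd {
    namespace name_tree {

    // walk the ancestor chain without any shared_ptr copies
    size_t
    countAncestors(const Entry& entry)
    {
      size_t n = 0;
      for (const Entry* parent = entry.getParent(); parent != nullptr; parent = parent->getParent()) {
        ++n;
      }
      return n;
    }

    } // namespace name_tree
    } // namespace nfd
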
diff --git a/daemon/table/name-tree-entry.cpp b/daemon/table/name-tree-entry.cpp
index c0bc932..77b66ef 100644
--- a/daemon/table/name-tree-entry.cpp
+++ b/daemon/table/name-tree-entry.cpp
@@ -28,36 +28,45 @@
 namespace nfd {
 namespace name_tree {
 
-Node::Node()
-  : m_prev(nullptr)
-  , m_next(nullptr)
+Entry::Entry(const Name& name, Node* node)
+  : m_name(name)
+  , m_node(node)
+  , m_parent(nullptr)
 {
+  BOOST_ASSERT(node != nullptr);
 }
 
-Node::~Node()
+void
+Entry::setParent(Entry& entry)
 {
-  // erase the Name Tree Nodes that were created to
-  // resolve hash collisions
-  // So before erasing a single node, make sure its m_next == nullptr
-  // See eraseEntryIfEmpty in name-tree.cpp
-  if (m_next != nullptr)
-    delete m_next;
+  BOOST_ASSERT(this->getParent() == nullptr);
+  BOOST_ASSERT(!this->getName().empty());
+  BOOST_ASSERT(entry.getName() == this->getName().getPrefix(-1));
+
+  m_parent = &entry;
+
+  m_parent->m_children.push_back(this);
 }
 
-Entry::Entry(const Name& name)
-  : m_hash(0)
-  , m_prefix(name)
+void
+Entry::unsetParent()
 {
+  BOOST_ASSERT(this->getParent() != nullptr);
+
+  auto i = std::find(m_parent->m_children.begin(), m_parent->m_children.end(), this);
+  BOOST_ASSERT(i != m_parent->m_children.end());
+  m_parent->m_children.erase(i);
+
+  m_parent = nullptr;
 }
 
 bool
-Entry::isEmpty() const
+Entry::hasTableEntries() const
 {
-  return m_children.empty() &&
-         m_fibEntry == nullptr &&
-         m_pitEntries.empty() &&
-         m_measurementsEntry == nullptr &&
-         m_strategyChoiceEntry == nullptr;
+  return m_fibEntry != nullptr ||
+         !m_pitEntries.empty() ||
+         m_measurementsEntry != nullptr ||
+         m_strategyChoiceEntry != nullptr;
 }
 
 void
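
setParent()/unsetParent() replace direct manipulation of m_parent/m_children and enforce
their preconditions with assertions. A minimal sketch of the linkage, assuming NFD's
daemon/ include root; entries are created through Node here because that is how the new
hashtable allocates them:

    #include "table/name-tree-entry.hpp"
    #include "table/name-tree-hashtable.hpp"

    void
    parentLinkageSketch()
    {
      using namespace nfd::name_tree;

      Node root(computeHash(ndn::Name()), ndn::Name());        // root entry, empty name
      Node a(computeHash(ndn::Name("/A")), ndn::Name("/A"));

      // precondition: root's name equals "/A".getPrefix(-1), and /A has no parent yet
      a.entry->setParent(*root.entry);
      BOOST_ASSERT(a.entry->getParent() == root.entry.get());
      BOOST_ASSERT(root.entry->getChildren().size() == 1);

      // unsetParent() also removes this entry from the parent's children vector
      a.entry->unsetParent();
      BOOST_ASSERT(root.entry->getChildren().empty());
    }
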
diff --git a/daemon/table/name-tree-entry.hpp b/daemon/table/name-tree-entry.hpp
index b0641d0..9875891 100644
--- a/daemon/table/name-tree-entry.hpp
+++ b/daemon/table/name-tree-entry.hpp
@@ -26,7 +26,7 @@
 #ifndef NFD_DAEMON_TABLE_NAME_TREE_ENTRY_HPP
 #define NFD_DAEMON_TABLE_NAME_TREE_ENTRY_HPP
 
-#include "core/common.hpp"
+#include "name-tree-hashtable.hpp"
 #include "table/fib-entry.hpp"
 #include "table/pit-entry.hpp"
 #include "table/measurements-entry.hpp"
@@ -35,66 +35,104 @@
 namespace nfd {
 namespace name_tree {
 
-class Node;
-class Entry;
-class NameTree;
-
-/**
- * \brief Name Tree Node Class
- */
-class Node
-{
-public:
-  Node();
-
-  ~Node();
-
-public:
-  // variables are in public as this is just a data structure
-  shared_ptr<Entry> m_entry; // Name Tree Entry (i.e., Name Prefix Entry)
-  Node* m_prev; // Previous Name Tree Node (to resolve hash collision)
-  Node* m_next; // Next Name Tree Node (to resolve hash collision)
-};
-
-/**
- * \brief Name Tree Entry Class
+/** \brief an entry in the name tree
  */
 class Entry : public enable_shared_from_this<Entry>, noncopyable
 {
 public:
-  explicit
-  Entry(const Name& prefix);
+  Entry(const Name& prefix, Node* node);
 
   const Name&
-  getPrefix() const;
+  getName() const
+  {
+    return m_name;
+  }
 
+  /// \deprecated
+  const Name&
+  getPrefix() const
+  {
+    return this->getName();
+  }
+
+  /** \return entry of getName().getPrefix(-1)
+   *  \retval nullptr this entry is the root entry, i.e. getName() == Name()
+   */
+  Entry*
+  getParent() const
+  {
+    return m_parent;
+  }
+
+  /** \brief set parent of this entry
+   *  \param entry entry of getName().getPrefix(-1)
+   *  \pre getParent() == nullptr
+   *  \post getParent() == &entry
+   *  \post entry.getChildren() contains this
+   */
   void
-  setHash(size_t hash);
+  setParent(Entry& entry);
 
-  size_t
-  getHash() const;
-
+  /** \brief unset parent of this entry
+   *  \post getParent() == nullptr
+   *  \post parent.getChildren() does not contain this
+   */
   void
-  setParent(shared_ptr<Entry> parent);
+  unsetParent();
 
-  shared_ptr<Entry>
-  getParent() const;
-
-  std::vector<shared_ptr<Entry>>&
-  getChildren();
-
+  /** \retval true this entry has at least one child
+   *  \retval false this entry has no children
+   */
   bool
-  hasChildren() const;
+  hasChildren() const
+  {
+    return !this->getChildren().empty();
+  }
 
+  /** \return children of this entry
+   */
+  const std::vector<Entry*>&
+  getChildren() const
+  {
+    return m_children;
+  }
+
+  /** \retval true this entry has no children and no table entries
+   *  \retval false this entry has at least one child or an attached table entry
+   */
   bool
-  isEmpty() const;
+  isEmpty() const
+  {
+    return !this->hasChildren() && !this->hasTableEntries();
+  }
 
 public: // attached table entries
+  /** \retval true at least one table entry is attached
+   *  \retval false no table entry is attached
+   */
+  bool
+  hasTableEntries() const;
+
+  fib::Entry*
+  getFibEntry() const
+  {
+    return m_fibEntry.get();
+  }
+
   void
   setFibEntry(unique_ptr<fib::Entry> fibEntry);
 
-  fib::Entry*
-  getFibEntry() const;
+  bool
+  hasPitEntries() const
+  {
+    return !this->getPitEntries().empty();
+  }
+
+  const std::vector<shared_ptr<pit::Entry>>&
+  getPitEntries() const
+  {
+    return m_pitEntries;
+  }
 
   void
   insertPitEntry(shared_ptr<pit::Entry> pitEntry);
@@ -102,118 +140,38 @@
   void
   erasePitEntry(shared_ptr<pit::Entry> pitEntry);
 
-  bool
-  hasPitEntries() const;
-
-  const std::vector<shared_ptr<pit::Entry>>&
-  getPitEntries() const;
+  measurements::Entry*
+  getMeasurementsEntry() const
+  {
+    return m_measurementsEntry.get();
+  }
 
   void
   setMeasurementsEntry(unique_ptr<measurements::Entry> measurementsEntry);
 
-  measurements::Entry*
-  getMeasurementsEntry() const;
+  strategy_choice::Entry*
+  getStrategyChoiceEntry() const
+  {
+    return m_strategyChoiceEntry.get();
+  }
 
   void
   setStrategyChoiceEntry(unique_ptr<strategy_choice::Entry> strategyChoiceEntry);
 
-  strategy_choice::Entry*
-  getStrategyChoiceEntry() const;
-
 private:
-  // Benefits of storing m_hash
-  // 1. m_hash is compared before m_prefix is compared
-  // 2. fast hash table resize support
-  size_t m_hash;
-  Name m_prefix;
-  shared_ptr<Entry> m_parent;     // Pointing to the parent entry.
-  std::vector<shared_ptr<Entry>> m_children; // Children pointers.
+  Name m_name;
+  Node* m_node;
+  Entry* m_parent;
+  std::vector<Entry*> m_children;
+
   unique_ptr<fib::Entry> m_fibEntry;
   std::vector<shared_ptr<pit::Entry>> m_pitEntries;
   unique_ptr<measurements::Entry> m_measurementsEntry;
   unique_ptr<strategy_choice::Entry> m_strategyChoiceEntry;
 
-  // get the Name Tree Node that is associated with this Name Tree Entry
-  Node* m_node;
-
-  friend class NameTree;
-  friend class FullEnumerationImpl;
-  friend class PartialEnumerationImpl;
-  friend class PrefixMatchImpl;
+  friend Node* getNode(const Entry& entry);
 };
 
-inline const Name&
-Entry::getPrefix() const
-{
-  return m_prefix;
-}
-
-inline size_t
-Entry::getHash() const
-{
-  return m_hash;
-}
-
-inline void
-Entry::setHash(size_t hash)
-{
-  m_hash = hash;
-}
-
-inline shared_ptr<Entry>
-Entry::getParent() const
-{
-  return m_parent;
-}
-
-inline void
-Entry::setParent(shared_ptr<Entry> parent)
-{
-  m_parent = parent;
-}
-
-inline std::vector<shared_ptr<name_tree::Entry>>&
-Entry::getChildren()
-{
-  return m_children;
-}
-
-inline bool
-Entry::hasChildren() const
-{
-  return !m_children.empty();
-}
-
-inline fib::Entry*
-Entry::getFibEntry() const
-{
-  return m_fibEntry.get();
-}
-
-inline bool
-Entry::hasPitEntries() const
-{
-  return !m_pitEntries.empty();
-}
-
-inline const std::vector<shared_ptr<pit::Entry>>&
-Entry::getPitEntries() const
-{
-  return m_pitEntries;
-}
-
-inline measurements::Entry*
-Entry::getMeasurementsEntry() const
-{
-  return m_measurementsEntry.get();
-}
-
-inline strategy_choice::Entry*
-Entry::getStrategyChoiceEntry() const
-{
-  return m_strategyChoiceEntry.get();
-}
-
 } // namespace name_tree
 } // namespace nfd
 
diff --git a/daemon/table/name-tree-hashtable.cpp b/daemon/table/name-tree-hashtable.cpp
new file mode 100644
index 0000000..1267f99
--- /dev/null
+++ b/daemon/table/name-tree-hashtable.cpp
@@ -0,0 +1,280 @@
+/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
+/**
+ * Copyright (c) 2014-2016,  Regents of the University of California,
+ *                           Arizona Board of Regents,
+ *                           Colorado State University,
+ *                           University Pierre & Marie Curie, Sorbonne University,
+ *                           Washington University in St. Louis,
+ *                           Beijing Institute of Technology,
+ *                           The University of Memphis.
+ *
+ * This file is part of NFD (Named Data Networking Forwarding Daemon).
+ * See AUTHORS.md for complete list of NFD authors and contributors.
+ *
+ * NFD is free software: you can redistribute it and/or modify it under the terms
+ * of the GNU General Public License as published by the Free Software Foundation,
+ * either version 3 of the License, or (at your option) any later version.
+ *
+ * NFD is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE.  See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * NFD, e.g., in COPYING.md file.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "name-tree-hashtable.hpp"
+#include "name-tree-entry.hpp"
+#include "core/logger.hpp"
+#include "core/city-hash.hpp"
+
+namespace nfd {
+namespace name_tree {
+
+NFD_LOG_INIT("NameTreeHashtable");
+
+class Hash32
+{
+public:
+  static HashValue
+  compute(const void* buffer, size_t length)
+  {
+    return static_cast<HashValue>(CityHash32(reinterpret_cast<const char*>(buffer), length));
+  }
+};
+
+class Hash64
+{
+public:
+  static HashValue
+  compute(const void* buffer, size_t length)
+  {
+    return static_cast<HashValue>(CityHash64(reinterpret_cast<const char*>(buffer), length));
+  }
+};
+
+/** \brief a type whose static compute() method computes a hash value from a raw buffer
+ */
+typedef std::conditional<(sizeof(HashValue) > 4), Hash64, Hash32>::type HashFunc;
+
+HashValue
+computeHash(const Name& name, ssize_t prefixLen)
+{
+  name.wireEncode(); // ensure wire buffer exists
+
+  HashValue h = 0;
+  for (size_t i = 0, last = prefixLen < 0 ? name.size() : prefixLen; i < last; ++i) {
+    const name::Component& comp = name[i];
+    h ^= HashFunc::compute(comp.wire(), comp.size());
+  }
+  return h;
+}
+
+HashSequence
+computeHashes(const Name& name)
+{
+  name.wireEncode(); // ensure wire buffer exists
+
+  HashSequence seq;
+  seq.reserve(name.size() + 1);
+
+  HashValue h = 0;
+  seq.push_back(h);
+
+  for (const name::Component& comp : name) {
+    h ^= HashFunc::compute(comp.wire(), comp.size());
+    seq.push_back(h);
+  }
+  return seq;
+}
+
+Node::Node(HashValue h, const Name& name)
+  : hash(h)
+  , prev(nullptr)
+  , next(nullptr)
+  , entry(make_shared<Entry>(name, this))
+{
+}
+
+Node::~Node()
+{
+  BOOST_ASSERT(prev == nullptr);
+  BOOST_ASSERT(next == nullptr);
+}
+
+Node*
+getNode(const Entry& entry)
+{
+  return entry.m_node;
+}
+
+HashtableOptions::HashtableOptions(size_t size)
+  : initialSize(size)
+  , minSize(size)
+{
+}
+
+Hashtable::Hashtable(const Options& options)
+  : m_options(options)
+  , m_size(0)
+{
+  BOOST_ASSERT(m_options.minSize > 0);
+  BOOST_ASSERT(m_options.initialSize >= m_options.minSize);
+  BOOST_ASSERT(m_options.expandLoadFactor > 0.0);
+  BOOST_ASSERT(m_options.expandLoadFactor <= 1.0);
+  BOOST_ASSERT(m_options.expandFactor > 1.0);
+  BOOST_ASSERT(m_options.shrinkLoadFactor >= 0.0);
+  BOOST_ASSERT(m_options.shrinkLoadFactor < 1.0);
+  BOOST_ASSERT(m_options.shrinkFactor > 0.0);
+  BOOST_ASSERT(m_options.shrinkFactor < 1.0);
+
+  m_buckets.resize(options.initialSize);
+  this->computeThresholds();
+}
+
+Hashtable::~Hashtable()
+{
+  for (size_t i = 0; i < m_buckets.size(); ++i) {
+    foreachNode(m_buckets[i], [] (Node* node) {
+      node->prev = node->next = nullptr;
+      delete node;
+    });
+  }
+}
+
+void
+Hashtable::attach(size_t bucket, Node* node)
+{
+  node->prev = nullptr;
+  node->next = m_buckets[bucket];
+
+  if (node->next != nullptr) {
+    BOOST_ASSERT(node->next->prev == nullptr);
+    node->next->prev = node;
+  }
+
+  m_buckets[bucket] = node;
+}
+
+void
+Hashtable::detach(size_t bucket, Node* node)
+{
+  if (node->prev != nullptr) {
+    BOOST_ASSERT(node->prev->next == node);
+    node->prev->next = node->next;
+  }
+  else {
+    BOOST_ASSERT(m_buckets[bucket] == node);
+    m_buckets[bucket] = node->next;
+  }
+
+  if (node->next != nullptr) {
+    BOOST_ASSERT(node->next->prev == node);
+    node->next->prev = node->prev;
+  }
+
+  node->prev = node->next = nullptr;
+}
+
+std::pair<const Node*, bool>
+Hashtable::findOrInsert(const Name& name, size_t prefixLen, HashValue h, bool allowInsert)
+{
+  size_t bucket = this->computeBucketIndex(h);
+
+  for (const Node* node = m_buckets[bucket]; node != nullptr; node = node->next) {
+    if (node->hash == h && name.compare(0, prefixLen, node->entry->getName()) == 0) {
+      NFD_LOG_TRACE("found " << name.getPrefix(prefixLen) << " hash=" << h << " bucket=" << bucket);
+      return {node, false};
+    }
+  }
+
+  if (!allowInsert) {
+    NFD_LOG_TRACE("not-found " << name.getPrefix(prefixLen) << " hash=" << h << " bucket=" << bucket);
+    return {nullptr, false};
+  }
+
+  Node* node = new Node(h, name.getPrefix(prefixLen));
+  this->attach(bucket, node);
+  NFD_LOG_TRACE("insert " << node->entry->getName() << " hash=" << h << " bucket=" << bucket);
+  ++m_size;
+
+  if (m_size > m_expandThreshold) {
+    this->resize(static_cast<size_t>(m_options.expandFactor * this->getNBuckets()));
+  }
+
+  return {node, true};
+}
+
+const Node*
+Hashtable::find(const Name& name, size_t prefixLen) const
+{
+  HashValue h = computeHash(name, prefixLen);
+  return const_cast<Hashtable*>(this)->findOrInsert(name, prefixLen, h, false).first;
+}
+
+const Node*
+Hashtable::find(const Name& name, size_t prefixLen, const HashSequence& hashes) const
+{
+  BOOST_ASSERT(hashes.at(prefixLen) == computeHash(name, prefixLen));
+  return const_cast<Hashtable*>(this)->findOrInsert(name, prefixLen, hashes[prefixLen], false).first;
+}
+
+std::pair<const Node*, bool>
+Hashtable::insert(const Name& name, size_t prefixLen, const HashSequence& hashes)
+{
+  BOOST_ASSERT(hashes.at(prefixLen) == computeHash(name, prefixLen));
+  return this->findOrInsert(name, prefixLen, hashes[prefixLen], true);
+}
+
+void
+Hashtable::erase(Node* node)
+{
+  BOOST_ASSERT(node != nullptr);
+  BOOST_ASSERT(node->entry->getParent() == nullptr);
+
+  size_t bucket = this->computeBucketIndex(node->hash);
+  NFD_LOG_TRACE("erase " << node->entry->getName() << " hash=" << node->hash << " bucket=" << bucket);
+
+  this->detach(bucket, node);
+  delete node;
+  --m_size;
+
+  if (m_size < m_shrinkThreshold) {
+    size_t newNBuckets = std::max(m_options.minSize,
+      static_cast<size_t>(m_options.shrinkFactor * this->getNBuckets()));
+    this->resize(newNBuckets);
+  }
+}
+
+void
+Hashtable::computeThresholds()
+{
+  m_expandThreshold = static_cast<size_t>(m_options.expandLoadFactor * this->getNBuckets());
+  m_shrinkThreshold = static_cast<size_t>(m_options.shrinkLoadFactor * this->getNBuckets());
+  NFD_LOG_TRACE("thresholds expand=" << m_expandThreshold << " shrink=" << m_shrinkThreshold);
+}
+
+void
+Hashtable::resize(size_t newNBuckets)
+{
+  if (this->getNBuckets() == newNBuckets) {
+    return;
+  }
+  NFD_LOG_DEBUG("resize from=" << this->getNBuckets() << " to=" << newNBuckets);
+
+  std::vector<Node*> oldBuckets;
+  oldBuckets.swap(m_buckets);
+  m_buckets.resize(newNBuckets);
+
+  for (Node* head : oldBuckets) {
+    foreachNode(head, [this] (Node* node) {
+      size_t bucket = this->computeBucketIndex(node->hash);
+      this->attach(bucket, node);
+    });
+  }
+
+  this->computeThresholds();
+}
+
+} // namespace name_tree
+} // namespace nfd
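
Hashtable::insert() and the three-argument find() assert that the supplied HashSequence is
consistent with computeHash(); the invariant is that the i-th element of computeHashes(name)
equals computeHash(name, i), because each prefix hash is the XOR of its components' hashes.
A minimal sketch of that invariant, assuming NFD's daemon/ include root:

    #include "table/name-tree-hashtable.hpp"

    void
    hashSequenceSketch()
    {
      using namespace nfd::name_tree;

      ndn::Name name("/A/B/C");
      HashSequence hashes = computeHashes(name);     // one value per prefix, incl. the empty prefix
      BOOST_ASSERT(hashes.size() == name.size() + 1);

      for (size_t i = 0; i <= name.size(); ++i) {
        BOOST_ASSERT(hashes[i] == computeHash(name, i));
      }
    }
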
diff --git a/daemon/table/name-tree-hashtable.hpp b/daemon/table/name-tree-hashtable.hpp
new file mode 100644
index 0000000..a9f0cfa
--- /dev/null
+++ b/daemon/table/name-tree-hashtable.hpp
@@ -0,0 +1,254 @@
+/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
+/**
+ * Copyright (c) 2014-2016,  Regents of the University of California,
+ *                           Arizona Board of Regents,
+ *                           Colorado State University,
+ *                           University Pierre & Marie Curie, Sorbonne University,
+ *                           Washington University in St. Louis,
+ *                           Beijing Institute of Technology,
+ *                           The University of Memphis.
+ *
+ * This file is part of NFD (Named Data Networking Forwarding Daemon).
+ * See AUTHORS.md for complete list of NFD authors and contributors.
+ *
+ * NFD is free software: you can redistribute it and/or modify it under the terms
+ * of the GNU General Public License as published by the Free Software Foundation,
+ * either version 3 of the License, or (at your option) any later version.
+ *
+ * NFD is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE.  See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * NFD, e.g., in COPYING.md file.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef NFD_DAEMON_TABLE_NAME_TREE_HASHTABLE_HPP
+#define NFD_DAEMON_TABLE_NAME_TREE_HASHTABLE_HPP
+
+#include "core/common.hpp"
+
+namespace nfd {
+namespace name_tree {
+
+class Entry;
+
+/** \brief a single hash value
+ */
+typedef size_t HashValue;
+
+/** \brief a sequence of hash values
+ *  \sa computeHashes
+ */
+typedef std::vector<HashValue> HashSequence;
+
+/** \brief computes a single hash value
+ *  \param name base name
+ *  \param prefixLen if non-negative, compute hash value for name.getPrefix(prefixLen);
+ *                   if negative, compute hash value for complete name
+ */
+HashValue
+computeHash(const Name& name, ssize_t prefixLen = -1);
+
+/** \brief computes hash values for each prefix of name
+ *  \return a hash sequence, where the i-th hash value equals computeHash(name, i)
+ */
+HashSequence
+computeHashes(const Name& name);
+
+/** \brief a hashtable node
+ *
+ *  Zero or more nodes can be added to a hashtable bucket. They are organized as
+ *  a doubly linked list through prev and next pointers.
+ */
+class Node
+{
+public:
+  /** \post entry != nullptr && entry->getName() == name
+   */
+  Node(HashValue h, const Name& name);
+
+  /** \pre prev == nullptr
+   *  \pre next == nullptr
+   */
+  ~Node();
+
+public:
+  HashValue hash;
+  Node* prev;
+  Node* next;
+  shared_ptr<Entry> entry; ///< \todo #3687 make Node sole owner of Entry
+};
+
+/** \return node associated with entry
+ *  \note This function is for NameTree internal use.
+ */
+Node*
+getNode(const Entry& entry);
+
+/** \brief invoke a function for each node in a doubly linked list
+ *  \tparam N either Node or const Node
+ *  \tparam F a functor with signature void F(N*)
+ *  \note It's safe to delete the node in the function.
+ */
+template<typename N, typename F>
+void
+foreachNode(N* head, const F& func)
+{
+  N* node = head;
+  while (node != nullptr) {
+    N* next = node->next;
+    func(node);
+    node = next;
+  }
+}
+
+/** \brief provides options for Hashtable
+ */
+class HashtableOptions
+{
+public:
+  /** \brief constructor
+   *  \post initialSize == size
+   *  \post minSize == size
+   */
+  explicit
+  HashtableOptions(size_t size = 16);
+
+public:
+  /** \brief initial number of buckets
+   */
+  size_t initialSize;
+
+  /** \brief minimal number of buckets
+   */
+  size_t minSize;
+
+  /** \brief if hashtable has more than nBuckets*expandLoadFactor nodes, it will be expanded
+   */
+  float expandLoadFactor = 0.5;
+
+  /** \brief when hashtable is expanded, its new size is nBuckets*expandFactor
+   */
+  float expandFactor = 2.0;
+
+  /** \brief if hashtable has fewer than nBuckets*shrinkLoadFactor nodes, it will be shrunk
+   */
+  float shrinkLoadFactor = 0.1;
+
+  /** \brief when hashtable is shrunk, its new size is max(nBuckets*shrinkFactor, minSize)
+   */
+  float shrinkFactor = 0.5;
+};
+
+/** \brief a hashtable for fast exact name lookup
+ *
+ *  The Hashtable contains a number of buckets.
+ *  Each node is placed into a bucket determined by a hash value computed from its name.
+ *  Hash collision is resolved through a doubly linked list in each bucket.
+ *  The number of buckets is adjusted according to how many nodes are stored.
+ */
+class Hashtable
+{
+public:
+  typedef HashtableOptions Options;
+
+  explicit
+  Hashtable(const Options& options);
+
+  /** \brief deallocates all nodes
+   */
+  ~Hashtable();
+
+  /** \return number of nodes
+   */
+  size_t
+  size() const
+  {
+    return m_size;
+  }
+
+  /** \return number of buckets
+   */
+  size_t
+  getNBuckets() const
+  {
+    return m_buckets.size();
+  }
+
+  /** \return bucket index for hash value h
+   */
+  size_t
+  computeBucketIndex(HashValue h) const
+  {
+    return h % this->getNBuckets();
+  }
+
+  /** \return i-th bucket
+   *  \pre bucket < getNBuckets()
+   */
+  const Node*
+  getBucket(size_t bucket) const
+  {
+    BOOST_ASSERT(bucket < this->getNBuckets());
+    return m_buckets[bucket]; // don't use m_buckets.at() for better performance
+  }
+
+  /** \brief find node for name.getPrefix(prefixLen)
+   *  \pre name.size() >= prefixLen
+   */
+  const Node*
+  find(const Name& name, size_t prefixLen) const;
+
+  /** \brief find node for name.getPrefix(prefixLen)
+   *  \pre name.size() >= prefixLen
+   *  \pre hashes == computeHashes(name)
+   */
+  const Node*
+  find(const Name& name, size_t prefixLen, const HashSequence& hashes) const;
+
+  /** \brief find or insert node for name.getPrefix(prefixLen)
+   *  \pre name.size() >= prefixLen
+   *  \pre hashes == computeHashes(name)
+   */
+  std::pair<const Node*, bool>
+  insert(const Name& name, size_t prefixLen, const HashSequence& hashes);
+
+  /** \brief delete node
+   *  \pre node exists in this hashtable
+   */
+  void
+  erase(Node* node);
+
+private:
+  /** \brief attach node to bucket
+   */
+  void
+  attach(size_t bucket, Node* node);
+
+  /** \brief detach node from bucket
+   */
+  void
+  detach(size_t bucket, Node* node);
+
+  std::pair<const Node*, bool>
+  findOrInsert(const Name& name, size_t prefixLen, HashValue h, bool allowInsert);
+
+  void
+  computeThresholds();
+
+  void
+  resize(size_t newNBuckets);
+
+private:
+  std::vector<Node*> m_buckets;
+  Options m_options;
+  size_t m_size;
+  size_t m_expandThreshold;
+  size_t m_shrinkThreshold;
+};
+
+} // namespace name_tree
+} // namespace nfd
+
+#endif // NFD_DAEMON_TABLE_NAME_TREE_HASHTABLE_HPP
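
NameTree::lookup() drives the Hashtable in essentially this way: compute the hash sequence
once, then insert or find one node per prefix. A minimal usage sketch, assuming NFD's
daemon/ include root; parents are deliberately never linked here so that the erase()
precondition (entry has no parent) holds:

    #include "table/name-tree-hashtable.hpp"
    #include "table/name-tree-entry.hpp"

    void
    hashtableSketch()
    {
      using namespace nfd::name_tree;

      Hashtable ht{HashtableOptions(16)};

      ndn::Name name("/A/B");
      HashSequence hashes = computeHashes(name);

      // insert nodes for every prefix: /, /A, /A/B
      for (size_t prefixLen = 0; prefixLen <= name.size(); ++prefixLen) {
        std::pair<const Node*, bool> res = ht.insert(name, prefixLen, hashes);
        BOOST_ASSERT(res.first != nullptr);
      }
      BOOST_ASSERT(ht.size() == 3);

      // exact lookup of the /A prefix, then erase its node
      const Node* found = ht.find(name, 1);
      BOOST_ASSERT(found != nullptr && found->entry->getName() == ndn::Name("/A"));
      ht.erase(getNode(*found->entry));
      BOOST_ASSERT(ht.size() == 2);
    }
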
diff --git a/daemon/table/name-tree-iterator.cpp b/daemon/table/name-tree-iterator.cpp
index 31077be..9832dcd 100644
--- a/daemon/table/name-tree-iterator.cpp
+++ b/daemon/table/name-tree-iterator.cpp
@@ -41,12 +41,15 @@
               "Iterator must be default-constructible");
 
 Iterator::Iterator()
-  : m_state(0)
+  : m_entry(nullptr)
+  , m_ref(nullptr)
+  , m_state(0)
 {
 }
 
-Iterator::Iterator(shared_ptr<EnumerationImpl> impl, shared_ptr<Entry> ref)
+Iterator::Iterator(shared_ptr<EnumerationImpl> impl, const Entry* ref)
   : m_impl(impl)
+  , m_entry(nullptr)
   , m_ref(ref)
   , m_state(0)
 {
@@ -98,8 +101,9 @@
   return os;
 }
 
-EnumerationImpl::EnumerationImpl(const NameTree& nt1)
-  : nt(nt1)
+EnumerationImpl::EnumerationImpl(const NameTree& nt)
+  : nt(nt)
+  , ht(nt.m_ht)
 {
 }
 
@@ -112,37 +116,38 @@
 void
 FullEnumerationImpl::advance(Iterator& i)
 {
-  // find initial entry
+  // find first entry
   if (i.m_entry == nullptr) {
-    for (size_t bucket = 0; bucket < nt.m_nBuckets; ++bucket) {
-      Node* node = nt.m_buckets[bucket];
+    for (size_t bucket = 0; bucket < ht.getNBuckets(); ++bucket) {
+      const Node* node = ht.getBucket(bucket);
       if (node != nullptr) {
-        i.m_entry = node->m_entry;
+        i.m_entry = node->entry.get();
         break;
       }
     }
-    if (i.m_entry == nullptr) { // empty enumeration
+    if (i.m_entry == nullptr) { // empty enumerable
       i = Iterator();
       return;
     }
-    if (m_pred(*i.m_entry)) { // visit initial entry
+    if (m_pred(*i.m_entry)) { // visit first entry
       return;
     }
   }
 
   // process entries in same bucket
-  for (Node* node = i.m_entry->m_node->m_next; node != nullptr; node = node->m_next) {
-    if (m_pred(*node->m_entry)) {
-      i.m_entry = node->m_entry;
+  for (const Node* node = getNode(*i.m_entry)->next; node != nullptr; node = node->next) {
+    if (m_pred(*node->entry)) {
+      i.m_entry = node->entry.get();
       return;
     }
   }
 
   // process other buckets
-  for (size_t bucket = i.m_entry->m_hash % nt.m_nBuckets + 1; bucket < nt.m_nBuckets; ++bucket) {
-    for (Node* node = nt.m_buckets[bucket]; node != nullptr; node = node->m_next) {
-      if (m_pred(*node->m_entry)) {
-        i.m_entry = node->m_entry;
+  size_t currentBucket = ht.computeBucketIndex(getNode(*i.m_entry)->hash);
+  for (size_t bucket = currentBucket + 1; bucket < ht.getNBuckets(); ++bucket) {
+    for (const Node* node = ht.getBucket(bucket); node != nullptr; node = node->next) {
+      if (m_pred(*node->entry)) {
+        i.m_entry = node->entry.get();
         return;
       }
     }
@@ -166,7 +171,7 @@
 
   // initialize: start from root
   if (i.m_entry == nullptr) {
-    if (i.m_ref == nullptr) { // empty enumeration
+    if (i.m_ref == nullptr) { // root does not exist
       i = Iterator();
       return;
     }
@@ -195,8 +200,8 @@
       // first child rejected, let while loop process other children (siblings of new m_entry)
     }
     else { // process siblings of m_entry
-      shared_ptr<Entry> parent = i.m_entry->getParent();
-      const std::vector<shared_ptr<Entry>>& siblings = parent->getChildren();
+      const Entry* parent = i.m_entry->getParent();
+      const std::vector<Entry*>& siblings = parent->getChildren();
       auto sibling = std::find(siblings.begin(), siblings.end(), i.m_entry);
       BOOST_ASSERT(sibling != siblings.end());
       while (++sibling != siblings.end()) {
@@ -232,7 +237,7 @@
 PrefixMatchImpl::advance(Iterator& i)
 {
   if (i.m_entry == nullptr) {
-    if (i.m_ref == nullptr) { // empty enumeration
+    if (i.m_ref == nullptr) { // empty enumerable
       i = Iterator();
       return;
     }
diff --git a/daemon/table/name-tree-iterator.hpp b/daemon/table/name-tree-iterator.hpp
index a764677..fcc57b1 100644
--- a/daemon/table/name-tree-iterator.hpp
+++ b/daemon/table/name-tree-iterator.hpp
@@ -72,7 +72,7 @@
 public:
   Iterator();
 
-  Iterator(shared_ptr<EnumerationImpl> impl, shared_ptr<Entry> ref);
+  Iterator(shared_ptr<EnumerationImpl> impl, const Entry* ref);
 
   virtual
   ~Iterator() = default;
@@ -84,7 +84,7 @@
     return *m_entry;
   }
 
-  shared_ptr<Entry>
+  const Entry*
   operator->() const
   {
     BOOST_ASSERT(m_impl != nullptr);
@@ -113,11 +113,11 @@
 
   /** \brief current entry; nullptr for uninitialized iterator
    */
-  shared_ptr<Entry> m_entry;
+  const Entry* m_entry;
 
   /** \brief reference entry used by enumeration implementation
    */
-  shared_ptr<Entry> m_ref;
+  const Entry* m_ref;
 
   /** \brief state used by enumeration implementation
    */
@@ -145,6 +145,7 @@
 
 protected:
   const NameTree& nt;
+  const Hashtable& ht;
 };
 
 /** \brief full enumeration implementation
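
The iterator now stores raw const Entry* pointers instead of shared_ptr, and the enumeration
implementations read buckets through the Hashtable reference added above. Enumeration from
caller code is unchanged; a minimal sketch, assuming NFD's daemon/ include root:

    #include "table/name-tree.hpp"
    #include <iostream>

    void
    enumerationSketch()
    {
      nfd::NameTree nt;
      nt.lookup(ndn::Name("/A/B"));                  // creates /, /A, /A/B

      // begin()/end() perform a full enumeration; operator* yields const Entry&
      for (const nfd::name_tree::Entry& entry : nt) {
        std::cout << entry.getName() << std::endl;
      }
    }
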
diff --git a/daemon/table/name-tree.cpp b/daemon/table/name-tree.cpp
index 09b57f7..bbe5784 100644
--- a/daemon/table/name-tree.cpp
+++ b/daemon/table/name-tree.cpp
@@ -25,7 +25,6 @@
 
 #include "name-tree.hpp"
 #include "core/logger.hpp"
-#include "core/city-hash.hpp"
 
 #include <boost/concept/assert.hpp>
 #include <boost/concept_check.hpp>
@@ -36,190 +35,30 @@
 
 NFD_LOG_INIT("NameTree");
 
-// http://en.cppreference.com/w/cpp/concept/ForwardIterator
-BOOST_CONCEPT_ASSERT((boost::ForwardIterator<NameTree::const_iterator>));
-// boost::ForwardIterator follows SGI standard http://www.sgi.com/tech/stl/ForwardIterator.html,
-// which doesn't require DefaultConstructible
-#ifdef HAVE_IS_DEFAULT_CONSTRUCTIBLE
-static_assert(std::is_default_constructible<NameTree::const_iterator>::value,
-              "NameTree::const_iterator must be default-constructible");
-#else
-BOOST_CONCEPT_ASSERT((boost::DefaultConstructible<NameTree::const_iterator>));
-#endif // HAVE_IS_DEFAULT_CONSTRUCTIBLE
-
-class Hash32
-{
-public:
-  static size_t
-  compute(const char* buffer, size_t length)
-  {
-    return static_cast<size_t>(CityHash32(buffer, length));
-  }
-};
-
-class Hash64
-{
-public:
-  static size_t
-  compute(const char* buffer, size_t length)
-  {
-    return static_cast<size_t>(CityHash64(buffer, length));
-  }
-};
-
-/// @cond NoDocumentation
-typedef boost::mpl::if_c<sizeof(size_t) >= 8, Hash64, Hash32>::type CityHash;
-/// @endcond
-
-// Interface of different hash functions
-size_t
-computeHash(const Name& prefix)
-{
-  prefix.wireEncode();  // guarantees prefix's wire buffer is not empty
-
-  size_t hashValue = 0;
-  size_t hashUpdate = 0;
-
-  for (Name::const_iterator it = prefix.begin(); it != prefix.end(); ++it) {
-    const char* wireFormat = reinterpret_cast<const char*>( it->wire() );
-    hashUpdate = CityHash::compute(wireFormat, it->size());
-    hashValue ^= hashUpdate;
-  }
-
-  return hashValue;
-}
-
-std::vector<size_t>
-computeHashSet(const Name& prefix)
-{
-  prefix.wireEncode(); // guarantees prefix's wire buffer is not empty
-
-  size_t hashValue = 0;
-  size_t hashUpdate = 0;
-
-  std::vector<size_t> hashValueSet;
-  hashValueSet.push_back(hashValue);
-
-  for (Name::const_iterator it = prefix.begin(); it != prefix.end(); ++it) {
-    const char* wireFormat = reinterpret_cast<const char*>( it->wire() );
-    hashUpdate = CityHash::compute(wireFormat, it->size());
-    hashValue ^= hashUpdate;
-    hashValueSet.push_back(hashValue);
-  }
-
-  return hashValueSet;
-}
-
 NameTree::NameTree(size_t nBuckets)
-  : m_nItems(0)
-  , m_nBuckets(nBuckets)
-  , m_minNBuckets(nBuckets)
-  , m_enlargeLoadFactor(0.5) // more than 50% buckets loaded
-  , m_enlargeFactor(2)       // double the hash table size
-  , m_shrinkLoadFactor(0.1)  // less than 10% buckets loaded
-  , m_shrinkFactor(0.5)      // reduce the number of buckets by half
+  : m_ht(HashtableOptions(nBuckets))
 {
-  m_enlargeThreshold = static_cast<size_t>(m_enlargeLoadFactor * static_cast<double>(m_nBuckets));
-  m_shrinkThreshold = static_cast<size_t>(m_shrinkLoadFactor * static_cast<double>(m_nBuckets));
-
-  // array of node pointers
-  m_buckets = new Node*[m_nBuckets];
-  // Initialize the pointer array
-  for (size_t i = 0; i < m_nBuckets; ++i) {
-    m_buckets[i] = nullptr;
-  }
 }
 
-NameTree::~NameTree()
-{
-  for (size_t i = 0; i < m_nBuckets; ++i) {
-    if (m_buckets[i] != nullptr) {
-      delete m_buckets[i];
-    }
-  }
-
-  delete[] m_buckets;
-}
-
-// insert() is a private function, and called by only lookup()
-std::pair<shared_ptr<Entry>, bool>
-NameTree::insert(const Name& prefix)
-{
-  NFD_LOG_TRACE("insert " << prefix);
-
-  size_t hashValue = computeHash(prefix);
-  size_t loc = hashValue % m_nBuckets;
-
-  NFD_LOG_TRACE("Name " << prefix << " hash value = " << hashValue << "  location = " << loc);
-
-  // Check if this Name has been stored
-  Node* node = m_buckets[loc];
-  Node* nodePrev = node;
-
-  for (node = m_buckets[loc]; node != nullptr; node = node->m_next) {
-    if (node->m_entry != nullptr) {
-      if (prefix == node->m_entry->m_prefix) {
-         return {node->m_entry, false}; // false: old entry
-      }
-    }
-    nodePrev = node;
-  }
-
-  NFD_LOG_TRACE("Did not find " << prefix << ", need to insert it to the table");
-
-  // If no bucket is empty occupied, we need to create a new node, and it is
-  // linked from nodePrev
-  node = new Node();
-  node->m_prev = nodePrev;
-
-  if (nodePrev == nullptr) {
-    m_buckets[loc] = node;
-  }
-  else{
-    nodePrev->m_next = node;
-  }
-
-  // Create a new Entry
-  auto entry = make_shared<Entry>(prefix);
-  entry->setHash(hashValue);
-  node->m_entry = entry; // link the Entry to its Node
-  entry->m_node = node; // link the node to Entry. Used in eraseEntryIfEmpty.
-
-  return {entry, true}; // true: new entry
-}
-
-// Name Prefix Lookup. Create Name Tree Entry if not found
 shared_ptr<Entry>
-NameTree::lookup(const Name& prefix)
+NameTree::lookup(const Name& name)
 {
-  NFD_LOG_TRACE("lookup " << prefix);
+  NFD_LOG_TRACE("lookup " << name);
 
-  shared_ptr<Entry> entry;
-  shared_ptr<Entry> parent;
+  HashSequence hashes = computeHashes(name);
+  const Node* node = nullptr;
+  Entry* parent = nullptr;
 
-  for (size_t i = 0; i <= prefix.size(); ++i) {
-    Name temp = prefix.getPrefix(i);
-
-    // insert() will create the entry if it does not exist.
+  for (size_t prefixLen = 0; prefixLen <= name.size(); ++prefixLen) {
     bool isNew = false;
-    std::tie(entry, isNew) = insert(temp);
+    std::tie(node, isNew) = m_ht.insert(name, prefixLen, hashes);
 
-    if (isNew) {
-      ++m_nItems; // Increase the counter
-      entry->m_parent = parent;
-
-      if (parent != nullptr) {
-        parent->m_children.push_back(entry);
-      }
+    if (isNew && parent != nullptr) {
+      node->entry->setParent(*parent);
     }
-
-    if (m_nItems > m_enlargeThreshold) {
-      resize(m_enlargeFactor * m_nBuckets);
-    }
-
-    parent = entry;
+    parent = node->entry.get();
   }
-  return entry;
+  return node->entry;
 }
 
 shared_ptr<Entry>
@@ -263,132 +102,46 @@
   return nte;
 }
 
-// return {false: this entry is not empty, true: this entry is empty and erased}
-bool
-NameTree::eraseEntryIfEmpty(shared_ptr<Entry> entry)
+size_t
+NameTree::eraseEntryIfEmpty(Entry* entry)
 {
   BOOST_ASSERT(entry != nullptr);
 
-  NFD_LOG_TRACE("eraseEntryIfEmpty " << entry->getPrefix());
-
-  // first check if this Entry can be erased
-  if (entry->isEmpty()) {
-    // update child-related info in the parent
-    shared_ptr<Entry> parent = entry->getParent();
+  size_t nErased = 0;
+  for (Entry* parent = nullptr; entry != nullptr && entry->isEmpty(); entry = parent) {
+    parent = entry->getParent();
 
     if (parent != nullptr) {
-      std::vector<shared_ptr<Entry>>& parentChildrenList = parent->getChildren();
-
-      bool isFound = false;
-      size_t size = parentChildrenList.size();
-      for (size_t i = 0; i < size; ++i) {
-        if (parentChildrenList[i] == entry) {
-          parentChildrenList[i] = parentChildrenList[size - 1];
-          parentChildrenList.pop_back();
-          isFound = true;
-          break;
-        }
-      }
-
-      BOOST_VERIFY(isFound == true);
+      entry->unsetParent();
     }
 
-    // remove this Entry and its Name Tree Node
-    Node* node = entry->m_node;
-    Node* nodePrev = node->m_prev;
-
-    // configure the previous node
-    if (nodePrev != nullptr) {
-      // link the previous node to the next node
-      nodePrev->m_next = node->m_next;
-    }
-    else {
-      m_buckets[entry->getHash() % m_nBuckets] = node->m_next;
-    }
-
-    // link the previous node with the next node (skip the erased one)
-    if (node->m_next != nullptr) {
-      node->m_next->m_prev = nodePrev;
-      node->m_next = 0;
-    }
-
-    BOOST_ASSERT(node->m_next == nullptr);
-
-    --m_nItems;
-    delete node;
-
-    if (parent != nullptr) {
-      eraseEntryIfEmpty(parent);
-    }
-
-    size_t newNBuckets = static_cast<size_t>(m_shrinkFactor * static_cast<double>(m_nBuckets));
-
-    if (newNBuckets >= m_minNBuckets && m_nItems < m_shrinkThreshold) {
-      resize(newNBuckets);
-    }
-
-    return true;
-
+    m_ht.erase(getNode(*entry));
+    ++nErased;
   }
 
-  return false;
+  if (nErased == 0) {
+    NFD_LOG_TRACE("not-erase " << entry->getName());
+  }
+  return nErased;
 }
 
-// Exact Match
 shared_ptr<Entry>
-NameTree::findExactMatch(const Name& prefix) const
+NameTree::findExactMatch(const Name& name) const
 {
-  NFD_LOG_TRACE("findExactMatch " << prefix);
-
-  size_t hashValue = computeHash(prefix);
-  size_t loc = hashValue % m_nBuckets;
-
-  NFD_LOG_TRACE("Name " << prefix << " hash value = " << hashValue << "  location = " << loc);
-
-  shared_ptr<Entry> entry;
-  Node* node = nullptr;
-
-  for (node = m_buckets[loc]; node != nullptr; node = node->m_next) {
-    entry = node->m_entry;
-    if (entry != nullptr) {
-      if (hashValue == entry->getHash() && prefix == entry->getPrefix()) {
-        return entry;
-      }
-    }
-  }
-
-  // if not found, the default value of entry (null pointer) will be returned
-  entry.reset();
-  return entry;
+  const Node* node = m_ht.find(name, name.size());
+  return node == nullptr ? nullptr : node->entry;
 }
 
 // Longest Prefix Match
 shared_ptr<Entry>
-NameTree::findLongestPrefixMatch(const Name& prefix, const EntrySelector& entrySelector) const
+NameTree::findLongestPrefixMatch(const Name& name, const EntrySelector& entrySelector) const
 {
-  NFD_LOG_TRACE("findLongestPrefixMatch " << prefix);
+  HashSequence hashes = computeHashes(name);
 
-  shared_ptr<Entry> entry;
-  std::vector<size_t> hashValueSet = computeHashSet(prefix);
-
-  size_t hashValue = 0;
-  size_t loc = 0;
-
-  for (int i = static_cast<int>(prefix.size()); i >= 0; --i) {
-    hashValue = hashValueSet[i];
-    loc = hashValue % m_nBuckets;
-
-    Node* node = nullptr;
-    for (node = m_buckets[loc]; node != nullptr; node = node->m_next) {
-      entry = node->m_entry;
-      if (entry != nullptr) {
-        // isPrefixOf() is used to avoid making a copy of the name
-        if (hashValue == entry->getHash() &&
-            entry->getPrefix().isPrefixOf(prefix) &&
-            entrySelector(*entry)) {
-          return entry;
-        }
-      }
+  for (ssize_t prefixLen = name.size(); prefixLen >= 0; --prefixLen) {
+    const Node* node = m_ht.find(name, prefixLen, hashes);
+    if (node != nullptr && entrySelector(*node->entry)) {
+      return node->entry;
     }
   }
 
@@ -396,12 +149,13 @@
 }
 
 shared_ptr<Entry>
-NameTree::findLongestPrefixMatch(shared_ptr<Entry> entry,
+NameTree::findLongestPrefixMatch(shared_ptr<Entry> entry1,
                                  const EntrySelector& entrySelector) const
 {
+  Entry* entry = entry1.get();
   while (entry != nullptr) {
     if (entrySelector(*entry)) {
-      return entry;
+      return entry->shared_from_this();
     }
     entry = entry->getParent();
   }
@@ -436,7 +190,7 @@
   // trie from the root node.
 
   shared_ptr<Entry> entry = findLongestPrefixMatch(prefix, entrySelector);
-  return {Iterator(make_shared<PrefixMatchImpl>(*this, entrySelector), entry), end()};
+  return {Iterator(make_shared<PrefixMatchImpl>(*this, entrySelector), entry.get()), end()};
 }
 
 boost::iterator_range<NameTree::const_iterator>
@@ -453,98 +207,7 @@
 {
   // the first step is to process the root node
   shared_ptr<Entry> entry = findExactMatch(prefix);
-  return {Iterator(make_shared<PartialEnumerationImpl>(*this, entrySubTreeSelector), entry), end()};
-}
-
-// Hash Table Resize
-void
-NameTree::resize(size_t newNBuckets)
-{
-  NFD_LOG_TRACE("resize");
-
-  Node** newBuckets = new Node*[newNBuckets];
-  size_t count = 0;
-
-  // referenced ccnx hashtb.c hashtb_rehash()
-  Node** pp = nullptr;
-  Node* p = nullptr;
-  Node* pre = nullptr;
-  Node* q = nullptr; // record p->m_next
-
-  for (size_t i = 0; i < newNBuckets; ++i) {
-    newBuckets[i] = nullptr;
-  }
-
-  for (size_t i = 0; i < m_nBuckets; ++i) {
-    for (p = m_buckets[i]; p != nullptr; p = q) {
-      ++count;
-      q = p->m_next;
-      BOOST_ASSERT(p->m_entry != nullptr);
-      uint32_t h = p->m_entry->m_hash;
-      uint32_t b = h % newNBuckets;
-      pre = nullptr;
-      for (pp = &newBuckets[b]; *pp != nullptr; pp = &((*pp)->m_next)) {
-        pre = *pp;
-      }
-      p->m_prev = pre;
-      p->m_next = *pp; // Actually *pp always == nullptr in this case
-      *pp = p;
-    }
-  }
-
-  BOOST_ASSERT(count == m_nItems);
-
-  Node** oldBuckets = m_buckets;
-  m_buckets = newBuckets;
-  delete[] oldBuckets;
-
-  m_nBuckets = newNBuckets;
-  m_enlargeThreshold = static_cast<size_t>(m_enlargeLoadFactor * static_cast<double>(m_nBuckets));
-  m_shrinkThreshold = static_cast<size_t>(m_shrinkLoadFactor * static_cast<double>(m_nBuckets));
-}
-
-// For debugging
-void
-NameTree::dump(std::ostream& output) const
-{
-  NFD_LOG_TRACE("dump()");
-
-  Node* node = nullptr;
-  shared_ptr<Entry> entry;
-
-  for (size_t i = 0; i < m_nBuckets; ++i) {
-    for (node = m_buckets[i]; node != nullptr; node = node->m_next) {
-      entry = node->m_entry;
-
-      // if the Entry exist, dump its information
-      if (entry != nullptr) {
-        output << "Bucket" << i << '\t' << entry->m_prefix.toUri() << '\n';
-        output << "\t\tHash " << entry->m_hash << '\n';
-
-        if (entry->m_parent != nullptr) {
-          output << "\t\tparent->" << entry->m_parent->m_prefix.toUri();
-        }
-        else {
-          output << "\t\tROOT";
-        }
-        output << '\n';
-
-        if (!entry->m_children.empty()) {
-          output << "\t\tchildren = " << entry->m_children.size() << '\n';
-
-          for (size_t j = 0; j < entry->m_children.size(); ++j) {
-            output << "\t\t\tChild " << j << " " << entry->m_children[j]->getPrefix() << '\n';
-          }
-        }
-
-      }
-
-    }
-  }
-
-  output << "Bucket count = " << m_nBuckets << '\n';
-  output << "Stored item = " << m_nItems << '\n';
-  output << "--------------------------\n";
+  return {Iterator(make_shared<PartialEnumerationImpl>(*this, entrySubTreeSelector), entry.get()), end()};
 }
 
 } // namespace name_tree
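
lookup() now computes the hash sequence once and inserts one node per prefix, linking parents
only for newly created entries, while eraseEntryIfEmpty() returns how many entries it removed
(the entry itself plus every ancestor that became empty). A minimal sketch, assuming NFD's
daemon/ include root:

    #include "table/name-tree.hpp"

    void
    lookupSketch()
    {
      nfd::NameTree nt;

      // creates entries for /, /A, /A/B, /A/B/C and links their parents
      auto entry = nt.lookup(ndn::Name("/A/B/C"));
      BOOST_ASSERT(nt.size() == 4);
      BOOST_ASSERT(entry->getParent()->getName() == ndn::Name("/A/B"));

      // erases /A/B/C and any ancestors that became empty
      size_t nErased = nt.eraseEntryIfEmpty(entry.get());
      BOOST_ASSERT(nErased >= 1);
    }
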
diff --git a/daemon/table/name-tree.hpp b/daemon/table/name-tree.hpp
index 91ec3fa..ad9f314 100644
--- a/daemon/table/name-tree.hpp
+++ b/daemon/table/name-tree.hpp
@@ -31,19 +31,6 @@
 namespace nfd {
 namespace name_tree {
 
-/** \brief Compute the hash value of the given name prefix's WIRE FORMAT
- */
-size_t
-computeHash(const Name& prefix);
-
-/** \brief Incrementally compute hash values
- *  \return Return a vector of hash values, starting from the root prefix
- */
-std::vector<size_t>
-computeHashSet(const Name& prefix);
-
-/** \brief shared name-based index for FIB, PIT, Measurements, and StrategyChoice
- */
 class NameTree : noncopyable
 {
 public:
@@ -52,13 +39,14 @@
   explicit
   NameTree(size_t nBuckets = 1024);
 
-  ~NameTree();
-
 public: // information
   /** \brief Get the number of occupied entries in the Name Tree
    */
   size_t
-  size() const;
+  size() const
+  {
+    return m_ht.size();
+  }
 
   /** \brief Get the number of buckets in the Name Tree (NPHT)
    *
@@ -66,18 +54,19 @@
    *  table, i.e., m_nBuckets.
    */
   size_t
-  getNBuckets() const;
+  getNBuckets() const
+  {
+    return m_ht.getNBuckets();
+  }
 
   /** \return Name Tree Entry on which a table entry is attached
    */
   template<typename ENTRY>
   shared_ptr<Entry>
-  getEntry(const ENTRY& tableEntry) const;
-
-  /** \brief Dump all the information stored in the Name Tree for debugging.
-   */
-  void
-  dump(std::ostream& output) const;
+  getEntry(const ENTRY& tableEntry) const
+  {
+    return tableEntry.m_nameTreeEntry.lock();
+  }
 
 public: // mutation
   /** \brief Look for the Name Tree Entry that contains this name prefix.
@@ -86,12 +75,12 @@
    *  number of name components by one each time. All non-existing Name Tree
    *  Entries will be created.
    *
-   *  \param prefix The querying name prefix.
+   *  \param name The querying name prefix.
    *  \return The pointer to the Name Tree Entry that contains this full name prefix.
    *  \note Existing iterators are unaffected.
    */
   shared_ptr<Entry>
-  lookup(const Name& prefix);
+  lookup(const Name& name);
 
   /** \brief get NameTree entry from FIB entry
    *
@@ -128,15 +117,23 @@
    *        it has no children, then repeats the same process on its ancestors.
    *  \note Existing iterators, except those pointing to deleted entries, are unaffected.
    */
+  size_t
+  eraseEntryIfEmpty(Entry* entry);
+
+  /// \deprecated
   bool
-  eraseEntryIfEmpty(shared_ptr<Entry> entry);
+  eraseEntryIfEmpty(shared_ptr<Entry> entry)
+  {
+    BOOST_ASSERT(entry != nullptr);
+    return this->eraseEntryIfEmpty(entry.get()) > 0;
+  }
 
 public: // matching
   /** \brief Exact match lookup for the given name prefix.
    *  \return nullptr if this prefix is not found; otherwise return the Name Tree Entry address
    */
   shared_ptr<Entry>
-  findExactMatch(const Name& prefix) const;
+  findExactMatch(const Name& name) const;
 
   /** \brief Longest prefix matching for the given name
    *
@@ -144,7 +141,7 @@
    *  by one each time, until an Entry is found.
    */
   shared_ptr<Entry>
-  findLongestPrefixMatch(const Name& prefix,
+  findLongestPrefixMatch(const Name& name,
                          const EntrySelector& entrySelector = AnyEntry()) const;
 
   shared_ptr<Entry>
@@ -217,86 +214,27 @@
    *  \note The returned iterator may get invalidated when NameTree is modified
    */
   const_iterator
-  begin() const;
+  begin() const
+  {
+    return fullEnumerate().begin();
+  }
 
   /** \brief Get an iterator referring to the past-the-end FIB entry
    *  \note Iteration order is implementation-specific and is undefined
    *  \note The returned iterator may get invalidated when NameTree is modified
    */
   const_iterator
-  end() const;
+  end() const
+  {
+    return Iterator();
+  }
 
 private:
-  /** \brief Create a Name Tree Entry if it does not exist, or return the existing
-   *         Name Tree Entry address.
-   *
-   *  Called by lookup() only.
-   *
-   *  \return The first item is the Name Tree Entry address, the second item is
-   *          a bool value indicates whether this is an old entry (false) or a new
-   *          entry (true).
-   */
-  std::pair<shared_ptr<Entry>, bool>
-  insert(const Name& prefix);
+  Hashtable m_ht;
 
-  /** \brief Resize the hash table size when its load factor reaches a threshold.
-   *
-   *  As we are currently using a hand-written hash table implementation
-   *  for the Name Tree, the hash table resize() function should be kept in the
-   *  name-tree.hpp file.
-   *  \param newNBuckets The number of buckets for the new hash table.
-   */
-  void
-  resize(size_t newNBuckets);
-
-private:
-  size_t m_nItems;  ///< Number of items being stored
-  size_t m_nBuckets; ///< Number of hash buckets
-  size_t m_minNBuckets; ///< Minimum number of hash buckets
-  double m_enlargeLoadFactor;
-  size_t m_enlargeThreshold;
-  int m_enlargeFactor;
-  double m_shrinkLoadFactor;
-  size_t m_shrinkThreshold;
-  double m_shrinkFactor;
-  Node** m_buckets; ///< Name Tree Buckets in the NPHT
-
-  friend class FullEnumerationImpl;
-  friend class PartialEnumerationImpl;
-  friend class PrefixMatchImpl;
+  friend class EnumerationImpl;
 };
 
-inline size_t
-NameTree::size() const
-{
-  return m_nItems;
-}
-
-inline size_t
-NameTree::getNBuckets() const
-{
-  return m_nBuckets;
-}
-
-template<typename ENTRY>
-inline shared_ptr<Entry>
-NameTree::getEntry(const ENTRY& tableEntry) const
-{
-  return tableEntry.m_nameTreeEntry.lock();
-}
-
-inline NameTree::const_iterator
-NameTree::begin() const
-{
-  return fullEnumerate().begin();
-}
-
-inline NameTree::const_iterator
-NameTree::end() const
-{
-  return Iterator();
-}
-
 } // namespace name_tree
 
 using name_tree::NameTree;
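
The matching API keeps its shared_ptr return values for now, but is implemented on top of the
Hashtable. A minimal sketch of exact and longest-prefix matching, assuming NFD's daemon/
include root:

    #include "table/name-tree.hpp"

    void
    matchingSketch()
    {
      nfd::NameTree nt;
      nt.lookup(ndn::Name("/A/B"));

      // /A/B/C was never inserted, so the exact match fails
      BOOST_ASSERT(nt.findExactMatch(ndn::Name("/A/B/C")) == nullptr);

      // longest prefix match tries prefixes from longest to shortest and stops at /A/B
      auto lpm = nt.findLongestPrefixMatch(ndn::Name("/A/B/C"));
      BOOST_ASSERT(lpm != nullptr && lpm->getName() == ndn::Name("/A/B"));
    }
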