/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/**
 * Copyright (c) 2014-2016,  Regents of the University of California,
 *                           Arizona Board of Regents,
 *                           Colorado State University,
 *                           University Pierre & Marie Curie, Sorbonne University,
 *                           Washington University in St. Louis,
 *                           Beijing Institute of Technology,
 *                           The University of Memphis.
 *
 * This file is part of NFD (Named Data Networking Forwarding Daemon).
 * See AUTHORS.md for complete list of NFD authors and contributors.
 *
 * NFD is free software: you can redistribute it and/or modify it under the terms
 * of the GNU General Public License as published by the Free Software Foundation,
 * either version 3 of the License, or (at your option) any later version.
 *
 * NFD is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
 * PURPOSE. See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * NFD, e.g., in COPYING.md file. If not, see <http://www.gnu.org/licenses/>.
 */

#include "name-tree-hashtable.hpp"
#include "name-tree-entry.hpp"
#include "core/logger.hpp"
#include "core/city-hash.hpp"

namespace nfd {
namespace name_tree {

NFD_LOG_INIT("NameTreeHashtable");

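// Hash32 and Hash64 wrap CityHash32 and CityHash64 behind the same static
// compute() interface, so that HashFunc below can select one of them at
// compile time based on the width of HashValue.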
class Hash32
{
public:
  static HashValue
  compute(const void* buffer, size_t length)
  {
    return static_cast<HashValue>(CityHash32(reinterpret_cast<const char*>(buffer), length));
  }
};

class Hash64
{
public:
  static HashValue
  compute(const void* buffer, size_t length)
  {
    return static_cast<HashValue>(CityHash64(reinterpret_cast<const char*>(buffer), length));
  }
};

/** \brief a type whose static \c compute method computes the hash value of a raw buffer
 */
typedef std::conditional<(sizeof(HashValue) > 4), Hash64, Hash32>::type HashFunc;

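// Computes the hash of the first prefixLen components of name (the whole name
// when prefixLen is negative) by XOR-combining the hash of each component's
// TLV encoding.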
HashValue
computeHash(const Name& name, ssize_t prefixLen)
{
  name.wireEncode(); // ensure wire buffer exists

  HashValue h = 0;
  for (size_t i = 0, last = prefixLen < 0 ? name.size() : prefixLen; i < last; ++i) {
    const name::Component& comp = name[i];
    h ^= HashFunc::compute(comp.wire(), comp.size());
  }
  return h;
}

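// Computes the hashes of every prefix of name: seq[i] is the hash of the first
// i components, so seq[0] corresponds to the empty prefix and seq[name.size()]
// to the full name. Each element extends the previous one with one more XOR,
// so seq[i] equals computeHash(name, i).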
HashSequence
computeHashes(const Name& name)
{
  name.wireEncode(); // ensure wire buffer exists

  HashSequence seq;
  seq.reserve(name.size() + 1);

  HashValue h = 0;
  seq.push_back(h);

  for (const name::Component& comp : name) {
    h ^= HashFunc::compute(comp.wire(), comp.size());
    seq.push_back(h);
  }
  return seq;
}

Node::Node(HashValue h, const Name& name)
  : hash(h)
  , prev(nullptr)
  , next(nullptr)
  , entry(make_shared<Entry>(name, this))
{
}

Node::~Node()
{
  BOOST_ASSERT(prev == nullptr);
  BOOST_ASSERT(next == nullptr);
}

Node*
getNode(const Entry& entry)
{
  return entry.m_node;
}

HashtableOptions::HashtableOptions(size_t size)
  : initialSize(size)
  , minSize(size)
{
}

Hashtable::Hashtable(const Options& options)
  : m_options(options)
  , m_size(0)
{
  BOOST_ASSERT(m_options.minSize > 0);
  BOOST_ASSERT(m_options.initialSize >= m_options.minSize);
  BOOST_ASSERT(m_options.expandLoadFactor > 0.0);
  BOOST_ASSERT(m_options.expandLoadFactor <= 1.0);
  BOOST_ASSERT(m_options.expandFactor > 1.0);
  BOOST_ASSERT(m_options.shrinkLoadFactor >= 0.0);
  BOOST_ASSERT(m_options.shrinkLoadFactor < 1.0);
  BOOST_ASSERT(m_options.shrinkFactor > 0.0);
  BOOST_ASSERT(m_options.shrinkFactor < 1.0);

  m_buckets.resize(options.initialSize);
  this->computeThresholds();
}

Hashtable::~Hashtable()
{
  for (size_t i = 0; i < m_buckets.size(); ++i) {
    foreachNode(m_buckets[i], [] (Node* node) {
      node->prev = node->next = nullptr;
      delete node;
    });
  }
}

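// Inserts node at the head of the doubly-linked collision chain of the given bucket.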
void
Hashtable::attach(size_t bucket, Node* node)
{
  node->prev = nullptr;
  node->next = m_buckets[bucket];

  if (node->next != nullptr) {
    BOOST_ASSERT(node->next->prev == nullptr);
    node->next->prev = node;
  }

  m_buckets[bucket] = node;
}

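// Unlinks node from its bucket's collision chain, updating the bucket head when
// node is the first element, and clears node's prev/next pointers.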
void
Hashtable::detach(size_t bucket, Node* node)
{
  if (node->prev != nullptr) {
    BOOST_ASSERT(node->prev->next == node);
    node->prev->next = node->next;
  }
  else {
    BOOST_ASSERT(m_buckets[bucket] == node);
    m_buckets[bucket] = node->next;
  }

  if (node->next != nullptr) {
    BOOST_ASSERT(node->next->prev == node);
    node->next->prev = node->prev;
  }

  node->prev = node->next = nullptr;
}

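// Looks up the prefixLen-component prefix of name in the bucket selected by hash h,
// comparing names only when the stored hash matches. On a miss with allowInsert,
// a new node is created and attached to the bucket, and the table expands once
// m_size exceeds the expand threshold. The returned bool tells whether a node
// was inserted.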
std::pair<const Node*, bool>
Hashtable::findOrInsert(const Name& name, size_t prefixLen, HashValue h, bool allowInsert)
{
  size_t bucket = this->computeBucketIndex(h);

  for (const Node* node = m_buckets[bucket]; node != nullptr; node = node->next) {
    if (node->hash == h && name.compare(0, prefixLen, node->entry->getName()) == 0) {
      NFD_LOG_TRACE("found " << name.getPrefix(prefixLen) << " hash=" << h << " bucket=" << bucket);
      return {node, false};
    }
  }

  if (!allowInsert) {
    NFD_LOG_TRACE("not-found " << name.getPrefix(prefixLen) << " hash=" << h << " bucket=" << bucket);
    return {nullptr, false};
  }

  Node* node = new Node(h, name.getPrefix(prefixLen));
  this->attach(bucket, node);
  NFD_LOG_TRACE("insert " << node->entry->getName() << " hash=" << h << " bucket=" << bucket);
  ++m_size;

  if (m_size > m_expandThreshold) {
    this->resize(static_cast<size_t>(m_options.expandFactor * this->getNBuckets()));
  }

  return {node, true};
}

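// Both find() overloads are read-only lookups: they delegate to findOrInsert()
// with allowInsert == false, so the const_cast does not lead to a modification.
// The second overload reuses a precomputed hash from a HashSequence instead of
// hashing the name again.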
const Node*
Hashtable::find(const Name& name, size_t prefixLen) const
{
  HashValue h = computeHash(name, prefixLen);
  return const_cast<Hashtable*>(this)->findOrInsert(name, prefixLen, h, false).first;
}

const Node*
Hashtable::find(const Name& name, size_t prefixLen, const HashSequence& hashes) const
{
  BOOST_ASSERT(hashes.at(prefixLen) == computeHash(name, prefixLen));
  return const_cast<Hashtable*>(this)->findOrInsert(name, prefixLen, hashes[prefixLen], false).first;
}

std::pair<const Node*, bool>
Hashtable::insert(const Name& name, size_t prefixLen, const HashSequence& hashes)
{
  BOOST_ASSERT(hashes.at(prefixLen) == computeHash(name, prefixLen));
  return this->findOrInsert(name, prefixLen, hashes[prefixLen], true);
}

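// Removes node from the hashtable and deletes it; the node's entry must already
// be detached from its parent. If the number of entries drops below the shrink
// threshold, the bucket array is shrunk, but never below minSize.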
void
Hashtable::erase(Node* node)
{
  BOOST_ASSERT(node != nullptr);
  BOOST_ASSERT(node->entry->getParent() == nullptr);

  size_t bucket = this->computeBucketIndex(node->hash);
  NFD_LOG_TRACE("erase " << node->entry->getName() << " hash=" << node->hash << " bucket=" << bucket);

  this->detach(bucket, node);
  delete node;
  --m_size;

  if (m_size < m_shrinkThreshold) {
    size_t newNBuckets = std::max(m_options.minSize,
                                  static_cast<size_t>(m_options.shrinkFactor * this->getNBuckets()));
    this->resize(newNBuckets);
  }
}

void
Hashtable::computeThresholds()
{
  m_expandThreshold = static_cast<size_t>(m_options.expandLoadFactor * this->getNBuckets());
  m_shrinkThreshold = static_cast<size_t>(m_options.shrinkLoadFactor * this->getNBuckets());
  NFD_LOG_TRACE("thresholds expand=" << m_expandThreshold << " shrink=" << m_shrinkThreshold);
}

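// Rehashes every node into a bucket array of newNBuckets buckets. Node hashes are
// cached in Node::hash, so only the bucket index is recomputed; no name component
// is hashed again. Expand/shrink thresholds are updated for the new size.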
void
Hashtable::resize(size_t newNBuckets)
{
  if (this->getNBuckets() == newNBuckets) {
    return;
  }
  NFD_LOG_DEBUG("resize from=" << this->getNBuckets() << " to=" << newNBuckets);

  std::vector<Node*> oldBuckets;
  oldBuckets.swap(m_buckets);
  m_buckets.resize(newNBuckets);

  for (Node* head : oldBuckets) {
    foreachNode(head, [this] (Node* node) {
      size_t bucket = this->computeBucketIndex(node->hash);
      this->attach(bucket, node);
    });
  }

  this->computeThresholds();
}

} // namespace name_tree
} // namespace nfd