catalog: add logging support and fix bugs

* replace raw std::cout output with user-friendly log messages
* fix help (usage) output
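* logging is optional: enable it at configure time with --with-log4cxx; log4cxx.properties is installed to ${SYSCONFDIR}/ndn-atmos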
refs: #3189
Change-Id: I33dcb1a9c806d01c6665e0ef941d5b4f1d64774f
diff --git a/.gitignore b/.gitignore
index 7679b3c..1efe831 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,14 @@
-.*
-!.git*
-**/*.pyc
+# Emacs temp files
+*~
+
+# Mac OSX
+.DS_*
+
+# waf build system
+.waf-1*
+.waf3-*
+.lock*
build/
-waf-*
-
+# Compiled python code
+*.pyc
diff --git a/.jenkins.d/20-build.sh b/.jenkins.d/20-build.sh
index 76f827e..1b0b972 100755
--- a/.jenkins.d/20-build.sh
+++ b/.jenkins.d/20-build.sh
@@ -2,6 +2,8 @@
set -x
set -e
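+# Pass --with-coverage to ./waf configure only when the Jenkins job name contains "code-coverage"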
+COVERAGE=$( python -c "print '--with-coverage' if 'code-coverage' in '$JOB_NAME' else ''" )
+
# Cleanup
sudo ./waf -j1 --color=yes distclean
@@ -20,7 +22,7 @@
sudo ./waf -j1 --color=yes distclean
# Configure/build in optimized mode
-./waf -j1 --color=yes configure --with-tests
+./waf -j1 --color=yes configure --with-tests $COVERAGE
./waf -j1 --color=yes build
# (tests will be run against optimized version)
diff --git a/.jenkins.d/40-coverage.sh b/.jenkins.d/40-coverage.sh
new file mode 100755
index 0000000..1c80a9c
--- /dev/null
+++ b/.jenkins.d/40-coverage.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+set -x
+set -e
+
+IS_COVR=$( python -c "print 'yes' if 'code-coverage' in '$JOB_NAME' else 'no'" )
+
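+# For code-coverage jobs, use gcovr to write a Cobertura-style XML report (build/coverage.xml) for catalog/src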
+if [[ $IS_COVR == "yes" ]]; then
+ BASE="`pwd | sed -e 's|/|\\\/|g'`\\"
+ (cd build && gcovr -x -f $BASE/catalog/src -r ../ -o coverage.xml ./)
+fi
diff --git a/.travis.yml b/.travis.yml
index 36518df..0969506 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,8 +9,8 @@
email:
on_success: always
on_failure: always
-env:
- global:
- - NODE_LABELS="Linux Ubuntu Ubuntu-12.04 Ubuntu-12.04-64bit Boost1.48"
script:
+ - if [[ $TRAVIS_OS_NAME == linux ]]; then export NODE_LABELS="Linux Ubuntu Ubuntu-12.04"; fi
+ - if [[ $TRAVIS_OS_NAME == osx ]]; then export NODE_LABELS="OSX OSX-10.10"; fi
+ - echo $NODE_LABELS
- ./.jenkins
diff --git a/.waf-tools/coverage.py b/.waf-tools/coverage.py
new file mode 100644
index 0000000..0a3db65
--- /dev/null
+++ b/.waf-tools/coverage.py
@@ -0,0 +1,24 @@
+# -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
+#
+# Copyright (c) 2014, Regents of the University of California
+#
+# GPL 3.0 license, see the COPYING.md file for more information
+
+from waflib import TaskGen
+
+def options(opt):
+ opt.add_option('--with-coverage', action='store_true', default=False, dest='with_coverage',
+ help='''Set compiler flags for gcc to enable code coverage information''')
+
+def configure(conf):
+ if conf.options.with_coverage:
+ conf.check_cxx(cxxflags=['-fprofile-arcs', '-ftest-coverage', '-fPIC'],
+ linkflags=['-fprofile-arcs'], uselib_store='GCOV', mandatory=True)
+
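+# Add the GCOV use-flag to every C/C++ task generator so all objects and
+# binaries get the profiling compile/link flags when coverage is enabled.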
+@TaskGen.feature('cxx','cc')
+@TaskGen.after('process_source')
+def add_coverage(self):
+ if getattr(self, 'use', ''):
+ self.use += ' GCOV'
+ else:
+ self.use = 'GCOV'
diff --git a/catalog/src/main.cpp b/catalog/src/main.cpp
index 4c85d8c..7655315 100644
--- a/catalog/src/main.cpp
+++ b/catalog/src/main.cpp
@@ -25,12 +25,15 @@
#include <getopt.h>
#include <ndn-cxx/face.hpp>
+#ifdef HAVE_LOG4CXX
+ INIT_LOGGER("atmos-catalog::Main");
+#endif
void
usage()
{
std::cout << "\n Usage:\n atmos-catalog "
- "[-h] [-f config file] "
+ "[-h] [-f config file] \n"
" [-f config file] - set the configuration file\n"
" [-h] - print help and exit\n"
"\n";
@@ -42,6 +45,10 @@
int option;
std::string configFile(DEFAULT_CONFIG_FILE);
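+  // Configure log4cxx from the installed properties file when logging support is compiled in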
+#ifdef HAVE_LOG4CXX
+ log4cxx::PropertyConfigurator::configure(LOG4CXX_CONFIG_FILE);
+#endif
+
while ((option = getopt(argc, argv, "f:h")) != -1) {
switch (option) {
case 'f':
@@ -73,8 +80,23 @@
catalogInstance.addAdapter(queryAdapter);
catalogInstance.addAdapter(publishAdapter);
- catalogInstance.initialize();
- face->processEvents();
+ try {
+ catalogInstance.initialize();
+ }
+ catch (std::exception& e) {
+ std::cout << e.what() << std::endl;
+ }
+
+#ifndef NDEBUG
+ try {
+#endif
+ face->processEvents();
+#ifndef NDEBUG
+ }
+ catch (std::exception& e) {
+ _LOG_DEBUG(e.what());
+ }
+#endif
return 0;
}
diff --git a/catalog/src/publish/publish-adapter.hpp b/catalog/src/publish/publish-adapter.hpp
index c331bf7..5dd9206 100644
--- a/catalog/src/publish/publish-adapter.hpp
+++ b/catalog/src/publish/publish-adapter.hpp
@@ -42,8 +42,13 @@
#include <unordered_map>
#include <mutex>
+#include "util/logger.hpp"
+
namespace atmos {
namespace publish {
+#ifdef HAVE_LOG4CXX
+ INIT_LOGGER("PublishAdapter");
+#endif
#define RETRY_WHEN_TIMEOUT 2
// TODO: need to use the configured nameFields
@@ -409,8 +414,7 @@
m_signingId = ndn::Name(signingId);
setCatalogId();
- m_syncPrefix.clear();
- m_syncPrefix.append(syncPrefix);
+ m_syncPrefix = syncPrefix;
util::ConnectionDetails mysqlId(dbServer, dbUser, dbPasswd, dbName);
initializeDatabase(mysqlId);
@@ -448,7 +452,7 @@
MySQLPerformQuery(m_databaseHandler, createSyncTable, util::CREATE,
success, errMsg);
if (!success)
- std::cout << errMsg << std::endl;
+ _LOG_DEBUG(errMsg);
// create SQL string for table creation, id, sha256, and name are columns that we need
std::stringstream ss;
@@ -465,7 +469,7 @@
success = false;
MySQLPerformQuery(m_databaseHandler, ss.str(), util::CREATE, success, errMsg);
if (!success)
- std::cout << errMsg << std::endl;
+ _LOG_DEBUG(errMsg);
}
else {
throw Error("cannot connect to the Database");
@@ -477,17 +481,20 @@
PublishAdapter<DatabaseHandler>::onPublishInterest(const ndn::InterestFilter& filter,
const ndn::Interest& interest)
{
+ _LOG_DEBUG(">> PublishAdapter::onPublishInterest");
+
// Example Interest : /cmip5/publish/<uri>/<nonce>
- std::cout << "Publish interest : " << interest.getName().toUri() << std::endl;
+ _LOG_DEBUG(interest.getName().toUri());
//send back ACK
- char buf[4] = "ACK";
+ char buf[] = "ACK";
std::shared_ptr<ndn::Data> data = std::make_shared<ndn::Data>(interest.getName());
data->setFreshnessPeriod(ndn::time::milliseconds(10)); // 10 msec
data->setContent(reinterpret_cast<const uint8_t*>(buf), strlen(buf));
m_keyChain->sign(*data);
m_face->put(*data);
- std::cout << "Sent ACK for " << interest.getName() << std::endl;
+
+ _LOG_DEBUG("ACK interest : " << interest.getName().toUri());
//TODO: if already in catalog, what do we do?
@@ -501,15 +508,17 @@
m_face->expressInterest(*retrieveInterest,
bind(&PublishAdapter<DatabaseHandler>::onPublishedData,
this,_1, _2),
- bind(&PublishAdapter<DatabaseHandler>::onTimeout, this, _1));
- std::cout << "Expressing Interest for: " << retrieveInterest->toUri() << std::endl;
+ bind(&publish::PublishAdapter<DatabaseHandler>::onTimeout, this, _1));
+
+ _LOG_DEBUG("Expressing Interest " << retrieveInterest->toUri());
+ _LOG_DEBUG("<< PublishAdapter::onPublishInterest");
}
template <typename DatabaseHandler>
void
PublishAdapter<DatabaseHandler>::onTimeout(const ndn::Interest& interest)
{
- std::cout << "interest " << interest.getName() << " timed out";
+ _LOG_DEBUG(interest.getName() << " timed out");
}
template <typename DatabaseHandler>
@@ -517,7 +526,7 @@
PublishAdapter<DatabaseHandler>::onValidationFailed(const std::shared_ptr<const ndn::Data>& data,
const std::string& failureInfo)
{
- std::cout << "Validation failed: " << data->getName() << failureInfo << std::endl;
+ _LOG_DEBUG(data->getName() << " validation failed: " << failureInfo);
}
template <typename DatabaseHandler>
@@ -525,7 +534,8 @@
PublishAdapter<DatabaseHandler>::onPublishedData(const ndn::Interest& interest,
const ndn::Data& data)
{
- std::cout << "received Data " << data.getName() << std::endl;
+ _LOG_DEBUG(">> PublishAdapter::onPublishedData");
+ _LOG_DEBUG("Data name: " << data.getName());
if (data.getContent().empty()) {
return;
}
@@ -538,13 +548,15 @@
void
PublishAdapter<DatabaseHandler>::validatePublishedDataPaylod(const ndn::shared_ptr<const ndn::Data>& data)
{
+ _LOG_DEBUG(">> PublishAdapter::onValidatePublishedDataPayload");
+
// validate published data payload, if failed, return
if (!validatePublicationChanges(data)) {
- std::cout << "data validation failed : " << data->getName() << std::endl;
+ _LOG_DEBUG("Data validation failed : " << data->getName());
#ifndef NDEBUG
const std::string payload(reinterpret_cast<const char*>(data->getContent().value()),
data->getContent().value_size());
- std::cout << payload << std::endl;
+ _LOG_DEBUG(payload);
#endif
return;
}
@@ -565,8 +577,9 @@
ndn::Name nextInterestName = data->getName().getPrefix(-1);
uint64_t incomingSegment = data->getName()[-1].toSegment();
incomingSegment++;
- std::cout << " Next Interest Name " << nextInterestName << " Segment " << incomingSegment
- << std::endl;
+
+ _LOG_DEBUG("Next Interest Name " << nextInterestName << " Segment " << incomingSegment);
+
std::shared_ptr<ndn::Interest> nextInterest =
std::make_shared<ndn::Interest>(nextInterestName.appendSegment(incomingSegment));
nextInterest->setInterestLifetime(ndn::time::milliseconds(4000));
@@ -583,6 +596,8 @@
void
PublishAdapter<DatabaseHandler>::processUpdateData(const ndn::shared_ptr<const ndn::Data>& data)
{
+ _LOG_DEBUG(">> PublishAdapter::processUpdateData");
+
const std::string payload(reinterpret_cast<const char*>(data->getContent().value()),
data->getContent().value_size());
@@ -596,16 +611,12 @@
Json::Reader jsonReader;
if (!jsonReader.parse(payload, parsedFromPayload)) {
// todo: logging events
- std::cout << "fail to parse the update data" << std::endl;
+ _LOG_DEBUG("Fail to parse the update data");
return;
}
- else {
- std::cout << "received Json format payload : "
- << parsedFromPayload.toStyledString() << std::endl;
- }
+
std::stringstream ss;
if (json2Sql(ss, parsedFromPayload, util::ADD)) {
- std::cout << "sql string to insert data : " << ss.str() << std::endl;
// todo: before use, check if the connection is not NULL
// we may need to use lock here to ensure thread safe
operateDatabase(ss.str(), util::ADD);
@@ -614,7 +625,6 @@
ss.str("");
ss.clear();
if (json2Sql(ss, parsedFromPayload, util::REMOVE)) {
- std::cout << "sql string to remove data: " << ss.str() << std::endl;
operateDatabase(ss.str(), util::REMOVE);
}
}
@@ -631,17 +641,17 @@
chronosync::SeqNo
PublishAdapter<MYSQL>::getLatestSeqNo(const chronosync::MissingDataInfo& update)
{
+ _LOG_DEBUG(">> PublishAdapter::getLatestSeqNo");
+
std::string sql = "SELECT seq_num FROM chronosync_update_info WHERE session_name = '"
+ update.session.toUri() + "';";
-#ifndef NDEBUG
- std::cout << "get latest seqNo : " << sql << std::endl;
-#endif
+
std::string errMsg;
bool success;
std::shared_ptr<MYSQL_RES> results
= atmos::util::MySQLPerformQuery(m_databaseHandler, sql, util::QUERY, success, errMsg);
if (!success) {
- std::cout << errMsg << std::endl;
+ _LOG_DEBUG(errMsg);
return 0; //database connection error?
}
else if (results != nullptr){
@@ -672,14 +682,14 @@
std::string sql = "UPDATE chronosync_update_info SET seq_num = "
+ boost::lexical_cast<std::string>(update.high)
+ " WHERE session_name = '" + update.session.toUri() + "';";
- std::cout << "renew update Info : " << sql << std::endl;
+
std::string errMsg;
bool success = false;
m_mutex.lock();
util::MySQLPerformQuery(m_databaseHandler, sql, util::UPDATE, success, errMsg);
m_mutex.unlock();
if (!success)
- std::cout << errMsg << std::endl;
+ _LOG_DEBUG(errMsg);
}
template <typename DatabaseHandler>
@@ -697,14 +707,13 @@
+ update.session.toUri() + "', " + boost::lexical_cast<std::string>(update.high)
+ ");";
- std::cout << "add update Info : " << sql << std::endl;
std::string errMsg;
bool success = false;
m_mutex.lock();
util::MySQLPerformQuery(m_databaseHandler, sql, util::ADD, success, errMsg);
m_mutex.unlock();
if (!success)
- std::cout << errMsg << std::endl;
+ _LOG_DEBUG(errMsg);
}
template <typename DatabaseHandler>
@@ -712,6 +721,7 @@
PublishAdapter<DatabaseHandler>::onFetchUpdateDataTimeout(const ndn::Interest& interest)
{
// todo: record event, and use recovery Interest to fetch the whole table
+ _LOG_DEBUG("UpdateData retrieval timed out: " << interest.getName());
}
template <typename DatabaseHandler>
@@ -719,6 +729,8 @@
PublishAdapter<DatabaseHandler>::processSyncUpdate(const std::vector<chronosync::MissingDataInfo>&
updates)
{
+ _LOG_DEBUG(">> PublishAdapter::processSyncUpdate");
+
if (updates.empty()) {
return;
}
@@ -740,7 +752,9 @@
bind(&PublishAdapter<DatabaseHandler>::onFetchUpdateDataTimeout,
this, _1),
RETRY_WHEN_TIMEOUT);
- std::cout << "send Interest for [" << updates[i].session << ":" << seq << "]" << std::endl;
+
+ _LOG_DEBUG("Interest for [" << updates[i].session << ":" << seq << "]");
+
update = true;
}
}
@@ -772,7 +786,7 @@
atmos::util::MySQLPerformQuery(m_databaseHandler, sql, op, success, errMsg);
m_mutex.unlock();
if (!success)
- std::cout << errMsg << std::endl;
+ _LOG_DEBUG(errMsg);
}
template<typename DatabaseHandler>
@@ -782,7 +796,6 @@
util::DatabaseOperation op)
{
if (jsonValue.type() != Json::objectValue) {
- std::cout << jsonValue.toStyledString() << "is not json object" << std::endl;
return false;
}
if (op == util::ADD) {
@@ -804,7 +817,7 @@
// cast might be overflowed
Json::Value item = jsonValue["add"][static_cast<int>(i)];
if (!item.isConvertibleTo(Json::stringValue)) {
- std::cout << "malformed JsonQuery string : " << item.toStyledString() << std::endl;
+ _LOG_DEBUG("malformed JsonQuery string");
return false;
}
std::string fileName(item.asString());
@@ -827,14 +840,14 @@
if (updateNumber <= 0)
return false;
- sqlString << "delete from " << m_databaseTable << " where name in (";
+ sqlString << "DELETE FROM " << m_databaseTable << " WHERE name IN (";
for (size_t i = 0; i < updateNumber; ++i) {
if (i > 0)
sqlString << ",";
// cast might be overflowed
Json::Value item = jsonValue["remove"][static_cast<int>(i)];
if (!item.isConvertibleTo(Json::stringValue)) {
- std::cout << "malformed JsonQuery string : " << item.toStyledString() << std::endl;
+ _LOG_DEBUG("Malformed JsonQuery");
return false;
}
std::string fileName(item.asString());
@@ -891,6 +904,8 @@
PublishAdapter<DatabaseHandler>::validatePublicationChanges(const
std::shared_ptr<const ndn::Data>& data)
{
+ _LOG_DEBUG(">> PublishAdapter::validatePublicationChanges");
+
// The data name must be "/<publisher-prefix>/<nonce>"
// the prefix is the data name removes the last component
ndn::Name publisherPrefix = data->getName().getPrefix(-1);
@@ -901,7 +916,7 @@
Json::Reader reader;
if (!reader.parse(payload, parsedFromString)) {
// parse error, log events
- std::cout << "Cannot parse the published data " << data->getName() << " into Json" << std::endl;
+ _LOG_DEBUG("Fail to parse the published Data" << data->getName());
return false;
}
diff --git a/catalog/src/query/query-adapter.hpp b/catalog/src/query/query-adapter.hpp
index 0a5ae0b..435ad60 100644
--- a/catalog/src/query/query-adapter.hpp
+++ b/catalog/src/query/query-adapter.hpp
@@ -51,8 +51,15 @@
#include <array>
#include <utility>
+#include "util/logger.hpp"
+
+
namespace atmos {
namespace query {
+#ifdef HAVE_LOG4CXX
+ INIT_LOGGER("QueryAdapter");
+#endif
+
// todo: calculate payload limit by get the size of a signed empty Data packet
static const size_t PAYLOAD_LIMIT = 7000;
@@ -455,17 +462,15 @@
QueryAdapter<DatabaseHandler>::onQueryInterest(const ndn::InterestFilter& filter,
const ndn::Interest& interest)
{
- // strictly enforce query initialization namespace.
- // Name should be our local prefix + "query" + parameters
+ _LOG_DEBUG(">> QueryAdapter::onQueryInterest");
+
if (interest.getName().size() != filter.getPrefix().size() + 1) {
// @todo: return a nack
return;
}
std::shared_ptr<const ndn::Interest> interestPtr = interest.shared_from_this();
-#ifndef NDEBUG
- std::cout << "incoming query interest : " << interestPtr->getName() << std::endl;
-#endif
+ _LOG_DEBUG("Interest : " << interestPtr->getName());
// @todo: use thread pool
std::thread queryThread(&QueryAdapter<DatabaseHandler>::runJsonQuery,
@@ -484,14 +489,14 @@
// now. In the future, this should check some form of
// InMemoryStorage.
-#ifndef NDEBUG
- std::cout << "incoming query-results interest : " << interest.toUri() << std::endl;
-#endif
+ _LOG_DEBUG(">> QueryAdapter::onQueryResultsInterest");
auto data = m_cache.find(interest.getName());
if (data) {
m_face->put(*data);
}
+
+ _LOG_DEBUG("<< QueryAdapter::onQueryResultsInterest");
}
template <typename DatabaseHandler>
@@ -499,11 +504,11 @@
QueryAdapter<DatabaseHandler>::onFiltersInitializationInterest(const ndn::InterestFilter& filter,
const ndn::Interest& interest)
{
+ _LOG_DEBUG(">> QueryAdapter::onFiltersInitializationInterest");
std::shared_ptr<const ndn::Interest> interestPtr = interest.shared_from_this();
-#ifndef NDEBUG
- std::cout << "incoming initialization interest : " << interestPtr->getName() << std::endl;
-#endif
+ _LOG_DEBUG("Interest : " << interestPtr->getName());
+
// TODO: save the content in memory, first check the memory, if not exists, start thread to generate it
// Note that if ChronoSync state changes, we need to clear the saved value, and regenerate it
@@ -517,12 +522,15 @@
interestPtr);
queryThread.join();
}
+
+ _LOG_DEBUG("<< QueryAdapter::onFiltersInitializationInterest");
}
template <typename DatabaseHandler>
void
QueryAdapter<DatabaseHandler>::populateFiltersMenu(std::shared_ptr<const ndn::Interest> interest)
{
+ _LOG_DEBUG(">> QueryAdapter::populateFiltersMenu");
Json::Value filters;
Json::FastWriter fastWriter;
getFiltersMenu(filters);
@@ -545,16 +553,16 @@
filterData->setContent(reinterpret_cast<const uint8_t*>(payload + startIndex), payloadLength);
signData(*filterData);
-#ifndef NDEBUG
- std::cout << "populate filter Data : " << segmentName << std::endl;
-#endif
+
+ _LOG_DEBUG("Populate Filter Data :" << segmentName);
+
m_mutex.lock();
m_cache.insert(*filterData);
try {
m_face->put(*filterData);
}// catch exceptions and log
catch (std::exception& e) {
- std::cout << e.what() << std::endl;
+ _LOG_DEBUG(e.what());
}
m_mutex.unlock();
@@ -575,6 +583,7 @@
m_face->put(*filterData);
m_mutex.unlock();
}
+ _LOG_DEBUG("<< QueryAdapter::populateFiltersMenu");
}
template <typename DatabaseHandler>
@@ -589,6 +598,7 @@
void
QueryAdapter<MYSQL>::getFiltersMenu(Json::Value& value)
{
+ _LOG_DEBUG(">> QueryAdapter::getFiltersMenu");
Json::Value tmp;
for (size_t i = 0; i < m_filterCategoryNames.size(); i++) {
@@ -602,7 +612,7 @@
= atmos::util::MySQLPerformQuery(m_databaseHandler, getFilterSql,
util::QUERY, success, errMsg);
if (!success) {
- std::cout << errMsg << std::endl;
+ _LOG_DEBUG(errMsg);
value.clear();
return;
}
@@ -615,9 +625,7 @@
tmp.clear();
}
-#ifndef NDEBUG
- std::cout << value.toStyledString() << std::endl;
-#endif
+ _LOG_DEBUG("<< QueryAdapter::getFiltersMenu");
}
template <typename DatabaseHandler>
@@ -663,7 +671,7 @@
signData(*ack);
#ifndef NDEBUG
- std::cout << "qurey-results data name in ACK : " << queryResultNameStr << std::endl;
+ _LOG_DEBUG(queryResultNameStr);
#endif
return ack;
}
@@ -681,7 +689,7 @@
signData(*nack);
#ifndef NDEBUG
- std::cout << "make NACK : " << ndn::Name(dataPrefix).appendSegment(segmentNo) << std::endl;
+ _LOG_DEBUG(ndn::Name(dataPrefix).appendSegment(segmentNo));
#endif
m_mutex.lock();
m_cache.insert(*nack);
@@ -694,11 +702,11 @@
QueryAdapter<DatabaseHandler>::json2Sql(std::stringstream& sqlQuery,
Json::Value& jsonValue)
{
+ _LOG_DEBUG(">> QueryAdapter::json2Sql");
#ifndef NDEBUG
- std::cout << "jsonValue in json2Sql: " << jsonValue.toStyledString() << std::endl;
+ _LOG_DEBUG(jsonValue.toStyledString());
#endif
if (jsonValue.type() != Json::objectValue) {
- std::cout << jsonValue.toStyledString() << "is not json object" << std::endl;
return false;
}
@@ -710,13 +718,13 @@
Json::Value value = (*iter);
if (key == Json::nullValue || value == Json::nullValue) {
- std::cout << "null key or value in JsonValue: " << jsonValue.toStyledString() << std::endl;
+ _LOG_DEBUG("null key or value in JsonValue");
return false;
}
// cannot convert to string
if (!key.isConvertibleTo(Json::stringValue) || !value.isConvertibleTo(Json::stringValue)) {
- std::cout << "malformed JsonQuery string : " << jsonValue.toStyledString() << std::endl;
+ _LOG_DEBUG("malformed JsonQuery string");
return false;
}
@@ -747,11 +755,11 @@
Json::Value& jsonValue,
bool& lastComponent)
{
+ _LOG_DEBUG(">> QueryAdapter::json2AutocompletionSql");
#ifndef NDEBUG
- std::cout << "jsonValue in json2AutocompletionSql: " << jsonValue.toStyledString() << std::endl;
+ _LOG_DEBUG(jsonValue.toStyledString());
#endif
if (jsonValue.type() != Json::objectValue) {
- std::cout << jsonValue.toStyledString() << "is not json object" << std::endl;
return false;
}
@@ -763,13 +771,13 @@
Json::Value value = (*iter);
if (key == Json::nullValue || value == Json::nullValue) {
- std::cout << "null key or value in JsonValue: " << jsonValue.toStyledString() << std::endl;
+ _LOG_DEBUG("null key or value in JsonValue");
return false;
}
// cannot convert to string
if (!key.isConvertibleTo(Json::stringValue) || !value.isConvertibleTo(Json::stringValue)) {
- std::cout << "malformed JsonQuery string : " << jsonValue.toStyledString() << std::endl;
+ _LOG_DEBUG("malformed JsonQuery string");
return false;
}
@@ -829,11 +837,11 @@
QueryAdapter<DatabaseHandler>::json2PrefixBasedSearchSql(std::stringstream& sqlQuery,
Json::Value& jsonValue)
{
+ _LOG_DEBUG(">> QueryAdapter::json2CompleteSearchSql");
#ifndef NDEBUG
- std::cout << "jsonValue in json2PrefixBasedSearchSql: " << jsonValue.toStyledString() << std::endl;
+ _LOG_DEBUG(jsonValue.toStyledString());
#endif
if (jsonValue.type() != Json::objectValue) {
- std::cout << jsonValue.toStyledString() << "is not json object" << std::endl;
return false;
}
@@ -845,13 +853,13 @@
Json::Value value = (*iter);
if (key == Json::nullValue || value == Json::nullValue) {
- std::cout << "null key or value in JsonValue: " << jsonValue.toStyledString() << std::endl;
+ _LOG_DEBUG("null key or value in JsonValue");
return false;
}
// cannot convert to string
if (!key.isConvertibleTo(Json::stringValue) || !value.isConvertibleTo(Json::stringValue)) {
- std::cout << "malformed JsonQuery string : " << jsonValue.toStyledString() << std::endl;
+ _LOG_DEBUG("malformed JsonQuery string");
return false;
}
@@ -913,6 +921,8 @@
void
QueryAdapter<DatabaseHandler>::runJsonQuery(std::shared_ptr<const ndn::Interest> interest)
{
+ _LOG_DEBUG(">> QueryAdapter::runJsonQuery");
+
// 1) Strip the prefix off the ndn::Interest's ndn::Name
// +1 to grab JSON component after "query" component
@@ -947,7 +957,7 @@
Json::Reader reader;
if (!reader.parse(jsonQuery, parsedFromString)) {
// @todo: send NACK?
- std::cout << "cannot parse the JsonQuery" << std::endl;
+ _LOG_DEBUG("cannot parse the JsonQuery");
return;
}
@@ -1027,8 +1037,9 @@
bool autocomplete,
bool lastComponent)
{
+ _LOG_DEBUG(">> QueryAdapter::prepareSegments");
#ifndef NDEBUG
- std::cout << "prepareSegments() executes sql : " << sqlString << std::endl;
+ _LOG_DEBUG(sqlString);
#endif
std::string errMsg;
bool success;
@@ -1036,10 +1047,10 @@
std::shared_ptr<MYSQL_RES> results
= atmos::util::MySQLPerformQuery(m_databaseHandler, sqlString, util::QUERY, success, errMsg);
if (!success)
- std::cout << errMsg << std::endl;
+ _LOG_DEBUG(errMsg);
if (!results) {
- std::cout << "null MYSQL_RES for query : " << sqlString << std::endl;
+ _LOG_DEBUG("NULL MYSQL_RES for" << sqlString);
// @todo: throw runtime error or log the error message?
return;
@@ -1048,11 +1059,7 @@
uint64_t resultCount = mysql_num_rows(results.get());
#ifndef NDEBUG
- std::cout << "Query results for \""
- << sqlString
- << "\" contain "
- << resultCount
- << " rows" << std::endl;
+ _LOG_DEBUG("Query resuls contain " << resultCount << "rows");
#endif
MYSQL_ROW row;
@@ -1113,9 +1120,9 @@
entry["lastComponent"] = Json::Value(true);
#ifndef NDEBUG
- std::cout << "resultCount " << resultCount
- << "; viewStart " << viewStart
- << "; viewEnd " << viewEnd << std::endl;
+ _LOG_DEBUG("resultCount " << resultCount << ";"
+ << "viewStart " << viewStart << ";"
+ << "viewEnd " << viewEnd);
#endif
if (isAutocomplete) {
@@ -1137,7 +1144,7 @@
data->setFinalBlockId(ndn::Name::Component::fromSegment(segmentNo));
}
#ifndef NDEBUG
- std::cout << "makeReplyData : " << segmentName << std::endl;
+ _LOG_DEBUG(segmentName);
#endif
signData(*data);
return data;
diff --git a/catalog/src/util/logger.hpp b/catalog/src/util/logger.hpp
new file mode 100644
index 0000000..3e2d8dd
--- /dev/null
+++ b/catalog/src/util/logger.hpp
@@ -0,0 +1,80 @@
+/* -*- Mode: C++; c-file-style: "gnu"; indent-tabs-mode:nil -*- */
+/*
+ * Copyright (c) 2012-2015 University of California, Los Angeles,
+ * 2015 Colorado State University
+ *
+ * This file is part of ChronoSync, synchronization library for distributed realtime
+ * applications for NDN.
+ *
+ * ChronoSync is free software: you can redistribute it and/or modify it under the terms
+ * of the GNU General Public License as published by the Free Software Foundation, either
+ * version 3 of the License, or (at your option) any later version.
+ *
+ * ChronoSync is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
+ * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * ChronoSync, e.g., in COPYING.md file. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * @author Zhenkai Zhu <http://irl.cs.ucla.edu/~zhenkai/>
+ * @author Chaoyi Bian <bcy@pku.edu.cn>
+ * @author Alexander Afanasyev <http://lasr.cs.ucla.edu/afanasyev/index.html>
+ * @author Chengyu Fan <chengyu@cs.colostate.edu>
+ */
+
+#ifndef ATMOS_CATALOG_LOGGER_HPP
+#define ATMOS_CATALOG_LOGGER_HPP
+
+#ifdef HAVE_LOG4CXX
+
+#include <log4cxx/logger.h>
+#include <log4cxx/propertyconfigurator.h>
+
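+// INIT_LOGGER(name) declares a file-scope log4cxx logger; the _LOG_* macros
+// below log through it. Without HAVE_LOG4CXX they compile to no-ops, except
+// _LOG_DEBUG, which falls back to std::clog in debug (_DEBUG) builds.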
+#define INIT_LOGGER(name) \
+ static log4cxx::LoggerPtr staticModuleLogger = log4cxx::Logger::getLogger(name)
+
+#define _LOG_DEBUG(x) \
+ LOG4CXX_DEBUG(staticModuleLogger, x)
+
+#define _LOG_TRACE(x) \
+ LOG4CXX_TRACE(staticModuleLogger, x)
+
+#define _LOG_FUNCTION(x) \
+ LOG4CXX_TRACE(staticModuleLogger, __FUNCTION__ << "(" << x << ")")
+
+#define _LOG_FUNCTION_NOARGS \
+ LOG4CXX_TRACE(staticModuleLogger, __FUNCTION__ << "()")
+
+#define _LOG_ERROR(x) \
+ LOG4CXX_ERROR(staticModuleLogger, x)
+
+#else // HAVE_LOG4CXX
+
+#define INIT_LOGGER(name)
+#define _LOG_FUNCTION(x)
+#define _LOG_FUNCTION_NOARGS
+#define _LOG_TRACE(x)
+#define INIT_LOGGERS(x)
+#define _LOG_ERROR(x)
+
+#ifdef _DEBUG
+
+#include <thread>
+#include <iostream>
+#include <ndn-cxx/util/time.hpp>
+
+// to print out messages, must be in debug mode
+#define _LOG_DEBUG(x) \
+ std::clog << ndn::time::system_clock::now() << " " << std::this_thread::get_id() << \
+ " " << x << std::endl
+
+#else // _DEBUG
+
+#define _LOG_DEBUG(x)
+
+#endif // _DEBUG
+
+#endif // HAVE_LOG4CXX
+
+#endif // ATMOS_CATALOG_LOGGER_HPP
diff --git a/catalog/tests/unit-tests/publish/test-publish-adapter.cpp b/catalog/tests/unit-tests/publish/test-publish-adapter.cpp
index bc47970..8ff0486 100644
--- a/catalog/tests/unit-tests/publish/test-publish-adapter.cpp
+++ b/catalog/tests/unit-tests/publish/test-publish-adapter.cpp
@@ -280,7 +280,7 @@
ss.str("");
ss.clear();
- std::string expectRes2 = "delete from cmip5 where name in ('ndn:/1/2/3/4/5/6/7/8/9/10',\
+ std::string expectRes2 = "DELETE FROM cmip5 WHERE name IN ('ndn:/1/2/3/4/5/6/7/8/9/10',\
'/a/b/c/d','/test/for/remove');";
BOOST_CHECK_EQUAL(publishAdapterTest1.testJson2Sql(ss, testJson, util::REMOVE), true);
BOOST_CHECK_EQUAL(ss.str(), expectRes2);
diff --git a/log4cxx.properties b/log4cxx.properties
new file mode 100644
index 0000000..3144df3
--- /dev/null
+++ b/log4cxx.properties
@@ -0,0 +1,22 @@
+# Set root logger level to ERROR and its only appender to R.
+log4j.rootLogger=ERROR, R
+
+log4j.appender.R=org.apache.log4j.RollingFileAppender
+log4j.appender.R.File=example.log
+log4j.appender.R.MaxFileSize=100KB
+
+log4j.appender.R.layout=org.apache.log4j.PatternLayout
+log4j.appender.R.layout.ConversionPattern=%d{hh:mm:ss,SSS} %-14t %-14c %m%n
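+# Pattern fields: %d timestamp, %t thread name, %c logger name, %m message, %n newline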
+
+# Alternative configuration: R as a ConsoleAppender.
+#log4j.appender.R=org.apache.log4j.ConsoleAppender
+
+# R uses PatternLayout.
+#log4j.appender.R.layout=org.apache.log4j.PatternLayout
+#log4j.appender.R.target=System.err
+#log4j.appender.R.layout.ConversionPattern=%d{dd-MMM HH:MM:SS,SSS} %p %c %m%n
+#log4j.appender.R.layout.ConversionPattern=%d{hh:mm:ss,SSS} %-14t %-14c %m%n
+#log4j.appender.R.layout.ConversionPattern=%d{ss,SSS} %-12c %m%n
+
+log4j.logger.QueryAdapter = DEBUG
+log4j.logger.PublishAdapter = DEBUG
diff --git a/wscript b/wscript
index 28c51be..6b3c33a 100644
--- a/wscript
+++ b/wscript
@@ -33,11 +33,14 @@
def options(opt):
opt.load(['compiler_cxx', 'gnu_dirs'])
- opt.load(['default-compiler-flags', 'boost'],
+ opt.load(['default-compiler-flags', 'boost', 'coverage'],
tooldir=['.waf-tools'])
opt = opt.add_option_group('ndn-atmos Options')
+ opt.add_option('--with-log4cxx', action='store_true', default=False, dest='log4cxx',
+ help='''Compile with log4cxx''')
+
opt.add_option('--with-tests', action='store_true', default=False,
dest='with_tests', help='''build unit tests''')
@@ -65,6 +68,11 @@
conf.check_cfg(path='mysql_config', args=['--cflags', '--libs'], package='',
uselib_store='MYSQL', mandatory=True)
+
+ if conf.options.log4cxx:
+ conf.check_cfg(package='liblog4cxx', args=['--cflags', '--libs'], uselib_store='LOG4CXX',
+ mandatory=True)
+
boost_libs = 'system random thread filesystem'
if conf.options.with_tests:
@@ -79,7 +87,10 @@
" (http://redmine.named-data.net/projects/nfd/wiki/Boost_FAQ)")
return
+ conf.load('coverage')
+
conf.define('DEFAULT_CONFIG_FILE', '%s/ndn-atmos/catalog.conf' % conf.env['SYSCONFDIR'])
+ conf.define('LOG4CXX_CONFIG_FILE', '%s/ndn-atmos/log4cxx.properties' % conf.env['SYSCONFDIR'])
conf.write_config_header('config.hpp')
@@ -90,7 +101,7 @@
features='cxx',
source=bld.path.ant_glob(['catalog/src/**/*.cpp'],
excl=['catalog/src/main.cpp']),
- use='NDN_CXX BOOST JSON MYSQL SYNC',
+ use='NDN_CXX BOOST JSON MYSQL SYNC LOG4CXX',
includes='catalog/src .',
export_includes='catalog/src .'
)
@@ -114,3 +125,10 @@
target='catalog.conf.sample',
install_path="${SYSCONFDIR}/ndn-atmos"
)
+
+ bld(
+ features="subst",
+ source='log4cxx.properties',
+ target='log4cxx.properties',
+ install_path="${SYSCONFDIR}/ndn-atmos"
+ )