Commit 9cd9d6bb, authored by 栩xx

Performance optimization: switched the JSON parsing library to rapidjson; completed part of the performance testing

Parent a2269759
This file type cannot be previewed
@@ -46,69 +46,86 @@ void OlympicProgram::resolveInput()
 //Purpose: handles the output when the command is "total"
 void OlympicProgram::outputTotal()
 {
-    ifstream myfile("./data/total.json");
+    Document d; //document tree
     ofstream outfile(this->output, ios::app);
-    Json::Reader reader;
-    Json::Value root;
-    if (!myfile.is_open())
-    {
-        cout << "未成功打开文件" << endl;
-        return;
-    }
-    if (reader.parse(myfile, root))
-    {
-        int num = root["data"]["total"].asInt();
-        Json::Value node = root["data"]["medalsList"];
-        for (int i = 0; i < num; i++)
-        {
-            outfile << "rank" << (i + 1) << ':' << node[i]["countryid"].asString() << '\n';
-            outfile << "gold:" << node[i]["gold"].asString() << '\n';
-            outfile << "silver:" << node[i]["silver"].asString() << '\n';
-            outfile << "bronze:" << node[i]["bronze"].asString() << '\n';
-            outfile << "total:" << node[i]["count"].asString() << '\n';
-            outfile << "--------------------------------------------------" << '\n';
-        }
-    }
-    myfile.close();
+    if (d.Parse(ReadFile("./data/total.json").c_str()).HasParseError())
+        throw string("解析错误!\n");
+    const rapidjson::Value& data = d["data"];                 //the "data" member
+    const rapidjson::Value& medalsList = data["medalsList"];  //the "medalsList" member
+    string os;
+    for (unsigned int i = 0; i < medalsList.Size(); i++)
+    {
+        //iterators to the members needed for output
+        rapidjson::Value::ConstMemberIterator rank = medalsList[i].FindMember("rank");
+        rapidjson::Value::ConstMemberIterator countryid = medalsList[i].FindMember("countryid");
+        rapidjson::Value::ConstMemberIterator gold = medalsList[i].FindMember("gold");
+        rapidjson::Value::ConstMemberIterator silver = medalsList[i].FindMember("silver");
+        rapidjson::Value::ConstMemberIterator bronze = medalsList[i].FindMember("bronze");
+        rapidjson::Value::ConstMemberIterator count = medalsList[i].FindMember("count");
+
+        os = (string)"rank" + rank->value.GetString() + ':' + countryid->value.GetString() + '\n'
+            + "gold:" + gold->value.GetString() + '\n'
+            + "silver:" + silver->value.GetString() + '\n'
+            + "bronze:" + bronze->value.GetString() + '\n'
+            + "total:" + count->value.GetString() + '\n'
+            + "-----" + '\n';
+        outfile << os;
+    }
     outfile.close();
 }
+
+string ReadFile(string filename)
+{
+    ifstream fin(filename.c_str(), ios::in);
+    string str, t;
+    if (!fin)
+        throw string("文件打开失败!\n");
+    while (getline(fin, t))
+        str += t;
+    fin.close();
+    return str;
+}
 //Entry: competition date
 //Return value: none
 //Purpose: handles the output when the command is a specific competition date
 void OlympicProgram::outputSchedule(string date)
 {
+    Document d; //document tree
     ofstream outfile(this->output, ios::app);
-    ifstream myfile("./data/"+date+".json");
-    Json::Reader reader;
-    Json::Value root;
-    if (!myfile.is_open())
-    {
-        cout << "未成功打开文件" << endl;
-        return;
-    }
-    if (reader.parse(myfile, root))
-    {
-        int num = root["data"]["total"].asInt();
-        Json::Value node = root["data"]["matchList"];
-        for (int i = 0; i < num; i++)
-        {
-            string time = node[i]["startdatecn"].asString().substr(11, 5);
-            string homename = node[i]["homename"].asString();
-            string awayname = node[i]["awayname"].asString();
-            outfile << "time:" << time << '\n';
-            outfile << "sport:" << node[i]["itemcodename"].asString() << '\n';
-            if (homename.empty() & awayname.empty())
-                outfile << "name:" << node[i]["title"].asString() << '\n';
-            else
-                outfile << "name:" << node[i]["title"].asString() << " " << homename << "vs" << awayname << '\n';
-            outfile << "venue:" << node[i]["venuename"].asString() << '\n';
-            outfile << "--------------------------------------------------" << '\n';
-        }
-    }
-    myfile.close();
+    if (d.Parse(ReadFile("./data/" + date + ".json").c_str()).HasParseError())
+        throw string("解析错误!\n");
+    const rapidjson::Value& data = d["data"];               //the "data" member
+    const rapidjson::Value& matchList = data["matchList"];  //the "matchList" member
+    string os;
+    for (unsigned int i = 0; i < matchList.Size(); i++)
+    {
+        //iterators to the members needed for output
+        rapidjson::Value::ConstMemberIterator startdatecn = matchList[i].FindMember("startdatecn");
+        rapidjson::Value::ConstMemberIterator itemcodename = matchList[i].FindMember("itemcodename");
+        rapidjson::Value::ConstMemberIterator title = matchList[i].FindMember("title");
+        rapidjson::Value::ConstMemberIterator venuename = matchList[i].FindMember("venuename");
+        rapidjson::Value::ConstMemberIterator t1 = matchList[i].FindMember("homename");
+        rapidjson::Value::ConstMemberIterator t2 = matchList[i].FindMember("awayname");
+
+        string homename = t1->value.GetString();
+        string awayname = t2->value.GetString();
+        string time = startdatecn->value.GetString();
+
+        os = (string)"time:" + time.substr(11, 5) + '\n'
+            + "sport:" + itemcodename->value.GetString() + '\n';
+        if (homename.empty() & awayname.empty())
+            os += (string)"name:" + title->value.GetString() + '\n';
+        else
+            os += (string)"name:" + title->value.GetString() + " " + homename + "vs" + awayname + '\n';
+        os += (string)"venue:" + venuename->value.GetString() + '\n' + "-----" + '\n';
+        outfile << os;
+    }
     outfile.close();
 }
 //Entry: none
@@ -118,7 +135,7 @@ void OlympicProgram::outputWrong(string wrong)
 {
     ofstream outfile(this->output, ios::app);
     outfile << wrong << '\n';
-    outfile << "--------------------------------------------------" << '\n';
+    outfile << "-----" << '\n';
     outfile.close();
 }
......
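Note on the rapidjson usage above: the new code dereferences the iterators returned by FindMember() and calls GetString() without first checking that each member exists and is a string, and a parse failure is reported only as a generic "解析错误" string. The helpers below are a minimal sketch of a more defensive pattern, assuming the same vendored rapidjson headers this commit adds; GetStringField and ParseOrThrow are illustrative names and are not part of the committed code.

#include <string>
#include "rapidjson-master/include/rapidjson/document.h"
#include "rapidjson-master/include/rapidjson/error/en.h"

// Returns obj[name] as a std::string, or an empty string when the member is
// missing or not a string, instead of dereferencing a possibly invalid iterator.
static std::string GetStringField(const rapidjson::Value& obj, const char* name)
{
    rapidjson::Value::ConstMemberIterator it = obj.FindMember(name);
    if (it == obj.MemberEnd() || !it->value.IsString())
        return "";
    return it->value.GetString();
}

// Parses a JSON string and throws a message naming the rapidjson error code
// and byte offset, rather than a bare "parse error" string.
static void ParseOrThrow(rapidjson::Document& d, const std::string& json)
{
    if (d.Parse(json.c_str()).HasParseError())
        throw std::string(rapidjson::GetParseError_En(d.GetParseError()))
            + " (offset " + std::to_string(d.GetErrorOffset()) + ")";
}

With helpers like these, outputTotal() could call ParseOrThrow(d, ReadFile("./data/total.json")) and then read each field with GetStringField(medalsList[i], "gold"), keeping the loop body free of unchecked iterator dereferences.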
@@ -2,8 +2,13 @@
 #include <iostream>
 #include <fstream>
 #include <string>
-#include "include/json/json.h"
+//#include "include/json/json.h"
+#include "rapidjson-master/include/rapidjson/document.h"
+#include "rapidjson-master/include/rapidjson/writer.h"
+#include "rapidjson-master/include/rapidjson/stringbuffer.h"
 using namespace std;
+using namespace rapidjson;
 //main program class
 class OlympicProgram {
 private:
@@ -18,4 +23,5 @@ public:
 };
 string solveOrder(string order);
 bool isTrueDate(string date);
+string ReadFile(string filename);
@@ -9,6 +9,18 @@ int main(int argc, char* argv[])
 	//OlympicProgram test(argv[1], argv[2]);
 	OlympicProgram test("input.txt", "output.txt");
 	test.resolveInput();
+	/*ofstream outfile("input.txt");
+	for (int i = 0; i < 100; i++)
+	{
+		outfile << "total" << '\n';
+		for (int i = 202; i <= 220; i++)
+		{
+			outfile << "schedule 0" << i << '\n';
+		}
+	}
+	outfile.close();*/
 	return 0;
 }
......
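The commented-out block added to main() above generates a stress-test input file, which matches the "partial performance testing" mentioned in the commit message. The measurement code itself is not part of the commit; the snippet below is only a sketch of how the parse cost could be timed with <chrono>, assuming the vendored rapidjson include path and an arbitrary sample file and iteration count.

#include <chrono>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include "rapidjson-master/include/rapidjson/document.h"

int main()
{
    // Read the whole file into memory once so that only parsing is timed.
    std::ifstream fin("./data/total.json");
    std::stringstream ss;
    ss << fin.rdbuf();
    const std::string json = ss.str();

    const int iterations = 1000;
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < iterations; ++i)
    {
        rapidjson::Document d;
        if (d.Parse(json.c_str()).HasParseError())
            return 1;   // stop if the sample file is malformed
    }
    auto end = std::chrono::steady_clock::now();

    auto us = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
    std::cout << "average parse time: " << (us / iterations) << " us" << std::endl;
    return 0;
}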
// Copyright 2007-2010 Baptiste Lepilleur
// Distributed under MIT license, or public domain if desired and
// recognized in your jurisdiction.
// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
#ifndef CPPTL_JSON_ASSERTIONS_H_INCLUDED
#define CPPTL_JSON_ASSERTIONS_H_INCLUDED
#include <stdlib.h>
#include <sstream>
#if !defined(JSON_IS_AMALGAMATION)
#include "config.h"
#endif // if !defined(JSON_IS_AMALGAMATION)
/** It should not be possible for a maliciously designed file to
* cause an abort() or seg-fault, so these macros are used only
* for pre-condition violations and internal logic errors.
*/
#if JSON_USE_EXCEPTION
// @todo <= add detail about condition in exception
# define JSON_ASSERT(condition) \
{if (!(condition)) {Json::throwLogicError( "assert json failed" );}}
# define JSON_FAIL_MESSAGE(message) \
{ \
std::ostringstream oss; oss << message; \
Json::throwLogicError(oss.str()); \
abort(); \
}
#else // JSON_USE_EXCEPTION
# define JSON_ASSERT(condition) assert(condition)
// The call to assert() will show the failure message in debug builds. In
// release builds we abort, for a core-dump or debugger.
# define JSON_FAIL_MESSAGE(message) \
{ \
std::ostringstream oss; oss << message; \
assert(false && oss.str().c_str()); \
abort(); \
}
#endif
#define JSON_ASSERT_MESSAGE(condition, message) \
if (!(condition)) { \
JSON_FAIL_MESSAGE(message); \
}
#endif // CPPTL_JSON_ASSERTIONS_H_INCLUDED
// Copyright 2007-2010 Baptiste Lepilleur
// Distributed under MIT license, or public domain if desired and
// recognized in your jurisdiction.
// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
#ifndef JSON_AUTOLINK_H_INCLUDED
#define JSON_AUTOLINK_H_INCLUDED
#include "config.h"
#ifdef JSON_IN_CPPTL
#include <cpptl/cpptl_autolink.h>
#endif
#if !defined(JSON_NO_AUTOLINK) && !defined(JSON_DLL_BUILD) && \
!defined(JSON_IN_CPPTL)
#define CPPTL_AUTOLINK_NAME "json"
#undef CPPTL_AUTOLINK_DLL
#ifdef JSON_DLL
#define CPPTL_AUTOLINK_DLL
#endif
#include "autolink.h"
#endif
#endif // JSON_AUTOLINK_H_INCLUDED
// Copyright 2007-2010 Baptiste Lepilleur
// Distributed under MIT license, or public domain if desired and
// recognized in your jurisdiction.
// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
#ifndef JSON_CONFIG_H_INCLUDED
#define JSON_CONFIG_H_INCLUDED
/// If defined, indicates that json library is embedded in CppTL library.
//# define JSON_IN_CPPTL 1
/// If defined, indicates that json may leverage CppTL library
//# define JSON_USE_CPPTL 1
/// If defined, indicates that cpptl vector based map should be used instead of
/// std::map
/// as Value container.
//# define JSON_USE_CPPTL_SMALLMAP 1
// If non-zero, the library uses exceptions to report bad input instead of C
// assertion macros. The default is to use exceptions.
#ifndef JSON_USE_EXCEPTION
#define JSON_USE_EXCEPTION 1
#endif
/// If defined, indicates that the source file is amalgated
/// to prevent private header inclusion.
/// Remarks: it is automatically defined in the generated amalgated header.
// #define JSON_IS_AMALGAMATION
#ifdef JSON_IN_CPPTL
#include <cpptl/config.h>
#ifndef JSON_USE_CPPTL
#define JSON_USE_CPPTL 1
#endif
#endif
#ifdef JSON_IN_CPPTL
#define JSON_API CPPTL_API
#elif defined(JSON_DLL_BUILD)
#if defined(_MSC_VER)
#define JSON_API __declspec(dllexport)
#define JSONCPP_DISABLE_DLL_INTERFACE_WARNING
#endif // if defined(_MSC_VER)
#elif defined(JSON_DLL)
#if defined(_MSC_VER)
#define JSON_API __declspec(dllimport)
#define JSONCPP_DISABLE_DLL_INTERFACE_WARNING
#endif // if defined(_MSC_VER)
#endif // ifdef JSON_IN_CPPTL
#if !defined(JSON_API)
#define JSON_API
#endif
#if !defined(JSON_HAS_UNIQUE_PTR)
#if __cplusplus >= 201103L
#define JSON_HAS_UNIQUE_PTR (1)
#elif _MSC_VER >= 1600
#define JSON_HAS_UNIQUE_PTR (1)
#else
#define JSON_HAS_UNIQUE_PTR (0)
#endif
#endif
// If JSON_NO_INT64 is defined, then Json only support C++ "int" type for
// integer
// Storages, and 64 bits integer support is disabled.
// #define JSON_NO_INT64 1
#if defined(_MSC_VER) && _MSC_VER <= 1200 // MSVC 6
// Microsoft Visual Studio 6 only support conversion from __int64 to double
// (no conversion from unsigned __int64).
#define JSON_USE_INT64_DOUBLE_CONVERSION 1
// Disable warning 4786 for VS6 caused by STL (identifier was truncated to '255'
// characters in the debug information)
// All projects I've ever seen with VS6 were using this globally (not bothering
// with pragma push/pop).
#pragma warning(disable : 4786)
#endif // if defined(_MSC_VER) && _MSC_VER < 1200 // MSVC 6
#if defined(_MSC_VER) && _MSC_VER >= 1500 // MSVC 2008
/// Indicates that the following function is deprecated.
#define JSONCPP_DEPRECATED(message) __declspec(deprecated(message))
#elif defined(__clang__) && defined(__has_feature)
#if __has_feature(attribute_deprecated_with_message)
#define JSONCPP_DEPRECATED(message) __attribute__ ((deprecated(message)))
#endif
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
#define JSONCPP_DEPRECATED(message) __attribute__ ((deprecated(message)))
#elif defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
#define JSONCPP_DEPRECATED(message) __attribute__((__deprecated__))
#endif
#if !defined(JSONCPP_DEPRECATED)
#define JSONCPP_DEPRECATED(message)
#endif // if !defined(JSONCPP_DEPRECATED)
namespace Json {
typedef int Int;
typedef unsigned int UInt;
#if defined(JSON_NO_INT64)
typedef int LargestInt;
typedef unsigned int LargestUInt;
#undef JSON_HAS_INT64
#else // if defined(JSON_NO_INT64)
// For Microsoft Visual use specific types as long long is not supported
#if defined(_MSC_VER) // Microsoft Visual Studio
typedef __int64 Int64;
typedef unsigned __int64 UInt64;
#else // if defined(_MSC_VER) // Other platforms, use long long
typedef long long int Int64;
typedef unsigned long long int UInt64;
#endif // if defined(_MSC_VER)
typedef Int64 LargestInt;
typedef UInt64 LargestUInt;
#define JSON_HAS_INT64
#endif // if defined(JSON_NO_INT64)
} // end namespace Json
#endif // JSON_CONFIG_H_INCLUDED
// Copyright 2007-2010 Baptiste Lepilleur
// Distributed under MIT license, or public domain if desired and
// recognized in your jurisdiction.
// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
#ifndef CPPTL_JSON_FEATURES_H_INCLUDED
#define CPPTL_JSON_FEATURES_H_INCLUDED
#if !defined(JSON_IS_AMALGAMATION)
#include "forwards.h"
#endif // if !defined(JSON_IS_AMALGAMATION)
namespace Json {
/** \brief Configuration passed to reader and writer.
* This configuration object can be used to force the Reader or Writer
* to behave in a standard conforming way.
*/
class JSON_API Features {
public:
/** \brief A configuration that allows all features and assumes all strings
* are UTF-8.
* - C & C++ comments are allowed
* - Root object can be any JSON value
* - Assumes Value strings are encoded in UTF-8
*/
static Features all();
/** \brief A configuration that is strictly compatible with the JSON
* specification.
* - Comments are forbidden.
* - Root object must be either an array or an object value.
* - Assumes Value strings are encoded in UTF-8
*/
static Features strictMode();
/** \brief Initialize the configuration like JsonConfig::allFeatures;
*/
Features();
/// \c true if comments are allowed. Default: \c true.
bool allowComments_;
/// \c true if root must be either an array or an object value. Default: \c
/// false.
bool strictRoot_;
};
} // namespace Json
#endif // CPPTL_JSON_FEATURES_H_INCLUDED
// Copyright 2007-2010 Baptiste Lepilleur
// Distributed under MIT license, or public domain if desired and
// recognized in your jurisdiction.
// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
#ifndef JSON_FORWARDS_H_INCLUDED
#define JSON_FORWARDS_H_INCLUDED
#if !defined(JSON_IS_AMALGAMATION)
#include "config.h"
#endif // if !defined(JSON_IS_AMALGAMATION)
namespace Json {
// writer.h
class FastWriter;
class StyledWriter;
// reader.h
class Reader;
// features.h
class Features;
// value.h
typedef unsigned int ArrayIndex;
class StaticString;
class Path;
class PathArgument;
class Value;
class ValueIteratorBase;
class ValueIterator;
class ValueConstIterator;
} // namespace Json
#endif // JSON_FORWARDS_H_INCLUDED
// Copyright 2007-2010 Baptiste Lepilleur
// Distributed under MIT license, or public domain if desired and
// recognized in your jurisdiction.
// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
#ifndef JSON_JSON_H_INCLUDED
#define JSON_JSON_H_INCLUDED
#include "autolink.h"
#include "value.h"
#include "reader.h"
#include "writer.h"
#include "features.h"
#endif // JSON_JSON_H_INCLUDED
// Copyright 2007-2010 Baptiste Lepilleur
// Distributed under MIT license, or public domain if desired and
// recognized in your jurisdiction.
// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
#ifndef CPPTL_JSON_READER_H_INCLUDED
#define CPPTL_JSON_READER_H_INCLUDED
#if !defined(JSON_IS_AMALGAMATION)
#include "features.h"
#include "value.h"
#endif // if !defined(JSON_IS_AMALGAMATION)
#include <deque>
#include <iosfwd>
#include <stack>
#include <string>
#include <istream>
// Disable warning C4251: <data member>: <type> needs to have dll-interface to
// be used by...
#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
#pragma warning(push)
#pragma warning(disable : 4251)
#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
namespace Json {
/** \brief Unserialize a <a HREF="http://www.json.org">JSON</a> document into a
*Value.
*
* \deprecated Use CharReader and CharReaderBuilder.
*/
class JSON_API Reader {
public:
typedef char Char;
typedef const Char* Location;
/** \brief Constructs a Reader allowing all features
* for parsing.
*/
Reader();
/** \brief Constructs a Reader allowing the specified feature set
* for parsing.
*/
Reader(const Features& features);
/** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a>
* document.
* \param document UTF-8 encoded string containing the document to read.
* \param root [out] Contains the root value of the document if it was
* successfully parsed.
* \param collectComments \c true to collect comment and allow writing them
* back during
* serialization, \c false to discard comments.
* This parameter is ignored if
* Features::allowComments_
* is \c false.
* \return \c true if the document was successfully parsed, \c false if an
* error occurred.
*/
bool
parse(const std::string& document, Value& root, bool collectComments = true);
/** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a>
document.
* \param beginDoc Pointer on the beginning of the UTF-8 encoded string of the
document to read.
* \param endDoc Pointer on the end of the UTF-8 encoded string of the
document to read.
* Must be >= beginDoc.
* \param root [out] Contains the root value of the document if it was
* successfully parsed.
* \param collectComments \c true to collect comment and allow writing them
back during
* serialization, \c false to discard comments.
* This parameter is ignored if
Features::allowComments_
* is \c false.
* \return \c true if the document was successfully parsed, \c false if an
error occurred.
*/
bool parse(const char* beginDoc,
const char* endDoc,
Value& root,
bool collectComments = true);
/// \brief Parse from input stream.
/// \see Json::operator>>(std::istream&, Json::Value&).
bool parse(std::istream& is, Value& root, bool collectComments = true);
/** \brief Returns a user friendly string that list errors in the parsed
* document.
* \return Formatted error message with the list of errors with their location
* in
* the parsed document. An empty string is returned if no error
* occurred
* during parsing.
* \deprecated Use getFormattedErrorMessages() instead (typo fix).
*/
JSONCPP_DEPRECATED("Use getFormattedErrorMessages() instead.")
std::string getFormatedErrorMessages() const;
/** \brief Returns a user friendly string that list errors in the parsed
* document.
* \return Formatted error message with the list of errors with their location
* in
* the parsed document. An empty string is returned if no error
* occurred
* during parsing.
*/
std::string getFormattedErrorMessages() const;
private:
enum TokenType {
tokenEndOfStream = 0,
tokenObjectBegin,
tokenObjectEnd,
tokenArrayBegin,
tokenArrayEnd,
tokenString,
tokenNumber,
tokenTrue,
tokenFalse,
tokenNull,
tokenArraySeparator,
tokenMemberSeparator,
tokenComment,
tokenError
};
class Token {
public:
TokenType type_;
Location start_;
Location end_;
};
class ErrorInfo {
public:
Token token_;
std::string message_;
Location extra_;
};
typedef std::deque<ErrorInfo> Errors;
bool readToken(Token& token);
void skipSpaces();
bool match(Location pattern, int patternLength);
bool readComment();
bool readCStyleComment();
bool readCppStyleComment();
bool readString();
void readNumber();
bool readValue();
bool readObject(Token& token);
bool readArray(Token& token);
bool decodeNumber(Token& token);
bool decodeNumber(Token& token, Value& decoded);
bool decodeString(Token& token);
bool decodeString(Token& token, std::string& decoded);
bool decodeDouble(Token& token);
bool decodeDouble(Token& token, Value& decoded);
bool decodeUnicodeCodePoint(Token& token,
Location& current,
Location end,
unsigned int& unicode);
bool decodeUnicodeEscapeSequence(Token& token,
Location& current,
Location end,
unsigned int& unicode);
bool addError(const std::string& message, Token& token, Location extra = 0);
bool recoverFromError(TokenType skipUntilToken);
bool addErrorAndRecover(const std::string& message,
Token& token,
TokenType skipUntilToken);
void skipUntilSpace();
Value& currentValue();
Char getNextChar();
void
getLocationLineAndColumn(Location location, int& line, int& column) const;
std::string getLocationLineAndColumn(Location location) const;
void addComment(Location begin, Location end, CommentPlacement placement);
void skipCommentTokens(Token& token);
typedef std::stack<Value*> Nodes;
Nodes nodes_;
Errors errors_;
std::string document_;
Location begin_;
Location end_;
Location current_;
Location lastValueEnd_;
Value* lastValue_;
std::string commentsBefore_;
Features features_;
bool collectComments_;
}; // Reader
/** Interface for reading JSON from a char array.
*/
class JSON_API CharReader {
public:
virtual ~CharReader() {}
/** \brief Read a Value from a <a HREF="http://www.json.org">JSON</a>
document.
* The document must be a UTF-8 encoded string containing the document to read.
*
* \param beginDoc Pointer on the beginning of the UTF-8 encoded string of the
document to read.
* \param endDoc Pointer on the end of the UTF-8 encoded string of the
document to read.
* Must be >= beginDoc.
* \param root [out] Contains the root value of the document if it was
* successfully parsed.
* \param errs [out] Formatted error messages (if not NULL)
* a user friendly string that lists errors in the parsed
* document.
* \return \c true if the document was successfully parsed, \c false if an
error occurred.
*/
virtual bool parse(
char const* beginDoc, char const* endDoc,
Value* root, std::string* errs) = 0;
class Factory {
public:
virtual ~Factory() {}
/** \brief Allocate a CharReader via operator new().
* \throw std::exception if something goes wrong (e.g. invalid settings)
*/
virtual CharReader* newCharReader() const = 0;
}; // Factory
}; // CharReader
/** \brief Build a CharReader implementation.
Usage:
\code
using namespace Json;
CharReaderBuilder builder;
builder["collectComments"] = false;
Value value;
std::string errs;
bool ok = parseFromStream(builder, std::cin, &value, &errs);
\endcode
*/
class JSON_API CharReaderBuilder : public CharReader::Factory {
public:
// Note: We use a Json::Value so that we can add data-members to this class
// without a major version bump.
/** Configuration of this builder.
These are case-sensitive.
Available settings (case-sensitive):
- `"collectComments": false or true`
- true to collect comment and allow writing them
back during serialization, false to discard comments.
This parameter is ignored if allowComments is false.
- `"allowComments": false or true`
- true if comments are allowed.
- `"strictRoot": false or true`
- true if root must be either an array or an object value
- `"allowDroppedNullPlaceholders": false or true`
- true if dropped null placeholders are allowed. (See StreamWriterBuilder.)
- `"allowNumericKeys": false or true`
- true if numeric object keys are allowed.
- `"allowSingleQuotes": false or true`
- true if '' are allowed for strings (both keys and values)
- `"stackLimit": integer`
- Exceeding stackLimit (recursive depth of `readValue()`) will
cause an exception.
- This is a security issue (seg-faults caused by deeply nested JSON),
so the default is low.
- `"failIfExtra": false or true`
- If true, `parse()` returns false when extra non-whitespace trails
the JSON value in the input string.
- `"rejectDupKeys": false or true`
- If true, `parse()` returns false when a key is duplicated within an object.
- `"allowSpecialFloats": false or true`
- If true, special float values (NaNs and infinities) are allowed
and their values are lossfree restorable.
You can examine 'settings_` yourself
to see the defaults. You can also write and read them just like any
JSON Value.
\sa setDefaults()
*/
Json::Value settings_;
CharReaderBuilder();
virtual ~CharReaderBuilder();
virtual CharReader* newCharReader() const;
/** \return true if 'settings' are legal and consistent;
* otherwise, indicate bad settings via 'invalid'.
*/
bool validate(Json::Value* invalid) const;
/** A simple way to update a specific setting.
*/
Value& operator[](std::string key);
/** Called by ctor, but you can use this to reset settings_.
* \pre 'settings' != NULL (but Json::null is fine)
* \remark Defaults:
* \snippet src/lib_json/json_reader.cpp CharReaderBuilderDefaults
*/
static void setDefaults(Json::Value* settings);
/** Same as old Features::strictMode().
* \pre 'settings' != NULL (but Json::null is fine)
* \remark Defaults:
* \snippet src/lib_json/json_reader.cpp CharReaderBuilderStrictMode
*/
static void strictMode(Json::Value* settings);
};
/** Consume entire stream and use its begin/end.
* Someday we might have a real StreamReader, but for now this
* is convenient.
*/
bool JSON_API parseFromStream(
CharReader::Factory const&,
std::istream&,
Value* root, std::string* errs);
/** \brief Read from 'sin' into 'root'.
Always keep comments from the input JSON.
This can be used to read a file into a particular sub-object.
For example:
\code
Json::Value root;
cin >> root["dir"]["file"];
cout << root;
\endcode
Result:
\verbatim
{
"dir": {
"file": {
// The input stream JSON would be nested here.
}
}
}
\endverbatim
\throw std::exception on parse error.
\see Json::operator<<()
*/
JSON_API std::istream& operator>>(std::istream&, Value&);
} // namespace Json
#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
#pragma warning(pop)
#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
#endif // CPPTL_JSON_READER_H_INCLUDED
This diff is collapsed.
// DO NOT EDIT. This file (and "version") is generated by CMake.
// Run CMake configure step to update it.
#ifndef JSON_VERSION_H_INCLUDED
# define JSON_VERSION_H_INCLUDED
# define JSONCPP_VERSION_STRING "0.10.6"
# define JSONCPP_VERSION_MAJOR 0
# define JSONCPP_VERSION_MINOR 10
# define JSONCPP_VERSION_PATCH 6
# define JSONCPP_VERSION_QUALIFIER
# define JSONCPP_VERSION_HEXA ((JSONCPP_VERSION_MAJOR << 24) | (JSONCPP_VERSION_MINOR << 16) | (JSONCPP_VERSION_PATCH << 8))
#endif // JSON_VERSION_H_INCLUDED
// Copyright 2007-2010 Baptiste Lepilleur
// Distributed under MIT license, or public domain if desired and
// recognized in your jurisdiction.
// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
#ifndef JSON_WRITER_H_INCLUDED
#define JSON_WRITER_H_INCLUDED
#if !defined(JSON_IS_AMALGAMATION)
#include "value.h"
#endif // if !defined(JSON_IS_AMALGAMATION)
#include <vector>
#include <string>
#include <ostream>
// Disable warning C4251: <data member>: <type> needs to have dll-interface to
// be used by...
#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
#pragma warning(push)
#pragma warning(disable : 4251)
#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
namespace Json {
class Value;
/**
Usage:
\code
using namespace Json;
void writeToStdout(StreamWriter::Factory const& factory, Value const& value) {
std::unique_ptr<StreamWriter> const writer(
factory.newStreamWriter());
writer->write(value, &std::cout);
std::cout << std::endl; // add lf and flush
}
\endcode
*/
class JSON_API StreamWriter {
protected:
std::ostream* sout_; // not owned; will not delete
public:
StreamWriter();
virtual ~StreamWriter();
/** Write Value into document as configured in sub-class.
Do not take ownership of sout, but maintain a reference during function.
\pre sout != NULL
\return zero on success (For now, we always return zero, so check the stream instead.)
\throw std::exception possibly, depending on configuration
*/
virtual int write(Value const& root, std::ostream* sout) = 0;
/** \brief A simple abstract factory.
*/
class JSON_API Factory {
public:
virtual ~Factory();
/** \brief Allocate a CharReader via operator new().
* \throw std::exception if something goes wrong (e.g. invalid settings)
*/
virtual StreamWriter* newStreamWriter() const = 0;
}; // Factory
}; // StreamWriter
/** \brief Write into stringstream, then return string, for convenience.
* A StreamWriter will be created from the factory, used, and then deleted.
*/
std::string JSON_API writeString(StreamWriter::Factory const& factory, Value const& root);
/** \brief Build a StreamWriter implementation.
Usage:
\code
using namespace Json;
Value value = ...;
StreamWriterBuilder builder;
builder["commentStyle"] = "None";
builder["indentation"] = " "; // or whatever you like
std::unique_ptr<Json::StreamWriter> writer(
builder.newStreamWriter());
writer->write(value, &std::cout);
std::cout << std::endl; // add lf and flush
\endcode
*/
class JSON_API StreamWriterBuilder : public StreamWriter::Factory {
public:
// Note: We use a Json::Value so that we can add data-members to this class
// without a major version bump.
/** Configuration of this builder.
Available settings (case-sensitive):
- "commentStyle": "None" or "All"
- "indentation": "<anything>"
- "enableYAMLCompatibility": false or true
- slightly change the whitespace around colons
- "dropNullPlaceholders": false or true
- Drop the "null" string from the writer's output for nullValues.
Strictly speaking, this is not valid JSON. But when the output is being
fed to a browser's Javascript, it makes for smaller output and the
browser can handle the output just fine.
- "useSpecialFloats": false or true
- If true, outputs non-finite floating point values in the following way:
NaN values as "NaN", positive infinity as "Infinity", and negative infinity
as "-Infinity".
You can examine 'settings_` yourself
to see the defaults. You can also write and read them just like any
JSON Value.
\sa setDefaults()
*/
Json::Value settings_;
StreamWriterBuilder();
virtual ~StreamWriterBuilder();
/**
* \throw std::exception if something goes wrong (e.g. invalid settings)
*/
virtual StreamWriter* newStreamWriter() const;
/** \return true if 'settings' are legal and consistent;
* otherwise, indicate bad settings via 'invalid'.
*/
bool validate(Json::Value* invalid) const;
/** A simple way to update a specific setting.
*/
Value& operator[](std::string key);
/** Called by ctor, but you can use this to reset settings_.
* \pre 'settings' != NULL (but Json::null is fine)
* \remark Defaults:
* \snippet src/lib_json/json_writer.cpp StreamWriterBuilderDefaults
*/
static void setDefaults(Json::Value* settings);
};
/** \brief Abstract class for writers.
* \deprecated Use StreamWriter. (And really, this is an implementation detail.)
*/
class JSON_API Writer {
public:
virtual ~Writer();
virtual std::string write(const Value& root) = 0;
};
/** \brief Outputs a Value in <a HREF="http://www.json.org">JSON</a> format
*without formatting (not human friendly).
*
* The JSON document is written in a single line. It is not intended for 'human'
*consumption,
* but may be usefull to support feature such as RPC where bandwith is limited.
* \sa Reader, Value
* \deprecated Use StreamWriterBuilder.
*/
class JSON_API FastWriter : public Writer {
public:
FastWriter();
virtual ~FastWriter() {}
void enableYAMLCompatibility();
public: // overridden from Writer
virtual std::string write(const Value& root);
private:
void writeValue(const Value& value);
std::string document_;
bool yamlCompatiblityEnabled_;
};
/** \brief Writes a Value in <a HREF="http://www.json.org">JSON</a> format in a
*human friendly way.
*
* The rules for line break and indent are as follow:
* - Object value:
* - if empty then print {} without indent and line break
* - if not empty the print '{', line break & indent, print one value per
*line
* and then unindent and line break and print '}'.
* - Array value:
* - if empty then print [] without indent and line break
* - if the array contains no object value, empty array or some other value
*types,
* and all the values fit on one lines, then print the array on a single
*line.
* - otherwise, it the values do not fit on one line, or the array contains
* object or non empty array, then print one value per line.
*
* If the Value have comments then they are outputed according to their
*#CommentPlacement.
*
* \sa Reader, Value, Value::setComment()
* \deprecated Use StreamWriterBuilder.
*/
class JSON_API StyledWriter : public Writer {
public:
StyledWriter();
virtual ~StyledWriter() {}
public: // overridden from Writer
/** \brief Serialize a Value in <a HREF="http://www.json.org">JSON</a> format.
* \param root Value to serialize.
* \return String containing the JSON document that represents the root value.
*/
virtual std::string write(const Value& root);
private:
void writeValue(const Value& value);
void writeArrayValue(const Value& value);
bool isMultineArray(const Value& value);
void pushValue(const std::string& value);
void writeIndent();
void writeWithIndent(const std::string& value);
void indent();
void unindent();
void writeCommentBeforeValue(const Value& root);
void writeCommentAfterValueOnSameLine(const Value& root);
bool hasCommentForValue(const Value& value);
static std::string normalizeEOL(const std::string& text);
typedef std::vector<std::string> ChildValues;
ChildValues childValues_;
std::string document_;
std::string indentString_;
int rightMargin_;
int indentSize_;
bool addChildValues_;
};
/** \brief Writes a Value in <a HREF="http://www.json.org">JSON</a> format in a
human friendly way,
to a stream rather than to a string.
*
* The rules for line break and indent are as follow:
* - Object value:
* - if empty then print {} without indent and line break
* - if not empty the print '{', line break & indent, print one value per
line
* and then unindent and line break and print '}'.
* - Array value:
* - if empty then print [] without indent and line break
* - if the array contains no object value, empty array or some other value
types,
* and all the values fit on one lines, then print the array on a single
line.
* - otherwise, it the values do not fit on one line, or the array contains
* object or non empty array, then print one value per line.
*
* If the Value have comments then they are outputed according to their
#CommentPlacement.
*
* \param indentation Each level will be indented by this amount extra.
* \sa Reader, Value, Value::setComment()
* \deprecated Use StreamWriterBuilder.
*/
class JSON_API StyledStreamWriter {
public:
StyledStreamWriter(std::string indentation = "\t");
~StyledStreamWriter() {}
public:
/** \brief Serialize a Value in <a HREF="http://www.json.org">JSON</a> format.
* \param out Stream to write to. (Can be ostringstream, e.g.)
* \param root Value to serialize.
* \note There is no point in deriving from Writer, since write() should not
* return a value.
*/
void write(std::ostream& out, const Value& root);
private:
void writeValue(const Value& value);
void writeArrayValue(const Value& value);
bool isMultineArray(const Value& value);
void pushValue(const std::string& value);
void writeIndent();
void writeWithIndent(const std::string& value);
void indent();
void unindent();
void writeCommentBeforeValue(const Value& root);
void writeCommentAfterValueOnSameLine(const Value& root);
bool hasCommentForValue(const Value& value);
static std::string normalizeEOL(const std::string& text);
typedef std::vector<std::string> ChildValues;
ChildValues childValues_;
std::ostream* document_;
std::string indentString_;
int rightMargin_;
std::string indentation_;
bool addChildValues_ : 1;
bool indented_ : 1;
};
#if defined(JSON_HAS_INT64)
std::string JSON_API valueToString(Int value);
std::string JSON_API valueToString(UInt value);
#endif // if defined(JSON_HAS_INT64)
std::string JSON_API valueToString(LargestInt value);
std::string JSON_API valueToString(LargestUInt value);
std::string JSON_API valueToString(double value);
std::string JSON_API valueToString(bool value);
std::string JSON_API valueToQuotedString(const char* value);
/// \brief Output using the StyledStreamWriter.
/// \see Json::operator>>()
JSON_API std::ostream& operator<<(std::ostream&, const Value& root);
} // namespace Json
#if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
#pragma warning(pop)
#endif // if defined(JSONCPP_DISABLE_DLL_INTERFACE_WARNING)
#endif // JSON_WRITER_H_INCLUDED
# Set the default behavior, in case people don't have core.autocrlf set.
* text=auto
# Explicitly declare text files you want to always be normalized and converted
# to native line endings on checkout.
*.cpp text
*.h text
*.txt text
*.md text
*.cmake text
*.svg text
*.dot text
*.yml text
*.in text
*.sh text
*.autopkg text
Dockerfile text
# Denote all files that are truly binary and should not be modified.
*.png binary
*.jpg binary
*.json binary
\ No newline at end of file
/bin/*
!/bin/data
!/bin/encodings
!/bin/jsonchecker
!/bin/types
!/bin/unittestschema
/build
/doc/html
/doc/doxygen_*.db
*.a
# Temporary files created during CMake build
CMakeCache.txt
CMakeFiles
cmake_install.cmake
CTestTestfile.cmake
Makefile
RapidJSON*.cmake
RapidJSON.pc
Testing
/googletest
install_manifest.txt
Doxyfile
Doxyfile.zh-cn
DartConfiguration.tcl
*.nupkg
# Files created by OS
*.DS_Store
[submodule "thirdparty/gtest"]
path = thirdparty/gtest
url = https://github.com/google/googletest.git
sudo: required
dist: xenial
language: cpp
cache:
- ccache
addons:
apt:
sources:
- ubuntu-toolchain-r-test
packages:
- cmake
- valgrind
- clang-8
env:
global:
- USE_CCACHE=1
- CCACHE_SLOPPINESS=pch_defines,time_macros
- CCACHE_COMPRESS=1
- CCACHE_MAXSIZE=100M
- ARCH_FLAGS_x86='-m32' # #266: don't use SSE on 32-bit
- ARCH_FLAGS_x86_64='-msse4.2' # use SSE4.2 on 64-bit
- ARCH_FLAGS_aarch64='-march=armv8-a'
- GITHUB_REPO='Tencent/rapidjson'
- secure: "HrsaCb+N66EG1HR+LWH1u51SjaJyRwJEDzqJGYMB7LJ/bfqb9mWKF1fLvZGk46W5t7TVaXRDD5KHFx9DPWvKn4gRUVkwTHEy262ah5ORh8M6n/6VVVajeV/AYt2C0sswdkDBDO4Xq+xy5gdw3G8s1A4Inbm73pUh+6vx+7ltBbk="
matrix:
include:
# gcc
- env: CONF=release ARCH=x86 CXX11=ON CXX17=OFF MEMBERSMAP=OFF
compiler: gcc
arch: amd64
- env: CONF=release ARCH=x86_64 CXX11=ON CXX17=OFF MEMBERSMAP=OFF
compiler: gcc
arch: amd64
- env: CONF=release ARCH=x86_64 CXX11=ON CXX17=OFF MEMBERSMAP=ON
compiler: gcc
arch: amd64
- env: CONF=debug ARCH=x86 CXX11=OFF CXX17=OFF MEMBERSMAP=OFF
compiler: gcc
arch: amd64
- env: CONF=debug ARCH=x86_64 CXX11=OFF CXX17=OFF MEMBERSMAP=OFF
compiler: gcc
arch: amd64
- env: CONF=debug ARCH=x86 CXX11=OFF CXX17=ON MEMBERSMAP=ON CXX_FLAGS='-D_GLIBCXX_DEBUG'
compiler: gcc
arch: amd64
- env: CONF=debug ARCH=x86_64 CXX11=OFF CXX17=ON MEMBERSMAP=ON CXX_FLAGS='-D_GLIBCXX_DEBUG'
compiler: gcc
arch: amd64
- env: CONF=release ARCH=aarch64 CXX11=ON CXX17=OFF MEMBERSMAP=OFF
compiler: gcc
arch: arm64
- env: CONF=release ARCH=aarch64 CXX11=OFF CXX17=OFF MEMBERSMAP=OFF
compiler: gcc
arch: arm64
- env: CONF=release ARCH=aarch64 CXX11=OFF CXX17=ON MEMBERSMAP=ON
compiler: gcc
arch: arm64
# clang
- env: CONF=release ARCH=x86 CXX11=ON CXX17=OFF MEMBERSMAP=ON CCACHE_CPP2=yes
compiler: clang
arch: amd64
- env: CONF=release ARCH=x86_64 CXX11=ON CXX17=OFF MEMBERSMAP=ON CCACHE_CPP2=yes
compiler: clang
arch: amd64
- env: CONF=release ARCH=x86_64 CXX11=ON CXX17=OFF MEMBERSMAP=OFF CCACHE_CPP2=yes
compiler: clang
arch: amd64
- env: CONF=debug ARCH=x86 CXX11=OFF CXX17=OFF MEMBERSMAP=ON CCACHE_CPP2=yes
compiler: clang
arch: amd64
- env: CONF=debug ARCH=x86_64 CXX11=OFF CXX17=OFF MEMBERSMAP=ON CCACHE_CPP2=yes
compiler: clang
arch: amd64
- env: CONF=debug ARCH=x86 CXX11=OFF CXX17=ON MEMBERSMAP=OFF CCACHE_CPP2=yes
compiler: clang
arch: amd64
- env: CONF=debug ARCH=x86_64 CXX11=OFF CXX17=ON MEMBERSMAP=OFF CCACHE_CPP2=yes
compiler: clang
arch: amd64
- env: CONF=debug ARCH=aarch64 CXX11=ON CXX17=OFF MEMBERSMAP=ON CCACHE_CPP2=yes
compiler: clang
arch: arm64
- env: CONF=debug ARCH=aarch64 CXX11=OFF CXX17=OFF MEMBERSMAP=ON CCACHE_CPP2=yes
compiler: clang
arch: arm64
- env: CONF=debug ARCH=aarch64 CXX11=OFF CXX17=ON MEMBERSMAP=OFF CCACHE_CPP2=yes
compiler: clang
arch: arm64
# coverage report
- env: CONF=debug ARCH=x86 GCOV_FLAGS='--coverage' CXX_FLAGS='-O0' CXX11=OFF CXX17=OFF
compiler: gcc
arch: amd64
cache:
- ccache
- pip
after_success:
- pip install --user cpp-coveralls
- coveralls -r .. --gcov-options '\-lp' -e thirdparty -e example -e test -e build/CMakeFiles -e include/rapidjson/msinttypes -e include/rapidjson/internal/meta.h -e include/rapidjson/error/en.h
- env: CONF=debug ARCH=x86_64 GCOV_FLAGS='--coverage' CXX_FLAGS='-O0' CXX11=ON CXX17=OFF MEMBERSMAP=ON
compiler: gcc
arch: amd64
cache:
- ccache
- pip
after_success:
- pip install --user cpp-coveralls
- coveralls -r .. --gcov-options '\-lp' -e thirdparty -e example -e test -e build/CMakeFiles -e include/rapidjson/msinttypes -e include/rapidjson/internal/meta.h -e include/rapidjson/error/en.h
- env: CONF=debug ARCH=aarch64 GCOV_FLAGS='--coverage' CXX_FLAGS='-O0' CXX11=OFF CXX17=ON
compiler: gcc
arch: arm64
cache:
- ccache
- pip
after_success:
- pip install --user cpp-coveralls
- coveralls -r .. --gcov-options '\-lp' -e thirdparty -e example -e test -e build/CMakeFiles -e include/rapidjson/msinttypes -e include/rapidjson/internal/meta.h -e include/rapidjson/error/en.h
- script: # Documentation task
- cd build
- cmake .. -DRAPIDJSON_HAS_STDSTRING=ON -DCMAKE_VERBOSE_MAKEFILE=ON
- make travis_doc
cache: false
addons:
apt:
packages:
- doxygen
before_install:
- if [ "x86_64" = "$(arch)" ]; then sudo apt-get install -y g++-multilib libc6-dbg:i386 --allow-unauthenticated; fi
before_script:
# travis provides clang-7 for amd64 and clang-3.8 for arm64
# here use clang-8 to all architectures as clang-7 is not available for arm64
- if [ -f /usr/bin/clang++-8 ]; then
sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-8 1000;
sudo update-alternatives --config clang++;
export PATH=/usr/bin:$PATH;
fi
- if [ "$CXX" = "clang++" ]; then export CCACHE_CPP2=yes; fi
- ccache -s
# hack to avoid Valgrind bug (https://bugs.kde.org/show_bug.cgi?id=326469),
# exposed by merging PR#163 (using -march=native)
# TODO: Since this bug is already fixed. Remove this when valgrind can be upgraded.
- sed -i "s/-march=native//" CMakeLists.txt
- mkdir build
script:
- if [ "$CXX" = "clang++" ]; then export CXXFLAGS="-stdlib=libc++ ${CXXFLAGS}"; fi
- >
eval "ARCH_FLAGS=\${ARCH_FLAGS_${ARCH}}" ;
(cd build && cmake
-DRAPIDJSON_HAS_STDSTRING=ON
-DRAPIDJSON_USE_MEMBERSMAP=$MEMBERSMAP
-DRAPIDJSON_BUILD_CXX11=$CXX11
-DRAPIDJSON_BUILD_CXX17=$CXX17
-DCMAKE_VERBOSE_MAKEFILE=ON
-DCMAKE_BUILD_TYPE=$CONF
-DCMAKE_CXX_FLAGS="$ARCH_FLAGS $GCOV_FLAGS $CXX_FLAGS"
-DCMAKE_EXE_LINKER_FLAGS=$GCOV_FLAGS
..)
- cd build
- make tests -j 2
- make examples -j 2
- ctest -j 2 -V `[ "$CONF" = "release" ] || echo "-E perftest"`
# Change Log
All notable changes to this project will be documented in this file.
This project adheres to [Semantic Versioning](http://semver.org/).
## [Unreleased]
## 1.1.0 - 2016-08-25
### Added
* Add GenericDocument ctor overload to specify JSON type (#369)
* Add FAQ (#372, #373, #374, #376)
* Add forward declaration header `fwd.h`
* Add @PlatformIO Library Registry manifest file (#400)
* Implement assignment operator for BigInteger (#404)
* Add comments support (#443)
* Adding coapp definition (#460)
* documenttest.cpp: EXPECT_THROW when checking empty allocator (470)
* GenericDocument: add implicit conversion to ParseResult (#480)
* Use <wchar.h> with C++ linkage on Windows ARM (#485)
* Detect little endian for Microsoft ARM targets
* Check Nan/Inf when writing a double (#510)
* Add JSON Schema Implementation (#522)
* Add iostream wrapper (#530)
* Add Jsonx example for converting JSON into JSONx (a XML format) (#531)
* Add optional unresolvedTokenIndex parameter to Pointer::Get() (#532)
* Add encoding validation option for Writer/PrettyWriter (#534)
* Add Writer::SetMaxDecimalPlaces() (#536)
* Support {0, } and {0, m} in Regex (#539)
* Add Value::Get/SetFloat(), Value::IsLossLessFloat/Double() (#540)
* Add stream position check to reader unit tests (#541)
* Add Templated accessors and range-based for (#542)
* Add (Pretty)Writer::RawValue() (#543)
* Add Document::Parse(std::string), Document::Parse(const char*, size_t length) and related APIs. (#553)
* Add move constructor for GenericSchemaDocument (#554)
* Add VS2010 and VS2015 to AppVeyor CI (#555)
* Add parse-by-parts example (#556, #562)
* Support parse number as string (#564, #589)
* Add kFormatSingleLineArray for PrettyWriter (#577)
* Added optional support for trailing commas (#584)
* Added filterkey and filterkeydom examples (#615)
* Added npm docs (#639)
* Allow options for writing and parsing NaN/Infinity (#641)
* Add std::string overload to PrettyWriter::Key() when RAPIDJSON_HAS_STDSTRING is defined (#698)
### Fixed
* Fix gcc/clang/vc warnings (#350, #394, #397, #444, #447, #473, #515, #582, #589, #595, #667)
* Fix documentation (#482, #511, #550, #557, #614, #635, #660)
* Fix emscripten alignment issue (#535)
* Fix missing allocator to uses of AddMember in document (#365)
* CMake will no longer complain that the minimum CMake version is not specified (#501)
* Make it usable with old VC8 (VS2005) (#383)
* Prohibit C++11 move from Document to Value (#391)
* Try to fix incorrect 64-bit alignment (#419)
* Check return of fwrite to avoid warn_unused_result build failures (#421)
* Fix UB in GenericDocument::ParseStream (#426)
* Keep Document value unchanged on parse error (#439)
* Add missing return statement (#450)
* Fix Document::Parse(const Ch*) for transcoding (#478)
* encodings.h: fix typo in preprocessor condition (#495)
* Custom Microsoft headers are necessary only for Visual Studio 2012 and lower (#559)
* Fix memory leak for invalid regex (26e69ffde95ba4773ab06db6457b78f308716f4b)
* Fix a bug in schema minimum/maximum keywords for 64-bit integer (e7149d665941068ccf8c565e77495521331cf390)
* Fix a crash bug in regex (#605)
* Fix schema "required" keyword cannot handle duplicated keys (#609)
* Fix cmake CMP0054 warning (#612)
* Added missing include guards in istreamwrapper.h and ostreamwrapper.h (#634)
* Fix undefined behaviour (#646)
* Fix buffer overrun using PutN (#673)
* Fix rapidjson::value::Get<std::string>() may returns wrong data (#681)
* Add Flush() for all value types (#689)
* Handle malloc() fail in PoolAllocator (#691)
* Fix builds on x32 platform. #703
### Changed
* Clarify problematic JSON license (#392)
* Move Travis to container based infrastructure (#504, #558)
* Make whitespace array more compact (#513)
* Optimize Writer::WriteString() with SIMD (#544)
* x86-64 48-bit pointer optimization for GenericValue (#546)
* Define RAPIDJSON_HAS_CXX11_RVALUE_REFS directly in clang (#617)
* Make GenericSchemaDocument constructor explicit (#674)
* Optimize FindMember when use std::string (#690)
## [1.0.2] - 2015-05-14
### Added
* Add Value::XXXMember(...) overloads for std::string (#335)
### Fixed
* Include rapidjson.h for all internal/error headers.
* Parsing some numbers incorrectly in full-precision mode (`kFullPrecisionParseFlag`) (#342)
* Fix some numbers parsed incorrectly (#336)
* Fix alignment of 64bit platforms (#328)
* Fix MemoryPoolAllocator::Clear() to clear user-buffer (0691502573f1afd3341073dd24b12c3db20fbde4)
### Changed
* CMakeLists for include as a thirdparty in projects (#334, #337)
* Change Document::ParseStream() to use stack allocator for Reader (ffbe38614732af8e0b3abdc8b50071f386a4a685)
## [1.0.1] - 2015-04-25
### Added
* Changelog following [Keep a CHANGELOG](https://github.com/olivierlacan/keep-a-changelog) suggestions.
### Fixed
* Parsing of some numbers (e.g. "1e-00011111111111") causing assertion (#314).
* Visual C++ 32-bit compilation error in `diyfp.h` (#317).
## [1.0.0] - 2015-04-22
### Added
* 100% [Coverall](https://coveralls.io/r/Tencent/rapidjson?branch=master) coverage.
* Version macros (#311)
### Fixed
* A bug in trimming long number sequence (4824f12efbf01af72b8cb6fc96fae7b097b73015).
* Double quote in unicode escape (#288).
* Negative zero roundtrip (double only) (#289).
* Standardize behavior of `memcpy()` and `malloc()` (0c5c1538dcfc7f160e5a4aa208ddf092c787be5a, #305, 0e8bbe5e3ef375e7f052f556878be0bd79e9062d).
### Removed
* Remove an invalid `Document::ParseInsitu()` API (e7f1c6dd08b522cfcf9aed58a333bd9a0c0ccbeb).
## 1.0-beta - 2015-04-8
### Added
* RFC 7159 (#101)
* Optional Iterative Parser (#76)
* Deep-copy values (#20)
* Error code and message (#27)
* ASCII Encoding (#70)
* `kParseStopWhenDoneFlag` (#83)
* `kParseFullPrecisionFlag` (881c91d696f06b7f302af6d04ec14dd08db66ceb)
* Add `Key()` to handler concept (#134)
* C++11 compatibility and support (#128)
* Optimized number-to-string and vice versa conversions (#137, #80)
* Short-String Optimization (#131)
* Local stream optimization by traits (#32)
* Travis & Appveyor Continuous Integration, with Valgrind verification (#24, #242)
* Redo all documentation (English, Simplified Chinese)
### Changed
* Copyright ownership transferred to THL A29 Limited (a Tencent company).
* Migrating from Premake to CMAKE (#192)
* Resolve all warning reports
### Removed
* Remove other JSON libraries for performance comparison (#180)
## 0.11 - 2012-11-16
## 0.1 - 2011-11-18
[Unreleased]: https://github.com/Tencent/rapidjson/compare/v1.1.0...HEAD
[1.1.0]: https://github.com/Tencent/rapidjson/compare/v1.0.2...v1.1.0
[1.0.2]: https://github.com/Tencent/rapidjson/compare/v1.0.1...v1.0.2
[1.0.1]: https://github.com/Tencent/rapidjson/compare/v1.0.0...v1.0.1
[1.0.0]: https://github.com/Tencent/rapidjson/compare/v1.0-beta...v1.0.0
SET(GTEST_SEARCH_PATH
"${GTEST_SOURCE_DIR}"
"${CMAKE_CURRENT_LIST_DIR}/../thirdparty/gtest/googletest")
IF(UNIX)
IF(RAPIDJSON_BUILD_THIRDPARTY_GTEST)
LIST(APPEND GTEST_SEARCH_PATH "/usr/src/gtest")
ELSE()
LIST(INSERT GTEST_SEARCH_PATH 1 "/usr/src/gtest")
ENDIF()
ENDIF()
FIND_PATH(GTEST_SOURCE_DIR
NAMES CMakeLists.txt src/gtest_main.cc
PATHS ${GTEST_SEARCH_PATH})
# Debian installs gtest include directory in /usr/include, thus need to look
# for include directory separately from source directory.
FIND_PATH(GTEST_INCLUDE_DIR
NAMES gtest/gtest.h
PATH_SUFFIXES include
HINTS ${GTEST_SOURCE_DIR}
PATHS ${GTEST_SEARCH_PATH})
INCLUDE(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GTestSrc DEFAULT_MSG
GTEST_SOURCE_DIR
GTEST_INCLUDE_DIR)
includedir=@INCLUDE_INSTALL_DIR@
Name: @PROJECT_NAME@
Description: A fast JSON parser/generator for C++ with both SAX/DOM style API
Version: @LIB_VERSION_STRING@
URL: https://github.com/Tencent/rapidjson
Cflags: -I${includedir}
################################################################################
# CMake minimum version required
cmake_minimum_required(VERSION 3.0)
################################################################################
# RapidJSON source dir
set( RapidJSON_SOURCE_DIR "@CONFIG_SOURCE_DIR@")
################################################################################
# RapidJSON build dir
set( RapidJSON_DIR "@CONFIG_DIR@")
################################################################################
# Compute paths
get_filename_component(RapidJSON_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
set( RapidJSON_INCLUDE_DIR "@RapidJSON_INCLUDE_DIR@" )
set( RapidJSON_INCLUDE_DIRS "@RapidJSON_INCLUDE_DIR@" )
message(STATUS "RapidJSON found. Headers: ${RapidJSON_INCLUDE_DIRS}")
if(NOT TARGET rapidjson)
add_library(rapidjson INTERFACE IMPORTED)
set_property(TARGET rapidjson PROPERTY
INTERFACE_INCLUDE_DIRECTORIES ${RapidJSON_INCLUDE_DIRS})
endif()
SET(PACKAGE_VERSION "@LIB_VERSION_STRING@")
IF (PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
SET(PACKAGE_VERSION_EXACT "true")
ENDIF (PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
IF (NOT PACKAGE_FIND_VERSION VERSION_GREATER PACKAGE_VERSION)
SET(PACKAGE_VERSION_COMPATIBLE "true")
ELSE (NOT PACKAGE_FIND_VERSION VERSION_GREATER PACKAGE_VERSION)
SET(PACKAGE_VERSION_UNSUITABLE "true")
ENDIF (NOT PACKAGE_FIND_VERSION VERSION_GREATER PACKAGE_VERSION)
version: 1.1.0.{build}
configuration:
- Debug
- Release
environment:
matrix:
# - VS_VERSION: 9 2008
# VS_PLATFORM: win32
# - VS_VERSION: 9 2008
# VS_PLATFORM: x64
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013
VS_VERSION: 10 2010
VS_PLATFORM: win32
CXX11: OFF
CXX17: OFF
MEMBERSMAP: OFF
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013
VS_VERSION: 10 2010
VS_PLATFORM: x64
CXX11: OFF
CXX17: OFF
MEMBERSMAP: ON
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013
VS_VERSION: 11 2012
VS_PLATFORM: win32
CXX11: OFF
CXX17: OFF
MEMBERSMAP: ON
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013
VS_VERSION: 11 2012
VS_PLATFORM: x64
CXX11: OFF
CXX17: OFF
MEMBERSMAP: OFF
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013
VS_VERSION: 12 2013
VS_PLATFORM: win32
CXX11: OFF
CXX17: OFF
MEMBERSMAP: OFF
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013
VS_VERSION: 12 2013
VS_PLATFORM: x64
CXX11: OFF
CXX17: OFF
MEMBERSMAP: ON
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
VS_VERSION: 14 2015
VS_PLATFORM: win32
CXX11: OFF
CXX17: OFF
MEMBERSMAP: ON
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
VS_VERSION: 14 2015
VS_PLATFORM: x64
CXX11: OFF
CXX17: OFF
MEMBERSMAP: OFF
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
VS_VERSION: 15 2017
VS_PLATFORM: win32
CXX11: OFF
CXX17: OFF
MEMBERSMAP: OFF
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
VS_VERSION: 15 2017
VS_PLATFORM: x64
CXX11: OFF
CXX17: OFF
MEMBERSMAP: ON
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
VS_VERSION: 15 2017
VS_PLATFORM: x64
CXX11: ON
CXX17: OFF
MEMBERSMAP: OFF
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
VS_VERSION: 15 2017
VS_PLATFORM: x64
CXX11: OFF
CXX17: ON
MEMBERSMAP: OFF
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
VS_VERSION: 16 2019
VS_PLATFORM: x64
CXX11: OFF
CXX17: ON
MEMBERSMAP: ON
before_build:
- git submodule update --init --recursive
- cmake -H. -BBuild/VS -G "Visual Studio %VS_VERSION%" -DCMAKE_GENERATOR_PLATFORM=%VS_PLATFORM% -DCMAKE_VERBOSE_MAKEFILE=ON -DBUILD_SHARED_LIBS=true -DRAPIDJSON_BUILD_CXX11=%CXX11% -DRAPIDJSON_BUILD_CXX17=%CXX17% -DRAPIDJSON_USE_MEMBERSMAP=%MEMBERSMAP% -Wno-dev
build:
project: Build\VS\RapidJSON.sln
parallel: true
verbosity: minimal
test_script:
- cd Build\VS && if %CONFIGURATION%==Debug (ctest --verbose -E perftest --build-config %CONFIGURATION%) else (ctest --verbose --build-config %CONFIGURATION%)
The MIT License (MIT)
Copyright (c) 2017 Bart Muzzin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Derived from:
The MIT License (MIT)
Copyright (c) 2015 mojmir svoboda
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
# rapidjson.natvis
This file can be used as a [Visual Studio Visualizer](https://docs.microsoft.com/en-gb/visualstudio/debugger/create-custom-views-of-native-objects) to aid in visualizing rapidjson structures within the Visual Studio debugger. Natvis visualizers are supported in Visual Studio 2012 and later. To install, copy the file into this directory:
`%USERPROFILE%\Documents\Visual Studio 2012\Visualizers`
Each version of Visual Studio has a similar directory, it must be copied into each directory to be used with that particular version. In Visual Studio 2015 and later, this can be done without restarting Visual Studio (a new debugging session must be started).
<?xml version="1.0" encoding="utf-8"?>
<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
<!-- rapidjson::GenericValue - basic support -->
<Type Name="rapidjson::GenericValue&lt;*,*&gt;">
<DisplayString Condition="(data_.f.flags &amp; kTypeMask) == kNullType">null</DisplayString>
<DisplayString Condition="data_.f.flags == kTrueFlag">true</DisplayString>
<DisplayString Condition="data_.f.flags == kFalseFlag">false</DisplayString>
<DisplayString Condition="data_.f.flags == kShortStringFlag">{(const Ch*)data_.ss.str,na}</DisplayString>
<DisplayString Condition="(data_.f.flags &amp; kTypeMask) == kStringType">{(const Ch*)((size_t)data_.s.str &amp; 0x0000FFFFFFFFFFFF),na}</DisplayString>
<DisplayString Condition="(data_.f.flags &amp; kNumberIntFlag) == kNumberIntFlag">{data_.n.i.i}</DisplayString>
<DisplayString Condition="(data_.f.flags &amp; kNumberUintFlag) == kNumberUintFlag">{data_.n.u.u}</DisplayString>
<DisplayString Condition="(data_.f.flags &amp; kNumberInt64Flag) == kNumberInt64Flag">{data_.n.i64}</DisplayString>
<DisplayString Condition="(data_.f.flags &amp; kNumberUint64Flag) == kNumberUint64Flag">{data_.n.u64}</DisplayString>
<DisplayString Condition="(data_.f.flags &amp; kNumberDoubleFlag) == kNumberDoubleFlag">{data_.n.d}</DisplayString>
<DisplayString Condition="data_.f.flags == kObjectType">Object members={data_.o.size}</DisplayString>
<DisplayString Condition="data_.f.flags == kArrayType">Array members={data_.a.size}</DisplayString>
<Expand>
<Item Condition="data_.f.flags == kObjectType" Name="[size]">data_.o.size</Item>
<Item Condition="data_.f.flags == kObjectType" Name="[capacity]">data_.o.capacity</Item>
<ArrayItems Condition="data_.f.flags == kObjectType">
<Size>data_.o.size</Size>
<!-- NOTE: Rapidjson stores some extra data in the high bits of pointers, which is why the mask -->
<ValuePointer>(rapidjson::GenericMember&lt;$T1,$T2&gt;*)(((size_t)data_.o.members) &amp; 0x0000FFFFFFFFFFFF)</ValuePointer>
</ArrayItems>
<Item Condition="data_.f.flags == kArrayType" Name="[size]">data_.a.size</Item>
<Item Condition="data_.f.flags == kArrayType" Name="[capacity]">data_.a.capacity</Item>
<ArrayItems Condition="data_.f.flags == kArrayType">
<Size>data_.a.size</Size>
<!-- NOTE: Rapidjson stores some extra data in the high bits of pointers, which is why the mask -->
<ValuePointer>(rapidjson::GenericValue&lt;$T1,$T2&gt;*)(((size_t)data_.a.elements) &amp; 0x0000FFFFFFFFFFFF)</ValuePointer>
</ArrayItems>
</Expand>
</Type>
</AutoVisualizer>
digraph {
compound=true
fontname="Inconsolata, Consolas"
fontsize=10
margin="0,0"
ranksep=0.2
nodesep=0.5
penwidth=0.5
colorscheme=spectral7
node [shape=box, fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5, style=filled, fillcolor=white]
edge [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
subgraph cluster1 {
margin="10,10"
labeljust="left"
label = "SAX"
style=filled
fillcolor=6
Reader -> Writer [style=invis]
}
subgraph cluster2 {
margin="10,10"
labeljust="left"
label = "DOM"
style=filled
fillcolor=7
Value
Document
}
Handler [label="<<concept>>\nHandler"]
{
edge [arrowtail=onormal, dir=back]
Value -> Document
Handler -> Document
Handler -> Writer
}
{
edge [arrowhead=vee, style=dashed, constraint=false]
Reader -> Handler [label="calls"]
Value -> Handler [label="calls"]
Document -> Reader [label="uses"]
}
}
digraph {
compound=true
fontname="Inconsolata, Consolas"
fontsize=10
margin="0,0"
ranksep=0.2
penwidth=0.5
node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
edge [fontname="Inconsolata, Consolas", fontsize=10, arrowhead=normal]
{
node [shape=record, fontsize="8", margin="0.04", height=0.2, color=gray]
oldjson [label="\{|\"|m|s|g|\"|:|\"|H|e|l|l|o|\\|n|W|o|r|l|d|!|\"|,|\"|\\|u|0|0|7|3|t|a|r|s|\"|:|1|0|\}", xlabel="Before Parsing"]
//newjson [label="\{|\"|<a>m|s|g|\\0|:|\"|<b>H|e|l|l|o|\\n|W|o|r|l|d|!|\\0|\"|,|\"|<c>s|t|a|r|s|\\0|t|a|r|s|:|1|0|\}", xlabel="After Parsing"]
newjson [shape=plaintext, label=<
<table BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="2"><tr>
<td>{</td>
<td>"</td><td port="a">m</td><td>s</td><td>g</td><td bgcolor="yellow">\\0</td>
<td>:</td>
<td>"</td><td port="b">H</td><td>e</td><td>l</td><td>l</td><td>o</td><td bgcolor="yellow">\\n</td><td bgcolor="yellow">W</td><td bgcolor="yellow">o</td><td bgcolor="yellow">r</td><td bgcolor="yellow">l</td><td bgcolor="yellow">d</td><td bgcolor="yellow">!</td><td bgcolor="yellow">\\0</td><td>"</td>
<td>,</td>
<td>"</td><td port="c" bgcolor="yellow">s</td><td bgcolor="yellow">t</td><td bgcolor="yellow">a</td><td bgcolor="yellow">r</td><td bgcolor="yellow">s</td><td bgcolor="yellow">\\0</td><td>t</td><td>a</td><td>r</td><td>s</td>
<td>:</td>
<td>1</td><td>0</td>
<td>}</td>
</tr></table>
>, xlabel="After Parsing"]
}
subgraph cluster1 {
margin="10,10"
labeljust="left"
label = "Document by In situ Parsing"
style=filled
fillcolor=gray95
node [shape=Mrecord, style=filled, colorscheme=spectral7]
root [label="{object|}", fillcolor=3]
{
msg [label="{string|<a>}", fillcolor=5]
helloworld [label="{string|<a>}", fillcolor=5]
stars [label="{string|<a>}", fillcolor=5]
ten [label="{number|10}", fillcolor=6]
}
}
oldjson -> root [label=" ParseInsitu()" lhead="cluster1"]
edge [arrowhead=vee]
root -> { msg; stars }
edge [arrowhead="none"]
msg -> helloworld
stars -> ten
{
edge [arrowhead=vee, arrowtail=dot, arrowsize=0.5, dir=both, tailclip=false]
msg:a:c -> newjson:a
helloworld:a:c -> newjson:b
stars:a:c -> newjson:c
}
//oldjson -> newjson [style=invis]
}
digraph {
fontname="Inconsolata, Consolas"
fontsize=10
margin="0,0"
penwidth=0.0
node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
edge [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
node [shape = doublecircle]; Start; Finish;
node [shape = box; style = "rounded, filled"; fillcolor=white ];
Start -> ArrayInitial [label=" ["];
Start -> ObjectInitial [label=" {"];
subgraph clusterArray {
margin="10,10"
style=filled
fillcolor=gray95
label = "Array"
ArrayInitial; Element; ElementDelimiter; ArrayFinish;
}
subgraph clusterObject {
margin="10,10"
style=filled
fillcolor=gray95
label = "Object"
ObjectInitial; MemberKey; KeyValueDelimiter; MemberValue; MemberDelimiter; ObjectFinish;
}
ArrayInitial -> ArrayInitial [label="["];
ArrayInitial -> ArrayFinish [label=" ]"];
ArrayInitial -> ObjectInitial [label="{", constraint=false];
ArrayInitial -> Element [label="string\nfalse\ntrue\nnull\nnumber"];
Element -> ArrayFinish [label="]"];
Element -> ElementDelimiter [label=","];
ElementDelimiter -> ArrayInitial [label=" ["];
ElementDelimiter -> ObjectInitial [label="{"];
ElementDelimiter -> Element [label="string\nfalse\ntrue\nnull\nnumber"];
ObjectInitial -> ObjectFinish [label=" }"];
ObjectInitial -> MemberKey [label=" string "];
MemberKey -> KeyValueDelimiter [label=":"];
KeyValueDelimiter -> ArrayInitial [label="["];
KeyValueDelimiter -> ObjectInitial [label=" {"];
KeyValueDelimiter -> MemberValue [label=" string\n false\n true\n null\n number"];
MemberValue -> ObjectFinish [label="}"];
MemberValue -> MemberDelimiter [label=","];
MemberDelimiter -> MemberKey [label=" string "];
ArrayFinish -> Finish;
ObjectFinish -> Finish;
}
digraph {
compound=true
fontname="Inconsolata, Consolas"
fontsize=10
margin="0,0"
ranksep=0.2
penwidth=0.5
node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
edge [fontname="Inconsolata, Consolas", fontsize=10, arrowhead=normal]
subgraph cluster1 {
margin="10,10"
labeljust="left"
label = "Before"
style=filled
fillcolor=gray95
node [shape=Mrecord, style=filled, colorscheme=spectral7]
{
rank = same
b1 [label="{b:number|456}", fillcolor=6]
a1 [label="{a:number|123}", fillcolor=6]
}
a1 -> b1 [style="dashed", label="Move", dir=back]
}
subgraph cluster2 {
margin="10,10"
labeljust="left"
label = "After"
style=filled
fillcolor=gray95
node [shape=Mrecord, style=filled, colorscheme=spectral7]
{
rank = same
b2 [label="{b:null|}", fillcolor=1]
a2 [label="{a:number|456}", fillcolor=6]
}
a2 -> b2 [style=invis, dir=back]
}
b1 -> b2 [style=invis]
}
digraph {
compound=true
fontname="Inconsolata, Consolas"
fontsize=10
margin="0,0"
ranksep=0.2
penwidth=0.5
node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
edge [fontname="Inconsolata, Consolas", fontsize=10, arrowhead=normal]
subgraph cluster1 {
margin="10,10"
labeljust="left"
label = "Before Copying (Hypothetic)"
style=filled
fillcolor=gray95
node [shape=Mrecord, style=filled, colorscheme=spectral7]
c1 [label="{contacts:array|}", fillcolor=4]
c11 [label="{|}"]
c12 [label="{|}"]
c13 [shape="none", label="...", style="solid"]
o1 [label="{o:object|}", fillcolor=3]
ghost [label="{o:object|}", style=invis]
c1 -> o1 [style="dashed", label="AddMember", constraint=false]
edge [arrowhead=vee]
c1 -> { c11; c12; c13 }
o1 -> ghost [style=invis]
}
subgraph cluster2 {
margin="10,10"
labeljust="left"
label = "After Copying (Hypothetic)"
style=filled
fillcolor=gray95
node [shape=Mrecord, style=filled, colorscheme=spectral7]
c2 [label="{contacts:array|}", fillcolor=4]
c3 [label="{array|}", fillcolor=4]
c21 [label="{|}"]
c22 [label="{|}"]
c23 [shape=none, label="...", style="solid"]
o2 [label="{o:object|}", fillcolor=3]
cs [label="{string|\"contacts\"}", fillcolor=5]
c31 [label="{|}"]
c32 [label="{|}"]
c33 [shape="none", label="...", style="solid"]
edge [arrowhead=vee]
c2 -> { c21; c22; c23 }
o2 -> cs
cs -> c3 [arrowhead=none]
c3 -> { c31; c32; c33 }
}
ghost -> o2 [style=invis]
}
digraph {
compound=true
fontname="Inconsolata, Consolas"
fontsize=10
margin="0,0"
ranksep=0.2
penwidth=0.5
forcelabels=true
node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
edge [fontname="Inconsolata, Consolas", fontsize=10, arrowhead=normal]
subgraph cluster1 {
margin="10,10"
labeljust="left"
label = "Before Moving"
style=filled
fillcolor=gray95
node [shape=Mrecord, style=filled, colorscheme=spectral7]
c1 [label="{contacts:array|}", fillcolor=4]
c11 [label="{|}"]
c12 [label="{|}"]
c13 [shape=none, label="...", style="solid"]
o1 [label="{o:object|}", fillcolor=3]
ghost [label="{o:object|}", style=invis]
c1 -> o1 [style="dashed", constraint=false, label="AddMember"]
edge [arrowhead=vee]
c1 -> { c11; c12; c13 }
o1 -> ghost [style=invis]
}
subgraph cluster2 {
margin="10,10"
labeljust="left"
label = "After Moving"
style=filled
fillcolor=gray95
node [shape=Mrecord, style=filled, colorscheme=spectral7]
c2 [label="{contacts:null|}", fillcolor=1]
c3 [label="{array|}", fillcolor=4]
c21 [label="{|}"]
c22 [label="{|}"]
c23 [shape="none", label="...", style="solid"]
o2 [label="{o:object|}", fillcolor=3]
cs [label="{string|\"contacts\"}", fillcolor=5]
c2 -> o2 [style="dashed", constraint=false, label="AddMember", style=invis]
edge [arrowhead=vee]
c3 -> { c21; c22; c23 }
o2 -> cs
cs -> c3 [arrowhead=none]
}
ghost -> o2 [style=invis]
}
digraph {
compound=true
fontname="Inconsolata, Consolas"
fontsize=10
margin="0,0"
ranksep=0.2
penwidth=0.5
node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
edge [fontname="Inconsolata, Consolas", fontsize=10, arrowhead=normal]
{
node [shape=record, fontsize="8", margin="0.04", height=0.2, color=gray]
normaljson [label="\{|\"|m|s|g|\"|:|\"|H|e|l|l|o|\\|n|W|o|r|l|d|!|\"|,|\"|\\|u|0|0|7|3|t|a|r|s\"|:|1|0|\}"]
{
rank = same
msgstring [label="m|s|g|\\0"]
helloworldstring [label="H|e|l|l|o|\\n|W|o|r|l|d|!|\\0"]
starsstring [label="s|t|a|r|s\\0"]
}
}
subgraph cluster1 {
margin="10,10"
labeljust="left"
label = "Document by Normal Parsing"
style=filled
fillcolor=gray95
node [shape=Mrecord, style=filled, colorscheme=spectral7]
root [label="{object|}", fillcolor=3]
{
msg [label="{string|<a>}", fillcolor=5]
helloworld [label="{string|<a>}", fillcolor=5]
stars [label="{string|<a>}", fillcolor=5]
ten [label="{number|10}", fillcolor=6]
}
}
normaljson -> root [label=" Parse()" lhead="cluster1"]
edge [arrowhead=vee]
root -> { msg; stars }
edge [arrowhead="none"]
msg -> helloworld
stars -> ten
edge [arrowhead=vee, arrowtail=dot, arrowsize=0.5, dir=both, tailclip=false]
msg:a:c -> msgstring:w
helloworld:a:c -> helloworldstring:w
stars:a:c -> starsstring:w
msgstring -> helloworldstring -> starsstring [style=invis]
}
digraph {
compound=true
fontname="Inconsolata, Consolas"
fontsize=10
margin="0,0"
ranksep=0.2
penwidth=0.5
node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
edge [fontname="Inconsolata, Consolas", fontsize=10, arrowhead=normal]
{
node [shape=record, fontsize="8", margin="0.04", height=0.2, color=gray]
srcjson [label="\{|\"|p|r|o|j|e|c|t|\"|:|\"|r|a|p|i|d|j|s|o|n|\"|,|\"|s|t|a|r|s|\"|:|1|0|\}"]
dstjson [label="\{|\"|p|r|o|j|e|c|t|\"|:|\"|r|a|p|i|d|j|s|o|n|\"|,|\"|s|t|a|r|s|\"|:|1|1|\}"]
}
{
node [shape="box", style="filled", fillcolor="gray95"]
Document2 [label="(Modified) Document"]
Writer
}
subgraph cluster1 {
margin="10,10"
labeljust="left"
label = "Document"
style=filled
fillcolor=gray95
node [shape=Mrecord, style=filled, colorscheme=spectral7]
root [label="{object|}", fillcolor=3]
{
project [label="{string|\"project\"}", fillcolor=5]
rapidjson [label="{string|\"rapidjson\"}", fillcolor=5]
stars [label="{string|\"stars\"}", fillcolor=5]
ten [label="{number|10}", fillcolor=6]
}
edge [arrowhead=vee]
root -> { project; stars }
edge [arrowhead="none"]
project -> rapidjson
stars -> ten
}
srcjson -> root [label=" Parse()", lhead="cluster1"]
ten -> Document2 [label=" Increase \"stars\"", ltail="cluster1" ]
Document2 -> Writer [label=" Traverse DOM by Accept()"]
Writer -> dstjson [label=" Output to StringBuffer"]
}
digraph {
compound=true
fontname="Inconsolata, Consolas"
fontsize=10
margin="0,0"
ranksep=0.2
penwidth=0.5
node [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
edge [fontname="Inconsolata, Consolas", fontsize=10]
subgraph cluster1 {
margin="10,10"
labeljust="left"
label = "Document"
style=filled
fillcolor=gray95
node [shape=Mrecord, style=filled, colorscheme=spectral7]
root [label="{object|}", fillcolor=3]
{
hello [label="{string|\"hello\"}", fillcolor=5]
t [label="{string|\"t\"}", fillcolor=5]
f [label="{string|\"f\"}", fillcolor=5]
n [label="{string|\"n\"}", fillcolor=5]
i [label="{string|\"i\"}", fillcolor=5]
pi [label="{string|\"pi\"}", fillcolor=5]
a [label="{string|\"a\"}", fillcolor=5]
world [label="{string|\"world\"}", fillcolor=5]
true [label="{true|}", fillcolor=7]
false [label="{false|}", fillcolor=2]
null [label="{null|}", fillcolor=1]
i1 [label="{number|123}", fillcolor=6]
pi1 [label="{number|3.1416}", fillcolor=6]
array [label="{array|size=4}", fillcolor=4]
a1 [label="{number|1}", fillcolor=6]
a2 [label="{number|2}", fillcolor=6]
a3 [label="{number|3}", fillcolor=6]
a4 [label="{number|4}", fillcolor=6]
}
edge [arrowhead=vee]
root -> { hello; t; f; n; i; pi; a }
array -> { a1; a2; a3; a4 }
edge [arrowhead=none]
hello -> world
t -> true
f -> false
n -> null
i -> i1
pi -> pi1
a -> array
}
}
digraph {
rankdir=LR
compound=true
fontname="Inconsolata, Consolas"
fontsize=10
margin="0,0"
ranksep=0.3
nodesep=0.15
penwidth=0.5
colorscheme=spectral7
node [shape=box, fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5, style=filled, fillcolor=white]
edge [fontname="Inconsolata, Consolas", fontsize=10, penwidth=0.5]
subgraph cluster0 {
style=filled
fillcolor=4
Encoding [label="<<concept>>\nEncoding"]
edge [arrowtail=onormal, dir=back]
Encoding -> { UTF8; UTF16; UTF32; ASCII; AutoUTF }
UTF16 -> { UTF16LE; UTF16BE }
UTF32 -> { UTF32LE; UTF32BE }
}
subgraph cluster1 {
style=filled
fillcolor=5
Stream [label="<<concept>>\nStream"]
InputByteStream [label="<<concept>>\nInputByteStream"]
OutputByteStream [label="<<concept>>\nOutputByteStream"]
edge [arrowtail=onormal, dir=back]
Stream -> {
StringStream; InsituStringStream; StringBuffer;
EncodedInputStream; EncodedOutputStream;
AutoUTFInputStream; AutoUTFOutputStream
InputByteStream; OutputByteStream
}
InputByteStream -> { MemoryStream; FileReadStream }
OutputByteStream -> { MemoryBuffer; FileWriteStream }
}
subgraph cluster2 {
style=filled
fillcolor=3
Allocator [label="<<concept>>\nAllocator"]
edge [arrowtail=onormal, dir=back]
Allocator -> { CrtAllocator; MemoryPoolAllocator }
}
{
edge [arrowtail=odiamond, arrowhead=vee, dir=both]
EncodedInputStream -> InputByteStream
EncodedOutputStream -> OutputByteStream
AutoUTFInputStream -> InputByteStream
AutoUTFOutputStream -> OutputByteStream
MemoryPoolAllocator -> Allocator [label="base", tailport=s]
}
{
edge [arrowhead=vee, style=dashed]
AutoUTFInputStream -> AutoUTF
AutoUTFOutputStream -> AutoUTF
}
//UTF32LE -> Stream [style=invis]
}
# DOM
Document Object Model(DOM) is an in-memory representation of JSON for query and manipulation. The basic usage of DOM is described in [Tutorial](doc/tutorial.md). This section will describe some details and more advanced usages.
[TOC]
# Template {#Template}
In the tutorial, `Value` and `Document` were used. Similarly to `std::string`, these are actually `typedef`s of template classes:
~~~~~~~~~~cpp
namespace rapidjson {
template <typename Encoding, typename Allocator = MemoryPoolAllocator<> >
class GenericValue {
// ...
};
template <typename Encoding, typename Allocator = MemoryPoolAllocator<> >
class GenericDocument : public GenericValue<Encoding, Allocator> {
// ...
};
typedef GenericValue<UTF8<> > Value;
typedef GenericDocument<UTF8<> > Document;
} // namespace rapidjson
~~~~~~~~~~
Users can customize these template parameters.
## Encoding {#Encoding}
The `Encoding` parameter specifies the encoding of JSON String values in memory. Possible options are `UTF8`, `UTF16`, `UTF32`. Note that these 3 types are also template classes. `UTF8<>` is `UTF8<char>`, which means using `char` to store the characters. You may refer to [Encoding](doc/encoding.md) for details.
Suppose a Windows application needs to query localization strings stored in JSON files. Unicode-enabled functions in Windows use UTF-16 (wide character) encoding. No matter what encoding was used in the JSON files, we can store the strings in UTF-16 in memory.
~~~~~~~~~~cpp
using namespace rapidjson;
typedef GenericDocument<UTF16<> > WDocument;
typedef GenericValue<UTF16<> > WValue;
FILE* fp = fopen("localization.json", "rb"); // non-Windows use "r"
char readBuffer[256];
FileReadStream bis(fp, readBuffer, sizeof(readBuffer));
AutoUTFInputStream<unsigned, FileReadStream> eis(bis); // wraps bis into eis
WDocument d;
d.ParseStream<0, AutoUTF<unsigned> >(eis);
const WValue locale(L"ja"); // Japanese
MessageBoxW(hWnd, d[locale].GetString(), L"Test", MB_OK);
~~~~~~~~~~
## Allocator {#Allocator}
The `Allocator` defines which allocator class is used when allocating/deallocating memory for `Document`/`Value`. `Document` owns, or references, an `Allocator` instance. On the other hand, `Value` does not, in order to reduce memory consumption.
The default allocator used in `GenericDocument` is `MemoryPoolAllocator`. This allocator allocates memory sequentially and cannot deallocate it piece by piece. This is very suitable when parsing a JSON into a DOM tree.
Another allocator is `CrtAllocator`, where CRT is short for C RunTime library. This allocator simply calls the standard `malloc()`/`realloc()`/`free()`. When there are a lot of add and remove operations, this allocator may be preferred, but it is far less efficient than `MemoryPoolAllocator`.
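For example, if a DOM will see many add/remove operations after parsing, one might trade parsing speed for individually reclaimable memory by using `CrtAllocator` as the value allocator. A minimal sketch (the `CrtDocument` name is only for illustration):
~~~~~~~~~~cpp
#include "rapidjson/document.h"

using namespace rapidjson;

// Illustrative typedef (not part of RapidJSON): a document whose values are
// allocated with CrtAllocator instead of the default MemoryPoolAllocator.
typedef GenericDocument<UTF8<>, CrtAllocator> CrtDocument;

CrtDocument d;
d.Parse("{\"n\":1}");
// Members can now be added and removed repeatedly without growing a memory pool.
~~~~~~~~~~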
# Parsing {#Parsing}
`Document` provides several functions for parsing. Below, (1) is the fundamental function, while the others are helpers which call (1).
~~~~~~~~~~cpp
using namespace rapidjson;
// (1) Fundamental
template <unsigned parseFlags, typename SourceEncoding, typename InputStream>
GenericDocument& GenericDocument::ParseStream(InputStream& is);
// (2) Using the same Encoding for stream
template <unsigned parseFlags, typename InputStream>
GenericDocument& GenericDocument::ParseStream(InputStream& is);
// (3) Using default parse flags
template <typename InputStream>
GenericDocument& GenericDocument::ParseStream(InputStream& is);
// (4) In situ parsing
template <unsigned parseFlags>
GenericDocument& GenericDocument::ParseInsitu(Ch* str);
// (5) In situ parsing, using default parse flags
GenericDocument& GenericDocument::ParseInsitu(Ch* str);
// (6) Normal parsing of a string
template <unsigned parseFlags, typename SourceEncoding>
GenericDocument& GenericDocument::Parse(const Ch* str);
// (7) Normal parsing of a string, using same Encoding of Document
template <unsigned parseFlags>
GenericDocument& GenericDocument::Parse(const Ch* str);
// (8) Normal parsing of a string, using default parse flags
GenericDocument& GenericDocument::Parse(const Ch* str);
~~~~~~~~~~
The examples in the [tutorial](doc/tutorial.md) use (8) for normal parsing of a string. The examples in [stream](doc/stream.md) use the first three. *In situ* parsing will be described soon.
The `parseFlags` are a combination of the following bit-flags:
Parse flags | Meaning
------------------------------|-----------------------------------
`kParseNoFlags` | No flag is set.
`kParseDefaultFlags` | Default parse flags. It is equal to macro `RAPIDJSON_PARSE_DEFAULT_FLAGS`, which is defined as `kParseNoFlags`.
`kParseInsituFlag`            | In-situ (destructive) parsing.
`kParseValidateEncodingFlag`  | Validate encoding of JSON strings.
`kParseIterativeFlag`         | Iterative (constant complexity in terms of function call stack size) parsing.
`kParseStopWhenDoneFlag`      | After parsing a complete JSON root from the stream, stop processing the rest of the stream. When this flag is used, the parser will not generate a `kParseErrorDocumentRootNotSingular` error. Use this flag to parse multiple JSONs in the same stream.
`kParseFullPrecisionFlag`     | Parse numbers in full precision (slower). If this flag is not set, normal precision (faster) is used. Normal precision has a maximum error of 3 [ULP](http://en.wikipedia.org/wiki/Unit_in_the_last_place).
`kParseCommentsFlag` | Allow one-line `// ...` and multi-line `/* ... */` comments (relaxed JSON syntax).
`kParseNumbersAsStringsFlag` | Parse numerical type values as strings.
`kParseTrailingCommasFlag` | Allow trailing commas at the end of objects and arrays (relaxed JSON syntax).
`kParseNanAndInfFlag` | Allow parsing `NaN`, `Inf`, `Infinity`, `-Inf` and `-Infinity` as `double` values (relaxed JSON syntax).
`kParseEscapedApostropheFlag` | Allow escaped apostrophe `\'` in strings (relaxed JSON syntax).
By using a non-type template parameter instead of a function parameter, the C++ compiler can generate code which is optimized for the specified combination, improving speed and reducing code size (if only a single specialization is used). The downside is that the flags need to be determined at compile time.
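For example, relaxed-syntax parsing can be enabled by combining flags in the template parameter; a minimal sketch:
~~~~~~~~~~cpp
#include "rapidjson/document.h"

using namespace rapidjson;

Document d;
// Accept // comments and trailing commas; the combination is fixed at compile time.
d.Parse<kParseCommentsFlag | kParseTrailingCommasFlag>(
    "{ /* config */ \"x\": 1, }");
~~~~~~~~~~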
The `SourceEncoding` parameter defines what encoding is used in the stream. This can differ from the `Encoding` of the `Document`. See the [Transcoding and Validation](#TranscodingAndValidation) section for details.
And `InputStream` is the type of the input stream.
## Parse Error {#ParseError}
When parsing succeeds, the `Document` contains the parse results. When there is an error, the original DOM is *unchanged*. The error state of parsing can be obtained by `bool HasParseError()`, `ParseErrorCode GetParseError()` and `size_t GetErrorOffset()`.
Parse Error Code | Description
--------------------------------------------|---------------------------------------------------
`kParseErrorNone` | No error.
`kParseErrorDocumentEmpty` | The document is empty.
`kParseErrorDocumentRootNotSingular`        | The document root must not be followed by other values.
`kParseErrorValueInvalid` | Invalid value.
`kParseErrorObjectMissName` | Missing a name for object member.
`kParseErrorObjectMissColon` | Missing a colon after a name of object member.
`kParseErrorObjectMissCommaOrCurlyBracket` | Missing a comma or `}` after an object member.
`kParseErrorArrayMissCommaOrSquareBracket` | Missing a comma or `]` after an array element.
`kParseErrorStringUnicodeEscapeInvalidHex` | Incorrect hex digit after `\\u` escape in string.
`kParseErrorStringUnicodeSurrogateInvalid` | The surrogate pair in string is invalid.
`kParseErrorStringEscapeInvalid` | Invalid escape character in string.
`kParseErrorStringMissQuotationMark` | Missing a closing quotation mark in string.
`kParseErrorStringInvalidEncoding` | Invalid encoding in string.
`kParseErrorNumberTooBig` | Number too big to be stored in `double`.
`kParseErrorNumberMissFraction`             | Missing fraction part in number.
`kParseErrorNumberMissExponent`             | Missing exponent in number.
The offset of the error is defined as the number of characters from the beginning of the stream. Currently RapidJSON does not keep track of line numbers.
To get an error message, RapidJSON provides English messages in `rapidjson/error/en.h`. Users can customize it for other locales, or use a custom localization system.
Here is an example of parse error handling.
~~~~~~~~~~cpp
#include "rapidjson/document.h"
#include "rapidjson/error/en.h"
// ...
Document d;
if (d.Parse(json).HasParseError()) {
fprintf(stderr, "\nError(offset %u): %s\n",
(unsigned)d.GetErrorOffset(),
GetParseError_En(d.GetParseError()));
// ...
}
~~~~~~~~~~
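Since only the offset is reported, an application that wants line/column information has to derive it from the original buffer itself. A minimal sketch (not part of RapidJSON) that converts the offset returned by `GetErrorOffset()` into a 1-based line/column pair:
~~~~~~~~~~cpp
// Count newlines in the original JSON text up to the error offset.
static void OffsetToLineColumn(const char* json, size_t offset,
                               size_t& line, size_t& column) {
    line = 1;
    column = 1;
    for (size_t i = 0; i < offset && json[i] != '\0'; ++i) {
        if (json[i] == '\n') { ++line; column = 1; }
        else                 { ++column; }
    }
}
~~~~~~~~~~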
## In Situ Parsing {#InSituParsing}
From [Wikipedia](http://en.wikipedia.org/wiki/In_situ):
> *In situ* ... is a Latin phrase that translates literally to "on site" or "in position". It means "locally", "on site", "on the premises" or "in place" to describe an event where it takes place, and is used in many different contexts.
> ...
> (In computer science) An algorithm is said to be an in situ algorithm, or in-place algorithm, if the extra amount of memory required to execute the algorithm is O(1), that is, does not exceed a constant no matter how large the input. For example, heapsort is an in situ sorting algorithm.
In the normal parsing process, a large overhead comes from decoding JSON strings and copying them to other buffers. *In situ* parsing decodes those JSON strings in the place where they are stored. This is possible in JSON because the length of a decoded string is always shorter than or equal to the one in the JSON. In this context, decoding a JSON string means processing the escapes, such as `"\n"`, `"\u1234"`, etc., and adding a null terminator (`'\0'`) at the end of the string.
The following diagrams compare normal and *in situ* parsing. The JSON string values contain pointers to the decoded strings.
![normal parsing](diagram/normalparsing.png)
In normal parsing, the decoded strings are copied to freshly allocated buffers. `"\\n"` (2 characters) is decoded as `"\n"` (1 character). `"\\u0073"` (6 characters) is decoded as `"s"` (1 character).
![in situ parsing](diagram/insituparsing.png)
*In situ* parsing just modifies the original JSON. Updated characters are highlighted in the diagram. If the JSON string does not contain escape characters, such as `"msg"`, the parsing process merely replaces the closing double quotation mark with a null character.
Since *in situ* parsing modifies the input, the parsing API needs `char*` instead of `const char*`.
~~~~~~~~~~cpp
// Read whole file into a buffer
FILE* fp = fopen("test.json", "r");
fseek(fp, 0, SEEK_END);
size_t filesize = (size_t)ftell(fp);
fseek(fp, 0, SEEK_SET);
char* buffer = (char*)malloc(filesize + 1);
size_t readLength = fread(buffer, 1, filesize, fp);
buffer[readLength] = '\0';
fclose(fp);
// In situ parsing the buffer into d, buffer will also be modified
Document d;
d.ParseInsitu(buffer);
// Query/manipulate the DOM here...
free(buffer);
// Note: At this point, d may have dangling pointers pointed to the deallocated buffer.
~~~~~~~~~~
The JSON strings are marked as const-strings. But they may not really be "constant". Their life cycle depends on the JSON buffer.
In situ parsing minimizes allocation overheads and memory copying. Generally this improves cache coherence, which is an important factor of performance in modern computers.
There are some limitations of *in situ* parsing:
1. The whole JSON is in memory.
2. The source encoding in stream and target encoding in document must be the same.
3. The buffer needs to be retained until the document is no longer used.
4. If the DOM needs to be used for a long period after parsing, and there are few JSON strings in the DOM, retaining the buffer may be a waste of memory.
*In situ* parsing is mostly suitable for short-term JSON that only needs to be processed once and then released from memory. In practice, these situations are very common, for example, deserializing JSON to C++ objects, processing web requests represented in JSON, etc.
## Transcoding and Validation {#TranscodingAndValidation}
RapidJSON supports conversion between Unicode formats (officially termed UCS Transformation Format) internally. During DOM parsing, the source encoding of the stream can be different from the encoding of the DOM. For example, the source stream contains a UTF-8 JSON, while the DOM is using UTF-16 encoding. There is an example code in [EncodedInputStream](doc/stream.md).
When writing a JSON from DOM to output stream, transcoding can also be used. An example is in [EncodedOutputStream](doc/stream.md).
During transcoding, the source string is decoded into Unicode code points, and then the code points are encoded in the target format. During decoding, it will validate the byte sequence in the source string. If it is not a valid sequence, the parser will stop with a `kParseErrorStringInvalidEncoding` error.
When the source encoding of the stream is the same as the encoding of the DOM, by default the parser will *not* validate the sequence. Users may use `kParseValidateEncodingFlag` to force validation.
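For instance, parsing a UTF-8 encoded string into a UTF-16 document uses overload (6) from the parsing list above; a minimal sketch (the `WDocument` typedef mirrors the earlier localization example):
~~~~~~~~~~cpp
#include "rapidjson/document.h"

using namespace rapidjson;

// Illustrative typedef: the DOM stores strings as UTF-16.
typedef GenericDocument<UTF16<> > WDocument;

WDocument d;
// The source string literal is UTF-8; it is validated and transcoded into UTF-16.
d.Parse<kParseDefaultFlags, UTF8<> >("{\"hello\":\"world\"}");
~~~~~~~~~~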
# Techniques {#Techniques}
Some techniques about using DOM API is discussed here.
## DOM as SAX Event Publisher
In RapidJSON, stringifying a DOM with a `Writer` may look a little bit weird.
~~~~~~~~~~cpp
// ...
Writer<StringBuffer> writer(buffer);
d.Accept(writer);
~~~~~~~~~~
Actually, `Value::Accept()` is responsible for publishing SAX events about the value to the handler. With this design, `Value` and `Writer` are decoupled. `Value` can generate SAX events, and `Writer` can handle those events.
Users may create custom handlers for transforming the DOM into other formats, for example, a handler which converts the DOM into XML.
For more about SAX events and handlers, please refer to [SAX](doc/sax.md).
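As a rough sketch of such a handler (the `StringCounter` type is made up for illustration; all other events fall back to `BaseReaderHandler`'s defaults):
~~~~~~~~~~cpp
#include "rapidjson/document.h"
#include "rapidjson/reader.h"   // BaseReaderHandler

using namespace rapidjson;

// Counts the string values that Value::Accept() publishes as SAX events.
struct StringCounter : public BaseReaderHandler<UTF8<>, StringCounter> {
    StringCounter() : count(0) {}
    bool String(const Ch*, SizeType, bool) {
        ++count;
        return true;   // returning false would stop the traversal
    }
    size_t count;
};

// Usage:
//   StringCounter counter;
//   d.Accept(counter);
//   printf("%u string values\n", (unsigned)counter.count);
~~~~~~~~~~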
## User Buffer {#UserBuffer}
Some applications may try to avoid memory allocations whenever possible.
`MemoryPoolAllocator` can support this by letting the user provide a buffer. The buffer can be on the program stack, or a "scratch buffer" which is statically allocated (a static/global array) for storing temporary data.
`MemoryPoolAllocator` will use the user buffer to satisfy allocations. When the user buffer is used up, it will allocate a chunk of memory from the base allocator (by default the `CrtAllocator`).
Here is an example of using stack memory. The first allocator is for storing values, while the second allocator is for storing temporary data during parsing.
~~~~~~~~~~cpp
typedef GenericDocument<UTF8<>, MemoryPoolAllocator<>, MemoryPoolAllocator<>> DocumentType;
char valueBuffer[4096];
char parseBuffer[1024];
MemoryPoolAllocator<> valueAllocator(valueBuffer, sizeof(valueBuffer));
MemoryPoolAllocator<> parseAllocator(parseBuffer, sizeof(parseBuffer));
DocumentType d(&valueAllocator, sizeof(parseBuffer), &parseAllocator);
d.Parse(json);
~~~~~~~~~~
If the total size of allocations is less than 4096+1024 bytes during parsing, this code does not invoke any heap allocation (via `new` or `malloc()`) at all.
Users can query the current memory consumption in bytes via `MemoryPoolAllocator::Size()`, and then determine a suitable size for the user buffer.
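A tiny follow-up sketch, reusing the allocators declared in the example above, to see how much of each buffer was actually consumed:
~~~~~~~~~~cpp
#include <cstdio>

// After d.Parse(json) above:
printf("value allocator used %u bytes, parse allocator used %u bytes\n",
       (unsigned)valueAllocator.Size(), (unsigned)parseAllocator.Size());
~~~~~~~~~~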
# DOM
文档对象模型(Document Object Model, DOM)是一种罝于内存中的 JSON 表示方式,以供查询及操作。我们已于 [教程](doc/tutorial.zh-cn.md) 中介绍了 DOM 的基本用法,本节将讲述一些细节及高级用法。
[TOC]
# 模板 {#Template}
教程中使用了 `Value` 和 `Document` 类型。与 `std::string` 相似,这些类型其实是两个模板类的 `typedef`:
~~~~~~~~~~cpp
namespace rapidjson {
template <typename Encoding, typename Allocator = MemoryPoolAllocator<> >
class GenericValue {
// ...
};
template <typename Encoding, typename Allocator = MemoryPoolAllocator<> >
class GenericDocument : public GenericValue<Encoding, Allocator> {
// ...
};
typedef GenericValue<UTF8<> > Value;
typedef GenericDocument<UTF8<> > Document;
} // namespace rapidjson
~~~~~~~~~~
使用者可以自定义这些模板参数。
## 编码 {#Encoding}
`Encoding` 参数指明在内存中的 JSON String 使用哪种编码。可行的选项有 `UTF8`、`UTF16`、`UTF32`。要注意这 3 个类型其实也是模板类。`UTF8<>` 等同 `UTF8<char>`,这代表它使用 `char` 来存储字符串。更多细节可以参考 [编码](doc/encoding.zh-cn.md)。
这里是一个例子。假设一个 Windows 应用软件希望查询存储于 JSON 中的本地化字符串。Windows 中含 Unicode 的函数使用 UTF-16(宽字符)编码。无论 JSON 文件使用哪种编码,我们都可以把字符串以 UTF-16 形式存储在内存。
~~~~~~~~~~cpp
using namespace rapidjson;
typedef GenericDocument<UTF16<> > WDocument;
typedef GenericValue<UTF16<> > WValue;
FILE* fp = fopen("localization.json", "rb"); // 非 Windows 平台使用 "r"
char readBuffer[256];
FileReadStream bis(fp, readBuffer, sizeof(readBuffer));
AutoUTFInputStream<unsigned, FileReadStream> eis(bis); // 包装 bis 成 eis
WDocument d;
d.ParseStream<0, AutoUTF<unsigned> >(eis);
const WValue locale(L"ja"); // Japanese
MessageBoxW(hWnd, d[locale].GetString(), L"Test", MB_OK);
~~~~~~~~~~
## 分配器 {#Allocator}
`Allocator` 定义当 `Document`/`Value` 分配或释放内存时使用哪个分配类。`Document` 拥有或引用到一个 `Allocator` 实例。而为了节省内存,`Value` 没有这么做。
`GenericDocument` 的缺省分配器是 `MemoryPoolAllocator`。此分配器实际上会顺序地分配内存,并且不能逐一释放。当要解析一个 JSON 并生成 DOM,这种分配器是非常合适的。
RapidJSON 还提供另一个分配器 `CrtAllocator`,当中 CRT 是 C 运行库(C RunTime library)的缩写。此分配器简单地调用标准的 `malloc()`/`realloc()`/`free()`。当我们需要许多增减操作,这种分配器会更为适合。然而这种分配器远远比 `MemoryPoolAllocator` 低效。
# 解析 {#Parsing}
`Document` 提供几个解析函数。以下的 (1) 是根本的函数,其他都是调用 (1) 的协助函数。
~~~~~~~~~~cpp
using namespace rapidjson;
// (1) 根本
template <unsigned parseFlags, typename SourceEncoding, typename InputStream>
GenericDocument& GenericDocument::ParseStream(InputStream& is);
// (2) 使用流的编码
template <unsigned parseFlags, typename InputStream>
GenericDocument& GenericDocument::ParseStream(InputStream& is);
// (3) 使用缺省标志
template <typename InputStream>
GenericDocument& GenericDocument::ParseStream(InputStream& is);
// (4) 原位解析
template <unsigned parseFlags>
GenericDocument& GenericDocument::ParseInsitu(Ch* str);
// (5) 原位解析,使用缺省标志
GenericDocument& GenericDocument::ParseInsitu(Ch* str);
// (6) 正常解析一个字符串
template <unsigned parseFlags, typename SourceEncoding>
GenericDocument& GenericDocument::Parse(const Ch* str);
// (7) 正常解析一个字符串,使用 Document 的编码
template <unsigned parseFlags>
GenericDocument& GenericDocument::Parse(const Ch* str);
// (8) 正常解析一个字符串,使用缺省标志
GenericDocument& GenericDocument::Parse(const Ch* str);
~~~~~~~~~~
[教程](doc/tutorial.zh-cn.md) 中的例子使用 (8) 去正常解析字符串。而 [流](doc/stream.zh-cn.md) 的例子使用前 3 个函数。我们将稍后介绍原位(*In situ*)解析。
`parseFlags` 是以下位标志的组合:
解析位标志 | 意义
------------------------------|-----------------------------------
`kParseNoFlags` | 没有任何标志。
`kParseDefaultFlags` | 缺省的解析选项。它等于 `RAPIDJSON_PARSE_DEFAULT_FLAGS` 宏,此宏定义为 `kParseNoFlags`
`kParseInsituFlag` | 原位(破坏性)解析。
`kParseValidateEncodingFlag` | 校验 JSON 字符串的编码。
`kParseIterativeFlag` | 迭代式(调用堆栈大小为常数复杂度)解析。
`kParseStopWhenDoneFlag` | 当从流解析了一个完整的 JSON 根节点之后,停止继续处理余下的流。当使用了此标志,解析器便不会产生 `kParseErrorDocumentRootNotSingular` 错误。可使用本标志去解析同一个流里的多个 JSON。
`kParseFullPrecisionFlag`     | 使用完整的精确度去解析数字(较慢)。如不设置此标志,则会使用正常的精确度(较快)。正常精确度会有最多 3 个 [ULP](http://en.wikipedia.org/wiki/Unit_in_the_last_place) 的误差。
`kParseCommentsFlag` | 容许单行 `// ...` 及多行 `/* ... */` 注释(放宽的 JSON 语法)。
`kParseNumbersAsStringsFlag` | 把数字类型解析成字符串。
`kParseTrailingCommasFlag` | 容许在对象和数组结束前含有逗号(放宽的 JSON 语法)。
`kParseNanAndInfFlag`         | 容许 `NaN`、`Inf`、`Infinity`、`-Inf` 及 `-Infinity` 作为 `double` 值(放宽的 JSON 语法)。
`kParseEscapedApostropheFlag` | 容许字符串中转义单引号 `\'` (放宽的 JSON 语法)。
由于使用了非类型模板参数,而不是函数参数,C++ 编译器能为个别组合生成代码,以改善性能及减少代码尺寸(当只用单种特化)。缺点是需要在编译期决定标志。
`SourceEncoding` 参数定义流使用了什么编码。这与 `Document` 的 `Encoding` 不相同。细节可参考 [转码和校验](#TranscodingAndValidation) 一节。
此外 `InputStream` 是输入流的类型。
## 解析错误 {#ParseError}
当解析过程顺利完成,`Document` 便会含有解析结果。当过程出现错误,原来的 DOM 会*维持不变*。可使用 `bool HasParseError()`、`ParseErrorCode GetParseError()` 及 `size_t GetErrorOffset()` 获取解析的错误状态。
解析错误代号 | 描述
--------------------------------------------|---------------------------------------------------
`kParseErrorNone` | 无错误。
`kParseErrorDocumentEmpty` | 文档是空的。
`kParseErrorDocumentRootNotSingular` | 文档的根后面不能有其它值。
`kParseErrorValueInvalid` | 不合法的值。
`kParseErrorObjectMissName` | Object 成员缺少名字。
`kParseErrorObjectMissColon` | Object 成员名字后缺少冒号。
`kParseErrorObjectMissCommaOrCurlyBracket` | Object 成员后缺少逗号或 `}`
`kParseErrorArrayMissCommaOrSquareBracket` | Array 元素后缺少逗号或 `]`
`kParseErrorStringUnicodeEscapeInvalidHex` | String 中的 `\\u` 转义符后含非十六进位数字。
`kParseErrorStringUnicodeSurrogateInvalid` | String 中的代理对(surrogate pair)不合法。
`kParseErrorStringEscapeInvalid` | String 含非法转义字符。
`kParseErrorStringMissQuotationMark` | String 缺少关闭引号。
`kParseErrorStringInvalidEncoding` | String 含非法编码。
`kParseErrorNumberTooBig` | Number 的值太大,不能存储于 `double`
`kParseErrorNumberMissFraction` | Number 缺少了小数部分。
`kParseErrorNumberMissExponent` | Number 缺少了指数。
错误的偏移量定义为从流开始至错误处的字符数量。目前 RapidJSON 不记录错误行号。
要取得错误讯息,RapidJSON 在 `rapidjson/error/en.h` 中提供了英文错误讯息。使用者可以修改它用于其他语言环境,或使用一个自定义的本地化系统。
以下是一个处理错误的例子。
~~~~~~~~~~cpp
#include "rapidjson/document.h"
#include "rapidjson/error/en.h"
// ...
Document d;
if (d.Parse(json).HasParseError()) {
fprintf(stderr, "\nError(offset %u): %s\n",
(unsigned)d.GetErrorOffset(),
GetParseError_En(d.GetParseErrorCode()));
// ...
}
~~~~~~~~~~
## 原位解析 {#InSituParsing}
根据 [维基百科](http://en.wikipedia.org/wiki/In_situ):
> *In situ* ... is a Latin phrase that translates literally to "on site" or "in position". It means "locally", "on site", "on the premises" or "in place" to describe an event where it takes place, and is used in many different contexts.
> ...
> (In computer science) An algorithm is said to be an in situ algorithm, or in-place algorithm, if the extra amount of memory required to execute the algorithm is O(1), that is, does not exceed a constant no matter how large the input. For example, heapsort is an in situ sorting algorithm.
> 翻译:*In situ*……是一个拉丁文片语,字面上的意思是指「现场」、「在位置」。在许多不同语境中,它描述一个事件发生的位置,意指「本地」、「现场」、「在处所」、「就位」。
> ……
> (在计算机科学中)一个算法若称为原位算法,或在位算法,是指执行该算法所需的额外内存空间是 O(1) 的,换句话说,无论输入大小都只需要常数空间。例如,堆排序是一个原位排序算法。
在正常的解析过程中,对 JSON string 解码并复制至其他缓冲区是一个很大的开销。原位解析(*in situ* parsing)把这些 JSON string 直接解码于它原来存储的地方。由于解码后的 string 长度总是短于或等于原来储存于 JSON 的 string,所以这是可行的。在这个语境下,对 JSON string 进行解码是指处理转义符,如 `"\n"`、`"\u1234"` 等,以及在 string 末端加入空终止符号(`'\0'`)。
以下的图比较正常及原位解析。JSON string 值包含指向解码后的字符串。
![正常解析](diagram/normalparsing.png)
在正常解析中,解码后的字符串被复制至全新分配的缓冲区中。`"\\n"`(2 个字符)被解码成 `"\n"`(1 个字符)。`"\\u0073"`(6 个字符)被解码成 `"s"`(1 个字符)。
![原位解析](diagram/insituparsing.png)
原位解析直接修改了原来的 JSON。图中高亮了被更新的字符。若 JSON string 不含转义符,例如 `"msg"`,那么解析过程仅仅是以空字符代替结束双引号。
由于原位解析修改了输入,其解析 API 需要 `char*` 而非 `const char*`
~~~~~~~~~~cpp
// 把整个文件读入 buffer
FILE* fp = fopen("test.json", "r");
fseek(fp, 0, SEEK_END);
size_t filesize = (size_t)ftell(fp);
fseek(fp, 0, SEEK_SET);
char* buffer = (char*)malloc(filesize + 1);
size_t readLength = fread(buffer, 1, filesize, fp);
buffer[readLength] = '\0';
fclose(fp);
// 原位解析 buffer 至 d,buffer 内容会被修改。
Document d;
d.ParseInsitu(buffer);
// 在此查询、修改 DOM……
free(buffer);
// 注意:在这个位置,d 可能含有指向已被释放的 buffer 的悬空指针
~~~~~~~~~~
JSON string 会被打上 const-string 的标志。但它们可能并非真正的「常数」。它的生命周期取决于存储 JSON 的缓冲区。
原位解析把分配开销及内存复制减至最小。通常这样做能改善缓存一致性,而这对现代计算机来说是一个重要的性能因素。
原位解析有以下限制:
1. 整个 JSON 须存储在内存之中。
2. 流的来源编码与文档的目标编码必须相同。
3. 需要保留缓冲区,直至文档不再被使用。
4. 若 DOM 需要在解析后被长期使用,而 DOM 内只有很少 JSON string,保留缓冲区可能造成内存浪费。
原位解析最适合用于短期的、用完即弃的 JSON。实际应用中,这些场合是非常普遍的,例如反序列化 JSON 至 C++ 对象、处理以 JSON 表示的 web 请求等。
## 转码与校验 {#TranscodingAndValidation}
RapidJSON 内部支持不同 Unicode 格式(正式的术语是 UCS 变换格式)间的转换。在 DOM 解析时,流的来源编码与 DOM 的编码可以不同。例如,来源流可能含有 UTF-8 的 JSON,而 DOM 则使用 UTF-16 编码。在 [EncodedInputStream](doc/stream.zh-cn.md) 一节里有一个例子。
当从 DOM 输出一个 JSON 至输出流之时,也可以使用转码功能。在 [EncodedOutputStream](doc/stream.zh-cn.md) 一节里有一个例子。
在转码过程中,会把来源 string 解码成 Unicode 码点,然后把码点编码成目标格式。在解码时,它会校验来源 string 的字节序列是否合法。若遇上非合法序列,解析器会停止并返回 `kParseErrorStringInvalidEncoding` 错误。
当来源编码与 DOM 的编码相同,解析器缺省地*不会*校验序列。使用者可开启 `kParseValidateEncodingFlag` 去强制校验。
# 技巧 {#Techniques}
这里讨论一些 DOM API 的使用技巧。
## 把 DOM 作为 SAX 事件发表者
在 RapidJSON 中,利用 `Writer` 把 DOM 生成 JSON 的做法,看来有点奇怪。
~~~~~~~~~~cpp
// ...
Writer<StringBuffer> writer(buffer);
d.Accept(writer);
~~~~~~~~~~
实际上,`Value::Accept()` 是负责发布该值相关的 SAX 事件至处理器的。通过这个设计,`Value` 与 `Writer` 解除了耦合。`Value` 可生成 SAX 事件,而 `Writer` 则可以处理这些事件。
使用者可以创建自定义的处理器,去把 DOM 转换成其它格式。例如,一个把 DOM 转换成 XML 的处理器。
要知道更多关于 SAX 事件与处理器,可参阅 [SAX](doc/sax.zh-cn.md)
## 使用者缓冲区 {#UserBuffer}
许多应用软件可能需要尽量减少内存分配。
`MemoryPoolAllocator` 可以帮助这方面,它容许使用者提供一个缓冲区。该缓冲区可能置于程序堆栈,或是一个静态分配的「草稿缓冲区(scratch buffer)」(一个静态/全局的数组),用于储存临时数据。
`MemoryPoolAllocator` 会先用使用者缓冲区去解决分配请求。当使用者缓冲区用完,就会从基础分配器(缺省为 `CrtAllocator`)分配一块内存。
以下是使用堆栈内存的例子,第一个分配器用于存储值,第二个用于解析时的临时缓冲。
~~~~~~~~~~cpp
typedef GenericDocument<UTF8<>, MemoryPoolAllocator<>, MemoryPoolAllocator<>> DocumentType;
char valueBuffer[4096];
char parseBuffer[1024];
MemoryPoolAllocator<> valueAllocator(valueBuffer, sizeof(valueBuffer));
MemoryPoolAllocator<> parseAllocator(parseBuffer, sizeof(parseBuffer));
DocumentType d(&valueAllocator, sizeof(parseBuffer), &parseAllocator);
d.Parse(json);
~~~~~~~~~~
若解析时分配总量少于 4096+1024 字节时,这段代码不会造成任何堆内存分配(经 `new` 或 `malloc()`)。
使用者可以通过 `MemoryPoolAllocator::Size()` 查询当前已分配的内存大小。那么使用者可以拟定使用者缓冲区的合适大小。
# Encoding
According to [ECMA-404](http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf),
> (in Introduction) JSON text is a sequence of Unicode code points.
The earlier [RFC4627](http://www.ietf.org/rfc/rfc4627.txt) stated that,
> (in §3) JSON text SHALL be encoded in Unicode. The default encoding is UTF-8.
> (in §6) JSON may be represented using UTF-8, UTF-16, or UTF-32. When JSON is written in UTF-8, JSON is 8bit compatible. When JSON is written in UTF-16 or UTF-32, the binary content-transfer-encoding must be used.
RapidJSON supports various encodings. It can also validate the encodings of JSON, and transcode JSON among encodings. All these features are implemented internally, without the need for external libraries (e.g. [ICU](http://site.icu-project.org/)).
[TOC]
# Unicode {#Unicode}
From [Unicode's official website](http://www.unicode.org/standard/WhatIsUnicode.html):
> Unicode provides a unique number for every character,
> no matter what the platform,
> no matter what the program,
> no matter what the language.
Those unique numbers are called code points, which are in the range `0x0` to `0x10FFFF`.
## Unicode Transformation Format {#UTF}
There are various encodings for storing Unicode code points. These are called Unicode Transformation Format (UTF). RapidJSON supports the most commonly used UTFs, including
* UTF-8: 8-bit variable-width encoding. It maps a code point to 1–4 bytes.
* UTF-16: 16-bit variable-width encoding. It maps a code point to 1–2 16-bit code units (i.e., 2–4 bytes).
* UTF-32: 32-bit fixed-width encoding. It directly maps a code point to a single 32-bit code unit (i.e. 4 bytes).
For UTF-16 and UTF-32, the byte order (endianness) does matter. Within computer memory, they are often stored in the computer's endianness. However, when they are stored in a file or transferred over a network, we need to state the byte order of the byte sequence, either little-endian (LE) or big-endian (BE).
RapidJSON provides these encodings via the structs in `rapidjson/encodings.h`:
~~~~~~~~~~cpp
namespace rapidjson {
template<typename CharType = char>
struct UTF8;
template<typename CharType = wchar_t>
struct UTF16;
template<typename CharType = wchar_t>
struct UTF16LE;
template<typename CharType = wchar_t>
struct UTF16BE;
template<typename CharType = unsigned>
struct UTF32;
template<typename CharType = unsigned>
struct UTF32LE;
template<typename CharType = unsigned>
struct UTF32BE;
} // namespace rapidjson
~~~~~~~~~~
For processing text in memory, we normally use `UTF8`, `UTF16` or `UTF32`. For processing text via I/O, we may use `UTF8`, `UTF16LE`, `UTF16BE`, `UTF32LE` or `UTF32BE`.
When using the DOM-style API, the `Encoding` template parameter in `GenericValue<Encoding>` and `GenericDocument<Encoding>` indicates the encoding to be used to represent JSON string in memory. So normally we will use `UTF8`, `UTF16` or `UTF32` for this template parameter. The choice depends on operating systems and other libraries that the application is using. For example, Windows API represents Unicode characters in UTF-16, while most Linux distributions and applications prefer UTF-8.
Example of UTF-16 DOM declaration:
~~~~~~~~~~cpp
typedef GenericDocument<UTF16<> > WDocument;
typedef GenericValue<UTF16<> > WValue;
~~~~~~~~~~
For a detailed example, please check the example in the [DOM's Encoding](doc/stream.md) section.
## Character Type {#CharacterType}
As shown in the declaration, each encoding has a `CharType` template parameter. This may be a little bit confusing, but each `CharType` stores a code unit, not a character (code point). As mentioned in the previous section, a code point may be encoded into 1–4 code units in UTF-8.
For `UTF16(LE|BE)` and `UTF32(LE|BE)`, the `CharType` must be an integer type of at least 2 and 4 bytes, respectively.
Note that C++11 introduces `char16_t` and `char32_t`, which can be used for `UTF16` and `UTF32` respectively.
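For instance, a UTF-16 document whose code units are stored in C++11 `char16_t` could be declared as follows (the typedef names are only for illustration):
~~~~~~~~~~cpp
#include "rapidjson/document.h"

using namespace rapidjson;

// Store UTF-16 strings in char16_t code units instead of the default wchar_t.
typedef GenericDocument<UTF16<char16_t> > U16Document;
typedef GenericValue<UTF16<char16_t> >    U16Value;
~~~~~~~~~~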
## AutoUTF {#AutoUTF}
The previous encodings are statically bound at compile time. In other words, the user must know exactly which encodings will be used in the memory or streams. However, sometimes we may need to read/write files of different encodings, where the encoding needs to be decided at runtime.
`AutoUTF` is an encoding designed for this purpose. It chooses which encoding to use according to the input or output stream. Currently, it should be used with `EncodedInputStream` and `EncodedOutputStream`.
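A minimal sketch of reading a file whose UTF encoding is detected at runtime (mirroring the localization example in the DOM documentation; `data.json` is a placeholder file name):
~~~~~~~~~~cpp
#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/filereadstream.h"
#include "rapidjson/encodedstream.h"   // AutoUTFInputStream

using namespace rapidjson;

FILE* fp = fopen("data.json", "rb");
char readBuffer[256];
FileReadStream bis(fp, readBuffer, sizeof(readBuffer));
AutoUTFInputStream<unsigned, FileReadStream> eis(bis); // detects the UTF type (with or without BOM)

Document d;                                 // UTF-8 in memory
d.ParseStream<0, AutoUTF<unsigned> >(eis);  // transcodes from the detected encoding
fclose(fp);
~~~~~~~~~~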
## ASCII {#ASCII}
Although the JSON standards do not mention [ASCII](http://en.wikipedia.org/wiki/ASCII), sometimes we would like to write 7-bit ASCII JSON for applications that cannot handle UTF-8. Since any JSON can represent Unicode characters as escaped sequences `\uXXXX`, JSON can always be encoded in ASCII.
Here is an example for writing a UTF-8 DOM into ASCII:
~~~~~~~~~~cpp
using namespace rapidjson;
Document d; // UTF8<>
// ...
StringBuffer buffer;
Writer<StringBuffer, Document::EncodingType, ASCII<> > writer(buffer);
d.Accept(writer);
std::cout << buffer.GetString();
~~~~~~~~~~
ASCII can be used in an input stream. If the input stream contains bytes with values above 127, it will cause a `kParseErrorStringInvalidEncoding` error.
ASCII *cannot* be used in memory (encoding of `Document` or target encoding of `Reader`), as it cannot represent Unicode code points.
# Validation & Transcoding {#ValidationTranscoding}
When RapidJSON parses a JSON, it can validate the input JSON, i.e. check whether it is a valid sequence of the specified encoding. This option can be turned on by adding `kParseValidateEncodingFlag` to the `parseFlags` template parameter.
If the input encoding and output encoding are different, `Reader` and `Writer` will automatically transcode (convert) the text. In this case, `kParseValidateEncodingFlag` is not necessary, as the parser must decode the input sequence anyway. And if the sequence cannot be decoded, it must be invalid.
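For example, validation of a UTF-8 input whose encoding matches the DOM can be forced like this (a minimal sketch; `json` stands for input received from an untrusted source):
~~~~~~~~~~cpp
#include "rapidjson/document.h"

using namespace rapidjson;

const char* json = "...";   // bytes received from an untrusted source

Document d;
d.Parse<kParseValidateEncodingFlag>(json);
if (d.HasParseError() && d.GetParseError() == kParseErrorStringInvalidEncoding) {
    // the input contained an invalid UTF-8 byte sequence
}
~~~~~~~~~~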
## Transcoder {#Transcoder}
Although the encoding functions in RapidJSON are designed for JSON parsing/generation, users may also abuse them for transcoding non-JSON strings.
Here is an example for transcoding a string from UTF-8 to UTF-16:
~~~~~~~~~~cpp
#include "rapidjson/encodings.h"
using namespace rapidjson;
const char* s = "..."; // UTF-8 string
StringStream source(s);
GenericStringBuffer<UTF16<> > target;
bool hasError = false;
while (source.Peek() != '\0')
if (!Transcoder<UTF8<>, UTF16<> >::Transcode(source, target)) {
hasError = true;
break;
}
if (!hasError) {
const wchar_t* t = target.GetString();
// ...
}
~~~~~~~~~~
You may also use `AutoUTF` and the associated streams for setting source/target encoding in runtime.
# 编码
根据 [ECMA-404](http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf)
> (in Introduction) JSON text is a sequence of Unicode code points.
>
> 翻译:JSON 文本是 Unicode 码点的序列。
较早的 [RFC4627](http://www.ietf.org/rfc/rfc4627.txt) 申明:
> (in §3) JSON text SHALL be encoded in Unicode. The default encoding is UTF-8.
>
> 翻译:JSON 文本应该以 Unicode 编码。缺省的编码为 UTF-8。
> (in §6) JSON may be represented using UTF-8, UTF-16, or UTF-32. When JSON is written in UTF-8, JSON is 8bit compatible. When JSON is written in UTF-16 or UTF-32, the binary content-transfer-encoding must be used.
>
> 翻译:JSON 可使用 UTF-8、UTF-16 或 UTF-32 表示。当 JSON 以 UTF-8 写入,该 JSON 是 8 位兼容的。当 JSON 以 UTF-16 或 UTF-32 写入,就必须使用二进制的内容传送编码。
RapidJSON 支持多种编码。它也能检查 JSON 的编码,以及在不同编码中进行转码。所有这些功能都是在内部实现,无需使用外部的程序库(如 [ICU](http://site.icu-project.org/))。
[TOC]
# Unicode {#Unicode}
根据 [Unicode 的官方网站](http://www.unicode.org/standard/translations/t-chinese.html)
>Unicode 给每个字符提供了一个唯一的数字,
不论是什么平台、
不论是什么程序、
不论是什么语言。
这些唯一数字称为码点(code point),其范围介乎 `0x0` 至 `0x10FFFF` 之间。
## Unicode 转换格式 {#UTF}
存储 Unicode 码点有多种编码方式。这些称为 Unicode 转换格式(Unicode Transformation Format, UTF)。RapidJSON 支持最常用的 UTF,包括:
* UTF-8:8 位可变长度编码。它把一个码点映射至 1 至 4 个字节。
* UTF-16:16 位可变长度编码。它把一个码点映射至 1 至 2 个 16 位编码单元(即 2 至 4 个字节)。
* UTF-32:32 位固定长度编码。它直接把码点映射至单个 32 位编码单元(即 4 字节)。
对于 UTF-16 及 UTF-32 来说,字节序(endianness)是有影响的。在内存中,它们通常都是以该计算机的字节序来存储。然而,当要储存在文件中或在网上传输,我们需要指明字节序列的字节序,是小端(little endian, LE)还是大端(big-endian, BE)。
RapidJSON 通过 `rapidjson/encodings.h` 中的 struct 去提供各种编码:
~~~~~~~~~~cpp
namespace rapidjson {
template<typename CharType = char>
struct UTF8;
template<typename CharType = wchar_t>
struct UTF16;
template<typename CharType = wchar_t>
struct UTF16LE;
template<typename CharType = wchar_t>
struct UTF16BE;
template<typename CharType = unsigned>
struct UTF32;
template<typename CharType = unsigned>
struct UTF32LE;
template<typename CharType = unsigned>
struct UTF32BE;
} // namespace rapidjson
~~~~~~~~~~
对于在内存中的文本,我们正常会使用 `UTF8`、`UTF16` 或 `UTF32`。对于处理经过 I/O 的文本,我们可使用 `UTF8`、`UTF16LE`、`UTF16BE`、`UTF32LE` 或 `UTF32BE`。
当使用 DOM 风格的 API,`GenericValue<Encoding>` 及 `GenericDocument<Encoding>` 里的 `Encoding` 模板参数是用于指明内存中存储的 JSON 字符串使用哪种编码。因此通常我们会在此参数中使用 `UTF8`、`UTF16` 或 `UTF32`。如何选择,视乎应用软件所使用的操作系统及其他程序库。例如,Windows API 使用 UTF-16 表示 Unicode 字符,而多数的 Linux 发行版本及应用软件则更喜欢 UTF-8。
使用 UTF-16 的 DOM 声明例子:
~~~~~~~~~~cpp
typedef GenericDocument<UTF16<> > WDocument;
typedef GenericValue<UTF16<> > WValue;
~~~~~~~~~~
可以在 [DOM's Encoding](doc/stream.zh-cn.md) 一节看到更详细的使用例子。
## 字符类型 {#CharacterType}
从之前的声明中可以看到,每个编码都有一个 `CharType` 模板参数。这可能比较容易混淆,实际上,每个 `CharType` 存储一个编码单元,而不是一个字符(码点)。如之前所谈及,在 UTF-8 中一个码点可能会编码成 1 至 4 个编码单元。
对于 `UTF16(LE|BE)` 及 `UTF32(LE|BE)` 来说,`CharType` 必须分别是一个至少 2 及 4 字节的整数类型。
注意 C++11 新添了 `char16_t` 及 `char32_t` 类型,也可分别用于 `UTF16` 及 `UTF32`。
## AutoUTF {#AutoUTF}
上述所介绍的编码都是在编译期静态绑定的。换句话说,使用者必须知道内存或流之中使用了哪种编码。然而,有时候我们可能需要读写不同编码的文件,而且这些编码需要在运行时才能决定。
`AutoUTF` 是为此而设计的编码。它根据输入或输出流来选择使用哪种编码。目前它应该与 `EncodedInputStream` 及 `EncodedOutputStream` 结合使用。
## ASCII {#ASCII}
虽然 JSON 标准并未提及 [ASCII](http://en.wikipedia.org/wiki/ASCII),有时候我们希望写入 7 位的 ASCII JSON,以供未能处理 UTF-8 的应用程序使用。由于任何 JSON 都可以把 Unicode 字符表示为 `\uXXXX` 转义序列,JSON 总是可用 ASCII 来编码。
以下的例子把 UTF-8 的 DOM 写成 ASCII 的 JSON:
~~~~~~~~~~cpp
using namespace rapidjson;
Document d; // UTF8<>
// ...
StringBuffer buffer;
Writer<StringBuffer, Document::EncodingType, ASCII<> > writer(buffer);
d.Accept(writer);
std::cout << buffer.GetString();
~~~~~~~~~~
ASCII 可用于输入流。当输入流包含大于 127 的字节,就会导致 `kParseErrorStringInvalidEncoding` 错误。
ASCII *不能* 用于内存(`Document` 的编码,或 `Reader` 的目标编码),因为它不能表示 Unicode 码点。
# 校验及转码 {#ValidationTranscoding}
当 RapidJSON 解析一个 JSON 时,它能校验输入 JSON,判断它是否所标明编码的合法序列。要开启此选项,请把 `kParseValidateEncodingFlag` 加入 `parseFlags` 模板参数。
若输入编码和输出编码并不相同,`Reader` 及 `Writer` 会自动把文本转码。在这种情况下,并不需要 `kParseValidateEncodingFlag`,因为它必须解码输入序列。若序列不能被解码,它必然是不合法的。
## 转码器 {#Transcoder}
虽然 RapidJSON 的编码功能是为 JSON 解析/生成而设计,使用者也可以“滥用”它们来为非 JSON 字符串转码。
以下的例子把 UTF-8 字符串转码成 UTF-16:
~~~~~~~~~~cpp
#include "rapidjson/encodings.h"
using namespace rapidjson;
const char* s = "..."; // UTF-8 string
StringStream source(s);
GenericStringBuffer<UTF16<> > target;
bool hasError = false;
while (source.Peek() != '\0')
if (!Transcoder<UTF8<>, UTF16<> >::Transcode(source, target)) {
hasError = true;
break;
}
if (!hasError) {
const wchar_t* t = target.GetString();
// ...
}
~~~~~~~~~~
你也可以用 `AutoUTF` 及对应的流来在运行时设置来源/目的之编码。
# FAQ
[TOC]
## General
1. What is RapidJSON?
RapidJSON is a C++ library for parsing and generating JSON. You may check all [features](doc/features.md) of it.
2. Why is RapidJSON named so?
It is inspired by [RapidXML](http://rapidxml.sourceforge.net/), which is a fast XML DOM parser.
3. Is RapidJSON similar to RapidXML?
RapidJSON borrowed some designs from RapidXML, including *in situ* parsing and being a header-only library. But the two APIs are completely different. Also, RapidJSON provides many features that are not in RapidXML.
4. Is RapidJSON free?
Yes, it is free under MIT license. It can be used in commercial applications. Please check the details in [license.txt](https://github.com/Tencent/rapidjson/blob/master/license.txt).
5. Is RapidJSON small? What are its dependencies?
Yes. A simple executable which parses a JSON and prints its statistics is less than 30KB on Windows.
RapidJSON depends on C++ standard library only.
6. How to install RapidJSON?
Check [Installation section](https://miloyip.github.io/rapidjson/).
7. Can RapidJSON run on my platform?
RapidJSON has been tested in many combinations of operating systems, compilers and CPU architectures by the community. But we cannot ensure that it can run on your particular platform. Building and running the unit test suite will give you the answer.
8. Does RapidJSON support C++03? C++11?
RapidJSON was first implemented for C++03. Later it added optional support for some C++11 features (e.g., move constructor, `noexcept`). RapidJSON shall be compatible with C++03 or C++11 compliant compilers.
9. Does RapidJSON really work in real applications?
Yes. It is deployed in real client and server applications. A community member reported that RapidJSON in their system parses 50 million JSONs daily.
10. How is RapidJSON tested?
RapidJSON contains a unit test suite for automatic testing. [Travis](https://travis-ci.org/Tencent/rapidjson/) (for Linux) and [AppVeyor](https://ci.appveyor.com/project/Tencent/rapidjson/) (for Windows) compile and run the unit test suite for all modifications. The test process also uses Valgrind (on Linux) to detect memory leaks.
11. Is RapidJSON well documented?
RapidJSON provides a user guide and API documentation.
12. Are there alternatives?
Yes, there are a lot of alternatives. For example, [nativejson-benchmark](https://github.com/miloyip/nativejson-benchmark) has a listing of open-source C/C++ JSON libraries. [json.org](http://www.json.org/) also has a list.
## JSON
1. What is JSON?
JSON (JavaScript Object Notation) is a lightweight data-interchange format. It uses a human-readable text format. More details of JSON can be found in [RFC7159](http://www.ietf.org/rfc/rfc7159.txt) and [ECMA-404](http://www.ecma-international.org/publications/standards/Ecma-404.htm).
2. What are applications of JSON?
JSON is commonly used in web applications for transferring structured data. It is also used as a file format for data persistence.
3. Does RapidJSON conform to the JSON standard?
Yes. RapidJSON is fully compliant with [RFC7159](http://www.ietf.org/rfc/rfc7159.txt) and [ECMA-404](http://www.ecma-international.org/publications/standards/Ecma-404.htm). It can handle corner cases, such as supporting null characters and surrogate pairs in JSON strings.
4. Does RapidJSON support relaxed syntax?
Currently no. RapidJSON only supports the strict standardized format. Support for relaxed syntax is under discussion in this [issue](https://github.com/Tencent/rapidjson/issues/36).
## DOM and SAX
1. What is DOM style API?
Document Object Model (DOM) is an in-memory representation of JSON for query and manipulation.
2. What is SAX style API?
SAX is an event-driven API for parsing and generation.
3. Should I choose DOM or SAX?
DOM is easy for query and manipulation. SAX is very fast and memory-saving, but often more difficult to apply.
4. What is *in situ* parsing?
*In situ* parsing decodes the JSON strings directly inside the input JSON buffer. This is an optimization which can reduce memory consumption and improve performance, but the input JSON will be modified. Check [in-situ parsing](doc/dom.md) for details.
5. When does parsing generate an error?
The parser generates an error when the input JSON contains invalid syntax, or a value cannot be represented (e.g. a number is too big), or the parser's handler terminates the parsing. Check [parse error](doc/dom.md) for details.
6. What error information is provided?
The error is stored in `ParseResult`, which includes the error code and offset (number of characters from the beginning of the JSON). The error code can be translated into a human-readable error message.
7. Why not just using `double` to represent JSON number?
Some applications use 64-bit unsigned/signed integers, which cannot always be converted to `double` without loss of precision. So the parser detects whether a JSON number is convertible to the various integer types and/or `double`.
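A sketch of what this looks like on the user side, using the type queries exposed by the DOM API (the JSON content is made up for illustration):
~~~~~~~~~~cpp
#include <cstdio>
#include "rapidjson/document.h"
using namespace rapidjson;

void ShowNumberTypes() {
    Document d;
    d.Parse("{\"big\":18446744073709551615,\"pi\":3.1416}");

    if (d["big"].IsUint64())   // too large to hold in a double without precision loss
        printf("big = %llu\n", (unsigned long long)d["big"].GetUint64());

    if (d["pi"].IsDouble())    // has a fraction, so only double applies
        printf("pi = %g\n", d["pi"].GetDouble());
}
~~~~~~~~~~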
8. How to clear-and-minimize a document or value?
Call one of the `SetXXX()` methods - they invoke the destructor, which deallocates the DOM data:
~~~~~~~~~~cpp
Document d;
...
d.SetObject(); // clear and minimize
~~~~~~~~~~
Alternatively, use the equivalent of the [C++ swap-with-temporary idiom](https://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Clear-and-minimize):
~~~~~~~~~~cpp
Value(kObjectType).Swap(d);
~~~~~~~~~~
or equivalent, but slightly longer to type:
~~~~~~~~~~cpp
d.Swap(Value(kObjectType).Move());
~~~~~~~~~~
9. How to insert a document node into another document?
Let's take the following two DOM trees represented as JSON documents:
~~~~~~~~~~cpp
Document person;
person.Parse("{\"person\":{\"name\":{\"first\":\"Adam\",\"last\":\"Thomas\"}}}");
Document address;
address.Parse("{\"address\":{\"city\":\"Moscow\",\"street\":\"Quiet\"}}");
~~~~~~~~~~
Let's assume we want to merge them in such a way that the whole `address` document becomes a node of `person`:
~~~~~~~~~~js
{ "person": {
"name": { "first": "Adam", "last": "Thomas" },
"address": { "city": "Moscow", "street": "Quiet" }
}
}
~~~~~~~~~~
The most important requirement is to take care of the document and value life cycles, and to keep memory management consistent by using the right allocator during the value transfer.
The simplest and most efficient way to achieve that is to modify the `address` definition above so that it is initialized with the allocator of the `person` document; then we just add the root member of the value:
~~~~~~~~~~cpp
Document address(&person.GetAllocator());
...
person["person"].AddMember("address", address["address"], person.GetAllocator());
~~~~~~~~~~
Alternatively, if we don't want to refer to the root value of `address` explicitly by name, we can refer to it via an iterator:
~~~~~~~~~~cpp
auto addressRoot = address.MemberBegin();
person["person"].AddMember(addressRoot->name, addressRoot->value, person.GetAllocator());
~~~~~~~~~~
A second way is to deep-clone the value from the `address` document:
~~~~~~~~~~cpp
Value addressValue = Value(address["address"], person.GetAllocator());
person["person"].AddMember("address", addressValue, person.GetAllocator());
~~~~~~~~~~
## Document/Value (DOM)
1. What is move semantics? Why?
Instead of copy semantics, move semantics is used in `Value`. That means that, when assigning a source value to a target value, the ownership of the source value is moved to the target value.
Since moving is faster than copying, this design decision forces the user to be aware of the copying overhead.
2. How to copy a value?
There are two APIs: a constructor taking an allocator, and `CopyFrom()`. See [Deep Copy Value](doc/tutorial.md) for an example.
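A minimal sketch of both forms (the JSON content here is made up for illustration):
~~~~~~~~~~cpp
#include "rapidjson/document.h"
using namespace rapidjson;

void CopyValues() {
    Document d;
    d.Parse("{\"config\":{\"retries\":3}}");

    // Deep copy via the constructor taking an allocator.
    Value a(d["config"], d.GetAllocator());

    // Deep copy via CopyFrom().
    Value b;
    b.CopyFrom(d["config"], d.GetAllocator());
}
~~~~~~~~~~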
3. Why do I need to provide the length of a string?
Since a C string is null-terminated, its length must be computed via `strlen()`, which has linear run-time complexity. This incurs unnecessary overhead in many operations if the user already knows the length of the string.
Also, RapidJSON can handle `\u0000` (the null character) within a string. If a string contains null characters, `strlen()` cannot return its true length. In such cases the user must provide the length of the string explicitly.
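A sketch of passing an explicit length so that an embedded null character survives:
~~~~~~~~~~cpp
#include "rapidjson/document.h"
using namespace rapidjson;

void StoreStringWithNull() {
    Document d(kObjectType);
    const char buf[] = "Hello\0World";          // 11 characters, one embedded '\0'
    Value s;
    s.SetString(buf, 11, d.GetAllocator());     // explicit length; the string is copied
    d.AddMember("msg", s, d.GetAllocator());
    // d["msg"].GetStringLength() == 11, whereas strlen(buf) would report 5
}
~~~~~~~~~~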
4. Why do I need to provide an allocator parameter in many DOM manipulation APIs?
Since the APIs are member functions of `Value`, we do not want to save an allocator pointer in every `Value`.
5. Does it convert between numerical types?
When using `GetInt()`, `GetUint()`, etc., conversion may occur. For integer-to-integer conversion, it only converts when it is safe to do so (otherwise it asserts). However, when converting a 64-bit signed/unsigned integer to `double`, the conversion is performed but may lose precision. A number with a fraction, or an integer larger than 64 bits, can only be obtained by `GetDouble()`.
## Reader/Writer (SAX)
1. Why don't we just `printf` a JSON? Why do we need a `Writer`?
Most importantly, `Writer` will ensure the output JSON is well-formed. Calling SAX events incorrectly (e.g. `StartObject()` paired with `EndArray()`) will assert. Besides, `Writer` escapes strings (e.g., `\n`). Finally, the numeric output of `printf()` may not be a valid JSON number, especially in locales with digit delimiters. The number-to-string conversion in `Writer` is implemented with very fast algorithms that outperform `printf()` and `iostream`.
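A minimal sketch of the SAX-style `Writer` producing a small object:
~~~~~~~~~~cpp
#include "rapidjson/stringbuffer.h"
#include "rapidjson/writer.h"
using namespace rapidjson;

void WriteJson() {
    StringBuffer sb;
    Writer<StringBuffer> writer(sb);
    writer.StartObject();
    writer.Key("text");
    writer.String("line1\nline2");   // escaped to \n in the output
    writer.Key("pi");
    writer.Double(3.1416);           // always a valid, locale-independent JSON number
    writer.EndObject();              // mismatched events would assert instead
    // sb.GetString() now holds {"text":"line1\nline2","pi":3.1416}
}
~~~~~~~~~~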
2. Can I pause the parsing process and resume it later?
This is not directly supported in the current version for performance reasons. However, if the execution environment supports multi-threading, the user can parse a JSON in a separate thread and pause it by blocking in the input stream.
## Unicode
1. Does it support UTF-8, UTF-16 and other formats?
Yes. It fully supports UTF-8, UTF-16 (LE/BE), UTF-32 (LE/BE) and ASCII.
2. Can it validate the encoding?
Yes, just pass `kParseValidateEncodingFlag` to `Parse()`. If there is invalid encoding in the stream, it will generate a `kParseErrorStringInvalidEncoding` error.
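A sketch of enabling validation (the `json` pointer stands in for any input buffer):
~~~~~~~~~~cpp
#include "rapidjson/document.h"
using namespace rapidjson;

bool ParseValidated(const char* json) {
    Document d;
    d.Parse<kParseValidateEncodingFlag>(json);
    if (d.HasParseError() &&
        d.GetParseError() == kParseErrorStringInvalidEncoding)
        return false;                // the input contained an invalid byte sequence
    return !d.HasParseError();
}
~~~~~~~~~~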
3. What is a surrogate pair? Does RapidJSON support it?
JSON uses UTF-16 code units when escaping Unicode characters, e.g. `\u5927` represents the Chinese character "big". To handle characters outside the basic multilingual plane (BMP), UTF-16 encodes them with two 16-bit values, which is called a UTF-16 surrogate pair. For example, the emoji character U+1F602 can be encoded as `\uD83D\uDE02` in JSON.
RapidJSON fully supports parsing and generating UTF-16 surrogates.
4. Can it handle `\u0000` (null character) in a JSON string?
Yes. RapidJSON fully supports null characters in JSON strings. However, the user needs to be aware of this and use `GetStringLength()` and related APIs to obtain the true length of a string.
5. Can I output `\uxxxx` for all non-ASCII characters?
Yes. Using `ASCII<>` as the output encoding template parameter of `Writer` enforces escaping of those characters.
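A sketch of selecting `ASCII<>` as the target encoding of a `Writer`:
~~~~~~~~~~cpp
#include "rapidjson/stringbuffer.h"
#include "rapidjson/writer.h"
using namespace rapidjson;

void WriteAsciiOnly() {
    StringBuffer sb;
    // Source encoding UTF-8, target encoding ASCII: non-ASCII characters are escaped.
    Writer<StringBuffer, UTF8<>, ASCII<> > writer(sb);
    writer.String("\xE5\xA4\xA7");   // UTF-8 bytes of U+5927
    // sb.GetString() == "\"\\u5927\""
}
~~~~~~~~~~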
## Stream
1. I have a big JSON file. Should I load the whole file into memory?
You can use `FileReadStream` to read the file chunk by chunk. But for *in situ* parsing, the whole file must be loaded.
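A minimal sketch of chunked reading with `FileReadStream` (the file name is a placeholder):
~~~~~~~~~~cpp
#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/filereadstream.h"
using namespace rapidjson;

void ParseBigFile() {
    FILE* fp = fopen("big.json", "rb");            // placeholder path
    if (!fp) return;
    char buffer[65536];                            // the file is read in chunks of this size
    FileReadStream is(fp, buffer, sizeof(buffer));

    Document d;
    d.ParseStream(is);
    fclose(fp);
}
~~~~~~~~~~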
2. Can I parse JSON while it is streamed from the network?
Yes. You can implement a custom stream for this; refer to the implementation of `FileReadStream`.
3. I don't know what encoding the JSON will be in. How can I handle it?
You may use `AutoUTFInputStream`, which detects the encoding of the input stream automatically. However, it incurs some performance overhead.
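A sketch of wrapping a byte stream in `AutoUTFInputStream` (the file name is a placeholder):
~~~~~~~~~~cpp
#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/encodedstream.h"    // AutoUTFInputStream
#include "rapidjson/filereadstream.h"
using namespace rapidjson;

void ParseUnknownEncoding() {
    FILE* fp = fopen("unknown.json", "rb");        // placeholder path
    if (!fp) return;
    char buffer[256];
    FileReadStream bis(fp, buffer, sizeof(buffer));
    AutoUTFInputStream<unsigned, FileReadStream> eis(bis);  // detects UTF-8/16/32 and BOM

    Document d;                                    // in-memory representation stays UTF-8
    d.ParseStream<0, AutoUTF<unsigned> >(eis);
    fclose(fp);
}
~~~~~~~~~~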
4. What is a BOM? How does RapidJSON handle it?
A [byte order mark (BOM)](http://en.wikipedia.org/wiki/Byte_order_mark) sometimes resides at the beginning of a file/stream to indicate its UTF encoding type.
RapidJSON's `EncodedInputStream` can detect/consume BOM. `EncodedOutputStream` can optionally write a BOM. See [Encoded Streams](doc/stream.md) for example.
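For a known encoding, here is a sketch using `EncodedInputStream`, which consumes a leading BOM if present (the file name is a placeholder):
~~~~~~~~~~cpp
#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/encodedstream.h"    // EncodedInputStream
#include "rapidjson/filereadstream.h"
using namespace rapidjson;

void ParseUtf16Le() {
    FILE* fp = fopen("utf16le.json", "rb");        // placeholder path
    if (!fp) return;
    char buffer[256];
    FileReadStream bis(fp, buffer, sizeof(buffer));
    EncodedInputStream<UTF16LE<>, FileReadStream> eis(bis);  // skips the UTF-16LE BOM, if any

    Document d;                                    // transcoded to UTF-8 in memory
    d.ParseStream<0, UTF16LE<> >(eis);
    fclose(fp);
}
~~~~~~~~~~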
5. Why is endianness relevant?
The endianness of a stream is an issue for UTF-16 and UTF-32 streams, but not for UTF-8 streams.
## Performance
1. Is RapidJSON really fast?
Yes. It may be the fastest open source JSON library. There is a [benchmark](https://github.com/miloyip/nativejson-benchmark) for evaluating performance of C/C++ JSON libraries.
2. Why is it fast?
Many design decisions of RapidJSON are aimed at time/space performance, which may reduce the user-friendliness of the APIs. It also employs low-level optimizations (intrinsics, SIMD) and special algorithms (custom double-to-string and string-to-double conversions).
3. What is SIMD? How is it applied in RapidJSON?
[SIMD](http://en.wikipedia.org/wiki/SIMD) instructions can perform parallel computation on modern CPUs. RapidJSON supports Intel's SSE2/SSE4.2 and ARM's Neon to accelerate skipping of whitespace, tabs, carriage returns and line feeds. This improves the performance of parsing indented JSON. Define the `RAPIDJSON_SSE2`, `RAPIDJSON_SSE42` or `RAPIDJSON_NEON` macro to enable this feature. However, running such an executable on a machine without the corresponding instruction set will make it crash.
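A sketch of turning the feature on; the macro must be visible before any RapidJSON header is included (or be passed on the compiler command line):
~~~~~~~~~~cpp
// Enable SSE4.2-accelerated whitespace skipping, e.g. compile with -msse4.2.
#define RAPIDJSON_SSE42
#include "rapidjson/document.h"
~~~~~~~~~~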
4. Does it consume a lot of memory?
The design of RapidJSON aims at reducing memory footprint.
In the SAX API, `Reader` consumes memory proportional to the maximum depth of the JSON tree plus the maximum length of a JSON string.
In the DOM API, each `Value` consumes exactly 16/24 bytes on 32/64-bit architectures respectively. RapidJSON also uses a special memory allocator to minimize the overhead of allocations.
5. What is the purpose of being high performance?
Some applications need to process very large JSON files. Some server-side applications need to process huge amounts of JSON. Being high performance improves both latency and throughput. In a broad sense, it also saves energy.
## Gossip
1. Who are the developers of RapidJSON?
Milo Yip ([miloyip](https://github.com/miloyip)) is the original author of RapidJSON. Many contributors from around the world have improved RapidJSON. Philipp A. Hartmann ([pah](https://github.com/pah)) has implemented a lot of improvements, set up automatic testing and been involved in many community discussions. Don Ding ([thebusytypist](https://github.com/thebusytypist)) implemented the iterative parser. Andrii Senkovych ([jollyroger](https://github.com/jollyroger)) completed the CMake migration. Kosta ([Kosta-Github](https://github.com/Kosta-Github)) provided a very neat short-string optimization. Thanks to all other contributors and community members as well.
2. Why did you develop RapidJSON?
It started as a hobby project in 2011. Milo Yip is a game programmer who had just learned about JSON at the time and wanted to use it in future projects. Since JSON seemed very simple, he decided to write a fast, header-only library.
3. Why was there a long gap in development?
Mainly personal reasons, such as new family members. Also, Milo Yip spent a lot of spare time translating "Game Engine Architecture" by Jason Gregory into Chinese.
4. Why did the repository move from Google Code to GitHub?
That was the trend, and GitHub is much more powerful and convenient.
# 常见问题
[TOC]
## 一般问题
1. RapidJSON 是什么?
RapidJSON 是一个 C++ 库,用于解析及生成 JSON。读者可参考它的所有 [特点](doc/features.zh-cn.md)
2. 为什么称作 RapidJSON?
它的灵感来自于 [RapidXML](http://rapidxml.sourceforge.net/),RapidXML 是一个高速的 XML DOM 解析器。
3. RapidJSON 与 RapidXML 相似么?
RapidJSON 借镜了 RapidXML 的一些设计, 包括原位(*in situ*)解析、只有头文件的库。但两者的 API 是完全不同的。此外 RapidJSON 也提供许多 RapidXML 没有的特点。
4. RapidJSON 是免费的么?
是的,它在 MIT 协议下免费。它可用于商业软件。详情请参看 [license.txt](https://github.com/Tencent/rapidjson/blob/master/license.txt)
5. RapidJSON 很小么?它有何依赖?
是的。在 Windows 上,一个解析 JSON 并打印出统计的可执行文件少于 30KB。
RapidJSON 仅依赖于 C++ 标准库。
6. 怎样安装 RapidJSON?
[安装一节](../readme.zh-cn.md#安装)
7. RapidJSON 能否运行于我的平台?
社区已在多个操作系统/编译器/CPU 架构的组合上测试 RapidJSON。但我们无法确保它能运行于你特定的平台上。只需要生成及执行单元测试便能获取答案。
8. RapidJSON 支持 C++03 么?C++11 呢?
RapidJSON 开始时在 C++03 上实现。后来加入了可选的 C++11 特性支持(如转移构造函数、`noexcept`)。RapidJSON 应该兼容所有遵从 C++03 或 C++11 的编译器。
9. RapidJSON 是否真的用于实际应用?
是的。它被配置于前台及后台的真实应用中。一个社区成员说 RapidJSON 在他们的系统中每日解析 5 千万个 JSON。
10. RapidJSON 是如何被测试的?
RapidJSON 包含一组单元测试去执行自动测试。[Travis](https://travis-ci.org/Tencent/rapidjson/)(供 Linux 平台)及 [AppVeyor](https://ci.appveyor.com/project/Tencent/rapidjson/)(供 Windows 平台)会对所有修改进行编译及执行单元测试。在 Linux 下还会使用 Valgrind 去检测内存泄漏。
11. RapidJSON 是否有完整的文档?
RapidJSON 提供了使用手册及 API 说明文档。
12. 有没有其他替代品?
有许多替代品。例如 [nativejson-benchmark](https://github.com/miloyip/nativejson-benchmark) 列出了一些开源的 C/C++ JSON 库。[json.org](http://www.json.org/) 也有一个列表。
## JSON
1. 什么是 JSON?
JSON (JavaScript Object Notation) 是一个轻量的数据交换格式。它使用人类可读的文本格式。更多关于 JSON 的细节可参考 [RFC7159](http://www.ietf.org/rfc/rfc7159.txt)[ECMA-404](http://www.ecma-international.org/publications/standards/Ecma-404.htm)
2. JSON 有什么应用场合?
JSON 常用于网页应用程序,以传送结构化数据。它也可作为文件格式用于数据持久化。
3. RapidJSON 是否符合 JSON 标准?
是。RapidJSON 完全符合 [RFC7159](http://www.ietf.org/rfc/rfc7159.txt)[ECMA-404](http://www.ecma-international.org/publications/standards/Ecma-404.htm)。它能处理一些特殊情况,例如支持 JSON 字符串中含有空字符及代理对(surrogate pair)。
4. RapidJSON 是否支持宽松的语法?
目前不支持。RapidJSON 只支持严格的标准格式。宽松语法可以在这个 [issue](https://github.com/Tencent/rapidjson/issues/36) 中进行讨论。
## DOM 与 SAX
1. 什么是 DOM 风格 API?
Document Object Model(DOM)是一个储存于内存的 JSON 表示方式,用于查询及修改 JSON。
2. 什么是 SAX 风格 API?
SAX 是一个事件驱动的 API,用于解析及生成 JSON。
3. 我应用 DOM 还是 SAX?
DOM 易于查询及修改。SAX 则是非常快及省内存的,但通常较难使用。
4. 什么是原位(*in situ*)解析?
原位解析会把 JSON 字符串直接解码至输入的 JSON 中。这是一个优化,可减少内存消耗及提升性能,但输入的 JSON 会被更改。进一步细节请参考 [原位解析](doc/dom.zh-cn.md)
5. 什么时候会产生解析错误?
当输入的 JSON 包含非法语法,或不能表示一个值(如 Number 太大),或解析器的处理器中断解析过程,解析器都会产生一个错误。详情请参考 [解析错误](doc/dom.zh-cn.md)
6. 有什么错误信息?
错误信息存储在 `ParseResult`,它包含错误代号及偏移值(从 JSON 开始至错误处的字符数目)。可以把错误代号翻译为人类可读的错误讯息。
7. 为何不只使用 `double` 去表示 JSON number?
一些应用需要使用 64 位无号/有号整数。这些整数不能无损地转换成 `double`。因此解析器会检测一个 JSON number 是否能转换至各种整数类型及 `double`
8. 如何清空并最小化 `document``value` 的容量?
调用 `SetXXX()` 方法 - 这些方法会调用析构函数,并重建空的 Object 或 Array:
~~~~~~~~~~cpp
Document d;
...
d.SetObject(); // clear and minimize
~~~~~~~~~~
另外,也可以参考在 [C++ swap with temporary idiom](https://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Clear-and-minimize) 中的一种等价的方法:
~~~~~~~~~~cpp
Value(kObjectType).Swap(d);
~~~~~~~~~~
或者,使用这个稍微长一点的代码也能完成同样的事情:
~~~~~~~~~~cpp
d.Swap(Value(kObjectType).Move());
~~~~~~~~~~
9. 如何将一个 `document` 节点插入到另一个 `document` 中?
比如有以下两个 document(DOM):
~~~~~~~~~~cpp
Document person;
person.Parse("{\"person\":{\"name\":{\"first\":\"Adam\",\"last\":\"Thomas\"}}}");
Document address;
address.Parse("{\"address\":{\"city\":\"Moscow\",\"street\":\"Quiet\"}}");
~~~~~~~~~~
假设我们希望将整个 `address` 插入到 `person` 中,作为其的一个子节点:
~~~~~~~~~~js
{ "person": {
"name": { "first": "Adam", "last": "Thomas" },
"address": { "city": "Moscow", "street": "Quiet" }
}
}
~~~~~~~~~~
在插入节点的过程中需要注意 `document``value` 的生命周期并且正确地使用 allocator 进行内存分配和管理。
一个简单有效的方法就是修改上述 `address` 变量的定义,让其使用 `person` 的 allocator 初始化,然后将其添加到根节点。
~~~~~~~~~~cpp
Document address(&person.GetAllocator());
...
person["person"].AddMember("address", address["address"], person.GetAllocator());
~~~~~~~~~~
当然,如果你不想通过显式地写出 `address` 的 key 来得到其值,可以使用迭代器来实现:
~~~~~~~~~~cpp
auto addressRoot = address.MemberBegin();
person["person"].AddMember(addressRoot->name, addressRoot->value, person.GetAllocator());
~~~~~~~~~~
此外,还可以通过深拷贝 address document 来实现:
~~~~~~~~~~cpp
Value addressValue = Value(address["address"], person.GetAllocator());
person["person"].AddMember("address", addressValue, person.GetAllocator());
~~~~~~~~~~
## Document/Value (DOM)
1. 什么是转移语义?为什么?
`Value` 不用复制语义,而使用了转移语义。这是指,当把来源值赋值于目标值时,来源值的所有权会转移至目标值。
由于转移快于复制,此设计决定强迫使用者注意到复制的消耗。
2. 怎样去复制一个值?
有两个 API 可用:含 allocator 的构造函数,以及 `CopyFrom()`。可参考 [深复制 Value](doc/tutorial.zh-cn.md) 里的用例。
3. 为什么我需要提供字符串的长度?
由于 C 字符串是空字符结尾的,需要使用 `strlen()` 去计算其长度,这是线性复杂度的操作。若使用者已知字符串的长度,对很多操作来说会造成不必要的消耗。
此外,RapidJSON 可处理含有 `\u0000`(空字符)的字符串。若一个字符串含有空字符,`strlen()` 便不能返回真正的字符串长度。在这种情况下使用者必须明确地提供字符串长度。
4. 为什么在许多 DOM 操作 API 中要提供分配器作为参数?
由于这些 API 是 `Value` 的成员函数,我们不希望为每个 `Value` 储存一个分配器指针。
5. 它会转换各种数值类型么?
当使用 `GetInt()``GetUint()` 等 API 时,可能会发生转换。对于整数至整数转换,仅当保证转换安全才会转换(否则会断言失败)。然而,当把一个 64 位有号/无号整数转换至 double 时,它会转换,但有可能会损失精度。含有小数的数字、或大于 64 位的整数,都只能使用 `GetDouble()` 获取其值。
## Reader/Writer (SAX)
1. 为什么不仅仅用 `printf` 输出一个 JSON?为什么需要 `Writer`
最重要的是,`Writer` 能确保输出的 JSON 是格式正确的。错误地调用 SAX 事件(如 `StartObject()` 错配 `EndArray()`)会造成断言失败。此外,`Writer` 会把字符串进行转义(如 `\n`)。最后,`printf()` 的数值输出可能并不是一个合法的 JSON number,特别是某些 locale 会有数字分隔符。而且 `Writer` 的数值字符串转换是使用非常快的算法来实现的,胜过 `printf()``iostream`
2. 我能否暂停解析过程,并在稍后继续?
基于性能考虑,目前版本并不直接支持此功能。然而,若执行环境支持多线程,使用者可以在另一线程解析 JSON,并通过阻塞输入流去暂停。
## Unicode
1. 它是否支持 UTF-8、UTF-16 及其他格式?
是。它完全支持 UTF-8、UTF-16(大端/小端)、UTF-32(大端/小端)及 ASCII。
2. 它能否检测编码的合法性?
能。只需把 `kParseValidateEncodingFlag` 作为参数传给 `Parse()`。若发现在输入流中有非法的编码,它就会产生 `kParseErrorStringInvalidEncoding` 错误。
3. 什么是代理对(surrogate pair)?RapidJSON 是否支持?
JSON 使用 UTF-16 编码去转义 Unicode 字符,例如 `\u5927` 表示中文字“大”。要处理基本多文种平面(basic multilingual plane,BMP)以外的字符时,UTF-16 会把那些字符编码成两个 16 位值,这称为 UTF-16 代理对。例如,绘文字字符 U+1F602 在 JSON 中可被编码成 `\uD83D\uDE02`
RapidJSON 完全支持解析及生成 UTF-16 代理对。
4. 它能否处理 JSON 字符串中的 `\u0000`(空字符)?
能。RapidJSON 完全支持 JSON 字符串中的空字符。然而,使用者需要注意到这件事,并使用 `GetStringLength()` 及相关 API 去取得字符串真正长度。
5. 能否对所有非 ASCII 字符输出成 `\uxxxx` 形式?
可以。只要在 `Writer` 中使用 `ASCII<>` 作为输出编码参数,就可以强逼转义那些字符。
## 流
1. 我有一个很大的 JSON 文件。我应否把它整个载入内存中?
使用者可使用 `FileReadStream` 去逐块读入文件。但若使用于原位解析,必须载入整个文件。
2. 我能否解析一个从网络上串流进来的 JSON?
可以。使用者可根据 `FileReadStream` 的实现,去实现一个自定义的流。
3. 我不知道一些 JSON 将会使用哪种编码。怎样处理它们?
你可以使用 `AutoUTFInputStream`,它能自动检测输入流的编码。然而,它会带来一些性能开销。
4. 什么是 BOM?RapidJSON 怎样处理它?
[字节顺序标记(byte order mark, BOM)](http://en.wikipedia.org/wiki/Byte_order_mark) 有时会出现于文件/流的开始,以表示其 UTF 编码类型。
RapidJSON 的 `EncodedInputStream` 可检测/跳过 BOM。`EncodedOutputStream` 可选择是否写入 BOM。可参考 [编码流](doc/stream.zh-cn.md) 中的例子。
5. 为什么会涉及大端/小端?
流的大端/小端是 UTF-16 及 UTF-32 流要处理的问题,而 UTF-8 不需要处理。
## 性能
1. RapidJSON 是否真的快?
是。它可能是最快的开源 JSON 库。有一个 [评测](https://github.com/miloyip/nativejson-benchmark) 评估 C/C++ JSON 库的性能。
2. 为什么它会快?
RapidJSON 的许多设计是针对时间/空间性能来设计的,这些决定可能会影响 API 的易用性。此外,它也使用了许多底层优化(内部函数/intrinsic、SIMD)及特别的算法(自定义的 double 至字符串转换、字符串至 double 的转换)。
3. 什是是 SIMD?它如何用于 RapidJSON?
[SIMD](http://en.wikipedia.org/wiki/SIMD) 指令可以在现代 CPU 中执行并行运算。RapidJSON 支持使用 Intel 的 SSE2/SSE4.2 和 ARM 的 Neon 来加速对空白符、制表符、回车符和换行符的过滤处理。在解析含缩进的 JSON 时,这能提升性能。只要定义名为 `RAPIDJSON_SSE2``RAPIDJSON_SSE42``RAPIDJSON_NEON` 的宏,就能启动这个功能。然而,若在不支持这些指令集的机器上执行这些可执行文件,会导致崩溃。
4. 它会消耗许多内存么?
RapidJSON 的设计目标是减低内存占用。
在 SAX API 中,`Reader` 消耗的内存与 JSON 树深度加上最长 JSON 字符串成正比。
在 DOM API 中,每个 `Value` 在 32/64 位架构下分别消耗 16/24 字节。RapidJSON 也使用一个特殊的内存分配器去减少分配的额外开销。
5. 高性能的意义何在?
有些应用程序需要处理非常大的 JSON 文件。而有些后台应用程序需要处理大量的 JSON。达到高性能同时改善延时及吞吐量。更广义来说,这也可以节省能源。
## 八卦
1. 谁是 RapidJSON 的开发者?
叶劲峰(Milo Yip,[miloyip](https://github.com/miloyip))是 RapidJSON 的原作者。全世界许多贡献者一直在改善 RapidJSON。Philipp A. Hartmann([pah](https://github.com/pah))实现了许多改进,也设置了自动化测试,而且还参与许多社区讨论。丁欧南(Don Ding,[thebusytypist](https://github.com/thebusytypist))实现了迭代式解析器。Andrii Senkovych([jollyroger](https://github.com/jollyroger))完成了向 CMake 的迁移。Kosta([Kosta-Github](https://github.com/Kosta-Github))提供了一个非常灵巧的短字符串优化。也需要感谢其他贡献者及社区成员。
2. 为何你要开发 RapidJSON?
在 2011 年开始这项目时,它只是一个兴趣项目。Milo Yip 是一个游戏程序员,他在那时候认识到 JSON 并希望在未来的项目中使用。由于 JSON 好像很简单,他希望写一个快速的仅有头文件的程序库。
3. 为什么开发中段有一段长期空档?
主要是个人因素,例如加入新家庭成员。另外,Milo Yip 也花了许多业余时间去翻译 Jason Gregory 的《Game Engine Architecture》至中文版《游戏引擎架构》。
4. 为什么这个项目从 Google Code 搬到 GitHub?
这是大势所趋,而且 GitHub 更为强大及方便。
# Features
## General
* Cross-platform
* Compilers: Visual Studio, gcc, clang, etc.
* Architectures: x86, x64, ARM, etc.
* Operating systems: Windows, Mac OS X, Linux, iOS, Android, etc.
* Easy installation
* Header files only library. Just copy the headers to your project.
* Self-contained, minimal dependencies
* No STL, BOOST, etc.
* Only includes `<cstdio>`, `<cstdlib>`, `<cstring>`, `<inttypes.h>`, `<new>`, `<stdint.h>`.
* No C++ exceptions or RTTI
* High performance
* Use template and inline functions to reduce function call overheads.
* Internally optimized Grisu2 and floating-point parsing implementations.
* Optional SSE2/SSE4.2 support.
## Standard compliance
* RapidJSON should be fully RFC4627/ECMA-404 compliant.
* Support JSON Pointer (RFC6901).
* Support JSON Schema Draft v4.
* Support Unicode surrogate.
* Support null character (`"\u0000"`)
* For example, `["Hello\u0000World"]` can be parsed and handled gracefully. There are APIs for getting/setting string lengths.
* Support optional relaxed syntax.
* Single line (`// ...`) and multiple line (`/* ... */`) comments (`kParseCommentsFlag`).
* Trailing commas at the end of objects and arrays (`kParseTrailingCommasFlag`).
* `NaN`, `Inf`, `Infinity`, `-Inf` and `-Infinity` as `double` values (`kParseNanAndInfFlag`)
* [NPM compliant](http://github.com/Tencent/rapidjson/blob/master/doc/npm.md).
## Unicode
* Support UTF-8, UTF-16, UTF-32 encodings, including little endian and big endian.
* These encodings are used in input/output streams and in-memory representation.
* Support automatic detection of encodings in input stream.
* Support transcoding between encodings internally.
* For example, you can read a UTF-8 file and let RapidJSON transcode the JSON strings into UTF-16 in the DOM.
* Support encoding validation internally.
* For example, you can read a UTF-8 file, and let RapidJSON check whether all JSON strings are valid UTF-8 byte sequence.
* Support custom character types.
* By default the character types are `char` for UTF8, `wchar_t` for UTF16, `uint32_t` for UTF32.
* Support custom encodings.
## API styles
* SAX (Simple API for XML) style API
* Similar to [SAX](http://en.wikipedia.org/wiki/Simple_API_for_XML), RapidJSON provides an event-driven, sequential-access parser API (`rapidjson::GenericReader`). It also provides a generator API (`rapidjson::Writer`) which consumes the same set of events.
* DOM (Document Object Model) style API
* Similar to [DOM](http://en.wikipedia.org/wiki/Document_Object_Model) for HTML/XML, RapidJSON can parse JSON into a DOM representation (`rapidjson::GenericDocument`), for easy manipulation, and finally stringify back to JSON if needed.
* The DOM style API (`rapidjson::GenericDocument`) is actually implemented with SAX style API (`rapidjson::GenericReader`). SAX is faster but sometimes DOM is easier. Users can pick their choices according to scenarios.
## Parsing
* Recursive (default) and iterative parser
* Recursive parser is faster but prone to stack overflow in extreme cases.
* Iterative parser uses a custom stack to keep the parsing state.
* Support *in situ* parsing.
* Parse JSON string values in-place at the source JSON, and then the DOM points to addresses of those strings.
* Faster than conventional parsing: no allocation for strings, no copy (if the string does not contain escapes), cache-friendly.
* Support 32-bit/64-bit signed/unsigned integer and `double` for JSON number type.
* Support parsing multiple JSONs in input stream (`kParseStopWhenDoneFlag`).
* Error Handling
* Support comprehensive error code if parsing failed.
* Support error message localization.
## DOM (Document)
* RapidJSON checks range of numerical values for conversions.
* Optimization for string literal
* Only store pointer instead of copying
* Optimization for "short" strings
* Store short string in `Value` internally without additional allocation.
* For UTF-8 string: maximum 11 characters in 32-bit, 21 characters in 64-bit (13 characters in x86-64).
* Optionally support `std::string` (define `RAPIDJSON_HAS_STDSTRING=1`)
## Generation
* Support `rapidjson::PrettyWriter` for adding newlines and indentations.
## Stream
* Support `rapidjson::GenericStringBuffer` for storing the output JSON as string.
* Support `rapidjson::FileReadStream` and `rapidjson::FileWriteStream` for input/output `FILE` object.
* Support custom streams.
## Memory
* Minimize memory overheads for DOM.
* Each JSON value occupies exactly 16/20 bytes for most 32/64-bit machines (excluding text string).
* Support fast default allocator.
* A stack-based allocator (allocates sequentially; individual allocations cannot be freed; suitable for parsing).
* User can provide a pre-allocated buffer. (Possible to parse a number of JSONs without any CRT allocation)
* Support standard CRT (C run-time) allocator.
* Support custom allocators.
## Miscellaneous
* Some C++11 support (optional)
* Rvalue reference
* `noexcept` specifier
* Range-based for loop
# 特点
## 总体
* 跨平台
* 编译器:Visual Studio、gcc、clang 等
* 架构:x86、x64、ARM 等
* 操作系统:Windows、Mac OS X、Linux、iOS、Android 等
* 容易安装
* 只有头文件的库。只需把头文件复制至你的项目中。
* 独立、最小依赖
* 不需依赖 STL、BOOST 等。
* 只包含 `<cstdio>`, `<cstdlib>`, `<cstring>`, `<inttypes.h>`, `<new>`, `<stdint.h>`
* 没使用 C++ 异常、RTTI
* 高性能
* 使用模版及内联函数去降低函数调用开销。
* 内部经优化的 Grisu2 及浮点数解析实现。
* 可选的 SSE2/SSE4.2 支持。
## 符合标准
* RapidJSON 应完全符合 RFC4627/ECMA-404 标准。
* 支持 JSON Pointer (RFC6901).
* 支持 JSON Schema Draft v4.
* 支持 Unicode 代理对(surrogate pair)。
* 支持空字符(`"\u0000"`)。
* 例如,可以优雅地解析及处理 `["Hello\u0000World"]`。含读写字符串长度的 API。
* 支持可选的放宽语法
* 单行(`// ...`)及多行(`/* ... */`) 注释 (`kParseCommentsFlag`)。
* 在对象和数组结束前含逗号 (`kParseTrailingCommasFlag`)。
* `NaN``Inf``Infinity``-Inf``-Infinity` 作为 `double` 值 (`kParseNanAndInfFlag`)
* [NPM 兼容](https://github.com/Tencent/rapidjson/blob/master/doc/npm.md).
## Unicode
* 支持 UTF-8、UTF-16、UTF-32 编码,包括小端序和大端序。
* 这些编码用于输入输出流,以及内存中的表示。
* 支持从输入流自动检测编码。
* 内部支持编码的转换。
* 例如,你可以读取一个 UTF-8 文件,让 RapidJSON 把 JSON 字符串转换至 UTF-16 的 DOM。
* 内部支持编码校验。
* 例如,你可以读取一个 UTF-8 文件,让 RapidJSON 检查是否所有 JSON 字符串是合法的 UTF-8 字节序列。
* 支持自定义的字符类型。
* 预设的字符类型是:UTF-8 为 `char`,UTF-16 为 `wchar_t`,UTF32 为 `uint32_t`
* 支持自定义的编码。
## API 风格
* SAX(Simple API for XML)风格 API
* 类似于 [SAX](http://en.wikipedia.org/wiki/Simple_API_for_XML), RapidJSON 提供一个事件循序访问的解析器 API(`rapidjson::GenericReader`)。RapidJSON 也提供一个生成器 API(`rapidjson::Writer`),可以处理相同的事件集合。
* DOM(Document Object Model)风格 API
* 类似于 HTML/XML 的 [DOM](http://en.wikipedia.org/wiki/Document_Object_Model),RapidJSON 可把 JSON 解析至一个 DOM 表示方式(`rapidjson::GenericDocument`),以方便操作。如有需要,可把 DOM 转换(stringify)回 JSON。
* DOM 风格 API(`rapidjson::GenericDocument`)实际上是由 SAX 风格 API(`rapidjson::GenericReader`)实现的。SAX 更快,但有时 DOM 更易用。用户可根据情况作出选择。
## 解析
* 递归式(预设)及迭代式解析器
* 递归式解析器较快,但在极端情况下可出现堆栈溢出。
* 迭代式解析器使用自定义的堆栈去维持解析状态。
* 支持原位(*in situ*)解析。
* 把 JSON 字符串的值解析至原 JSON 之中,然后让 DOM 指向那些字符串。
* 比常规分析更快:不需字符串的内存分配、不需复制(如字符串不含转义符)、缓存友好。
* 对于 JSON 数字类型,支持 32-bit/64-bit 的有号/无号整数,以及 `double`
* 错误处理
* 支持详尽的解析错误代号。
* 支持本地化错误信息。
## DOM (Document)
* RapidJSON 在类型转换时会检查数值的范围。
* 字符串字面量的优化
* 只储存指针,不作复制
* 优化“短”字符串
*`Value` 内储存短字符串,无需额外分配。
* 对 UTF-8 字符串来说,32 位架构下可存储最多 11 字符,64 位下 21 字符(x86-64 下 13 字符)。
* 可选地支持 `std::string`(定义 `RAPIDJSON_HAS_STDSTRING=1`
## 生成
* 支持 `rapidjson::PrettyWriter` 去加入换行及缩进。
## 输入输出流
* 支持 `rapidjson::GenericStringBuffer`,把输出的 JSON 储存于字符串内。
* 支持 `rapidjson::FileReadStream``rapidjson::FileWriteStream`,使用 `FILE` 对象作输入输出。
* 支持自定义输入输出流。
## 内存
* 最小化 DOM 的内存开销。
* 对大部分 32/64 位机器而言,每个 JSON 值只占 16 或 20 字节(不包含字符串)。
* 支持快速的预设分配器。
* 它是一个堆栈形式的分配器(顺序分配,不容许单独释放,适合解析过程之用)。
* 使用者也可提供一个预分配的缓冲区。(有可能达至无需 CRT 分配就能解析多个 JSON)
* 支持标准 CRT(C-runtime)分配器。
* 支持自定义分配器。
## 其他
* 一些 C++11 的支持(可选)
* 右值引用(rvalue reference)
* `noexcept` 修饰符
* 范围 for 循环
<!-- HTML footer for doxygen 1.8.7-->
<!-- start footer part -->
<!--BEGIN GENERATE_TREEVIEW-->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
$navpath
</ul>
</div>
<!--END GENERATE_TREEVIEW-->
</body>
</html>
<!-- HTML header for doxygen 1.8.7-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen $doxygenversion"/>
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
<link href="$relpath^tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="$relpath^jquery.js"></script>
<script type="text/javascript" src="$relpath^dynsections.js"></script>
$treeview
$search
$mathjax
<link href="$relpath^$stylesheet" rel="stylesheet" type="text/css" />
$extrastylesheet
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="topbanner"><a href="https://github.com/Tencent/rapidjson" title="RapidJSON GitHub"><i class="githublogo"></i></a></div>
$searchbox
<!--END TITLEAREA-->
<!-- end header part -->
## NPM
# package.json {#package}
~~~~~~~~~~js
{
...
"dependencies": {
...
"rapidjson": "git@github.com:Tencent/rapidjson.git"
},
...
"gypfile": true
}
~~~~~~~~~~
# binding.gyp {#binding}
~~~~~~~~~~js
{
...
'targets': [
{
...
'include_dirs': [
'<!(node -e \'require("rapidjson")\')'
]
}
]
}
~~~~~~~~~~
# BUILD: docker build -t rapidjson-debian .
# RUN: docker run -it -v "$PWD"/../..:/rapidjson rapidjson-debian
FROM debian:jessie
RUN apt-get update && apt-get install -y g++ cmake doxygen valgrind
ENTRYPOINT ["/bin/bash"]