diff --git a/CMakeLists.txt b/CMakeLists.txt index 5a4dbf94b465ab79f54309507f661b208a694422..13d40299097b8915cd584a45214dd9a8071a077b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -70,7 +70,7 @@ MESSAGE("Install target dir is " ${CMAKE_INSTALL_PREFIX}) ADD_SUBDIRECTORY(deps) ADD_SUBDIRECTORY(src) ADD_SUBDIRECTORY(unitest) -ADD_SUBDIRECTORY(test) +ADD_SUBDIRECTORY(test/perf) # install 准备安装的目录是cmakefile 的当前目录, 不是build 后生成的目录 diff --git a/deps/common/conf/ini.cpp b/deps/common/conf/ini.cpp index 3a7ffd89068e0f10ffad02af70dd75686ee9b58c..896490828885c34b6250f36e502198ea793e82ad 100644 --- a/deps/common/conf/ini.cpp +++ b/deps/common/conf/ini.cpp @@ -27,22 +27,23 @@ namespace common { const std::string Ini::DEFAULT_SECTION = std::string(""); const std::map Ini::empty_map_; -Ini::Ini() {} +Ini::Ini() +{} -Ini::~Ini() {} +Ini::~Ini() +{} -void Ini::insert_session(const std::string &session_name) { +void Ini::insert_session(const std::string &session_name) +{ std::map session_map; - std::pair < std::string, std::map < std::string, std::string >> entry = - std::pair < std::string, std::map < std::string, std::string - >> (session_name, - session_map); + std::pair> entry = + std::pair>(session_name, session_map); sections_.insert(entry); } -std::map *Ini::switch_session( - const std::string &session_name) { +std::map *Ini::switch_session(const std::string &session_name) +{ SessionsMap::iterator it = sections_.find(session_name); if (it != sections_.end()) { return &it->second; @@ -59,8 +60,8 @@ std::map *Ini::switch_session( return nullptr; } -const std::map &Ini::get( - const std::string §ion) { +const std::map &Ini::get(const std::string §ion) +{ SessionsMap::iterator it = sections_.find(section); if (it == sections_.end()) { return empty_map_; @@ -69,8 +70,8 @@ const std::map &Ini::get( return it->second; } -std::string Ini::get(const std::string &key, const std::string &defaultValue, - const std::string §ion) { +std::string Ini::get(const std::string &key, const std::string &defaultValue, const std::string §ion) +{ std::map section_map = get(section); std::map::iterator it = section_map.find(key); @@ -81,8 +82,8 @@ std::string Ini::get(const std::string &key, const std::string &defaultValue, return it->second; } -int Ini::put(const std::string &key, const std::string &value, - const std::string §ion) { +int Ini::put(const std::string &key, const std::string &value, const std::string §ion) +{ std::map *section_map = switch_session(section); section_map->insert(std::pair(key, value)); @@ -90,17 +91,15 @@ int Ini::put(const std::string &key, const std::string &value, return 0; } -int Ini::insert_entry(std::map *session_map, - const std::string &line) { +int Ini::insert_entry(std::map *session_map, const std::string &line) +{ if (session_map == nullptr) { - std::cerr << __FILE__ << __FUNCTION__ << " session map is null" - << std::endl; + std::cerr << __FILE__ << __FUNCTION__ << " session map is null" << std::endl; return -1; } size_t equal_pos = line.find_first_of('='); if (equal_pos == std::string::npos) { - std::cerr << __FILE__ << __FUNCTION__ << "Invalid configuration line " - << line << std::endl; + std::cerr << __FILE__ << __FUNCTION__ << "Invalid configuration line " << line << std::endl; return -1; } @@ -115,15 +114,15 @@ int Ini::insert_entry(std::map *session_map, return 0; } -int Ini::load(const std::string &file_name) { +int Ini::load(const std::string &file_name) +{ std::ifstream ifs; try { bool continue_last_line = false; - std::map *current_session = - 
switch_session(DEFAULT_SECTION); + std::map *current_session = switch_session(DEFAULT_SECTION); char line[MAX_CFG_LINE_LEN]; @@ -148,8 +147,7 @@ int Ini::load(const std::string &file_name) { continue; } - if (read_buf[0] == CFG_SESSION_START_TAG && - read_buf[strlen(read_buf) - 1] == CFG_SESSION_END_TAG) { + if (read_buf[0] == CFG_SESSION_START_TAG && read_buf[strlen(read_buf) - 1] == CFG_SESSION_END_TAG) { read_buf[strlen(read_buf) - 1] = '\0'; std::string session_name = std::string(read_buf + 1); @@ -171,7 +169,7 @@ int Ini::load(const std::string &file_name) { continue_last_line = true; // remove the last character - line_entry = line_entry.substr(0, line_entry.size() -1); + line_entry = line_entry.substr(0, line_entry.size() - 1); continue; } else { continue_last_line = false; @@ -186,21 +184,20 @@ int Ini::load(const std::string &file_name) { if (ifs.is_open()) { ifs.close(); } - std::cerr << "Failed to load " << file_name << SYS_OUTPUT_ERROR - << std::endl; + std::cerr << "Failed to load " << file_name << SYS_OUTPUT_ERROR << std::endl; return -1; } return 0; } -void Ini::to_string(std::string &output_str) { +void Ini::to_string(std::string &output_str) +{ output_str.clear(); output_str += "Begin dump configuration\n"; - for (SessionsMap::iterator it = sections_.begin(); it != sections_.end(); - it++) { + for (SessionsMap::iterator it = sections_.begin(); it != sections_.end(); it++) { output_str += CFG_SESSION_START_TAG; output_str += it->first; output_str += CFG_SESSION_END_TAG; @@ -208,9 +205,8 @@ void Ini::to_string(std::string &output_str) { std::map §ion_map = it->second; - for (std::map::iterator sub_it = - section_map.begin(); - sub_it != section_map.end(); sub_it++) { + for (std::map::iterator sub_it = section_map.begin(); sub_it != section_map.end(); + sub_it++) { output_str += sub_it->first; output_str += "="; output_str += sub_it->second; @@ -225,9 +221,10 @@ void Ini::to_string(std::string &output_str) { } //! 
Accessor function which wraps global properties object -Ini *&get_properties() { +Ini *&get_properties() +{ static Ini *properties = new Ini(); return properties; } -} //namespace common +} // namespace common diff --git a/deps/common/conf/ini.h b/deps/common/conf/ini.h index 631bd09637aa7ccb2a9d7e658bf97663509fbec1..225a6863e6483a6beea59185df5c71faf67cc354 100644 --- a/deps/common/conf/ini.h +++ b/deps/common/conf/ini.h @@ -31,7 +31,7 @@ namespace common { // VARNAME=VALUE class Ini { - public: +public: /** * To simplify the logic, no lock's when loading configuration * So don't modify the data parallel @@ -50,24 +50,22 @@ class Ini { * get the map of the section * if the section doesn't exist, return one empty section */ - const std::map & - get(const std::string §ion = DEFAULT_SECTION); + const std::map &get(const std::string §ion = DEFAULT_SECTION); /** * get the value of the key in the section, * if the key-value doesn't exist, * use the input default_value */ - std::string get(const std::string &key, const std::string &default_value, - const std::string §ion = DEFAULT_SECTION); + std::string get( + const std::string &key, const std::string &default_value, const std::string §ion = DEFAULT_SECTION); /** * put the key-value pair to the section * if the key-value already exist, just replace it * if the section doesn't exist, it will create this section */ - int put(const std::string &key, const std::string &value, - const std::string §ion = DEFAULT_SECTION); + int put(const std::string &key, const std::string &value, const std::string §ion = DEFAULT_SECTION); /** * output all configuration to one string @@ -92,7 +90,7 @@ class Ini { static const char CFG_SESSION_START_TAG = '['; static const char CFG_SESSION_END_TAG = ']'; - protected: +protected: /** * insert one empty session to sections_ */ @@ -102,21 +100,18 @@ class Ini { * switch session according to the session_name * if the section doesn't exist, it will create one */ - std::map * - switch_session(const std::string &session_name); + std::map *switch_session(const std::string &session_name); /** * insert one entry to session_map * line's format is "key=value" * */ - int insert_entry(std::map *session_map, - const std::string &line); + int insert_entry(std::map *session_map, const std::string &line); - typedef std::map> - SessionsMap; + typedef std::map> SessionsMap; - private: +private: static const std::map empty_map_; std::set file_names_; @@ -129,5 +124,5 @@ class Ini { Ini *&get_properties(); //******************************************************************** -}// namespace common -#endif //__COMMON_CONF_INI_H__ +} // namespace common +#endif //__COMMON_CONF_INI_H__ diff --git a/deps/common/defs.h b/deps/common/defs.h index 18a66de99e24373dc20b31eb68818ee345768de0..fc416556bc001bb282a9f4b5c5098f9a411650f2 100644 --- a/deps/common/defs.h +++ b/deps/common/defs.h @@ -36,7 +36,8 @@ namespace common { #endif -inline const std::string &theSwVersion() { +inline const std::string &theSwVersion() +{ static const std::string swVersion(VERSION_STR); return swVersion; @@ -44,24 +45,24 @@ inline const std::string &theSwVersion() { enum { // General Error Codes - STATUS_SUCCESS = 0, //!< Success status should be zero, - STATUS_INVALID_PARAM, //!< Invalid parameter - STATUS_FAILED_INIT, //!< Failed to init program - STATUS_PROPERTY_ERR, //!< Property error - STATUS_INIT_LOG, //!< log error - STATUS_INIT_THREAD, //!< failed to init thread - STATUS_FAILED_JOB, //!< Failed to do job - STATUS_FAILED_NETWORK, //!< Network failure + 
STATUS_SUCCESS = 0, //!< Success status should be zero, + STATUS_INVALID_PARAM, //!< Invalid parameter + STATUS_FAILED_INIT, //!< Failed to init program + STATUS_PROPERTY_ERR, //!< Property error + STATUS_INIT_LOG, //!< log error + STATUS_INIT_THREAD, //!< failed to init thread + STATUS_FAILED_JOB, //!< Failed to do job + STATUS_FAILED_NETWORK, //!< Network failure STATUS_UNKNOW_ERROR, - STATUS_LAST_ERR //!< last error code + STATUS_LAST_ERR //!< last error code }; const unsigned int ONE_KILO = 1024; const unsigned int ONE_MILLION = ONE_KILO * ONE_KILO; const unsigned int ONE_GIGA = ONE_MILLION * ONE_KILO; -const unsigned int FILENAME_LENGTH_MAX = 256; // the max filename length +const unsigned int FILENAME_LENGTH_MAX = 256; // the max filename length static const char FILE_PATH_SPLIT = '/'; static const char FILE_PATH_SPLIT_STR[] = "/"; @@ -82,5 +83,5 @@ typedef long long s64_t; #define LOCAL_HOST "localhost" -} // namespace common -#endif //__COMMON_DEFS_H__ +} // namespace common +#endif //__COMMON_DEFS_H__ diff --git a/deps/common/io/io.cpp b/deps/common/io/io.cpp index 459a972b849ff00956506f9acf07e9682bca4f20..e256c19cab1e2f9c972ec46654f4187fd1f00536 100644 --- a/deps/common/io/io.cpp +++ b/deps/common/io/io.cpp @@ -27,13 +27,11 @@ See the Mulan PSL v2 for more details. */ namespace common { - -int readFromFile(const std::string &fileName, char *&outputData, - size_t &fileSize) { +int readFromFile(const std::string &fileName, char *&outputData, size_t &fileSize) +{ FILE *file = fopen(fileName.c_str(), "rb"); if (file == NULL) { - std::cerr << "Failed to open file " << fileName << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR << std::endl; + std::cerr << "Failed to open file " << fileName << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; return -1; } @@ -50,8 +48,7 @@ int readFromFile(const std::string &fileName, char *&outputData, memset(buffer, 0, sizeof(buffer)); oneRead = fread(buffer, 1, sizeof(buffer), file); if (ferror(file)) { - std::cerr << "Failed to read data" << fileName << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR << std::endl; + std::cerr << "Failed to read data" << fileName << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; fclose(file); if (data != NULL) { lfree(data); @@ -62,8 +59,7 @@ int readFromFile(const std::string &fileName, char *&outputData, data = (char *)lrealloc(data, readSize + oneRead); if (data == NULL) { - std::cerr << "Failed to alloc memory for " << fileName - << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; + std::cerr << "Failed to alloc memory for " << fileName << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; lfree(data); fclose(file); return -1; @@ -81,12 +77,11 @@ int readFromFile(const std::string &fileName, char *&outputData, return 0; } -int writeToFile(const std::string &fileName, const char *data, u32_t dataSize, - const char *openMode) { +int writeToFile(const std::string &fileName, const char *data, u32_t dataSize, const char *openMode) +{ FILE *file = fopen(fileName.c_str(), openMode); if (file == NULL) { - std::cerr << "Failed to open file " << fileName << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR << std::endl; + std::cerr << "Failed to open file " << fileName << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; return -1; } @@ -95,8 +90,7 @@ int writeToFile(const std::string &fileName, const char *data, u32_t dataSize, while (leftSize > 0) { int writeCount = fwrite(buffer, 1, leftSize, file); if (writeCount <= 0) { - std::cerr << "Failed to open file " << fileName << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR 
<< std::endl; + std::cerr << "Failed to open file " << fileName << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; fclose(file); return -1; } else { @@ -110,7 +104,8 @@ int writeToFile(const std::string &fileName, const char *data, u32_t dataSize, return 0; } -int getFileLines(const std::string &fileName, u64_t &lineNum) { +int getFileLines(const std::string &fileName, u64_t &lineNum) +{ lineNum = 0; char line[4 * ONE_KILO] = {0}; @@ -133,14 +128,13 @@ int getFileLines(const std::string &fileName, u64_t &lineNum) { return 0; } -int getFileNum(u64_t &fileNum, const std::string &path, - const std::string &pattern, bool recursive) { +int getFileNum(u64_t &fileNum, const std::string &path, const std::string &pattern, bool recursive) +{ try { DIR *dirp = NULL; dirp = opendir(path.c_str()); if (dirp == NULL) { - std::cerr << "Failed to opendir " << path << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR << std::endl; + std::cerr << "Failed to opendir " << path << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; return -1; } @@ -160,8 +154,7 @@ int getFileNum(u64_t &fileNum, const std::string &path, fullPath += entry->d_name; memset(&fs, 0, sizeof(fs)); if (stat(fullPath.c_str(), &fs) < 0) { - std::cout << "Failed to stat " << fullPath << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR << std::endl; + std::cout << "Failed to stat " << fullPath << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; continue; } @@ -181,8 +174,7 @@ int getFileNum(u64_t &fileNum, const std::string &path, continue; } - if (pattern.empty() == false && - regex_match(entry->d_name, pattern.c_str())) { + if (pattern.empty() == false && regex_match(entry->d_name, pattern.c_str())) { // Don't match continue; } @@ -194,20 +186,18 @@ int getFileNum(u64_t &fileNum, const std::string &path, return 0; } catch (...) { - std::cerr << "Failed to get file num " << path << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR << std::endl; + std::cerr << "Failed to get file num " << path << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; } return -1; } -int getFileList(std::vector &fileList, const std::string &path, - const std::string &pattern, bool recursive) { +int getFileList(std::vector &fileList, const std::string &path, const std::string &pattern, bool recursive) +{ try { DIR *dirp = NULL; dirp = opendir(path.c_str()); if (dirp == NULL) { - std::cerr << "Failed to opendir " << path << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR << std::endl; + std::cerr << "Failed to opendir " << path << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; return -1; } @@ -227,8 +217,7 @@ int getFileList(std::vector &fileList, const std::string &path, fullPath += entry->d_name; memset(&fs, 0, sizeof(fs)); if (stat(fullPath.c_str(), &fs) < 0) { - std::cout << "Failed to stat " << fullPath << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR << std::endl; + std::cout << "Failed to stat " << fullPath << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; continue; } @@ -248,8 +237,7 @@ int getFileList(std::vector &fileList, const std::string &path, continue; } - if (pattern.empty() == false && - regex_match(entry->d_name, pattern.c_str())) { + if (pattern.empty() == false && regex_match(entry->d_name, pattern.c_str())) { // Don't match continue; } @@ -260,20 +248,18 @@ int getFileList(std::vector &fileList, const std::string &path, closedir(dirp); return 0; } catch (...) 
{ - std::cerr << "Failed to get file list " << path << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR << std::endl; + std::cerr << "Failed to get file list " << path << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; } return -1; } -int getDirList(std::vector &dirList, const std::string &path, - const std::string &pattern) { +int getDirList(std::vector &dirList, const std::string &path, const std::string &pattern) +{ try { DIR *dirp = NULL; dirp = opendir(path.c_str()); if (dirp == NULL) { - std::cerr << "Failed to opendir " << path << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR << std::endl; + std::cerr << "Failed to opendir " << path << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; return -1; } @@ -293,8 +279,7 @@ int getDirList(std::vector &dirList, const std::string &path, fullPath += entry->d_name; memset(&fs, 0, sizeof(fs)); if (stat(fullPath.c_str(), &fs) < 0) { - std::cout << "Failed to stat " << fullPath << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR << std::endl; + std::cout << "Failed to stat " << fullPath << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; continue; } @@ -302,8 +287,7 @@ int getDirList(std::vector &dirList, const std::string &path, continue; } - if (pattern.empty() == false && - regex_match(entry->d_name, pattern.c_str())) { + if (pattern.empty() == false && regex_match(entry->d_name, pattern.c_str())) { // Don't match continue; } @@ -314,15 +298,15 @@ int getDirList(std::vector &dirList, const std::string &path, closedir(dirp); return 0; } catch (...) { - std::cerr << "Failed to get file list " << path << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR << std::endl; + std::cerr << "Failed to get file list " << path << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; } return -1; } -int touch(const std::string &path) { - // CWE367: A check occurs on a file's attributes before - // the file is used in a privileged operation, but things +int touch(const std::string &path) +{ + // CWE367: A check occurs on a file's attributes before + // the file is used in a privileged operation, but things // may have changed // struct stat fs; @@ -341,7 +325,8 @@ int touch(const std::string &path) { return 0; } -int getFileSize(const char *filePath, u64_t &fileLen) { +int getFileSize(const char *filePath, u64_t &fileLen) +{ if (filePath == NULL || *filePath == '\0') { std::cerr << "invalid filepath" << std::endl; return -EINVAL; @@ -351,8 +336,7 @@ int getFileSize(const char *filePath, u64_t &fileLen) { int rc = stat(filePath, &statBuf); if (rc) { - std::cerr << "Failed to get stat of " << filePath << "," << errno << ":" - << strerror(errno) << std::endl; + std::cerr << "Failed to get stat of " << filePath << "," << errno << ":" << strerror(errno) << std::endl; return rc; } @@ -365,4 +349,4 @@ int getFileSize(const char *filePath, u64_t &fileLen) { return 0; } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/io/io.h b/deps/common/io/io.h index 37e82d2915511fa3daf34a2dd411e531203c96f3..e4f01702ddd8c06dee5eef7f41bd735c78569e07 100644 --- a/deps/common/io/io.h +++ b/deps/common/io/io.h @@ -29,8 +29,7 @@ namespace common { */ int readFromFile(const std::string &fileName, char *&data, size_t &fileSize); -int writeToFile(const std::string &fileName, const char *data, u32_t dataSize, - const char *openMode); +int writeToFile(const std::string &fileName, const char *data, u32_t dataSize, const char *openMode); /** * return the line number which line.strip() isn't empty @@ -47,12 +46,10 @@ int 
getFileLines(const std::string &fileName, u64_t &lineNum); * @param[in] resursion if this has been set, it will search subdirs * @return 0 if success, error code otherwise */ -int getFileList(std::vector &fileList, const std::string &path, - const std::string &pattern, bool recursive); -int getFileNum(u64_t &fileNum, const std::string &path, - const std::string &pattern, bool recursive); -int getDirList(std::vector &dirList, const std::string &path, - const std::string &pattern); +int getFileList( + std::vector &fileList, const std::string &path, const std::string &pattern, bool recursive); +int getFileNum(u64_t &fileNum, const std::string &path, const std::string &pattern, bool recursive); +int getDirList(std::vector &dirList, const std::string &path, const std::string &pattern); int touch(const std::string &fileName); @@ -61,5 +58,5 @@ int touch(const std::string &fileName); */ int getFileSize(const char *filePath, u64_t &fileLen); -} //namespace common +} // namespace common #endif /* __COMMON_IO_IO_H__ */ diff --git a/deps/common/io/roll_select_dir.cpp b/deps/common/io/roll_select_dir.cpp index 64b642f40d3d4cf44daa1f7bb8c953589e5cf055..0390c5c2e59dd66dd632cf2949fb10aef4637ce6 100644 --- a/deps/common/io/roll_select_dir.cpp +++ b/deps/common/io/roll_select_dir.cpp @@ -17,7 +17,8 @@ See the Mulan PSL v2 for more details. */ #include "common/log/log.h" namespace common { -void RollSelectDir::setBaseDir(std::string baseDir) { +void RollSelectDir::setBaseDir(std::string baseDir) +{ mBaseDir = baseDir; std::vector dirList; @@ -44,7 +45,8 @@ void RollSelectDir::setBaseDir(std::string baseDir) { return; } -std::string RollSelectDir::select() { +std::string RollSelectDir::select() +{ std::string ret; MUTEX_LOCK(&mMutex); @@ -55,4 +57,4 @@ std::string RollSelectDir::select() { return ret; } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/io/roll_select_dir.h b/deps/common/io/roll_select_dir.h index 81cffc84a63af4d344063f8851d70e07d3a25610..127bd90f2799a34fae0641e5bf95d4ea574dbbd8 100644 --- a/deps/common/io/roll_select_dir.h +++ b/deps/common/io/roll_select_dir.h @@ -26,8 +26,14 @@ namespace common { class RollSelectDir : public SelectDir { public: - RollSelectDir() { MUTEX_INIT(&mMutex, NULL); } - ~RollSelectDir() { MUTEX_DESTROY(&mMutex); } + RollSelectDir() + { + MUTEX_INIT(&mMutex, NULL); + } + ~RollSelectDir() + { + MUTEX_DESTROY(&mMutex); + } public: /** @@ -43,5 +49,5 @@ public: u32_t mPos; }; -} //namespace common +} // namespace common #endif /* __COMMON_IO_ROLL_SELECT_DIR__ */ diff --git a/deps/common/io/select_dir.h b/deps/common/io/select_dir.h index 2453c07cc050d1715f07f5df93fb621eb3f5dfd1..db6f659af671e941f75f6ac2d3decd33a5becbcd 100644 --- a/deps/common/io/select_dir.h +++ b/deps/common/io/select_dir.h @@ -20,9 +20,12 @@ namespace common { class SelectDir { public: - virtual std::string select() { return std::string(""); }; + virtual std::string select() + { + return std::string(""); + }; virtual void setBaseDir(std::string baseDir){}; }; -} //namespace common +} // namespace common #endif /* __COMMON_IO_SELECT_DIR_H__ */ diff --git a/deps/common/lang/bitmap.cpp b/deps/common/lang/bitmap.cpp index c25cb50721538680f75eadb06433eaf78190bc48..ba00a7a6457cf2a574a76012010d2cfb09d708eb 100644 --- a/deps/common/lang/bitmap.cpp +++ b/deps/common/lang/bitmap.cpp @@ -14,10 +14,10 @@ See the Mulan PSL v2 for more details. 
*/ #include "common/lang/bitmap.h" -namespace common -{ +namespace common { -int find_first_zero(char byte, int start) { +int find_first_zero(char byte, int start) +{ for (int i = start; i < 8; i++) { if ((byte & (1 << i)) == 0) { return i; @@ -26,7 +26,8 @@ int find_first_zero(char byte, int start) { return -1; } -int find_first_setted(char byte, int start) { +int find_first_setted(char byte, int start) +{ for (int i = start; i < 8; i++) { if ((byte & (1 << i)) != 0) { return i; @@ -35,25 +36,29 @@ int find_first_setted(char byte, int start) { return -1; } -Bitmap::Bitmap(char *bitmap, int size) : bitmap_(bitmap), size_(size) { -} +Bitmap::Bitmap(char *bitmap, int size) : bitmap_(bitmap), size_(size) +{} -bool Bitmap::get_bit(int index) { +bool Bitmap::get_bit(int index) +{ char bits = bitmap_[index / 8]; return (bits & (1 << (index % 8))) != 0; } -void Bitmap::set_bit(int index) { +void Bitmap::set_bit(int index) +{ char &bits = bitmap_[index / 8]; bits |= (1 << (index % 8)); } -void Bitmap::clear_bit(int index) { +void Bitmap::clear_bit(int index) +{ char &bits = bitmap_[index / 8]; bits &= ~(1 << (index % 8)); } -int Bitmap::next_unsetted_bit(int start) { +int Bitmap::next_unsetted_bit(int start) +{ int ret = -1; int start_in_byte = start % 8; for (int iter = start / 8, end = (size_ % 8 == 0 ? size_ / 8 : size_ / 8 + 1); iter <= end; iter++) { @@ -75,7 +80,8 @@ int Bitmap::next_unsetted_bit(int start) { return ret; } -int Bitmap::next_setted_bit(int start) { +int Bitmap::next_setted_bit(int start) +{ int ret = -1; int start_in_byte = start % 8; for (int iter = start / 8, end = (size_ % 8 == 0 ? size_ / 8 : size_ / 8 + 1); iter <= end; iter++) { @@ -97,4 +103,4 @@ int Bitmap::next_setted_bit(int start) { return ret; } -} // namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/lang/bitmap.h b/deps/common/lang/bitmap.h index 00e57b34865680e87bbe4e9387e19177485590d7..0563f2c1bf77f0979ab5214d0c4399e6e92b6b27 100644 --- a/deps/common/lang/bitmap.h +++ b/deps/common/lang/bitmap.h @@ -15,8 +15,7 @@ See the Mulan PSL v2 for more details. 
*/ #ifndef __COMMON_LANG_BITMAP_H__ #define __COMMON_LANG_BITMAP_H__ -namespace common -{ +namespace common { class Bitmap { public: @@ -26,14 +25,14 @@ public: void set_bit(int index); void clear_bit(int index); - int next_unsetted_bit(int start); - int next_setted_bit(int start); + int next_unsetted_bit(int start); + int next_setted_bit(int start); private: - char * bitmap_; - int size_; + char *bitmap_; + int size_; }; -} // namespace common +} // namespace common -#endif // __COMMON_LANG_BITMAP_H__ \ No newline at end of file +#endif // __COMMON_LANG_BITMAP_H__ \ No newline at end of file diff --git a/deps/common/lang/mutex.cpp b/deps/common/lang/mutex.cpp index 4fa2e8a8f1b3f395372284604d15e2b58f5de555..8855f53456a40d5c81e68610e7e50c20d9ac0e91 100644 --- a/deps/common/lang/mutex.cpp +++ b/deps/common/lang/mutex.cpp @@ -27,37 +27,48 @@ int LockTrace::mMaxBlockTids = 8; #define CHECK_UNLOCK 0 -void LockTrace::foundDeadLock(LockID ¤t, LockTrace::LockID &other, - pthread_mutex_t *otherWaitMutex) { - std::map::iterator itLocks = - mLocks.find(otherWaitMutex); +void LockTrace::foundDeadLock(LockID ¤t, LockTrace::LockID &other, pthread_mutex_t *otherWaitMutex) +{ + std::map::iterator itLocks = mLocks.find(otherWaitMutex); if (itLocks == mLocks.end()) { LOG_ERROR("Thread %ld own mutex %p and try to get mutex %s:%d, " "other thread %ld own mutex %s:%d and try to get %p", - current.mThreadId, otherWaitMutex, current.mFile.c_str(), - current.mLine, other.mThreadId, current.mFile.c_str(), - current.mLine, otherWaitMutex); + current.mThreadId, + otherWaitMutex, + current.mFile.c_str(), + current.mLine, + other.mThreadId, + current.mFile.c_str(), + current.mLine, + otherWaitMutex); } else { LockTrace::LockID &otherRecusive = itLocks->second; LOG_ERROR("Thread %ld own mutex %p:%s:%d and try to get mutex %s:%d, " "other thread %ld own mutex %s:%d and try to get %p:%s:%d", - current.mThreadId, otherWaitMutex, otherRecusive.mFile.c_str(), - otherRecusive.mLine, current.mFile.c_str(), current.mLine, - other.mThreadId, current.mFile.c_str(), current.mLine, - otherWaitMutex, otherRecusive.mFile.c_str(), otherRecusive.mLine); + current.mThreadId, + otherWaitMutex, + otherRecusive.mFile.c_str(), + otherRecusive.mLine, + current.mFile.c_str(), + current.mLine, + other.mThreadId, + current.mFile.c_str(), + current.mLine, + otherWaitMutex, + otherRecusive.mFile.c_str(), + otherRecusive.mLine); } } -bool LockTrace::deadlockCheck(LockID ¤t, - std::set &ownMutexs, - LockTrace::LockID &other, int recusiveNum) { +bool LockTrace::deadlockCheck( + LockID ¤t, std::set &ownMutexs, LockTrace::LockID &other, int recusiveNum) +{ if (recusiveNum >= mMaxBlockTids) { return false; } - std::map::iterator otherIt = - mWaitLocks.find(other.mThreadId); + std::map::iterator otherIt = mWaitLocks.find(other.mThreadId); if (otherIt == mWaitLocks.end()) { return false; } @@ -69,8 +80,7 @@ bool LockTrace::deadlockCheck(LockID ¤t, return true; } - std::map::iterator itLocks = - mLocks.find(otherWaitMutex); + std::map::iterator itLocks = mLocks.find(otherWaitMutex); if (itLocks == mLocks.end()) { return false; } @@ -79,12 +89,11 @@ bool LockTrace::deadlockCheck(LockID ¤t, return deadlockCheck(current, ownMutexs, otherRecusive, recusiveNum + 1); } -bool LockTrace::deadlockCheck(pthread_mutex_t *mutex, const long long threadId, - const char *file, const int line) { +bool LockTrace::deadlockCheck(pthread_mutex_t *mutex, const long long threadId, const char *file, const int line) +{ mWaitLocks[threadId] = mutex; - std::map::iterator itLocks 
= - mLocks.find(mutex); + std::map::iterator itLocks = mLocks.find(mutex); if (itLocks == mLocks.end()) { return false; } @@ -101,8 +110,7 @@ bool LockTrace::deadlockCheck(pthread_mutex_t *mutex, const long long threadId, } } - std::map>::iterator it = - mOwnLocks.find(threadId); + std::map>::iterator it = mOwnLocks.find(threadId); if (it == mOwnLocks.end()) { return false; } @@ -115,8 +123,8 @@ bool LockTrace::deadlockCheck(pthread_mutex_t *mutex, const long long threadId, return deadlockCheck(current, ownMutexs, other, 1); } -bool LockTrace::checkLockTimes(pthread_mutex_t *mutex, const char *file, - const int line) { +bool LockTrace::checkLockTimes(pthread_mutex_t *mutex, const char *file, const int line) +{ std::map::iterator it = mWaitTimes.find(mutex); if (it == mWaitTimes.end()) { mWaitTimes.insert(std::pair(mutex, 1)); @@ -132,8 +140,13 @@ bool LockTrace::checkLockTimes(pthread_mutex_t *mutex, const char *file, LockTrace::LockID &lockId = mLocks[mutex]; LOG_WARN("mutex %p has been already lock %d times, this time %s:%d, first " "time:%ld:%s:%d", - mutex, lockTimes, file, line, lockId.mThreadId, - lockId.mFile.c_str(), lockId.mLine); + mutex, + lockTimes, + file, + line, + lockId.mThreadId, + lockId.mFile.c_str(), + lockId.mLine); return true; } else { @@ -141,8 +154,8 @@ bool LockTrace::checkLockTimes(pthread_mutex_t *mutex, const char *file, } } -void LockTrace::check(pthread_mutex_t *mutex, const long long threadId, - const char *file, const int line) { +void LockTrace::check(pthread_mutex_t *mutex, const long long threadId, const char *file, const int line) +{ MUTEX_LOG("Lock mutex %p, %s:%d", mutex, file, line); pthread_rwlock_rdlock(&mMapMutex); @@ -153,8 +166,8 @@ void LockTrace::check(pthread_mutex_t *mutex, const long long threadId, pthread_rwlock_unlock(&mMapMutex); } -void LockTrace::insertLock(pthread_mutex_t *mutex, const long long threadId, - const char *file, const int line) { +void LockTrace::insertLock(pthread_mutex_t *mutex, const long long threadId, const char *file, const int line) +{ LockID lockID(threadId, file, line); mLocks.insert(std::pair(mutex, lockID)); @@ -174,16 +187,16 @@ void LockTrace::insertLock(pthread_mutex_t *mutex, const long long threadId, } } -void LockTrace::lock(pthread_mutex_t *mutex, const long long threadId, - const char *file, const int line) { +void LockTrace::lock(pthread_mutex_t *mutex, const long long threadId, const char *file, const int line) +{ pthread_rwlock_wrlock(&mMapMutex); insertLock(mutex, threadId, file, line); pthread_rwlock_unlock(&mMapMutex); } -void LockTrace::tryLock(pthread_mutex_t *mutex, const long long threadId, - const char *file, const int line) { +void LockTrace::tryLock(pthread_mutex_t *mutex, const long long threadId, const char *file, const int line) +{ pthread_rwlock_wrlock(&mMapMutex); if (mLocks.find(mutex) != mLocks.end()) { pthread_rwlock_unlock(&mMapMutex); @@ -194,8 +207,8 @@ void LockTrace::tryLock(pthread_mutex_t *mutex, const long long threadId, pthread_rwlock_unlock(&mMapMutex); } -void LockTrace::unlock(pthread_mutex_t *mutex, long long threadId, - const char *file, int line) { +void LockTrace::unlock(pthread_mutex_t *mutex, long long threadId, const char *file, int line) +{ pthread_rwlock_wrlock(&mMapMutex); mLocks.erase(mutex); @@ -206,13 +219,13 @@ void LockTrace::unlock(pthread_mutex_t *mutex, long long threadId, pthread_rwlock_unlock(&mMapMutex); } -void LockTrace::toString(std::string &result) { +void LockTrace::toString(std::string &result) +{ const int TEMP_PAIR_LEN = 24; // 
pthread_mutex_lock(&mMapMutex); result = " mLocks:\n"; - for (std::map::iterator it = mLocks.begin(); - it != mLocks.end(); it++) { + for (std::map::iterator it = mLocks.begin(); it != mLocks.end(); it++) { result += it->second.toString(); char pointerBuf[TEMP_PAIR_LEN] = {0}; @@ -222,22 +235,21 @@ void LockTrace::toString(std::string &result) { } result += "mWaitTimes:\n"; - for (std::map::iterator it = mWaitTimes.begin(); - it != mWaitTimes.end(); it++) { + for (std::map::iterator it = mWaitTimes.begin(); it != mWaitTimes.end(); it++) { char pointerBuf[TEMP_PAIR_LEN] = {0}; - snprintf(pointerBuf, TEMP_PAIR_LEN, ",mutex:%p, times:%d\n", it->first, - it->second); + snprintf(pointerBuf, TEMP_PAIR_LEN, ",mutex:%p, times:%d\n", it->first, it->second); result += pointerBuf; } result += "mWaitLocks:\n"; - for (std::map::iterator it = mWaitLocks.begin(); - it != mWaitLocks.end(); it++) { + for (std::map::iterator it = mWaitLocks.begin(); it != mWaitLocks.end(); it++) { char pointerBuf[TEMP_PAIR_LEN] = {0}; - snprintf(pointerBuf, TEMP_PAIR_LEN, - "threadID: %llx" - ", mutex:%p\n", - it->first, it->second); + snprintf(pointerBuf, + TEMP_PAIR_LEN, + "threadID: %llx" + ", mutex:%p\n", + it->first, + it->second); result += pointerBuf; } // pthread_mutex_unlock(&mMapMutex); @@ -246,4 +258,4 @@ void LockTrace::toString(std::string &result) { return; } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/lang/mutex.h b/deps/common/lang/mutex.h index a678e837d08972d9ce61023ae41cccbaf96f218c..3f3944abe44561e98eb15a816a50c7a04e34b12b 100644 --- a/deps/common/lang/mutex.h +++ b/deps/common/lang/mutex.h @@ -31,61 +31,55 @@ namespace common { #define MUTEX_LOG LOG_DEBUG class LockTrace { - public: - static void check(pthread_mutex_t *mutex, const long long threadId, - const char *file, const int line); - static void lock(pthread_mutex_t *mutex, const long long threadId, - const char *file, const int line); - static void tryLock(pthread_mutex_t *mutex, const long long threadId, - const char *file, const int line); - static void unlock(pthread_mutex_t *mutex, const long long threadId, - const char *file, const int line); +public: + static void check(pthread_mutex_t *mutex, const long long threadId, const char *file, const int line); + static void lock(pthread_mutex_t *mutex, const long long threadId, const char *file, const int line); + static void tryLock(pthread_mutex_t *mutex, const long long threadId, const char *file, const int line); + static void unlock(pthread_mutex_t *mutex, const long long threadId, const char *file, const int line); static void toString(std::string &result); class LockID { - public: - LockID(const long long threadId, const char *file, const int line) - : mFile(file), mThreadId(threadId), mLine(line) {} - LockID() : mFile(), mThreadId(0), mLine(0) {} - - std::string toString() { + public: + LockID(const long long threadId, const char *file, const int line) : mFile(file), mThreadId(threadId), mLine(line) + {} + LockID() : mFile(), mThreadId(0), mLine(0) + {} + + std::string toString() + { std::ostringstream oss; - oss << "threaId:" << mThreadId << ",file name:" << mFile - << ",line:" << mLine; + oss << "threaId:" << mThreadId << ",file name:" << mFile << ",line:" << mLine; return oss.str(); } - public: + public: std::string mFile; const long long mThreadId; int mLine; }; - static void foundDeadLock(LockID ¤t, LockID &other, - pthread_mutex_t *otherWaitMutex); + static void foundDeadLock(LockID ¤t, LockID &other, 
pthread_mutex_t *otherWaitMutex); - static bool deadlockCheck(LockID ¤t, - std::set &ownMutexs, - LockID &other, int recusiveNum); + static bool deadlockCheck(LockID ¤t, std::set &ownMutexs, LockID &other, int recusiveNum); - static bool deadlockCheck(pthread_mutex_t *mutex, const long long threadId, - const char *file, const int line); + static bool deadlockCheck(pthread_mutex_t *mutex, const long long threadId, const char *file, const int line); - static bool checkLockTimes(pthread_mutex_t *mutex, const char *file, - const int line); + static bool checkLockTimes(pthread_mutex_t *mutex, const char *file, const int line); - static void insertLock(pthread_mutex_t *mutex, const long long threadId, - const char *file, const int line); + static void insertLock(pthread_mutex_t *mutex, const long long threadId, const char *file, const int line); - static void setMaxBlockThreads(int blockNum) { mMaxBlockTids = blockNum; } + static void setMaxBlockThreads(int blockNum) + { + mMaxBlockTids = blockNum; + } - public: +public: static std::set mEnableRecurisives; - protected: +protected: static std::map mLocks; static std::map mWaitTimes; static std::map mWaitLocks; @@ -108,57 +102,55 @@ class LockTrace { #define COND_INIT(cond, attr) pthread_cond_init(cond, attr) #define COND_DESTROY(cond) pthread_cond_destroy(cond) #define COND_WAIT(cond, mutex) pthread_cond_wait(cond, mutex) -#define COND_WAIT_TIMEOUT(cond, mutex, time, ret) \ - ret = pthread_cond_timedwait(cond, mutex, time) +#define COND_WAIT_TIMEOUT(cond, mutex, time, ret) ret = pthread_cond_timedwait(cond, mutex, time) #define COND_SIGNAL(cond) pthread_cond_signal(cond) #define COND_BRAODCAST(cond) pthread_cond_broadcast(cond) -#else // DEBUG_LOCK - +#else // DEBUG_LOCK -#define MUTEX_STATIC_INIT() \ - PTHREAD_MUTEX_INITIALIZER; \ +#define MUTEX_STATIC_INIT() \ + PTHREAD_MUTEX_INITIALIZER; \ LOG_INFO("PTHREAD_MUTEX_INITIALIZER"); #if defined(__MACH__) -#define MUTEX_INIT(lock, attr) \ - ({ \ - LOG_INFO("pthread_mutex_init %p", lock); \ - if (attr != NULL) { \ - int type; \ - pthread_mutexattr_gettype(attr, &type); \ - if (type == PTHREAD_MUTEX_RECURSIVE) { \ - LockTrace::mEnableRecurisives.insert(lock); \ - } \ - } \ - int result = pthread_mutex_init(lock, attr); \ - result; \ +#define MUTEX_INIT(lock, attr) \ + ({ \ + LOG_INFO("pthread_mutex_init %p", lock); \ + if (attr != NULL) { \ + int type; \ + pthread_mutexattr_gettype(attr, &type); \ + if (type == PTHREAD_MUTEX_RECURSIVE) { \ + LockTrace::mEnableRecurisives.insert(lock); \ + } \ + } \ + int result = pthread_mutex_init(lock, attr); \ + result; \ }) #else -#define MUTEX_INIT(lock, attr) \ - ({ \ - LOG_INFO("pthread_mutex_init %p", lock); \ - if (attr != NULL) { \ - int type; \ - pthread_mutexattr_gettype(attr, &type); \ - if (type == PTHREAD_MUTEX_RECURSIVE_NP) { \ - LockTrace::mEnableRecurisives.insert(lock); \ - } \ - } \ - int result = pthread_mutex_init(lock, attr); \ - result; \ +#define MUTEX_INIT(lock, attr) \ + ({ \ + LOG_INFO("pthread_mutex_init %p", lock); \ + if (attr != NULL) { \ + int type; \ + pthread_mutexattr_gettype(attr, &type); \ + if (type == PTHREAD_MUTEX_RECURSIVE_NP) { \ + LockTrace::mEnableRecurisives.insert(lock); \ + } \ + } \ + int result = pthread_mutex_init(lock, attr); \ + result; \ }) #endif -#define MUTEX_DESTROY(lock) \ - ({ \ - LockTrace::mEnableRecurisives.erase(lock); \ - int result = pthread_mutex_destroy(lock); \ - LOG_INFO("pthread_mutex_destroy %p", lock); \ - result; \ +#define MUTEX_DESTROY(lock) \ + ({ \ + 
LockTrace::mEnableRecurisives.erase(lock); \ + int result = pthread_mutex_destroy(lock); \ + LOG_INFO("pthread_mutex_destroy %p", lock); \ + result; \ }) #define MUTEX_LOCK(mutex) \ @@ -172,81 +164,80 @@ class LockTrace { result; \ }) -#define MUTEX_TRYLOCK(mutex) \ - ({ \ - LockTrace::check(mutex, gettid(), __FILE__, __LINE__); \ - int result = pthread_mutex_trylock(mutex); \ - if (result == 0) { \ - LockTrace::lock(mutex, gettid(), __FILE__, __LINE__); \ - } \ - result; \ +#define MUTEX_TRYLOCK(mutex) \ + ({ \ + LockTrace::check(mutex, gettid(), __FILE__, __LINE__); \ + int result = pthread_mutex_trylock(mutex); \ + if (result == 0) { \ + LockTrace::lock(mutex, gettid(), __FILE__, __LINE__); \ + } \ + result; \ }) -#define MUTEX_UNLOCK(lock) \ - ({ \ - int result = pthread_mutex_unlock(lock); \ - LockTrace::unlock(lock, gettid(), __FILE__, __LINE__); \ - MUTEX_LOG("mutex:%p has been ulocked", lock); \ - if (result) { \ - LOG_ERROR("Failed to unlock %p, rc %d:%s", lock, errno, \ - strerror(errno)); \ - } \ - result; \ +#define MUTEX_UNLOCK(lock) \ + ({ \ + int result = pthread_mutex_unlock(lock); \ + LockTrace::unlock(lock, gettid(), __FILE__, __LINE__); \ + MUTEX_LOG("mutex:%p has been ulocked", lock); \ + if (result) { \ + LOG_ERROR("Failed to unlock %p, rc %d:%s", lock, errno, strerror(errno)); \ + } \ + result; \ }) -#define COND_INIT(cond, attr) \ - ({ \ - LOG_INFO("pthread_cond_init"); \ - int result = pthread_cond_init(cond, attr); \ - result; \ +#define COND_INIT(cond, attr) \ + ({ \ + LOG_INFO("pthread_cond_init"); \ + int result = pthread_cond_init(cond, attr); \ + result; \ }) -#define COND_DESTROY(cond) \ - ({ \ - int result = pthread_cond_destroy(cond); \ - LOG_INFO("pthread_cond_destroy"); \ - result; \ +#define COND_DESTROY(cond) \ + ({ \ + int result = pthread_cond_destroy(cond); \ + LOG_INFO("pthread_cond_destroy"); \ + result; \ }) -#define COND_WAIT(cond, mutex) \ - ({ \ - MUTEX_LOG("pthread_cond_wait, cond:%p, mutex:%p", cond, mutex); \ - LockTrace::unlock(mutex, gettid(), __FILE__, __LINE__); \ - int result = pthread_cond_wait(cond, mutex); \ - LockTrace::check(mutex, gettid(), __FILE__, __LINE__); \ - LockTrace::lock(mutex, gettid(), __FILE__, __LINE__); \ - MUTEX_LOG("Lock %p under pthread_cond_wait", mutex); \ - result; \ +#define COND_WAIT(cond, mutex) \ + ({ \ + MUTEX_LOG("pthread_cond_wait, cond:%p, mutex:%p", cond, mutex); \ + LockTrace::unlock(mutex, gettid(), __FILE__, __LINE__); \ + int result = pthread_cond_wait(cond, mutex); \ + LockTrace::check(mutex, gettid(), __FILE__, __LINE__); \ + LockTrace::lock(mutex, gettid(), __FILE__, __LINE__); \ + MUTEX_LOG("Lock %p under pthread_cond_wait", mutex); \ + result; \ }) -#define COND_WAIT_TIMEOUT(cond, mutex, time, ret) \ - ({ \ - MUTEX_LOG("pthread_cond_timedwait, cond:%p, mutex:%p", cond, mutex); \ - LockTrace::unlock(mutex, gettid(), __FILE__, __LINE__); \ - int result = pthread_cond_timedwait(cond, mutex, time); \ - if (result == 0) { \ - LockTrace::check(mutex, gettid(), __FILE__, __LINE__); \ - LockTrace::lock(mutex, gettid(), __FILE__, __LINE__); \ - MUTEX_LOG("Lock %p under pthread_cond_wait", mutex); \ - } \ - result; \ +#define COND_WAIT_TIMEOUT(cond, mutex, time, ret) \ + ({ \ + MUTEX_LOG("pthread_cond_timedwait, cond:%p, mutex:%p", cond, mutex); \ + LockTrace::unlock(mutex, gettid(), __FILE__, __LINE__); \ + int result = pthread_cond_timedwait(cond, mutex, time); \ + if (result == 0) { \ + LockTrace::check(mutex, gettid(), __FILE__, __LINE__); \ + LockTrace::lock(mutex, gettid(), __FILE__, __LINE__); \ 
+ MUTEX_LOG("Lock %p under pthread_cond_wait", mutex); \ + } \ + result; \ }) -#define COND_SIGNAL(cond) \ - ({ \ - int result = pthread_cond_signal(cond); \ - MUTEX_LOG("pthread_cond_signal, cond:%p", cond); \ - result; \ +#define COND_SIGNAL(cond) \ + ({ \ + int result = pthread_cond_signal(cond); \ + MUTEX_LOG("pthread_cond_signal, cond:%p", cond); \ + result; \ }) -#define COND_BRAODCAST(cond) \ - ({ \ - int result = pthread_cond_broadcast(cond); \ - MUTEX_LOG("pthread_cond_broadcast, cond:%p", cond); \ - result; \ +#define COND_BRAODCAST(cond) \ + ({ \ + int result = pthread_cond_broadcast(cond); \ + MUTEX_LOG("pthread_cond_broadcast, cond:%p", cond); \ + result; \ }) -#endif // DEBUG_LOCK +#endif // DEBUG_LOCK -} //namespace common -#endif // __COMMON_LANG_MUTEX_H__ +} // namespace common +#endif // __COMMON_LANG_MUTEX_H__ diff --git a/deps/common/lang/serializable.h b/deps/common/lang/serializable.h index 35782268377d644d6400ea64076ec4901da8545c..a1333fa43ff9fc84c3b76a528c762e0df3aa56f2 100644 --- a/deps/common/lang/serializable.h +++ b/deps/common/lang/serializable.h @@ -21,11 +21,7 @@ namespace common { /** * Through this type to determine object type */ -enum { - MESSAGE_BASIC = 100, - MESSAGE_BASIC_REQUEST = 1000, - MESSAGE_BASIC_RESPONSE = -1000 -}; +enum { MESSAGE_BASIC = 100, MESSAGE_BASIC_REQUEST = 1000, MESSAGE_BASIC_RESPONSE = -1000 }; class Deserializable { public: @@ -68,5 +64,5 @@ public: virtual void to_string(std::string &output) const = 0; }; -} //namespace common +} // namespace common #endif /* __COMMON_LANG_SERIALIZABLE_H__ */ diff --git a/deps/common/lang/string.cpp b/deps/common/lang/string.cpp index fe9b462ab27df06cfa101a45923a39795ff25b52..b79310c5edad157505c4e21c1ccf420e60e375ef 100644 --- a/deps/common/lang/string.cpp +++ b/deps/common/lang/string.cpp @@ -28,8 +28,9 @@ See the Mulan PSL v2 for more details. 
*/ #include "common/log/log.h" namespace common { -char *strip(char *str_) { - if (str_ == NULL || *str_ == 0){ +char *strip(char *str_) +{ + if (str_ == NULL || *str_ == 0) { LOG_ERROR("The augument is invalid!"); return str_; } @@ -45,7 +46,8 @@ char *strip(char *str_) { return head; } -void strip(std::string &str) { +void strip(std::string &str) +{ size_t head = 0; while (isspace(str[head])) { @@ -61,24 +63,27 @@ void strip(std::string &str) { } // Translation functions with templates are defined in the header file -std::string size_to_pad_str(int size, int pad) { +std::string size_to_pad_str(int size, int pad) +{ std::ostringstream ss; ss << std::setw(pad) << std::setfill('0') << size; return ss.str(); } -std::string &str_to_upper(std::string &s) { +std::string &str_to_upper(std::string &s) +{ std::transform(s.begin(), s.end(), s.begin(), (int (*)(int)) & std::toupper); return s; } -std::string &str_to_lower(std::string &s) { +std::string &str_to_lower(std::string &s) +{ std::transform(s.begin(), s.end(), s.begin(), (int (*)(int)) & std::tolower); return s; } -void split_string(const std::string &str, std::string delim, - std::set &results) { +void split_string(const std::string &str, std::string delim, std::set &results) +{ int cut_at; std::string tmp_str(str); while ((cut_at = tmp_str.find_first_of(delim)) != (signed)tmp_str.npos) { @@ -93,8 +98,8 @@ void split_string(const std::string &str, std::string delim, } } -void split_string(const std::string &str, std::string delim, - std::vector &results) { +void split_string(const std::string &str, std::string delim, std::vector &results) +{ int cut_at; std::string tmp_str(str); while ((cut_at = tmp_str.find_first_of(delim)) != (signed)tmp_str.npos) { @@ -109,8 +114,8 @@ void split_string(const std::string &str, std::string delim, } } -void split_string(char *str, char dim, std::vector &results, - bool keep_null) { +void split_string(char *str, char dim, std::vector &results, bool keep_null) +{ char *p = str; char *l = p; while (*p) { @@ -127,11 +132,11 @@ void split_string(char *str, char dim, std::vector &results, return; } -void merge_string(std::string &str, std::string delim, - std::vector &source, size_t result_len){ +void merge_string(std::string &str, std::string delim, std::vector &source, size_t result_len) +{ std::ostringstream ss; - if (source.empty() ) { + if (source.empty()) { str = ss.str(); return; } @@ -143,18 +148,17 @@ void merge_string(std::string &str, std::string delim, for (unsigned int i = 0; i < result_len; i++) { if (i == 0) { ss << source[i]; - }else { + } else { ss << delim << source[i]; } - } str = ss.str(); - return ; + return; } -void replace(std::string &str, const std::string &old, - const std::string &new_str) { +void replace(std::string &str, const std::string &old, const std::string &new_str) +{ if (old.compare(new_str) == 0) { return; } @@ -185,7 +189,8 @@ void replace(std::string &str, const std::string &old, return; } -char *bin_to_hex(const char *s, const int len, char *hex_buff) { +char *bin_to_hex(const char *s, const int len, char *hex_buff) +{ int new_len = 0; unsigned char *end = (unsigned char *)s + len; for (unsigned char *p = (unsigned char *)s; p < end; p++) { @@ -196,7 +201,8 @@ char *bin_to_hex(const char *s, const int len, char *hex_buff) { return hex_buff; } -char *hex_to_bin(const char *s, char *bin_buff, int *dest_len) { +char *hex_to_bin(const char *s, char *bin_buff, int *dest_len) +{ char buff[3]; char *src; int src_len; @@ -225,7 +231,8 @@ char *hex_to_bin(const char *s, char 
*bin_buff, int *dest_len) { return bin_buff; } -bool is_blank(const char *s) { +bool is_blank(const char *s) +{ if (s == nullptr) { return true; } @@ -238,4 +245,4 @@ bool is_blank(const char *s) { return true; } -} //namespace common +} // namespace common diff --git a/deps/common/lang/string.h b/deps/common/lang/string.h index cc39cbbfef00e8250b95b133dbb6360ed65b3c8e..aace2beadca6e6e49abc8f0e9f46b3e4d04b2341 100644 --- a/deps/common/lang/string.h +++ b/deps/common/lang/string.h @@ -65,20 +65,15 @@ std::string &str_to_lower(std::string &s); * @param[in] delims elimiter characters * @param[in,out] results ector containing the split up string */ -void split_string(const std::string &str, std::string delim, - std::set &results); -void split_string(const std::string &str, std::string delim, - std::vector &results); -void split_string(char *str, char dim, std::vector &results, - bool keep_null = false); - -void merge_string(std::string &str, std::string delim, - std::vector &result, size_t result_len = 0); +void split_string(const std::string &str, std::string delim, std::set &results); +void split_string(const std::string &str, std::string delim, std::vector &results); +void split_string(char *str, char dim, std::vector &results, bool keep_null = false); + +void merge_string(std::string &str, std::string delim, std::vector &result, size_t result_len = 0); /** * replace old with new in the string */ -void replace(std::string &str, const std::string &old, - const std::string &new_str); +void replace(std::string &str, const std::string &old, const std::string &new_str); /** * binary to hexadecimal @@ -102,8 +97,7 @@ char *hex_to_bin(const char *s, char *bin_buff, int *dest_len); * number, \c false otherwise */ template -bool str_to_val(const std::string &str, T &val, - std::ios_base &(*radix)(std::ios_base &) = std::dec); +bool str_to_val(const std::string &str, T &val, std::ios_base &(*radix)(std::ios_base &) = std::dec); /** * Convert a numeric value into its string representation @@ -116,17 +110,17 @@ bool str_to_val(const std::string &str, T &val, * (hexidecimal). 
*/ template -void val_to_str(const T &val, std::string &str, - std::ios_base &(*radix)(std::ios_base &) = std::dec); +void val_to_str(const T &val, std::string &str, std::ios_base &(*radix)(std::ios_base &) = std::dec); /** * get type's name */ -template std::string get_type_name(const T &val); +template +std::string get_type_name(const T &val); template -bool str_to_val(const std::string &str, T &val, - std::ios_base &(*radix)(std::ios_base &)/* = std::dec */) { +bool str_to_val(const std::string &str, T &val, std::ios_base &(*radix)(std::ios_base &)/* = std::dec */) +{ bool success = true; std::istringstream is(str); if (!(is >> radix >> val)) { @@ -137,14 +131,16 @@ bool str_to_val(const std::string &str, T &val, } template -void val_to_str(const T &val, std::string &str, - std::ios_base &(*radix)(std::ios_base &)/* = std::dec */) { +void val_to_str(const T &val, std::string &str, std::ios_base &(*radix)(std::ios_base &)/* = std::dec */) +{ std::stringstream strm; strm << radix << val; str = strm.str(); } -template std::string get_type_name(const T &val) { +template +std::string get_type_name(const T &val) +{ int status = 0; char *stmp = abi::__cxa_demangle(typeid(val).name(), 0, 0, &status); if (!stmp) @@ -158,5 +154,5 @@ template std::string get_type_name(const T &val) { bool is_blank(const char *s); -} //namespace common -#endif // __COMMON_LANG_STRING_H__ +} // namespace common +#endif // __COMMON_LANG_STRING_H__ diff --git a/deps/common/log/log.cpp b/deps/common/log/log.cpp index 9937e42043b6c319ca4c26f516b45f648e70f056..1b41f6f1dffc5a304e3edbfcdba967977b38cb35 100644 --- a/deps/common/log/log.cpp +++ b/deps/common/log/log.cpp @@ -23,9 +23,9 @@ namespace common { Log *g_log = nullptr; -Log::Log(const std::string &log_file_name, const LOG_LEVEL log_level, - const LOG_LEVEL console_level) - : log_name_(log_file_name), log_level_(log_level), console_level_(console_level) { +Log::Log(const std::string &log_file_name, const LOG_LEVEL log_level, const LOG_LEVEL console_level) + : log_name_(log_file_name), log_level_(log_level), console_level_(console_level) +{ prefix_map_[LOG_LEVEL_PANIC] = "PANIC:"; prefix_map_[LOG_LEVEL_ERR] = "ERROR:"; prefix_map_[LOG_LEVEL_WARN] = "WARNNING:"; @@ -45,7 +45,8 @@ Log::Log(const std::string &log_file_name, const LOG_LEVEL log_level, check_param_valid(); } -Log::~Log(void) { +Log::~Log(void) +{ pthread_mutex_lock(&lock_); if (ofs_.is_open()) { ofs_.close(); @@ -55,7 +56,8 @@ Log::~Log(void) { pthread_mutex_destroy(&lock_); } -void Log::check_param_valid() { +void Log::check_param_valid() +{ assert(!log_name_.empty()); assert(LOG_LEVEL_PANIC <= log_level_ && log_level_ < LOG_LEVEL_LAST); assert(LOG_LEVEL_PANIC <= console_level_ && console_level_ < LOG_LEVEL_LAST); @@ -63,7 +65,8 @@ void Log::check_param_valid() { return; } -bool Log::check_output(const LOG_LEVEL level, const char *module) { +bool Log::check_output(const LOG_LEVEL level, const char *module) +{ if (LOG_LEVEL_LAST > level && level <= console_level_) { return true; } @@ -77,8 +80,8 @@ bool Log::check_output(const LOG_LEVEL level, const char *module) { return false; } -int Log::output(const LOG_LEVEL level, const char *module, const char *prefix, - const char *f, ...) { +int Log::output(const LOG_LEVEL level, const char *module, const char *prefix, const char *f, ...) 
+{ bool locked = false; try { va_list args; @@ -127,7 +130,8 @@ int Log::output(const LOG_LEVEL level, const char *module, const char *prefix, return LOG_STATUS_OK; } -int Log::set_console_level(LOG_LEVEL console_level) { +int Log::set_console_level(LOG_LEVEL console_level) +{ if (LOG_LEVEL_PANIC <= console_level && console_level < LOG_LEVEL_LAST) { console_level_ = console_level; return LOG_STATUS_OK; @@ -136,9 +140,13 @@ int Log::set_console_level(LOG_LEVEL console_level) { return LOG_STATUS_ERR; } -LOG_LEVEL Log::get_console_level() { return console_level_; } +LOG_LEVEL Log::get_console_level() +{ + return console_level_; +} -int Log::set_log_level(LOG_LEVEL log_level) { +int Log::set_log_level(LOG_LEVEL log_level) +{ if (LOG_LEVEL_PANIC <= log_level && log_level < LOG_LEVEL_LAST) { log_level_ = log_level; return LOG_STATUS_OK; @@ -147,9 +155,13 @@ int Log::set_log_level(LOG_LEVEL log_level) { return LOG_STATUS_ERR; } -LOG_LEVEL Log::get_log_level() { return log_level_; } +LOG_LEVEL Log::get_log_level() +{ + return log_level_; +} -const char *Log::prefix_msg(LOG_LEVEL level) { +const char *Log::prefix_msg(LOG_LEVEL level) +{ if (LOG_LEVEL_PANIC <= level && level < LOG_LEVEL_LAST) { return prefix_map_[level].c_str(); } @@ -157,22 +169,27 @@ const char *Log::prefix_msg(LOG_LEVEL level) { return empty_prefix; } -void Log::set_default_module(const std::string &modules) { +void Log::set_default_module(const std::string &modules) +{ split_string(modules, ",", default_set_); } -int Log::set_rotate_type(LOG_ROTATE rotate_type) { +int Log::set_rotate_type(LOG_ROTATE rotate_type) +{ if (LOG_ROTATE_BYDAY <= rotate_type && rotate_type < LOG_ROTATE_LAST) { rotate_type_ = rotate_type; } return LOG_STATUS_OK; } -LOG_ROTATE Log::get_rotate_type() { return rotate_type_; } +LOG_ROTATE Log::get_rotate_type() +{ + return rotate_type_; +} -int Log::rotate_by_day(const int year, const int month, const int day) { - if (log_date_.year_ == year && log_date_.mon_ == month && - log_date_.day_ == day) { +int Log::rotate_by_day(const int year, const int month, const int day) +{ + if (log_date_.year_ == year && log_date_.mon_ == month && log_date_.day_ == day) { // Don't need rotate return 0; } @@ -196,7 +213,8 @@ int Log::rotate_by_day(const int year, const int month, const int day) { return 0; } -int Log::rename_old_logs() { +int Log::rename_old_logs() +{ int log_index = 1; int max_log_index = 0; char log_index_str[4] = {0}; @@ -234,7 +252,8 @@ int Log::rename_old_logs() { return LOG_STATUS_OK; } -int Log::rotate_by_size() { +int Log::rotate_by_size() +{ if (log_line_ < 0) { // The first time open log file ofs_.open(log_name_.c_str(), std::ios_base::out | std::ios_base::app); @@ -260,8 +279,7 @@ int Log::rotate_by_size() { std::string log_name_new = log_name_ + "." 
+ log_index_str; result = rename(log_name_.c_str(), log_name_new.c_str()); if (result) { - std::cerr << "Failed to rename " << log_name_ << " to " << log_name_new - << std::endl; + std::cerr << "Failed to rename " << log_name_ << " to " << log_name_new << std::endl; } ofs_.open(log_name_.c_str(), std::ios_base::out | std::ios_base::app); @@ -278,7 +296,8 @@ int Log::rotate_by_size() { return LOG_STATUS_OK; } -int Log::rotate(const int year, const int month, const int day) { +int Log::rotate(const int year, const int month, const int day) +{ int result = 0; pthread_mutex_lock(&lock_); if (rotate_type_ == LOG_ROTATE_BYDAY) { @@ -291,17 +310,19 @@ int Log::rotate(const int year, const int month, const int day) { return result; } -LoggerFactory::LoggerFactory() { +LoggerFactory::LoggerFactory() +{ // Auto-generated constructor stub } -LoggerFactory::~LoggerFactory() { +LoggerFactory::~LoggerFactory() +{ // Auto-generated destructor stub } -int LoggerFactory::init(const std::string &log_file, Log **logger, - LOG_LEVEL log_level, LOG_LEVEL console_level, - LOG_ROTATE rotate_type) { +int LoggerFactory::init( + const std::string &log_file, Log **logger, LOG_LEVEL log_level, LOG_LEVEL console_level, LOG_ROTATE rotate_type) +{ Log *log = new (std::nothrow) Log(log_file, log_level, console_level); if (log == nullptr) { std::cout << "Error: fail to construct a log object!" << std::endl; @@ -314,8 +335,9 @@ int LoggerFactory::init(const std::string &log_file, Log **logger, return 0; } -int LoggerFactory::init_default(const std::string &log_file, LOG_LEVEL log_level, - LOG_LEVEL console_level, LOG_ROTATE rotate_type) { +int LoggerFactory::init_default( + const std::string &log_file, LOG_LEVEL log_level, LOG_LEVEL console_level, LOG_ROTATE rotate_type) +{ if (g_log != nullptr) { LOG_WARN("Default logger has been initialized"); return 0; @@ -324,4 +346,4 @@ int LoggerFactory::init_default(const std::string &log_file, LOG_LEVEL log_level return init(log_file, &g_log, log_level, console_level, rotate_type); } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/log/log.h b/deps/common/log/log.h index edab072c78f23d710e1c20bb80a551f42b829f41..bca3577ea41ea49ed17b2e23bf48409cc2b04c7c 100644 --- a/deps/common/log/log.h +++ b/deps/common/log/log.h @@ -45,14 +45,10 @@ typedef enum { LOG_LEVEL_LAST } LOG_LEVEL; -typedef enum { - LOG_ROTATE_BYDAY = 0, - LOG_ROTATE_BYSIZE, - LOG_ROTATE_LAST -} LOG_ROTATE; +typedef enum { LOG_ROTATE_BYDAY = 0, LOG_ROTATE_BYSIZE, LOG_ROTATE_LAST } LOG_ROTATE; class Log { - public: +public: Log(const std::string &log_name, const LOG_LEVEL log_level = LOG_LEVEL_INFO, const LOG_LEVEL console_level = LOG_LEVEL_WARN); ~Log(void); @@ -65,29 +61,28 @@ class Log { * If the header information should be outputed * please use LOG_PANIC, LOG_ERROR ... 
*/ - template + template Log &operator<<(T message); - template + template int panic(T message); - template + template int error(T message); - template + template int warnning(T message); - template + template int info(T message); - template + template int debug(T message); - template + template int trace(T message); - int output(const LOG_LEVEL level, const char *module, const char *prefix, - const char *f, ...); + int output(const LOG_LEVEL level, const char *module, const char *prefix, const char *f, ...); int set_console_level(const LOG_LEVEL console_level); LOG_LEVEL get_console_level(); @@ -110,17 +105,17 @@ class Log { int rotate(const int year = 0, const int month = 0, const int day = 0); - private: +private: void check_param_valid(); int rotate_by_size(); int rename_old_logs(); int rotate_by_day(const int year, const int month, const int day); - template + template int out(const LOG_LEVEL console_level, const LOG_LEVEL log_level, T &message); - private: +private: pthread_mutex_t lock_; std::ofstream ofs_; std::string log_name_; @@ -145,57 +140,63 @@ class Log { }; class LoggerFactory { - public: +public: LoggerFactory(); virtual ~LoggerFactory(); - static int init(const std::string &log_file, Log **logger, - LOG_LEVEL log_level = LOG_LEVEL_INFO, - LOG_LEVEL console_level = LOG_LEVEL_WARN, - LOG_ROTATE rotate_type = LOG_ROTATE_BYDAY); + static int init(const std::string &log_file, Log **logger, LOG_LEVEL log_level = LOG_LEVEL_INFO, + LOG_LEVEL console_level = LOG_LEVEL_WARN, LOG_ROTATE rotate_type = LOG_ROTATE_BYDAY); - static int init_default(const std::string &log_file, - LOG_LEVEL log_level = LOG_LEVEL_INFO, - LOG_LEVEL console_level = LOG_LEVEL_WARN, - LOG_ROTATE rotate_type = LOG_ROTATE_BYDAY); + static int init_default(const std::string &log_file, LOG_LEVEL log_level = LOG_LEVEL_INFO, + LOG_LEVEL console_level = LOG_LEVEL_WARN, LOG_ROTATE rotate_type = LOG_ROTATE_BYDAY); }; extern Log *g_log; #ifndef __FILE_NAME__ -#define __FILE_NAME__ (strrchr(__FILE__,'/')?strrchr(__FILE__,'/')+1:__FILE__) +#define __FILE_NAME__ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #endif - -#define LOG_HEAD(prefix, level) \ - if (common::g_log) { \ - time_t now_time; \ - time(&now_time); \ - struct tm *p = localtime(&now_time); \ - char sz_head[64] = {0}; \ - if (p) { \ - sprintf(sz_head, "%d-%d-%d %d:%d:%u pid:%u tid:%llx ", p->tm_year + 1900, \ - p->tm_mon + 1, p->tm_mday, p->tm_hour, p->tm_min, p->tm_sec, \ - (u32_t)getpid(), gettid()); \ - common::g_log->rotate(p->tm_year + 1900, p->tm_mon + 1, p->tm_mday); \ - } \ - snprintf(prefix, sizeof(prefix), "[%s %s %s %s %u]>>", sz_head, \ - (common::g_log)->prefix_msg(level), __FILE_NAME__, \ - __FUNCTION__, (u32_t)__LINE__); \ +#define LOG_HEAD(prefix, level) \ + if (common::g_log) { \ + time_t now_time; \ + time(&now_time); \ + struct tm *p = localtime(&now_time); \ + char sz_head[64] = {0}; \ + if (p) { \ + sprintf(sz_head, \ + "%d-%d-%d %d:%d:%u pid:%u tid:%llx ", \ + p->tm_year + 1900, \ + p->tm_mon + 1, \ + p->tm_mday, \ + p->tm_hour, \ + p->tm_min, \ + p->tm_sec, \ + (u32_t)getpid(), \ + gettid()); \ + common::g_log->rotate(p->tm_year + 1900, p->tm_mon + 1, p->tm_mday); \ + } \ + snprintf(prefix, \ + sizeof(prefix), \ + "[%s %s %s %s %u]>>", \ + sz_head, \ + (common::g_log)->prefix_msg(level), \ + __FILE_NAME__, \ + __FUNCTION__, \ + (u32_t)__LINE__); \ } -#define LOG_OUTPUT(level, fmt, ...) 
\ - do { \ - using namespace common; \ - if (g_log && g_log->check_output(level, __FILE_NAME__)) { \ - char prefix[ONE_KILO] = {0}; \ - LOG_HEAD(prefix, level); \ - g_log->output(level, __FILE_NAME__, prefix, fmt, ##__VA_ARGS__); \ - } \ +#define LOG_OUTPUT(level, fmt, ...) \ + do { \ + using namespace common; \ + if (g_log && g_log->check_output(level, __FILE_NAME__)) { \ + char prefix[ONE_KILO] = {0}; \ + LOG_HEAD(prefix, level); \ + g_log->output(level, __FILE_NAME__, prefix, fmt, ##__VA_ARGS__); \ + } \ } while (0) -#define LOG_DEFAULT(fmt, ...) \ - LOG_OUTPUT(common::g_log->get_log_level(), fmt, ##__VA_ARGS__) +#define LOG_DEFAULT(fmt, ...) LOG_OUTPUT(common::g_log->get_log_level(), fmt, ##__VA_ARGS__) #define LOG_PANIC(fmt, ...) LOG_OUTPUT(common::LOG_LEVEL_PANIC, fmt, ##__VA_ARGS__) #define LOG_ERROR(fmt, ...) LOG_OUTPUT(common::LOG_LEVEL_ERR, fmt, ##__VA_ARGS__) #define LOG_WARN(fmt, ...) LOG_OUTPUT(common::LOG_LEVEL_WARN, fmt, ##__VA_ARGS__) @@ -203,48 +204,56 @@ extern Log *g_log; #define LOG_DEBUG(fmt, ...) LOG_OUTPUT(common::LOG_LEVEL_DEBUG, fmt, ##__VA_ARGS__) #define LOG_TRACE(fmt, ...) LOG_OUTPUT(common::LOG_LEVEL_TRACE, fmt, ##__VA_ARGS__) -template -Log &Log::operator<<(T msg) { +template +Log &Log::operator<<(T msg) +{ // at this time, the input level is the default log level out(console_level_, log_level_, msg); return *this; } -template -int Log::panic(T message) { +template +int Log::panic(T message) +{ return out(LOG_LEVEL_PANIC, LOG_LEVEL_PANIC, message); } -template -int Log::error(T message) { +template +int Log::error(T message) +{ return out(LOG_LEVEL_ERR, LOG_LEVEL_ERR, message); } -template -int Log::warnning(T message) { +template +int Log::warnning(T message) +{ return out(LOG_LEVEL_WARN, LOG_LEVEL_WARN, message); } -template -int Log::info(T message) { +template +int Log::info(T message) +{ return out(LOG_LEVEL_INFO, LOG_LEVEL_INFO, message); } -template -int Log::debug(T message) { +template +int Log::debug(T message) +{ return out(LOG_LEVEL_DEBUG, LOG_LEVEL_DEBUG, message); } -template -int Log::trace(T message) { +template +int Log::trace(T message) +{ return out(LOG_LEVEL_TRACE, LOG_LEVEL_TRACE, message); } -template -int Log::out(const LOG_LEVEL console_level, const LOG_LEVEL log_level, T &msg) { +template +int Log::out(const LOG_LEVEL console_level, const LOG_LEVEL log_level, T &msg) +{ bool locked = false; - if (console_level < LOG_LEVEL_PANIC || console_level > console_level_ || - log_level < LOG_LEVEL_PANIC || log_level > log_level_) { + if (console_level < LOG_LEVEL_PANIC || console_level > console_level_ || log_level < LOG_LEVEL_PANIC || + log_level > log_level_) { return LOG_STATUS_OK; } try { @@ -276,21 +285,20 @@ int Log::out(const LOG_LEVEL console_level, const LOG_LEVEL log_level, T &msg) { } #ifndef ASSERT -#define ASSERT(expression, description, ...) \ - do { \ - if (!(expression)) { \ - if (common::g_log) { \ - LOG_PANIC(description, ##__VA_ARGS__); \ - LOG_PANIC("\n"); \ - } \ - assert(expression); \ - } \ +#define ASSERT(expression, description, ...) 
\ + do { \ + if (!(expression)) { \ + if (common::g_log) { \ + LOG_PANIC(description, ##__VA_ARGS__); \ + LOG_PANIC("\n"); \ + } \ + assert(expression); \ + } \ } while (0) -#endif // ASSERT +#endif // ASSERT -#define SYS_OUTPUT_FILE_POS \ - ", File:" << __FILE__ << ", line:" << __LINE__ << ",function:" << __FUNCTION__ +#define SYS_OUTPUT_FILE_POS ", File:" << __FILE__ << ", line:" << __LINE__ << ",function:" << __FUNCTION__ #define SYS_OUTPUT_ERROR ",error:" << errno << ":" << strerror(errno) -} //namespace common -#endif //__COMMON_LOG_LOG_H__ +} // namespace common +#endif //__COMMON_LOG_LOG_H__ diff --git a/deps/common/math/md5.cpp b/deps/common/math/md5.cpp index 81afb1192fbf45db984cd47d5a22f5a2bcfc9ba0..5777bd32868292b7ffdeebb13e2d30b0f1a23ce3 100644 --- a/deps/common/math/md5.cpp +++ b/deps/common/math/md5.cpp @@ -41,10 +41,70 @@ static void Decode(UINT4 *, unsigned char *, unsigned int); static void MD5_memcpy(POINTER, POINTER, unsigned int); static void MD5_memset(POINTER, int, unsigned int); -static unsigned char PADDING[64] = { - 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; +static unsigned char PADDING[64] = {0x80, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0}; /* * F, G, H and I are basic MD5 functions. @@ -63,35 +123,36 @@ static unsigned char PADDING[64] = { * FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4. Rotation is * separate from addition to prevent recomputation. */ -#define FF(a, b, c, d, x, s, ac) \ - { \ - (a) += F((b), (c), (d)) + (x) + (UINT4)(ac); \ - (a) = ROTATE_LEFT((a), (s)); \ - (a) += (b); \ +#define FF(a, b, c, d, x, s, ac) \ + { \ + (a) += F((b), (c), (d)) + (x) + (UINT4)(ac); \ + (a) = ROTATE_LEFT((a), (s)); \ + (a) += (b); \ } -#define GG(a, b, c, d, x, s, ac) \ - { \ - (a) += G((b), (c), (d)) + (x) + (UINT4)(ac); \ - (a) = ROTATE_LEFT((a), (s)); \ - (a) += (b); \ +#define GG(a, b, c, d, x, s, ac) \ + { \ + (a) += G((b), (c), (d)) + (x) + (UINT4)(ac); \ + (a) = ROTATE_LEFT((a), (s)); \ + (a) += (b); \ } -#define HH(a, b, c, d, x, s, ac) \ - { \ - (a) += H((b), (c), (d)) + (x) + (UINT4)(ac); \ - (a) = ROTATE_LEFT((a), (s)); \ - (a) += (b); \ +#define HH(a, b, c, d, x, s, ac) \ + { \ + (a) += H((b), (c), (d)) + (x) + (UINT4)(ac); \ + (a) = ROTATE_LEFT((a), (s)); \ + (a) += (b); \ } -#define II(a, b, c, d, x, s, ac) \ - { \ - (a) += I((b), (c), (d)) + (x) + (UINT4)(ac); \ - (a) = ROTATE_LEFT((a), (s)); \ - (a) += (b); \ +#define II(a, b, c, d, x, s, ac) \ + { \ + (a) += I((b), (c), (d)) + (x) + (UINT4)(ac); \ + (a) = ROTATE_LEFT((a), (s)); \ + (a) += (b); \ } /* * MD5 initialization. Begins an MD5 operation, writing a new context. */ -void MD5Init(MD5_CTX *context) { +void MD5Init(MD5_CTX *context) +{ context->count[0] = context->count[1] = 0; /* * Load magic initialization constants. @@ -106,16 +167,17 @@ void MD5Init(MD5_CTX *context) { * MD5 block update operation. Continues an MD5 message-digest operation, * processing another message block, and updating the context. 
*/ -void MD5Update(MD5_CTX *context, unsigned char *input, unsigned int inputLen) { +void MD5Update(MD5_CTX *context, unsigned char *input, unsigned int inputLen) +{ unsigned int i, index, partLen; /* Compute number of bytes mod 64 */ - index = (unsigned int) ((context->count[0] >> 3) & 0x3F); + index = (unsigned int)((context->count[0] >> 3) & 0x3F); /* update number of bits */ - if ((context->count[0] += ((UINT4) inputLen << 3)) < ((UINT4) inputLen << 3)) + if ((context->count[0] += ((UINT4)inputLen << 3)) < ((UINT4)inputLen << 3)) context->count[1]++; - context->count[1] += ((UINT4) inputLen >> 29); + context->count[1] += ((UINT4)inputLen >> 29); partLen = 64 - index; @@ -123,7 +185,7 @@ void MD5Update(MD5_CTX *context, unsigned char *input, unsigned int inputLen) { * Transform as many times as possible. */ if (inputLen >= partLen) { - MD5_memcpy((POINTER) & context->buffer[index], (POINTER) input, partLen); + MD5_memcpy((POINTER)&context->buffer[index], (POINTER)input, partLen); MD5Transform(context->state, context->buffer); for (i = partLen; i + 63 < inputLen; i += 64) @@ -134,15 +196,15 @@ void MD5Update(MD5_CTX *context, unsigned char *input, unsigned int inputLen) { i = 0; /* Buffer remaining input */ - MD5_memcpy((POINTER) & context->buffer[index], (POINTER) & input[i], - inputLen - i); + MD5_memcpy((POINTER)&context->buffer[index], (POINTER)&input[i], inputLen - i); } /* * MD5 finalization. Ends an MD5 message-digest operation, writing the the * message digest and zeroizing the context. */ -void MD5Final(unsigned char digest[16], MD5_CTX *context) { +void MD5Final(unsigned char digest[16], MD5_CTX *context) +{ unsigned char bits[8]; unsigned int index, padLen; @@ -152,7 +214,7 @@ void MD5Final(unsigned char digest[16], MD5_CTX *context) { /* * Pad out to 56 mod 64. */ - index = (unsigned int) ((context->count[0] >> 3) & 0x3f); + index = (unsigned int)((context->count[0] >> 3) & 0x3f); padLen = (index < 56) ? (56 - index) : (120 - index); MD5Update(context, PADDING, padLen); @@ -164,13 +226,14 @@ void MD5Final(unsigned char digest[16], MD5_CTX *context) { /* * Zeroize sensitive information. */ - MD5_memset((POINTER) context, 0, sizeof(*context)); + MD5_memset((POINTER)context, 0, sizeof(*context)); } /* * MD5 basic transformation. Transforms state based on block. */ -static void MD5Transform(UINT4 state[4], unsigned char block[64]) { +static void MD5Transform(UINT4 state[4], unsigned char block[64]) +{ UINT4 a = state[0], b = state[1], c = state[2], d = state[3], x[16]; Decode(x, block, 64); @@ -256,21 +319,22 @@ static void MD5Transform(UINT4 state[4], unsigned char block[64]) { * Zeroize sensitive information. * */ - MD5_memset((POINTER) x, 0, sizeof(x)); + MD5_memset((POINTER)x, 0, sizeof(x)); } /* * Encodes input (UINT4) into output (unsigned char). Assumes len is a * multiple of 4. 
*/ -static void Encode(unsigned char *output, UINT4 *input, unsigned int len) { +static void Encode(unsigned char *output, UINT4 *input, unsigned int len) +{ unsigned int i, j; for (i = 0, j = 0; j < len; i++, j += 4) { - output[j] = (unsigned char) (input[i] & 0xff); - output[j + 1] = (unsigned char) ((input[i] >> 8) & 0xff); - output[j + 2] = (unsigned char) ((input[i] >> 16) & 0xff); - output[j + 3] = (unsigned char) ((input[i] >> 24) & 0xff); + output[j] = (unsigned char)(input[i] & 0xff); + output[j + 1] = (unsigned char)((input[i] >> 8) & 0xff); + output[j + 2] = (unsigned char)((input[i] >> 16) & 0xff); + output[j + 3] = (unsigned char)((input[i] >> 24) & 0xff); } } @@ -278,19 +342,21 @@ static void Encode(unsigned char *output, UINT4 *input, unsigned int len) { * Decodes input (unsigned char) into output (UINT4). Assumes len is a * multiple of 4. */ -static void Decode(UINT4 *output, unsigned char *input, unsigned int len) { +static void Decode(UINT4 *output, unsigned char *input, unsigned int len) +{ unsigned int i, j; for (i = 0, j = 0; j < len; i++, j += 4) - output[i] = ((UINT4) input[j]) | (((UINT4) input[j + 1]) << 8) | - (((UINT4) input[j + 2]) << 16) | (((UINT4) input[j + 3]) << 24); + output[i] = ((UINT4)input[j]) | (((UINT4)input[j + 1]) << 8) | (((UINT4)input[j + 2]) << 16) | + (((UINT4)input[j + 3]) << 24); } /* * Note: Replace "for loop" with standard memcpy if possible. */ -static void MD5_memcpy(POINTER output, POINTER input, unsigned int len) { +static void MD5_memcpy(POINTER output, POINTER input, unsigned int len) +{ unsigned int i; for (i = 0; i < len; i++) @@ -300,36 +366,40 @@ static void MD5_memcpy(POINTER output, POINTER input, unsigned int len) { /* * Note: Replace "for loop" with standard memset if possible. */ -static void MD5_memset(POINTER output, int value, unsigned int len) { +static void MD5_memset(POINTER output, int value, unsigned int len) +{ unsigned int i; for (i = 0; i < len; i++) - ((char *) output)[i] = (char) value; + ((char *)output)[i] = (char)value; } /* * Digests a string */ -int MD5String(char *string, unsigned char digest[16]) { +int MD5String(char *string, unsigned char digest[16]) +{ MD5_CTX context; unsigned int len = strlen(string); MD5Init(&context); - MD5Update(&context, (unsigned char *) string, len); + MD5Update(&context, (unsigned char *)string, len); MD5Final(digest, &context); return 0; } -int MD5Buffer(char *buffer, unsigned int len, unsigned char digest[16]) { +int MD5Buffer(char *buffer, unsigned int len, unsigned char digest[16]) +{ MD5_CTX context; MD5Init(&context); - MD5Update(&context, (unsigned char *) buffer, len); + MD5Update(&context, (unsigned char *)buffer, len); MD5Final(digest, &context); return 0; } -int MD5File(char *filename, unsigned char digest[16]) { +int MD5File(char *filename, unsigned char digest[16]) +{ FILE *file; MD5_CTX context; int len; @@ -349,4 +419,4 @@ int MD5File(char *filename, unsigned char digest[16]) { return 0; } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/math/md5.h b/deps/common/math/md5.h index 2ba4baf088076ce48eb77bd93d1767421e749b04..2c2f0bcd44ee29d967bef556860e8aebe4d4f215 100644 --- a/deps/common/math/md5.h +++ b/deps/common/math/md5.h @@ -66,5 +66,5 @@ void MD5Final(unsigned char[16], MD5_CTX *); } #endif -} //namespace common -#endif //__COMMON_MATH_MD5_H__ +} // namespace common +#endif //__COMMON_MATH_MD5_H__ diff --git a/deps/common/math/random_generator.cpp 
b/deps/common/math/random_generator.cpp index c2294274231fb24c253bf4653b8b1537af9f93af..b8f2432bd868c0a4a086a5fcd9073fdcdf66a80b 100644 --- a/deps/common/math/random_generator.cpp +++ b/deps/common/math/random_generator.cpp @@ -12,27 +12,26 @@ See the Mulan PSL v2 for more details. */ // Created by Longda on 2021/4/20. // - - - #include #include "common/math/random_generator.h" namespace common { -RandomGenerator::RandomGenerator() - : randomData(std::chrono::system_clock::now().time_since_epoch().count()) {} - -RandomGenerator::~RandomGenerator() {} +RandomGenerator::RandomGenerator() : randomData(std::chrono::system_clock::now().time_since_epoch().count()) +{} -unsigned int RandomGenerator::next() { +RandomGenerator::~RandomGenerator() +{} +unsigned int RandomGenerator::next() +{ return randomData(); } -unsigned int RandomGenerator::next(unsigned int range) { +unsigned int RandomGenerator::next(unsigned int range) +{ if (range > 0) { return next() % range; } else { @@ -40,4 +39,4 @@ unsigned int RandomGenerator::next(unsigned int range) { } } -}//namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/math/random_generator.h b/deps/common/math/random_generator.h index c115cc87b83ab328e77d65a32896e5b76523408c..43b26cd2e26ad6799bf1b1f546c9e51a6c320e10 100644 --- a/deps/common/math/random_generator.h +++ b/deps/common/math/random_generator.h @@ -16,27 +16,25 @@ See the Mulan PSL v2 for more details. */ #include #include -namespace common -{ +namespace common { #define DEFAULT_RANDOM_BUFF_SIZE 512 -class RandomGenerator -{ +class RandomGenerator { public: - RandomGenerator(); - virtual ~RandomGenerator(); + RandomGenerator(); + virtual ~RandomGenerator(); public: - unsigned int next(); - unsigned int next(unsigned int range); + unsigned int next(); + unsigned int next(unsigned int range); private: - // The GUN Extended TLS Version - std::mt19937 randomData; + // The GUN Extended TLS Version + std::mt19937 randomData; }; -} +} // namespace common #endif /* __COMMON_MATH_RANDOM_GENERATOR_H_ */ diff --git a/deps/common/math/regex.cpp b/deps/common/math/regex.cpp index c58354001e4246410db8ea13004e991c89641de0..590add9c608a8a0381d0468f8068a561f269b9ad 100644 --- a/deps/common/math/regex.cpp +++ b/deps/common/math/regex.cpp @@ -19,7 +19,8 @@ See the Mulan PSL v2 for more details. */ #include "common/math/regex.h" namespace common { -int regex_match(const char *str_, const char *pat_) { +int regex_match(const char *str_, const char *pat_) +{ regex_t reg; if (regcomp(®, pat_, REG_EXTENDED | REG_NOSUB)) return -1; @@ -29,4 +30,4 @@ int regex_match(const char *str_, const char *pat_) { return ret; } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/math/regex.h b/deps/common/math/regex.h index 5d2e271d24f15eed6c50ceb5a95cb8d05cdd2cd9..08b8e4f83c77e17c52602b44f603ae7f2f9dc789 100644 --- a/deps/common/math/regex.h +++ b/deps/common/math/regex.h @@ -18,5 +18,5 @@ namespace common { int regex_match(const char *str_, const char *pat_); -} //namespace common +} // namespace common #endif /* __COMMON_MATH_REGEX_H__ */ diff --git a/deps/common/metrics/console_reporter.cpp b/deps/common/metrics/console_reporter.cpp index 0c705c657be9c4ea450eb4214f26e83248c92511..36ab11ee970ba5f080454288ffdb2c34b332e9b9 100644 --- a/deps/common/metrics/console_reporter.cpp +++ b/deps/common/metrics/console_reporter.cpp @@ -21,13 +21,15 @@ See the Mulan PSL v2 for more details. 
*/ namespace common { -ConsoleReporter *get_console_reporter() { +ConsoleReporter *get_console_reporter() +{ static ConsoleReporter *instance = new ConsoleReporter(); return instance; } -void ConsoleReporter::report(const std::string &tag, Metric *metric) { +void ConsoleReporter::report(const std::string &tag, Metric *metric) +{ Snapshot *snapshot = metric->get_snapshot(); if (snapshot != NULL) { @@ -37,4 +39,4 @@ void ConsoleReporter::report(const std::string &tag, Metric *metric) { } } -} // namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/metrics/console_reporter.h b/deps/common/metrics/console_reporter.h index 2d962badde89a56b552c42620885c9f44d0385c7..9d0af23b7e1f37966b2baf890b594e12a32ad135 100644 --- a/deps/common/metrics/console_reporter.h +++ b/deps/common/metrics/console_reporter.h @@ -19,12 +19,11 @@ See the Mulan PSL v2 for more details. */ namespace common { - class ConsoleReporter : public Reporter { public: void report(const std::string &tag, Metric *metric); }; -ConsoleReporter* get_console_reporter(); -} //namespace common -#endif //__COMMON_METRICS_CONSOLE_REPORTER_H__ +ConsoleReporter *get_console_reporter(); +} // namespace common +#endif //__COMMON_METRICS_CONSOLE_REPORTER_H__ diff --git a/deps/common/metrics/histogram_snapshot.cpp b/deps/common/metrics/histogram_snapshot.cpp index 7d51d1a63410ad94aea400bd9957912b133bd6cb..48ad01d2e5e4a9ffa01045f7ce329d1a3f62c068 100644 --- a/deps/common/metrics/histogram_snapshot.cpp +++ b/deps/common/metrics/histogram_snapshot.cpp @@ -24,145 +24,129 @@ See the Mulan PSL v2 for more details. */ namespace common { - HistogramSnapShot::HistogramSnapShot() -{ -} +{} -HistogramSnapShot::HistogramSnapShot(const std::vector& collection) +HistogramSnapShot::HistogramSnapShot(const std::vector &collection) { set_collection(collection); } HistogramSnapShot::~HistogramSnapShot() -{ -} +{} -void HistogramSnapShot::set_collection(const std::vector& collection) +void HistogramSnapShot::set_collection(const std::vector &collection) { - if (collection.empty()) - { - return; - } - - data_ = collection; - std::sort(data_.begin(), data_.end()); + if (collection.empty()) { + return; + } + + data_ = collection; + std::sort(data_.begin(), data_.end()); } size_t HistogramSnapShot::size() const { - return data_.size(); + return data_.size(); } double HistogramSnapShot::get_value(double quantile) { - if (quantile > 1.0f) - { - quantile = 1.0f; - } - - if (quantile < 0.0f) - { - quantile = 0.0f; - } - - if (data_.empty()) - { - return 0.0f; - } - - double pos = quantile * (data_.size() + 1); - - if (pos < 1) - { - return data_[0]; - } - - if (pos >= data_.size()) - { - return data_[data_.size() - 1]; - } - - double lower = data_[(int) pos - 1]; - double upper = data_[(int) pos]; - - return lower + (pos - floor(pos)) * (upper - lower); + if (quantile > 1.0f) { + quantile = 1.0f; + } + + if (quantile < 0.0f) { + quantile = 0.0f; + } + + if (data_.empty()) { + return 0.0f; + } + + double pos = quantile * (data_.size() + 1); + + if (pos < 1) { + return data_[0]; + } + + if (pos >= data_.size()) { + return data_[data_.size() - 1]; + } + + double lower = data_[(int)pos - 1]; + double upper = data_[(int)pos]; + + return lower + (pos - floor(pos)) * (upper - lower); } double HistogramSnapShot::get_median() { - return get_value(0.5f); + return get_value(0.5f); } double HistogramSnapShot::get_75th() { - return get_value(0.75f); + return get_value(0.75f); } double HistogramSnapShot::get_90th() { - 
return get_value(0.90f); + return get_value(0.90f); } double HistogramSnapShot::get_95th() { - return get_value(0.95f); + return get_value(0.95f); } double HistogramSnapShot::get_99th() { - return get_value(0.99f); - + return get_value(0.99f); } double HistogramSnapShot::get_999th() { - return get_value(0.999f); + return get_value(0.999f); } double HistogramSnapShot::get_max() { - if (data_.empty()) - { - return 0.0f; - } - - return static_cast(*data_.rbegin()); + if (data_.empty()) { + return 0.0f; + } + + return static_cast(*data_.rbegin()); } double HistogramSnapShot::get_min() { - if (data_.empty()) - { - return 0.0f; - } - - return static_cast(*data_.begin()); + if (data_.empty()) { + return 0.0f; + } + + return static_cast(*data_.begin()); } double HistogramSnapShot::get_mean() { - if (data_.empty()) - { - return 0.0f; - } - - return std::accumulate(data_.begin(), data_.end(), (double)0) * 1.0f / data_.size(); -} + if (data_.empty()) { + return 0.0f; + } + return std::accumulate(data_.begin(), data_.end(), (double)0) * 1.0f / data_.size(); +} -const std::vector & HistogramSnapShot::get_values() +const std::vector &HistogramSnapShot::get_values() { - return data_; + return data_; } -std::string HistogramSnapShot::to_string() { +std::string HistogramSnapShot::to_string() +{ std::stringstream oss; - oss << "mean:" << get_mean() << ",min:" << get_min() << ",max:" << get_max() - << ",median:" << get_median() << ", 75th:" << get_75th() - << ",90th:" << get_90th() << ",99th:" << get_99th() - << ",999th:" << get_999th(); + oss << "mean:" << get_mean() << ",min:" << get_min() << ",max:" << get_max() << ",median:" << get_median() + << ", 75th:" << get_75th() << ",90th:" << get_90th() << ",99th:" << get_99th() << ",999th:" << get_999th(); return oss.str(); } - -} // namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/metrics/histogram_snapshot.h b/deps/common/metrics/histogram_snapshot.h index a376f82c5c6ed8719d7ee1bed094267cbfb1c132..df6a63b51e7b35c864f2a591c2e1cf03aa94c972 100644 --- a/deps/common/metrics/histogram_snapshot.h +++ b/deps/common/metrics/histogram_snapshot.h @@ -65,10 +65,11 @@ public: const std::vector &get_values(); std::string to_string(); + protected: std::vector data_; }; -} // namespace common +} // namespace common #endif /* __COMMON_METRICS_HISTOGRAM_SNAPSHOT_H_ */ diff --git a/deps/common/metrics/log_reporter.cpp b/deps/common/metrics/log_reporter.cpp index c3604313273a1168558133f24bdb7efcdf2d5635..8873a0432bb2516e708d42a0dff3a860cebeec53 100644 --- a/deps/common/metrics/log_reporter.cpp +++ b/deps/common/metrics/log_reporter.cpp @@ -19,23 +19,24 @@ See the Mulan PSL v2 for more details. 
*/ #include "common/metrics/metric.h" #include "common/log/log.h" - namespace common { -LogReporter* get_log_reporter() { - static LogReporter* instance = new LogReporter(); +LogReporter *get_log_reporter() +{ + static LogReporter *instance = new LogReporter(); return instance; } - void LogReporter::report(const std::string &tag, Metric *metric) { +void LogReporter::report(const std::string &tag, Metric *metric) +{ Snapshot *snapshot = metric->get_snapshot(); if (snapshot != NULL) { LOG_INFO("%s:%s", tag.c_str(), snapshot->to_string().c_str()); - }else { + } else { LOG_WARN("There is no snapshot of %s metrics.", tag.c_str()); } } -}// namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/metrics/log_reporter.h b/deps/common/metrics/log_reporter.h index 2e403cc98f5738af446032c00b3b8449208e1853..99f9357914b05214b16a0105ab2dcf066c83a0c3 100644 --- a/deps/common/metrics/log_reporter.h +++ b/deps/common/metrics/log_reporter.h @@ -19,12 +19,11 @@ See the Mulan PSL v2 for more details. */ namespace common { - class LogReporter : public Reporter { public: void report(const std::string &tag, Metric *metric); }; -LogReporter* get_log_reporter(); -} //namespace common -#endif //__COMMON_METRICS_LOG_REPORTER_H__ +LogReporter *get_log_reporter(); +} // namespace common +#endif //__COMMON_METRICS_LOG_REPORTER_H__ diff --git a/deps/common/metrics/metric.h b/deps/common/metrics/metric.h index 21e188ac82371e42daac1af2327440ff4194d4bd..e5ab5d7c6977254401e7a292141dc1cf57923659 100644 --- a/deps/common/metrics/metric.h +++ b/deps/common/metrics/metric.h @@ -23,11 +23,14 @@ class Metric { public: virtual void snapshot() = 0; - virtual Snapshot *get_snapshot() { return snapshot_value_; } + virtual Snapshot *get_snapshot() + { + return snapshot_value_; + } protected: Snapshot *snapshot_value_; }; -}//namespace common -#endif //__COMMON_METRICS_METRIC_H__ +} // namespace common +#endif //__COMMON_METRICS_METRIC_H__ diff --git a/deps/common/metrics/metrics.cpp b/deps/common/metrics/metrics.cpp index a8ca18c92d9d952b6924492716f4351f8ea4d63d..e97f0357b28ae8e6e3fd6442d28443a02f6c31dd 100644 --- a/deps/common/metrics/metrics.cpp +++ b/deps/common/metrics/metrics.cpp @@ -16,7 +16,8 @@ See the Mulan PSL v2 for more details. 
*/ #include "common/lang/mutex.h" namespace common { -Meter::Meter() { +Meter::Meter() +{ struct timeval start_time; gettimeofday(&start_time, NULL); @@ -24,17 +25,26 @@ Meter::Meter() { value_.store(0l); } -Meter::~Meter() { +Meter::~Meter() +{ if (snapshot_value_ != NULL) { delete snapshot_value_; snapshot_value_ = NULL; } } -void Meter::inc(long increase) { value_.fetch_add(increase); } -void Meter::inc() { inc(1l); } +void Meter::inc(long increase) +{ + value_.fetch_add(increase); +} + +void Meter::inc() +{ + inc(1l); +} -void Meter::snapshot() { +void Meter::snapshot() +{ // lock here struct timeval now; @@ -42,8 +52,7 @@ void Meter::snapshot() { long now_tick = now.tv_sec * 1000000 + now.tv_usec; - double temp_value = - ((double)value_.exchange(0l)) / ((now_tick - snapshot_tick_ ) / 1000000); + double temp_value = ((double)value_.exchange(0l)) / ((now_tick - snapshot_tick_) / 1000000); snapshot_tick_ = now_tick; if (snapshot_value_ == NULL) { @@ -52,21 +61,27 @@ void Meter::snapshot() { ((SnapshotBasic *)snapshot_value_)->setValue(temp_value); } -SimpleTimer::~SimpleTimer() { +SimpleTimer::~SimpleTimer() +{ if (snapshot_value_ != NULL) { delete snapshot_value_; snapshot_value_ = NULL; } } -void SimpleTimer::inc(long increase) { +void SimpleTimer::inc(long increase) +{ value_.fetch_add(increase); times_.fetch_add(1); } -void SimpleTimer::update(long one) { inc(one); } +void SimpleTimer::update(long one) +{ + inc(one); +} -void SimpleTimer::snapshot() { +void SimpleTimer::snapshot() +{ // lock here struct timeval now; @@ -81,7 +96,7 @@ void SimpleTimer::snapshot() { double mean = 0; if (times_snapshot > 0) { - tps = ((double)times_snapshot )/ ((now_tick - snapshot_tick_) / 1000000); + tps = ((double)times_snapshot) / ((now_tick - snapshot_tick_) / 1000000); mean = ((double)value_snapshot) / times_snapshot; } @@ -93,21 +108,22 @@ void SimpleTimer::snapshot() { ((SimplerTimerSnapshot *)snapshot_value_)->setValue(mean, tps); } -Histogram::Histogram(RandomGenerator &random) : UniformReservoir(random) {} - -Histogram::Histogram(RandomGenerator &random, size_t size) - : UniformReservoir(random, size) {} +Histogram::Histogram(RandomGenerator &random) : UniformReservoir(random) +{} -Histogram::~Histogram() { +Histogram::Histogram(RandomGenerator &random, size_t size) : UniformReservoir(random, size) +{} -} +Histogram::~Histogram() +{} -void Histogram::snapshot() { +void Histogram::snapshot() +{ UniformReservoir::snapshot(); } -Timer::Timer(RandomGenerator &random) - : UniformReservoir(random){ +Timer::Timer(RandomGenerator &random) : UniformReservoir(random) +{ struct timeval start_time; gettimeofday(&start_time, NULL); @@ -115,8 +131,8 @@ Timer::Timer(RandomGenerator &random) value_.store(0l); } -Timer::Timer(RandomGenerator &random, size_t size) - : UniformReservoir(random, size){ +Timer::Timer(RandomGenerator &random, size_t size) : UniformReservoir(random, size) +{ struct timeval start_time; gettimeofday(&start_time, NULL); @@ -124,19 +140,22 @@ Timer::Timer(RandomGenerator &random, size_t size) value_.store(0l); } -Timer::~Timer() { +Timer::~Timer() +{ if (snapshot_value_ == NULL) { delete snapshot_value_; snapshot_value_ = NULL; } } -void Timer::update(double ms) { +void Timer::update(double ms) +{ UniformReservoir::update(ms); value_.fetch_add(1l); } -void Timer::snapshot() { +void Timer::snapshot() +{ if (snapshot_value_ == NULL) { snapshot_value_ = new TimerSnapshot(); } @@ -147,8 +166,7 @@ void Timer::snapshot() { long now_tick = now.tv_sec * 1000000 + now.tv_usec; - double tps = - 
((double)value_.exchange(0l) )/ ((now_tick - snapshot_tick_ ) / 1000000); + double tps = ((double)value_.exchange(0l)) / ((now_tick - snapshot_tick_) / 1000000); snapshot_tick_ = now_tick; MUTEX_LOCK(&mutex); @@ -159,13 +177,14 @@ void Timer::snapshot() { timer_snapshot->set_tps(tps); } -TimerStat::TimerStat(SimpleTimer &other_st) - : st_(other_st), start_tick_(0), end_tick_(0) { +TimerStat::TimerStat(SimpleTimer &other_st) : st_(other_st), start_tick_(0), end_tick_(0) +{ start(); } -TimerStat::~TimerStat() { +TimerStat::~TimerStat() +{ if (end_tick_ == 0) { end(); } @@ -173,18 +192,20 @@ TimerStat::~TimerStat() { st_.update((end_tick_ - start_tick_) / 1000); } -void TimerStat::start() { +void TimerStat::start() +{ struct timeval now; gettimeofday(&now, NULL); start_tick_ = now.tv_sec * 1000000 + now.tv_usec; } -void TimerStat::end() { +void TimerStat::end() +{ struct timeval now; gettimeofday(&now, NULL); end_tick_ = now.tv_sec * 1000000 + now.tv_usec; } -} // namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/metrics/metrics.h b/deps/common/metrics/metrics.h index 4dc393e997a92e014ad4d1846aca946c3125d38e..98c2195f8a661842ce3caf16e5f3647da64eaf55 100644 --- a/deps/common/metrics/metrics.h +++ b/deps/common/metrics/metrics.h @@ -27,17 +27,23 @@ namespace common { class Gauge : public Metric { public: // user implement snapshot function - void set_snapshot(Snapshot *value) { snapshot_value_ = value; } + void set_snapshot(Snapshot *value) + { + snapshot_value_ = value; + } }; class Counter : public Metric { - void set_snapshot(SnapshotBasic *value) { snapshot_value_ = value; } + void set_snapshot(SnapshotBasic *value) + { + snapshot_value_ = value; + } }; class Meter : public Metric { public: Meter(); - virtual ~Meter(); + virtual ~Meter(); void inc(long increase); void inc(); @@ -76,7 +82,6 @@ public: virtual ~Histogram(); void snapshot(); - }; // timeunit is ms @@ -109,5 +114,5 @@ public: long end_tick_; }; -} // namespace common -#endif //__COMMON_METRICS_METRICS_H__ +} // namespace common +#endif //__COMMON_METRICS_METRICS_H__ diff --git a/deps/common/metrics/metrics_registry.cpp b/deps/common/metrics/metrics_registry.cpp index eec3776546b01db2b98226a22230537a02cf586d..e2a6cf668a80cd8405967e247f8a5e169eac52ca 100644 --- a/deps/common/metrics/metrics_registry.cpp +++ b/deps/common/metrics/metrics_registry.cpp @@ -12,32 +12,33 @@ See the Mulan PSL v2 for more details. */ // Created by Longda on 2021/4/20. 
// - - #include "common/metrics/metrics_registry.h" #include "common/log/log.h" namespace common { -MetricsRegistry& get_metrics_registry() { +MetricsRegistry &get_metrics_registry() +{ static MetricsRegistry instance; return instance; } -void MetricsRegistry::register_metric(const std::string &tag, Metric *metric) { - std::map::iterator it = metrics.find(tag); +void MetricsRegistry::register_metric(const std::string &tag, Metric *metric) +{ + std::map::iterator it = metrics.find(tag); if (it != metrics.end()) { LOG_WARN("%s has been registered!", tag.c_str()); return; } - //metrics[tag] = metric; + // metrics[tag] = metric; metrics.insert(std::pair(tag, metric)); LOG_INFO("Successfully register metric :%s", tag.c_str()); } -void MetricsRegistry::unregister(const std::string &tag) { +void MetricsRegistry::unregister(const std::string &tag) +{ unsigned int num = metrics.erase(tag); if (num == 0) { LOG_WARN("There is no %s metric!", tag.c_str()); @@ -46,22 +47,22 @@ void MetricsRegistry::unregister(const std::string &tag) { LOG_INFO("Successfully remove metric of %s", tag.c_str()); } -void MetricsRegistry::snapshot() { - std::map::iterator it = metrics.begin(); +void MetricsRegistry::snapshot() +{ + std::map::iterator it = metrics.begin(); for (; it != metrics.end(); it++) { it->second->snapshot(); } } -void MetricsRegistry::report() { - for (std::list::iterator reporterIt = reporters.begin(); - reporterIt != reporters.end(); reporterIt++) { - for (std::map::iterator it = metrics.begin(); - it != metrics.end(); it++) { +void MetricsRegistry::report() +{ + for (std::list::iterator reporterIt = reporters.begin(); reporterIt != reporters.end(); reporterIt++) { + for (std::map::iterator it = metrics.begin(); it != metrics.end(); it++) { (*reporterIt)->report(it->first, it->second); } } } -} // namespace common +} // namespace common diff --git a/deps/common/metrics/metrics_registry.h b/deps/common/metrics/metrics_registry.h index 15fc9b1d7ea6e39c131043613607d6227056976a..f4ba40b0e783a1fafdcbb26f004eae288ad8ff99 100644 --- a/deps/common/metrics/metrics_registry.h +++ b/deps/common/metrics/metrics_registry.h @@ -26,7 +26,7 @@ namespace common { class MetricsRegistry { public: - MetricsRegistry() {}; + MetricsRegistry(){}; virtual ~MetricsRegistry(){}; void register_metric(const std::string &tag, Metric *metric); @@ -36,18 +36,16 @@ public: void report(); - void add_reporter(Reporter *reporter) { + void add_reporter(Reporter *reporter) + { reporters.push_back(reporter); } - protected: std::map metrics; std::list reporters; - - }; -MetricsRegistry& get_metrics_registry(); -}//namespace common -#endif //__COMMON_METRICS_METRICS_REGISTRY_H__ +MetricsRegistry &get_metrics_registry(); +} // namespace common +#endif //__COMMON_METRICS_METRICS_REGISTRY_H__ diff --git a/deps/common/metrics/reporter.h b/deps/common/metrics/reporter.h index 017774ede63172bea757b55bf7e3ec67d2ae4571..b5c957c0b8adfb976cd41492283cd10b1e4dd21f 100644 --- a/deps/common/metrics/reporter.h +++ b/deps/common/metrics/reporter.h @@ -20,10 +20,9 @@ See the Mulan PSL v2 for more details. 
*/ namespace common { - class Reporter { public: virtual void report(const std::string &tag, Metric *metric) = 0; }; -} // namespace Reporter -#endif //__COMMON_METRICS_REPORTER_H__ +} // namespace common +#endif //__COMMON_METRICS_REPORTER_H__ diff --git a/deps/common/metrics/reservoir.cpp b/deps/common/metrics/reservoir.cpp index 5d2cd83d5fee287a9a2908eef4dab44a5ea06723..be4b546321b857f3f12deb001b19ddae82b0844a 100644 --- a/deps/common/metrics/reservoir.cpp +++ b/deps/common/metrics/reservoir.cpp @@ -16,17 +16,13 @@ See the Mulan PSL v2 for more details. */ using namespace common; -Reservoir::Reservoir(RandomGenerator& random) : - random(random) -{ -} +Reservoir::Reservoir(RandomGenerator &random) : random(random) +{} Reservoir::~Reservoir() -{ -} +{} size_t Reservoir::next(size_t range) { - return random.next(range); + return random.next(range); } - diff --git a/deps/common/metrics/reservoir.h b/deps/common/metrics/reservoir.h index a7884a6ab37866e8624e7d745ad072652751185a..6102a60b91a5b821172e666000a680de7860e014 100644 --- a/deps/common/metrics/reservoir.h +++ b/deps/common/metrics/reservoir.h @@ -21,8 +21,6 @@ See the Mulan PSL v2 for more details. */ #include "common/metrics/metric.h" #include "common/metrics/snapshot.h" - - namespace common { class Reservoir : public Metric { @@ -45,6 +43,6 @@ private: RandomGenerator &random; }; -} // namespace common +} // namespace common #endif /* __COMMON_METRICS_RESERVOIR_H_ */ diff --git a/deps/common/metrics/sampler.cpp b/deps/common/metrics/sampler.cpp index f5bae3bd1c1d3b07b7c57a266f8512232af594b7..a7efe246a961823abbe2cd116b7a1b1661a3ee42 100644 --- a/deps/common/metrics/sampler.cpp +++ b/deps/common/metrics/sampler.cpp @@ -19,17 +19,21 @@ See the Mulan PSL v2 for more details. */ namespace common { -Sampler *&get_sampler() { +Sampler *&get_sampler() +{ static Sampler *g_sampler = new Sampler(); return g_sampler; } -Sampler::Sampler():random_() {} +Sampler::Sampler() : random_() +{} -Sampler::~Sampler() {} +Sampler::~Sampler() +{} -bool Sampler::sampling() { +bool Sampler::sampling() +{ int v = random_.next(RANGE_SIZE); if (v <= ratio_num_) { return true; @@ -38,9 +42,13 @@ bool Sampler::sampling() { } } -double Sampler::get_ratio() { return ratio_; } +double Sampler::get_ratio() +{ + return ratio_; +} -void Sampler::set_ratio(double ratio) { +void Sampler::set_ratio(double ratio) +{ if (0 <= ratio && ratio <= 1) { this->ratio_ = ratio; ratio_num_ = ratio * RANGE_SIZE; @@ -49,5 +57,4 @@ void Sampler::set_ratio(double ratio) { } } -}//namespace common - +} // namespace common diff --git a/deps/common/metrics/sampler.h b/deps/common/metrics/sampler.h index 6fe529c3da66e359430e42ce595a14cbeb5ee509..06c7047296cfd3567895b723ab30cd1bee86d401 100644 --- a/deps/common/metrics/sampler.h +++ b/deps/common/metrics/sampler.h @@ -19,7 +19,6 @@ See the Mulan PSL v2 for more details. */ namespace common { - /** * The most simple sample function */ @@ -40,5 +39,5 @@ private: }; Sampler *&get_sampler(); -} //namespace common -#endif //__COMMON_METRICS_SAMPLER_H__ +} // namespace common +#endif //__COMMON_METRICS_SAMPLER_H__ diff --git a/deps/common/metrics/snapshot.h b/deps/common/metrics/snapshot.h index 1aed7c5c75c187845f0d09da8758a17e9a2820a7..44664c8acaf8069ef9d0b4e759300204e07c616c 100644 --- a/deps/common/metrics/snapshot.h +++ b/deps/common/metrics/snapshot.h @@ -20,25 +20,30 @@ See the Mulan PSL v2 for more details. 
*/ namespace common { - class Snapshot { public: - virtual ~Snapshot() {}; + virtual ~Snapshot(){}; virtual std::string to_string() = 0; }; template class SnapshotBasic : public Snapshot { public: - SnapshotBasic() : value(){ + SnapshotBasic() + : value(){ - }; + }; - virtual ~SnapshotBasic() {} + virtual ~SnapshotBasic() + {} - void setValue(T &input) { value = input; } + void setValue(T &input) + { + value = input; + } - std::string to_string() { + std::string to_string() + { std::string ret; val_to_str(value, ret); return ret; @@ -48,28 +53,31 @@ private: T value; }; -class SimplerTimerSnapshot: public Snapshot{ +class SimplerTimerSnapshot : public Snapshot { public: - SimplerTimerSnapshot() { + SimplerTimerSnapshot() + {} - } + virtual ~SimplerTimerSnapshot() + {} - virtual ~SimplerTimerSnapshot() {} - - void setValue(double mean, double tps) { + void setValue(double mean, double tps) + { this->mean = mean; this->tps = tps; } - std::string to_string() { + std::string to_string() + { std::stringstream oss; - oss << "mean:" << mean << ",tps:"<tps = tps; } +void TimerSnapshot::set_tps(double tps) +{ + this->tps = tps; +} -std::string TimerSnapshot::to_string() { +std::string TimerSnapshot::to_string() +{ std::stringstream oss; oss << HistogramSnapShot::to_string() << ",tps:" << tps; return oss.str(); } -} // namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/metrics/timer_snapshot.h b/deps/common/metrics/timer_snapshot.h index ac40f10d777b9f172b417e8cc20272c541bb3cf5..fe074e3fac9b420877042ac3fc7c03f8380577be 100644 --- a/deps/common/metrics/timer_snapshot.h +++ b/deps/common/metrics/timer_snapshot.h @@ -27,8 +27,9 @@ public: void set_tps(double tps); std::string to_string(); + protected: double tps = 1.0; }; -}//namespace common -#endif //__COMMON_METRICS_TIMER_SNAPSHOT_H__ +} // namespace common +#endif //__COMMON_METRICS_TIMER_SNAPSHOT_H__ diff --git a/deps/common/metrics/uniform_reservoir.cpp b/deps/common/metrics/uniform_reservoir.cpp index 7b2a56662d66ac47f27ceaa4c139fa064649fc5d..c2c05e9555b4e66edf9fdcffdb7ee9f1aaf4300d 100644 --- a/deps/common/metrics/uniform_reservoir.cpp +++ b/deps/common/metrics/uniform_reservoir.cpp @@ -23,8 +23,8 @@ namespace common { #define DEFAULT_SIZE 1023 -UniformReservoir::UniformReservoir(RandomGenerator &random) - : Reservoir(random), counter(0) { +UniformReservoir::UniformReservoir(RandomGenerator &random) : Reservoir(random), counter(0) +{ pthread_mutexattr_t mutexatr; pthread_mutexattr_init(&mutexatr); pthread_mutexattr_settype(&mutexatr, PTHREAD_MUTEX_RECURSIVE); @@ -34,8 +34,8 @@ UniformReservoir::UniformReservoir(RandomGenerator &random) init(DEFAULT_SIZE); } -UniformReservoir::UniformReservoir(RandomGenerator &random, size_t size) - : Reservoir(random), counter(0) { +UniformReservoir::UniformReservoir(RandomGenerator &random, size_t size) : Reservoir(random), counter(0) +{ pthread_mutexattr_t mutexatr; pthread_mutexattr_init(&mutexatr); @@ -45,35 +45,40 @@ UniformReservoir::UniformReservoir(RandomGenerator &random, size_t size) init(size); } -UniformReservoir::~UniformReservoir() { +UniformReservoir::~UniformReservoir() +{ if (snapshot_value_ == NULL) { delete snapshot_value_; snapshot_value_ = NULL; } } -void UniformReservoir::init(size_t size) { +void UniformReservoir::init(size_t size) +{ MUTEX_LOCK(&mutex); counter = 0; data.resize(size); MUTEX_UNLOCK(&mutex); } -size_t UniformReservoir::size() { +size_t UniformReservoir::size() +{ MUTEX_LOCK(&mutex); size_t size = 
(counter < data.size()) ? counter : data.size(); MUTEX_UNLOCK(&mutex); return size; } -size_t UniformReservoir::get_count() { +size_t UniformReservoir::get_count() +{ MUTEX_LOCK(&mutex); size_t ret = counter; MUTEX_UNLOCK(&mutex); return ret; } -void UniformReservoir::update(double value) { +void UniformReservoir::update(double value) +{ MUTEX_LOCK(&mutex); size_t count = ++counter; @@ -87,7 +92,8 @@ void UniformReservoir::update(double value) { MUTEX_UNLOCK(&mutex); } -void UniformReservoir::snapshot() { +void UniformReservoir::snapshot() +{ MUTEX_LOCK(&mutex); std::vector output = data; MUTEX_UNLOCK(&mutex); @@ -98,7 +104,8 @@ void UniformReservoir::snapshot() { ((HistogramSnapShot *)snapshot_value_)->set_collection(output); } -void UniformReservoir::reset() { +void UniformReservoir::reset() +{ MUTEX_LOCK(&mutex); counter = 0; @@ -108,4 +115,4 @@ void UniformReservoir::reset() { MUTEX_UNLOCK(&mutex); } -} // namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/metrics/uniform_reservoir.h b/deps/common/metrics/uniform_reservoir.h index 80eb1215318241fcb321f2b994e30c649b14e55b..1e30679c032238fcab2985a429f1c20780a8900d 100644 --- a/deps/common/metrics/uniform_reservoir.h +++ b/deps/common/metrics/uniform_reservoir.h @@ -38,8 +38,8 @@ public: virtual ~UniformReservoir(); public: - size_t size(); // data buffer size - size_t get_count(); // how many items have been insert? + size_t size(); // data buffer size + size_t get_count(); // how many items have been insert? void update(double one); void snapshot(); @@ -51,11 +51,11 @@ protected: protected: pthread_mutex_t mutex; - size_t counter; // counter is likely to be bigger than data.size() + size_t counter; // counter is likely to be bigger than data.size() std::vector data; RandomGenerator random; }; -} // namespace common +} // namespace common #endif /* __COMMON_METRICS_UNIFORM_RESERVOIR_H_ */ diff --git a/deps/common/mm/debug_new.cpp.skip b/deps/common/mm/debug_new.cpp.skip index ec55091f9620c5a714432ffcbc430af77c7da733..1f9e79b3db201db30cb6065c9cdad84ca0284ab5 100644 --- a/deps/common/mm/debug_new.cpp.skip +++ b/deps/common/mm/debug_new.cpp.skip @@ -58,7 +58,8 @@ static new_ptr_list_t *new_ptr_list[DEBUG_NEW_HASHTABLESIZE]; bool new_verbose_flag = false; bool new_autocheck_flag = true; -bool check_leaks() { +bool check_leaks() +{ bool fLeaked = false; for (int i = 0; i < DEBUG_NEW_HASHTABLESIZE; ++i) { new_ptr_list_t *ptr = new_ptr_list[i]; @@ -67,8 +68,10 @@ bool check_leaks() { fLeaked = true; while (ptr) { printf("Leaked object at %p (size %llu, %s:%d)\n", - (char *)ptr + sizeof(new_ptr_list_t), - (unsigned long long)ptr->size, ptr->file, ptr->line); + (char *)ptr + sizeof(new_ptr_list_t), + (unsigned long long)ptr->size, + ptr->file, + ptr->line); ptr = ptr->next; } } @@ -78,7 +81,8 @@ bool check_leaks() { return false; } -void *operator new(size_t size, const char *file, int line) { +void *operator new(size_t size, const char *file, int line) +{ size_t s = size + sizeof(new_ptr_list_t); new_ptr_list_t *ptr = (new_ptr_list_t *)malloc(s); if (ptr == NULL) { @@ -102,23 +106,33 @@ void *operator new(size_t size, const char *file, int line) { return pointer; } -void *operator new[](size_t size, const char *file, int line) { +void *operator new[](size_t size, const char *file, int line) +{ return operator new(size, file, line); } -void *operator new(size_t size) { return operator new(size, "", 0); } +void *operator new(size_t size) +{ + return operator new(size, "", 0); +} 
-void *operator new[](size_t size) { return operator new(size); } +void *operator new[](size_t size) +{ + return operator new(size); +} -void *operator new(size_t size, const std::nothrow_t &) throw() { +void *operator new(size_t size, const std::nothrow_t &) throw() +{ return operator new(size); } -void *operator new[](size_t size, const std::nothrow_t &) throw() { +void *operator new[](size_t size, const std::nothrow_t &) throw() +{ return operator new[](size); } -void operator delete(void *pointer) { +void operator delete(void *pointer) +{ if (pointer == NULL) return; size_t hash_index = DEBUG_NEW_HASH(pointer); @@ -142,7 +156,10 @@ void operator delete(void *pointer) { abort(); } -void operator delete[](void *pointer) { operator delete(pointer); } +void operator delete[](void *pointer) +{ + operator delete(pointer); +} // Some older compilers like Borland C++ Compiler 5.5.1 and Digital Mars // Compiler 8.29 do not support placement delete operators. @@ -151,31 +168,36 @@ void operator delete[](void *pointer) { operator delete(pointer); } // is thrown in the initialization (constructor) of a dynamically // created object. #ifndef NO_PLACEMENT_DELETE -void operator delete(void *pointer, const char *file, int line) { +void operator delete(void *pointer, const char *file, int line) +{ if (new_verbose_flag) - printf("info: exception thrown on initializing object at %p (%s:%d)\n", - pointer, file, line); + printf("info: exception thrown on initializing object at %p (%s:%d)\n", pointer, file, line); operator delete(pointer); } -void operator delete[](void *pointer, const char *file, int line) { +void operator delete[](void *pointer, const char *file, int line) +{ operator delete(pointer, file, line); } -void operator delete(void *pointer, const std::nothrow_t &) { +void operator delete(void *pointer, const std::nothrow_t &) +{ operator delete(pointer, "", 0); } -void operator delete[](void *pointer, const std::nothrow_t &) { +void operator delete[](void *pointer, const std::nothrow_t &) +{ operator delete(pointer, std::nothrow); } -#endif // NO_PLACEMENT_DELETE +#endif // NO_PLACEMENT_DELETE // Proxy class to automatically call check_leaks if new_autocheck_flag is set class new_check_t { public: - new_check_t() {} - ~new_check_t() { + new_check_t() + {} + ~new_check_t() + { if (new_autocheck_flag) { // Check for leakage. 
// If any leaks are found, set new_verbose_flag so that any diff --git a/deps/common/mm/debug_new.h b/deps/common/mm/debug_new.h index 1493562d5eaa0282498067a2f44cd9672850091c..761f152932a51bec7b036b166b0426353c15307f 100644 --- a/deps/common/mm/debug_new.h +++ b/deps/common/mm/debug_new.h @@ -26,8 +26,8 @@ void *operator new[](size_t size, const char *file, int line); #ifndef NO_PLACEMENT_DELETE void operator delete(void *pointer, const char *file, int line); void operator delete[](void *pointer, const char *file, int line); -#endif // NO_PLACEMENT_DELETE -void operator delete[](void *); // MSVC 6 requires this declaration +#endif // NO_PLACEMENT_DELETE +void operator delete[](void *); // MSVC 6 requires this declaration /* Macros */ #ifndef DEBUG_NEW_NO_NEW_REDEFINITION @@ -36,16 +36,16 @@ void operator delete[](void *); // MSVC 6 requires this declaration #define debug_new new #else #define debug_new new (__FILE__, __LINE__) -#endif // DEBUG_NEW_NO_NEW_REDEFINITION +#endif // DEBUG_NEW_NO_NEW_REDEFINITION #ifdef DEBUG_NEW_EMULATE_MALLOC #define malloc(s) ((void *)(debug_new char[s])) #define free(p) delete[](char *)(p) -#endif // DEBUG_NEW_EMULATE_MALLOC +#endif // DEBUG_NEW_EMULATE_MALLOC /* Control flags */ -extern bool new_verbose_flag; // default to false: no verbose information -extern bool new_autocheck_flag; // default to true: call check_leaks() on exit +extern bool new_verbose_flag; // default to false: no verbose information +extern bool new_autocheck_flag; // default to true: call check_leaks() on exit -} //namespace common -#endif // __COMMON_MM_DEBUG_NEW_H__ +} // namespace common +#endif // __COMMON_MM_DEBUG_NEW_H__ diff --git a/deps/common/mm/mem.cpp.skip b/deps/common/mm/mem.cpp.skip index ead70735dc8e6a10570b9192a18d71a49ff84709..f50cbf4379353d07af0bbddc7acab60af7a85a08 100644 --- a/deps/common/mm/mem.cpp.skip +++ b/deps/common/mm/mem.cpp.skip @@ -28,8 +28,8 @@ MemID *CLMemTrace::mMemIDs[MEM_HASHTABLE_SIZE] = {0}; bool CLMemTrace::mVerbose = false; ; -void *CLMemTrace::malloc(size_t size, const char *file, const int line, - bool retry) throw(std::bad_alloc) { +void *CLMemTrace::malloc(size_t size, const char *file, const int line, bool retry) throw(std::bad_alloc) +{ size_t allocSize = size + sizeof(MemID); void *usedPointer = NULL; @@ -81,8 +81,8 @@ void *CLMemTrace::malloc(size_t size, const char *file, const int line, return NULL; } -void *CLMemTrace::realloc(void *pointer, size_t size, const char *file, - const int line) { +void *CLMemTrace::realloc(void *pointer, size_t size, const char *file, const int line) +{ if (pointer == NULL && size == 0) { return NULL; } else if (pointer == NULL && size != 0) { @@ -172,8 +172,7 @@ void *CLMemTrace::realloc(void *pointer, size_t size, const char *file, /** * Secondly, add the new one to table */ - u64_t newHashIndex = - (u64_t)MEM_ID_HASH((char *)pNewMemID + sizeof(MemID)); + u64_t newHashIndex = (u64_t)MEM_ID_HASH((char *)pNewMemID + sizeof(MemID)); pNewMemID->mNext = mMemIDs[newHashIndex]; mMemIDs[newHashIndex] = pNewMemID; @@ -182,8 +181,7 @@ void *CLMemTrace::realloc(void *pointer, size_t size, const char *file, * Third, do memory copy * to simplify the old logic, copy memory here */ - memcpy((char *)pNewMemID + sizeof(MemID), - (char *)pFreeMemID + sizeof(MemID), pFreeMemID->mSize); + memcpy((char *)pNewMemID + sizeof(MemID), (char *)pFreeMemID + sizeof(MemID), pFreeMemID->mSize); break; } } @@ -191,9 +189,7 @@ void *CLMemTrace::realloc(void *pointer, size_t size, const char *file, MUTEX_UNLOCK(&mMutex); if (foundOld == 
false) { - LOG_WARN( - "Something is wrong, the old pointer %p isn't found, so alloc new one", - pointer); + LOG_WARN("Something is wrong, the old pointer %p isn't found, so alloc new one", pointer); try { return malloc(size, file, line, false); } catch (std::bad_alloc &e) { @@ -203,8 +199,7 @@ void *CLMemTrace::realloc(void *pointer, size_t size, const char *file, } if (mVerbose) { - LOG_INFO("Delete %p, file:%s, line:%u, size:%llu", pointer, oldMemID.mFile, - oldMemID.mLine, oldMemID.mSize); + LOG_INFO("Delete %p, file:%s, line:%u, size:%llu", pointer, oldMemID.mFile, oldMemID.mLine, oldMemID.mSize); } if (pFreeMemID) { @@ -215,8 +210,10 @@ void *CLMemTrace::realloc(void *pointer, size_t size, const char *file, if (mVerbose) { LOG_INFO("Alloc %p, file:%s, line:%u, size:%llu", - (char *)pNewMemID + sizeof(MemID), pNewMemID->mFile, - pNewMemID->mLine, pNewMemID->mSize); + (char *)pNewMemID + sizeof(MemID), + pNewMemID->mFile, + pNewMemID->mLine, + pNewMemID->mSize); } return pNewMemID; } @@ -224,7 +221,8 @@ void *CLMemTrace::realloc(void *pointer, size_t size, const char *file, return NULL; } -void CLMemTrace::free(void *pointer) { +void CLMemTrace::free(void *pointer) +{ if (pointer == NULL) { LOG_WARN("Free one empty pointer"); return; @@ -260,8 +258,7 @@ void CLMemTrace::free(void *pointer) { if (pMemID) { if (mVerbose) { - LOG_INFO("Delete %p, file:%s, line:%u, size:%llu", pointer, pMemID->mFile, - pMemID->mLine, pMemID->mSize); + LOG_INFO("Delete %p, file:%s, line:%u, size:%llu", pointer, pMemID->mFile, pMemID->mLine, pMemID->mSize); } ::free(pMemID); return; @@ -273,7 +270,8 @@ void CLMemTrace::free(void *pointer) { return; } -std::new_handler CLMemTrace::getNewHandler() { +std::new_handler CLMemTrace::getNewHandler() +{ std::new_handler newHandler = NULL; MUTEX_LOCK(&mMutex); @@ -285,7 +283,8 @@ std::new_handler CLMemTrace::getNewHandler() { return newHandler; } -void CLMemTrace::output() { +void CLMemTrace::output() +{ for (int i = 0; i < MEM_HASHTABLE_SIZE; ++i) { // Don't lock outside of the loop // 1. 
avoid output too long to alloc/free memory @@ -298,8 +297,8 @@ void CLMemTrace::output() { } while (ptr) { // if LOG_INFO alloc memory, it will easy leading to dead lock - LOG_INFO("Exist %p, file:%s, line:%u, size:%llu", - (char *)ptr + sizeof(MemID), ptr->mFile, ptr->mLine, ptr->mSize); + LOG_INFO( + "Exist %p, file:%s, line:%u, size:%llu", (char *)ptr + sizeof(MemID), ptr->mFile, ptr->mLine, ptr->mSize); ptr = ptr->mNext; } @@ -307,23 +306,28 @@ void CLMemTrace::output() { } } -void *operator new(std::size_t size, const char *file, int line) { +void *operator new(std::size_t size, const char *file, int line) +{ return CLMemTrace::malloc(size, file, line, true); } -void *operator new[](std::size_t size, const char *file, int line) { +void *operator new[](std::size_t size, const char *file, int line) +{ return operator new(size, file, line); } -void *operator new(std::size_t size) throw(std::bad_alloc) { +void *operator new(std::size_t size) throw(std::bad_alloc) +{ return operator new(size, "", 0); } -void *operator new[](std::size_t size) throw(std::bad_alloc) { +void *operator new[](std::size_t size) throw(std::bad_alloc) +{ return operator new(size); } -void *operator new(std::size_t size, const std::nothrow_t &) throw() { +void *operator new(std::size_t size, const std::nothrow_t &) throw() +{ void *pointer = NULL; try { pointer = operator new(size); @@ -334,7 +338,8 @@ void *operator new(std::size_t size, const std::nothrow_t &) throw() { return pointer; } -void *operator new[](std::size_t size, const std::nothrow_t &) throw() { +void *operator new[](std::size_t size, const std::nothrow_t &) throw() +{ void *pointer = NULL; try { pointer = operator[] new(size); @@ -345,9 +350,15 @@ void *operator new[](std::size_t size, const std::nothrow_t &) throw() { return pointer; } -void operator delete(void *pointer) { CLMemTrace::free(pointer); } +void operator delete(void *pointer) +{ + CLMemTrace::free(pointer); +} -void operator delete[](void *pointer) { operator delete(pointer); } +void operator delete[](void *pointer) +{ + operator delete(pointer); +} // Some older compilers like Borland C++ Compiler 5.5.1 and Digital Mars // Compiler 8.29 do not support placement delete operators. @@ -355,23 +366,28 @@ void operator delete[](void *pointer) { operator delete(pointer); } // Also note that in that case memory leakage will occur if an exception // is thrown in the initialization (constructor) of a dynamically // created object. 
-void operator delete(void *pointer, const char *file, int line) { +void operator delete(void *pointer, const char *file, int line) +{ operator delete(pointer); } -void operator delete[](void *pointer, const char *file, int line) { +void operator delete[](void *pointer, const char *file, int line) +{ operator delete(pointer, file, line); } -void operator delete(void *pointer, const std::nothrow_t &) { +void operator delete(void *pointer, const std::nothrow_t &) +{ operator delete(pointer, "", 0); } -void operator delete[](void *pointer, const std::nothrow_t &) { +void operator delete[](void *pointer, const std::nothrow_t &) +{ operator delete(pointer, std::nothrow); } -void *Lcalloc(size_t nmemb, size_t size, const char *file, const int line) { +void *Lcalloc(size_t nmemb, size_t size, const char *file, const int line) +{ try { void *point = CLMemTrace::malloc(size * nmemb, file, line, false); if (point) { @@ -384,7 +400,8 @@ void *Lcalloc(size_t nmemb, size_t size, const char *file, const int line) { return pointer; } -void *Lmalloc(size_t size, const char *file, const int line) { +void *Lmalloc(size_t size, const char *file, const int line) +{ try { void *point = CLMemTrace::malloc(size, file, line, false); } catch (std::bad_alloc &e) { @@ -394,8 +411,12 @@ void *Lmalloc(size_t size, const char *file, const int line) { return pointer; } -void Lfree(void *ptr) { CLMemTrace::free(ptr); } -void *Lrealloc(void *ptr, size_t size, const char *file, const int line) { +void Lfree(void *ptr) +{ + CLMemTrace::free(ptr); +} +void *Lrealloc(void *ptr, size_t size, const char *file, const int line) +{ // simplify the logic return CLMemTrace::realloc(ptr, size, file, line); } diff --git a/deps/common/mm/mem.h index cf05231c5ebe2dbb1dfbb93d20c14dafa94852b6..7d001982c5592ac2f00ab5cad31e08afe98ceffd 100644 --- a/deps/common/mm/mem.h +++ b/deps/common/mm/mem.h @@ -31,8 +31,6 @@ namespace common { #else - - typedef struct MemID_t { public: const static int MEM_FILENAME_LEN = 32; @@ -44,12 +42,10 @@ public: class CLMemTrace { public: - static void *malloc(size_t size, const char *file, const int line, - bool retry = false) throw(std::bad_alloc); + static void *malloc(size_t size, const char *file, const int line, bool retry = false) throw(std::bad_alloc); // just use for realloc, same functionality as realloc - static void *realloc(void *ptr, size_t size, const char *file, - const int line); + static void *realloc(void *ptr, size_t size, const char *file, const int line); static void free(void *ptr); @@ -58,7 +54,10 @@ public: /** * set whether show every details */ - static void setVerbose(bool verbose) { mVerbose = verbose; } + static void setVerbose(bool verbose) + { + mVerbose = verbose; + } protected: static std::new_handler getNewHandler(); @@ -132,5 +131,5 @@ static void operator delete[](void *pointer); #endif /* MEM_DEBUG */ -} //namespace common +} // namespace common #endif /* __COMMON_MM_MEM_H__ */ diff --git a/deps/common/os/os.cpp index d09e6c3457df0c04f2b083bb308a450f088d642c..102fb1d1d49a5b71ba7de4970e65484dff8b97e9 100644 --- a/deps/common/os/os.cpp +++ b/deps/common/os/os.cpp @@ -21,7 +21,8 @@ See the Mulan PSL v2 for more details.
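// Sketch of the C-style wrappers above (illustrative; assumes a MEM_DEBUG build and
// that Lmalloc/Lrealloc/Lfree live in namespace common as the surrounding code suggests).
#include "common/mm/mem.h"

void demo_tracked_c_allocation()
{
  char *buf = (char *)common::Lmalloc(128, __FILE__, __LINE__);
  buf = (char *)common::Lrealloc(buf, 256, __FILE__, __LINE__);  // keeps the file/line bookkeeping
  common::Lfree(buf);
}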
*/ namespace common { // Don't care windows -u32_t getCpuNum() { +u32_t getCpuNum() +{ return std::thread::hardware_concurrency(); } @@ -30,14 +31,13 @@ u32_t getCpuNum() { void print_stacktrace() { int size = MAX_STACK_SIZE; - void * array[MAX_STACK_SIZE]; + void *array[MAX_STACK_SIZE]; int stack_num = backtrace(array, size); - char ** stacktrace = backtrace_symbols(array, stack_num); - for (int i = 0; i < stack_num; ++i) - { + char **stacktrace = backtrace_symbols(array, stack_num); + for (int i = 0; i < stack_num; ++i) { LOG_INFO("%d ----- %s\n", i, stacktrace[i]); } free(stacktrace); } -}//namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/os/os.h b/deps/common/os/os.h index 6150d7b17809edb9f050b8faf026b9ec1b437883..0840d1eafcfed8ea6438b3671919c0bd26e56a79 100644 --- a/deps/common/os/os.h +++ b/deps/common/os/os.h @@ -20,5 +20,5 @@ u32_t getCpuNum(); void print_stacktrace(); -} //namespace common +} // namespace common #endif /* __COMMON_OS_OS_H__ */ diff --git a/deps/common/os/path.h b/deps/common/os/path.h index 8ba0d109b272ce2232e2416a1115b8409657fb6d..6b3d668615c733fed9d81dbca0bccdc6212f2721 100644 --- a/deps/common/os/path.h +++ b/deps/common/os/path.h @@ -18,7 +18,6 @@ See the Mulan PSL v2 for more details. */ #include namespace common { - /** * get file name from full path * example @@ -66,7 +65,7 @@ bool check_directory(std::string &path); * @param filter_pattern 示例 ^miniob.*bin$ * @return 成功返回找到的文件个数,否则返回-1 */ -int list_file(const char *path, const char *filter_pattern, std::vector &files); // io/io.h::getFileList +int list_file(const char *path, const char *filter_pattern, std::vector &files); // io/io.h::getFileList -} //namespace common -#endif //__COMMON_OS_PATH_H__ +} // namespace common +#endif //__COMMON_OS_PATH_H__ diff --git a/deps/common/os/pidfile.cpp b/deps/common/os/pidfile.cpp index e75e04ad5e48b190ef2848fa222c140c04b28d31..313b0e6a97b618b924ac6f6289e54af137a278cd 100644 --- a/deps/common/os/pidfile.cpp +++ b/deps/common/os/pidfile.cpp @@ -26,25 +26,26 @@ See the Mulan PSL v2 for more details. */ #include "common/os/pidfile.h" namespace common { -std::string& getPidPath() { +std::string &getPidPath() +{ static std::string path; return path; } -void setPidPath(const char *progName) { +void setPidPath(const char *progName) +{ std::string &path = getPidPath(); if (progName != NULL) { path = std::string(_PATH_TMP) + progName + ".pid"; - }else { + } else { path = ""; } - } - -int writePidFile(const char *progName) { +int writePidFile(const char *progName) +{ assert(progName); std::ofstream ostr; int rv = 1; @@ -58,15 +59,14 @@ int writePidFile(const char *progName) { rv = 0; } else { rv = errno; - std::cerr << "error opening PID file " << path.c_str() << SYS_OUTPUT_ERROR - << std::endl; + std::cerr << "error opening PID file " << path.c_str() << SYS_OUTPUT_ERROR << std::endl; } return rv; } - -void removePidFile(void) { +void removePidFile(void) +{ std::string path = getPidPath(); if (!path.empty()) { unlink(path.c_str()); @@ -75,5 +75,4 @@ void removePidFile(void) { return; } - -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/os/pidfile.h b/deps/common/os/pidfile.h index 089e6f3f756e36ecbf0f3f6df9f67fd09d45f5ef..629ee280df352dd526a4d980fa308878393b5d06 100644 --- a/deps/common/os/pidfile.h +++ b/deps/common/os/pidfile.h @@ -16,7 +16,6 @@ See the Mulan PSL v2 for more details. 
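// Usage sketch for the PID-file helpers above (illustrative; "observer" is just an
// example program name and error handling is trimmed).
#include "common/os/pidfile.h"

int start_service()
{
  common::setPidPath("observer");               // pid file becomes <_PATH_TMP>observer.pid
  if (common::writePidFile("observer") != 0) {  // writes getpid() into that file
    return -1;
  }
  // ... run the service ...
  common::removePidFile();                      // unlink on clean shutdown
  return 0;
}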
*/ #define __COMMON_OS_PIDFILE_H__ namespace common { - //! Generates a PID file for the current component /** * Gets the process ID (PID) of the calling process and writes a file @@ -35,7 +34,7 @@ int writePidFile(const char *progName); */ void removePidFile(void); -std::string& getPidPath(); +std::string &getPidPath(); -} //namespace common -#endif // __COMMON_OS_PIDFILE_H__ +} // namespace common +#endif // __COMMON_OS_PIDFILE_H__ diff --git a/deps/common/os/process.cpp b/deps/common/os/process.cpp index 8d146b32362cf141c39222533c5e006ace3c7d35..c54fe09febb568a3c8a4d4c9e9fdb6e1d7eceb31 100644 --- a/deps/common/os/process.cpp +++ b/deps/common/os/process.cpp @@ -30,12 +30,13 @@ namespace common { #include #endif -#define MAX_ERR_OUTPUT 10000000 // 10M -#define MAX_STD_OUTPUT 10000000 // 10M +#define MAX_ERR_OUTPUT 10000000 // 10M +#define MAX_STD_OUTPUT 10000000 // 10M #define RWRR (S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH) -std::string get_process_name(const char *prog_name) { +std::string get_process_name(const char *prog_name) +{ std::string process_name; int buf_len = strlen(prog_name); @@ -44,8 +45,7 @@ std::string get_process_name(const char *prog_name) { char *buf = new char[buf_len + 1]; if (buf == NULL) { - std::cerr << "Failed to alloc memory for program name." - << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; + std::cerr << "Failed to alloc memory for program name." << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; return ""; } memset(buf, 0, buf_len + 1); @@ -59,7 +59,8 @@ std::string get_process_name(const char *prog_name) { // Background the process by detaching it from the console and redirecting // std in, out, and err to /dev/null -int daemonize_service(bool close_std_streams) { +int daemonize_service(bool close_std_streams) +{ int nochdir = 1; int noclose = close_std_streams ? 0 : 1; int rc = daemon(nochdir, noclose); @@ -70,7 +71,8 @@ int daemonize_service(bool close_std_streams) { return rc; } -int daemonize_service(const char *std_out_file, const char *std_err_file) { +int daemonize_service(const char *std_out_file, const char *std_err_file) +{ int rc = daemonize_service(false); if (rc != 0) { @@ -83,7 +85,8 @@ int daemonize_service(const char *std_out_file, const char *std_err_file) { return 0; } -void sys_log_redirect(const char *std_out_file, const char *std_err_file) { +void sys_log_redirect(const char *std_out_file, const char *std_err_file) +{ int rc = 0; // Redirect stdin to /dev/null @@ -107,7 +110,7 @@ void sys_log_redirect(const char *std_out_file, const char *std_err_file) { std::string err_file = getAboslutPath(std_err_file); - // CWE367: A check occurs on a file's attributes before the file is + // CWE367: A check occurs on a file's attributes before the file is // used in a privileged operation, but things may have changed // Redirect stderr to std_err_file // struct stat st; @@ -117,16 +120,15 @@ void sys_log_redirect(const char *std_out_file, const char *std_err_file) { // std_err_flag |= O_TRUNC; // Remove old content if any. // } - std_err_flag |= O_TRUNC; // Remove old content if any. + std_err_flag |= O_TRUNC; // Remove old content if any. 
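// Minimal sketch of the open/dup2/setvbuf pattern that sys_log_redirect applies below
// (illustrative POSIX snippet; error handling and permission details elided).
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

static void redirect_stderr_to(const char *path)
{
  int fd = open(path, O_CREAT | O_WRONLY | O_TRUNC, 0644);
  if (fd >= 0) {
    dup2(fd, STDERR_FILENO);  // stderr now points at the log file
    close(fd);                // the dup'ed descriptor keeps the file open
  }
  setvbuf(stderr, NULL, _IONBF, 0);  // unbuffered, so a crash loses no output
}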
int errfd = open(err_file.c_str(), std_err_flag, RWRR); if (errfd >= 0) { dup2(errfd, STDERR_FILENO); close(errfd); } - setvbuf(stderr, NULL, _IONBF, 0); // Make sure stderr is not buffering - std::cerr << "Process " << getpid() << " built error output at " << tv.tv_sec - << std::endl; + setvbuf(stderr, NULL, _IONBF, 0); // Make sure stderr is not buffering + std::cerr << "Process " << getpid() << " built error output at " << tv.tv_sec << std::endl; std::string outFile = getAboslutPath(std_out_file); @@ -137,17 +139,16 @@ void sys_log_redirect(const char *std_out_file, const char *std_err_file) { // std_out_flag |= O_TRUNC; // Remove old content if any. // } - std_out_flag |= O_TRUNC; // Remove old content if any. + std_out_flag |= O_TRUNC; // Remove old content if any. int outfd = open(outFile.c_str(), std_out_flag, RWRR); if (outfd >= 0) { dup2(outfd, STDOUT_FILENO); close(outfd); } - setvbuf(stdout, NULL, _IONBF, 0); // Make sure stdout not buffering - std::cout << "Process " << getpid() << " built standard output at " - << tv.tv_sec << std::endl; + setvbuf(stdout, NULL, _IONBF, 0); // Make sure stdout not buffering + std::cout << "Process " << getpid() << " built standard output at " << tv.tv_sec << std::endl; return; } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/os/process.h b/deps/common/os/process.h index 2d97b9fa400a8b19bdfe6aa8bcc22b91fb816fe4..15db7eabd2a69d339d682347fa0afa21a13abb06 100644 --- a/deps/common/os/process.h +++ b/deps/common/os/process.h @@ -16,7 +16,6 @@ See the Mulan PSL v2 for more details. */ #define __COMMON_OS_PROCESS_H__ namespace common { - //! Get process Name /** * @param[in] prog_full_name process full name with full path @@ -43,5 +42,5 @@ int daemonize_service(const char *std_out_file, const char *std_err_file); void sys_log_redirect(const char *std_out_file, const char *std_err_file); -} //namespace common -#endif //__COMMON_OS_PROCESS_H__ +} // namespace common +#endif //__COMMON_OS_PROCESS_H__ diff --git a/deps/common/os/process_param.cpp b/deps/common/os/process_param.cpp index 14c73948f106b4899c8110f5966d812552cc5bd8..a3606c59223de9fa00b1f59954fec357f02c0b50 100644 --- a/deps/common/os/process_param.cpp +++ b/deps/common/os/process_param.cpp @@ -17,14 +17,15 @@ See the Mulan PSL v2 for more details. */ namespace common { //! 
Global process config -ProcessParam*& the_process_param() +ProcessParam *&the_process_param() { - static ProcessParam* process_cfg = new ProcessParam(); + static ProcessParam *process_cfg = new ProcessParam(); return process_cfg; } -void ProcessParam::init_default(std::string &process_name) { +void ProcessParam::init_default(std::string &process_name) +{ assert(process_name.empty() == false); this->process_name_ = process_name; if (std_out_.empty()) { @@ -40,6 +41,4 @@ void ProcessParam::init_default(std::string &process_name) { demon = false; } - - -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/os/process_param.h b/deps/common/os/process_param.h index 280d8605b1fe7fb879ae1aa9833311d7e0c91781..614beb846213278471dc2425e90cbb1a7bc7af15 100644 --- a/deps/common/os/process_param.h +++ b/deps/common/os/process_param.h @@ -21,69 +21,107 @@ namespace common { class ProcessParam { - public: - ProcessParam() {} +public: + ProcessParam() + {} - virtual ~ProcessParam() {} + virtual ~ProcessParam() + {} void init_default(std::string &process_name); - const std::string &get_std_out() const { return std_out_; } + const std::string &get_std_out() const + { + return std_out_; + } - void set_std_out(const std::string &std_out) { ProcessParam::std_out_ = std_out; } + void set_std_out(const std::string &std_out) + { + ProcessParam::std_out_ = std_out; + } - const std::string &get_std_err() const { return std_err_; } + const std::string &get_std_err() const + { + return std_err_; + } - void set_std_err(const std::string &std_err) { ProcessParam::std_err_ = std_err; } + void set_std_err(const std::string &std_err) + { + ProcessParam::std_err_ = std_err; + } - const std::string &get_conf() const { return conf; } + const std::string &get_conf() const + { + return conf; + } - void set_conf(const std::string &conf) { ProcessParam::conf = conf; } + void set_conf(const std::string &conf) + { + ProcessParam::conf = conf; + } - const std::string &get_process_name() const { return process_name_; } + const std::string &get_process_name() const + { + return process_name_; + } - void set_process_name(const std::string &processName) { + void set_process_name(const std::string &processName) + { ProcessParam::process_name_ = processName; } - bool is_demon() const { return demon; } + bool is_demon() const + { + return demon; + } - void set_demon(bool demon) { ProcessParam::demon = demon; } + void set_demon(bool demon) + { + ProcessParam::demon = demon; + } - const std::vector &get_args() const { return args; } + const std::vector &get_args() const + { + return args; + } - void set_args(const std::vector &args) { + void set_args(const std::vector &args) + { ProcessParam::args = args; } - void set_server_port(int port) { + void set_server_port(int port) + { server_port_ = port; } - int get_server_port() const { + int get_server_port() const + { return server_port_; } - void set_unix_socket_path(const char *unix_socket_path) { + void set_unix_socket_path(const char *unix_socket_path) + { unix_socket_path_ = unix_socket_path; } - - const std::string &get_unix_socket_path() const { + + const std::string &get_unix_socket_path() const + { return unix_socket_path_; } - private: - std::string std_out_; // The output file - std::string std_err_; // The err output file - std::string conf; // The configuration file - std::string process_name_; // The process name - bool demon = false; // whether demon or not - std::vector args; // arguments - int server_port_ 
= -1; // server port(if valid, will overwrite the port in the config file) +private: + std::string std_out_; // The output file + std::string std_err_; // The err output file + std::string conf; // The configuration file + std::string process_name_; // The process name + bool demon = false; // whether demon or not + std::vector args; // arguments + int server_port_ = -1; // server port(if valid, will overwrite the port in the config file) std::string unix_socket_path_; }; -ProcessParam*& the_process_param(); +ProcessParam *&the_process_param(); -} //namespace common -#endif //__COMMON_OS_PROCESS_PARAM_H__ +} // namespace common +#endif //__COMMON_OS_PROCESS_PARAM_H__ diff --git a/deps/common/os/signal.cpp index e20d293034da69f2d6356fcbd9c73f47f3fc2a98..a4b2c674995253e3db1468ef7c0fe3136dec9661 100644 --- a/deps/common/os/signal.cpp +++ b/deps/common/os/signal.cpp @@ -17,29 +17,31 @@ See the Mulan PSL v2 for more details. */ #include "pthread.h" namespace common { -void setSignalHandler(int sig, sighandler_t func) { +void setSignalHandler(int sig, sighandler_t func) +{ struct sigaction newsa, oldsa; sigemptyset(&newsa.sa_mask); newsa.sa_flags = 0; newsa.sa_handler = func; int rc = sigaction(sig, &newsa, &oldsa); if (rc) { - std::cerr << "Failed to set signal " << sig << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR << std::endl; + std::cerr << "Failed to set signal " << sig << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; } } /* ** Set Signal handling Function */ -void setSignalHandler(sighandler_t func) { +void setSignalHandler(sighandler_t func) +{ setSignalHandler(SIGQUIT, func); setSignalHandler(SIGINT, func); setSignalHandler(SIGHUP, func); setSignalHandler(SIGTERM, func); } -void blockDefaultSignals(sigset_t *signal_set, sigset_t *old_set) { +void blockDefaultSignals(sigset_t *signal_set, sigset_t *old_set) +{ sigemptyset(signal_set); #ifndef DEBUG // SIGINT will affect our gdb debugging @@ -50,7 +52,8 @@ void blockDefaultSignals(sigset_t *signal_set, sigset_t *old_set) { pthread_sigmask(SIG_BLOCK, signal_set, old_set); } -void unBlockDefaultSignals(sigset_t *signal_set, sigset_t *old_set) { +void unBlockDefaultSignals(sigset_t *signal_set, sigset_t *old_set) +{ sigemptyset(signal_set); #ifndef DEBUG sigaddset(signal_set, SIGINT); @@ -60,7 +63,8 @@ void unBlockDefaultSignals(sigset_t *signal_set, sigset_t *old_set) { pthread_sigmask(SIG_UNBLOCK, signal_set, old_set); } -void *waitForSignals(void *args) { +void *waitForSignals(void *args) +{ LOG_INFO("Start thread to wait signals."); sigset_t *signal_set = (sigset_t *)args; int sig_number = -1; @@ -68,7 +72,7 @@ void *waitForSignals(void *args) { errno = 0; int ret = sigwait(signal_set, &sig_number); LOG_INFO("sigwait return value: %d, %d \n", ret, sig_number); - if (ret != 0) { + if (ret != 0) { char errstr[256]; strerror_r(errno, errstr, sizeof(errstr)); LOG_ERROR("error (%d) %s\n", errno, errstr); @@ -77,7 +81,8 @@ void *waitForSignals(void *args) { return NULL; } -void startWaitForSignals(sigset_t *signal_set) { +void startWaitForSignals(sigset_t *signal_set) +{ pthread_t pThread; pthread_attr_t pThreadAttrs; @@ -86,6 +91,5 @@ void startWaitForSignals(sigset_t *signal_set) { pthread_attr_setdetachstate(&pThreadAttrs, PTHREAD_CREATE_DETACHED); pthread_create(&pThread, &pThreadAttrs, waitForSignals, (void *)signal_set); - } -} // namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/os/signal.h index
40e11223e4260902eb5d0202341fa91445a1bf24..70801f16e204ca3a216e3913df8b9ffec856c2c6 100644 --- a/deps/common/os/signal.h +++ b/deps/common/os/signal.h @@ -30,7 +30,6 @@ void blockDefaultSignals(sigset_t *signal_set, sigset_t *old_set); */ void unBlockDefaultSignals(sigset_t *signal_set, sigset_t *old_set); - void *waitForSignals(sigset_t *signal_set); void startWaitForSignals(sigset_t *signal_set); @@ -42,5 +41,5 @@ typedef void (*sighandler_t)(int); void setSignalHandler(sighandler_t func); void setSignalHandler(int sig, sighandler_t func); -} //namespace common +} // namespace common #endif /* __COMMON_OS_SIGNAL_H__ */ diff --git a/deps/common/seda/callback.cpp b/deps/common/seda/callback.cpp index 99f197a234d1d73426c9ae49f79a5d7dd326ab8a..afee205cec8a8a8734cbfe72e417effaf9a331c0 100644 --- a/deps/common/seda/callback.cpp +++ b/deps/common/seda/callback.cpp @@ -33,11 +33,12 @@ extern bool &get_event_history_flag(); // Constructor CompletionCallback::CompletionCallback(Stage *trgt, CallbackContext *ctx) - : target_stage_(trgt), context_(ctx), next_cb_(NULL), - ev_hist_flag_(get_event_history_flag()) {} + : target_stage_(trgt), context_(ctx), next_cb_(NULL), ev_hist_flag_(get_event_history_flag()) +{} // Destructor -CompletionCallback::~CompletionCallback() { +CompletionCallback::~CompletionCallback() +{ if (context_) { delete context_; } @@ -47,14 +48,16 @@ CompletionCallback::~CompletionCallback() { } // Push onto a callback stack -void CompletionCallback::push_callback(CompletionCallback *next) { +void CompletionCallback::push_callback(CompletionCallback *next) +{ ASSERT((!next_cb_), "%s", "cannot push a callback twice"); next_cb_ = next; } // Pop off of a callback stack -CompletionCallback *CompletionCallback::pop_callback() { +CompletionCallback *CompletionCallback::pop_callback() +{ CompletionCallback *ret_val = next_cb_; next_cb_ = NULL; @@ -62,7 +65,8 @@ CompletionCallback *CompletionCallback::pop_callback() { } // One event is complete -void CompletionCallback::event_done(StageEvent *ev) { +void CompletionCallback::event_done(StageEvent *ev) +{ if (ev_hist_flag_) { ev->save_stage(target_stage_, StageEvent::CALLBACK_EV); @@ -71,11 +75,13 @@ void CompletionCallback::event_done(StageEvent *ev) { } // Reschedule callback on target stage thread -void CompletionCallback::event_reschedule(StageEvent *ev) { +void CompletionCallback::event_reschedule(StageEvent *ev) +{ target_stage_->add_event(ev); } -void CompletionCallback::event_timeout(StageEvent *ev) { +void CompletionCallback::event_timeout(StageEvent *ev) +{ LOG_DEBUG("to call event_timeout for stage %s", target_stage_->get_name()); if (ev_hist_flag_) { ev->save_stage(target_stage_, StageEvent::TIMEOUT_EV); @@ -83,4 +89,4 @@ void CompletionCallback::event_timeout(StageEvent *ev) { target_stage_->timeout_event(ev, context_); } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/seda/callback.h b/deps/common/seda/callback.h index b95a68ab22c7e1b0b92e24baeb84bc688195e4f1..0f6372b8077b5ac6ede3cd013bfbaa042dba9f89 100644 --- a/deps/common/seda/callback.h +++ b/deps/common/seda/callback.h @@ -59,7 +59,7 @@ class CompletionCallback { // public interface operations - public: +public: // Constructor CompletionCallback(Stage *trgt, CallbackContext *ctx = NULL); @@ -84,12 +84,12 @@ class CompletionCallback { // Complete this event if it has timed out void event_timeout(StageEvent *ev); - protected: +protected: // implementation state - Stage *target_stage_; // stage which 
is setting this callback - CallbackContext *context_; // argument to pass when invoking cb - CompletionCallback *next_cb_; // next event in the chain + Stage *target_stage_; // stage which is setting this callback + CallbackContext *context_; // argument to pass when invoking cb + CompletionCallback *next_cb_; // next event in the chain bool ev_hist_flag_; // true if event histories are enabled }; @@ -101,19 +101,25 @@ class CompletionCallback { * callback context class from this base. */ class CallbackContext { - public: - virtual ~CallbackContext() {} +public: + virtual ~CallbackContext() + {} }; class CallbackContextEvent : public CallbackContext { - public: - CallbackContextEvent(StageEvent *event = NULL) : ev_(event) {} - ~CallbackContextEvent() {} - StageEvent *get_event() { return ev_; } - - private: +public: + CallbackContextEvent(StageEvent *event = NULL) : ev_(event) + {} + ~CallbackContextEvent() + {} + StageEvent *get_event() + { + return ev_; + } + +private: StageEvent *ev_; }; -} //namespace common -#endif // __COMMON_SEDA_CALLBACK_H__ +} // namespace common +#endif // __COMMON_SEDA_CALLBACK_H__ diff --git a/deps/common/seda/class_factory.h b/deps/common/seda/class_factory.h index 65481e086e05d41fe042a9fa52b7e4cf5550bf83..8b5582499182d0ca303622f22c09972211146623 100644 --- a/deps/common/seda/class_factory.h +++ b/deps/common/seda/class_factory.h @@ -21,7 +21,6 @@ See the Mulan PSL v2 for more details. */ #include "common/log/log.h" namespace common { - /** * A class to construct arbitrary subclass instances * @@ -42,10 +41,10 @@ namespace common { * with static linkage in a global initialization routine. */ -template +template class ClassFactory { - public: +public: typedef T *(*FactoryFunc)(const std::string &); /** @@ -69,13 +68,13 @@ class ClassFactory { */ static T *make_instance(const std::string &tag); - private: +private: // Accessor function that gets the head of the factory list static ClassFactory *&fact_list_head(); - std::string identifier_; // identifier for this factory + std::string identifier_; // identifier for this factory FactoryFunc fact_func_; // factory function for this class - ClassFactory *next_; // next factory in global list + ClassFactory *next_; // next factory in global list }; /** @@ -88,8 +87,9 @@ class ClassFactory { * as static. C++ guarantees that the first time the function is * invoked (from anywhere) the static local will be initialized. */ -template -ClassFactory *&ClassFactory::fact_list_head() { +template +ClassFactory *&ClassFactory::fact_list_head() +{ static ClassFactory *fact_list = NULL; return fact_list; } @@ -99,16 +99,17 @@ ClassFactory *&ClassFactory::fact_list_head() { * Implementation notes: * constructor places current instance on the global factory list. */ -template -ClassFactory::ClassFactory(const std::string &tag, FactoryFunc func) - : identifier_(tag), fact_func_(func) { +template +ClassFactory::ClassFactory(const std::string &tag, FactoryFunc func) : identifier_(tag), fact_func_(func) +{ next_ = fact_list_head(); fact_list_head() = this; } // Destructor -template -ClassFactory::~ClassFactory() {} +template +ClassFactory::~ClassFactory() +{} /** * Construct an instance of a specified sub-class @@ -116,8 +117,9 @@ ClassFactory::~ClassFactory() {} * scan global list to find matching tag and use the factory func to * create an instance. 
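// Registration sketch for the ClassFactory pattern described above (illustrative;
// Widget/RoundWidget are made-up names, and the template argument is written out
// explicitly even though the surrounding excerpt elides it).
#include <string>
#include "common/seda/class_factory.h"

class Widget {
public:
  virtual ~Widget() {}
};

class RoundWidget : public Widget {
public:
  static Widget *make(const std::string &) { return new RoundWidget; }
};

// Static-linkage registration: the constructor links this factory into the global list.
static common::ClassFactory<Widget> round_widget_factory("RoundWidget", &RoundWidget::make);

// Later, anywhere in the program:
//   Widget *w = common::ClassFactory<Widget>::make_instance("RoundWidget");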
*/ -template -T *ClassFactory::make_instance(const std::string &tag) { +template +T *ClassFactory::make_instance(const std::string &tag) +{ T *instance = NULL; ClassFactory *current = fact_list_head(); @@ -136,5 +138,5 @@ T *ClassFactory::make_instance(const std::string &tag) { return instance; } -} //namespace common -#endif // __COMMON_SEDA_CLASS_FACTORY_H__ +} // namespace common +#endif // __COMMON_SEDA_CLASS_FACTORY_H__ diff --git a/deps/common/seda/event_dispatcher.cpp b/deps/common/seda/event_dispatcher.cpp index 6b9e6cc9adaea9d2dbd049cd06e864c5233b45d5..59d6dc4662236f91c53c26f58ccb40b6c2f18515 100644 --- a/deps/common/seda/event_dispatcher.cpp +++ b/deps/common/seda/event_dispatcher.cpp @@ -17,8 +17,8 @@ See the Mulan PSL v2 for more details. */ namespace common { // Constructor -EventDispatcher::EventDispatcher(const char *tag) - : Stage(tag), event_store_(), next_stage_(NULL) { +EventDispatcher::EventDispatcher(const char *tag) : Stage(tag), event_store_(), next_stage_(NULL) +{ LOG_TRACE("enter\n"); pthread_mutexattr_t attr; @@ -31,7 +31,8 @@ EventDispatcher::EventDispatcher(const char *tag) } // Destructor -EventDispatcher::~EventDispatcher() { +EventDispatcher::~EventDispatcher() +{ LOG_TRACE("enter\n"); pthread_mutex_destroy(&event_lock_); LOG_TRACE("exit\n"); @@ -42,7 +43,8 @@ EventDispatcher::~EventDispatcher() { * Check if the event can be dispatched. If not, hash it and store * it. If so, send it on to the next stage. */ -void EventDispatcher::handle_event(StageEvent *event) { +void EventDispatcher::handle_event(StageEvent *event) +{ LOG_TRACE("enter\n"); std::string hash; @@ -67,7 +69,8 @@ void EventDispatcher::handle_event(StageEvent *event) { } // Initialize stage params and validate outputs -bool EventDispatcher::initialize() { +bool EventDispatcher::initialize() +{ bool ret_val = true; if (next_stage_list_.size() != 1) { @@ -82,15 +85,15 @@ bool EventDispatcher::initialize() { * Cleanup stage after disconnection * Call done() on any events left over in the event_store_. */ -void EventDispatcher::cleanup() { +void EventDispatcher::cleanup() +{ pthread_mutex_lock(&event_lock_); // for each hash chain... 
for (EventHash::iterator i = event_store_.begin(); i != event_store_.end(); i++) { // for each event on the chain - for (std::list::iterator j = i->second.begin(); - j != i->second.end(); j++) { + for (std::list::iterator j = i->second.begin(); j != i->second.end(); j++) { j->first->done(); } i->second.clear(); @@ -101,7 +104,8 @@ void EventDispatcher::cleanup() { } // Wake up a stored event -bool EventDispatcher::wakeup_event(std::string hashkey) { +bool EventDispatcher::wakeup_event(std::string hashkey) +{ bool sent = false; EventHash::iterator i; @@ -131,4 +135,4 @@ bool EventDispatcher::wakeup_event(std::string hashkey) { return sent; } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/seda/event_dispatcher.h b/deps/common/seda/event_dispatcher.h index 44346ad48d197d0f208d1a7e3ce83c8bf0c91d07..5f93ae56cec6835701fc78e54aff42ec28a23677 100644 --- a/deps/common/seda/event_dispatcher.h +++ b/deps/common/seda/event_dispatcher.h @@ -55,7 +55,7 @@ class EventDispatcher : public Stage { // public interface operations - public: +public: typedef enum { SEND_EVENT = 0, STORE_EVENT, FAIL_EVENT } status_t; /** @@ -77,7 +77,7 @@ class EventDispatcher : public Stage { // Note, EventDispatcher is an abstract class and needs no make_stage() - protected: +protected: /** * Constructor * @param[in] tag The label that identifies this stage. @@ -96,7 +96,10 @@ class EventDispatcher : public Stage { bool initialize(); // set properties for this object - bool set_properties() { return true; } + bool set_properties() + { + return true; + } /** * Cleanup stage after disconnection @@ -119,8 +122,7 @@ class EventDispatcher : public Stage { * FAIL_EVENT if failure, and event has been completed; * ctx is NULL */ - virtual status_t dispatch_event(StageEvent *ev, DispatchContext *&ctx, - std::string &hash) = 0; + virtual status_t dispatch_event(StageEvent *ev, DispatchContext *&ctx, std::string &hash) = 0; /** * Wake up a stored event @@ -136,11 +138,11 @@ class EventDispatcher : public Stage { typedef std::pair StoredEvent; typedef std::map> EventHash; - EventHash event_store_; // events stored here while waiting - pthread_mutex_t event_lock_; // protects access to event_store_ - Stage *next_stage_; // target for dispatched events + EventHash event_store_; // events stored here while waiting + pthread_mutex_t event_lock_; // protects access to event_store_ + Stage *next_stage_; // target for dispatched events - protected: +protected: }; /** @@ -148,9 +150,10 @@ class EventDispatcher : public Stage { * derive from this base class. */ class DispatchContext { - public: - virtual ~DispatchContext() {} +public: + virtual ~DispatchContext() + {} }; -} //namespace common -#endif // __COMMON_SEDA_EVENT_DISPATCHER_H__ +} // namespace common +#endif // __COMMON_SEDA_EVENT_DISPATCHER_H__ diff --git a/deps/common/seda/example_stage.cpp b/deps/common/seda/example_stage.cpp index 371dd94c51216f67b57f02c53ff3189cea1c7cd5..bf8411086b1236925a29245509f0fec50ebd42b4 100644 --- a/deps/common/seda/example_stage.cpp +++ b/deps/common/seda/example_stage.cpp @@ -26,13 +26,16 @@ See the Mulan PSL v2 for more details. 
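// Sketch of a concrete dispatcher built on the abstract EventDispatcher above
// (illustrative; MyDispatcher and the "default" hash key are made up).
#include <string>
#include "common/seda/event_dispatcher.h"

class MyDispatcher : public common::EventDispatcher {
public:
  static common::Stage *make_stage(const std::string &tag) { return new MyDispatcher(tag.c_str()); }

protected:
  MyDispatcher(const char *tag) : common::EventDispatcher(tag) {}

  // Decide whether an event may pass through now or must wait in event_store_.
  status_t dispatch_event(common::StageEvent *ev, common::DispatchContext *&ctx, std::string &hash)
  {
    ctx  = NULL;        // no per-event context needed here
    hash = "default";   // all events share one hash chain
    return SEND_EVENT;  // forward immediately to the single next stage
  }
};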
*/ using namespace common; // Constructor -ExampleStage::ExampleStage(const char *tag) : Stage(tag) {} +ExampleStage::ExampleStage(const char *tag) : Stage(tag) +{} // Destructor -ExampleStage::~ExampleStage() {} +ExampleStage::~ExampleStage() +{} // Parse properties, instantiate a stage object -Stage *ExampleStage::make_stage(const std::string &tag) { +Stage *ExampleStage::make_stage(const std::string &tag) +{ ExampleStage *stage = new ExampleStage(tag.c_str()); if (stage == NULL) { LOG_ERROR("new ExampleStage failed"); @@ -43,7 +46,8 @@ Stage *ExampleStage::make_stage(const std::string &tag) { } // Set properties for this object set in stage specific properties -bool ExampleStage::set_properties() { +bool ExampleStage::set_properties() +{ // std::string stageNameStr(stage_name_); // std::map section = g_properties()->get( // stageNameStr); @@ -56,7 +60,8 @@ bool ExampleStage::set_properties() { } // Initialize stage params and validate outputs -bool ExampleStage::initialize() { +bool ExampleStage::initialize() +{ LOG_TRACE("Enter"); // std::list::iterator stgp = next_stage_list_.begin(); @@ -68,20 +73,23 @@ bool ExampleStage::initialize() { } // Cleanup after disconnection -void ExampleStage::cleanup() { +void ExampleStage::cleanup() +{ LOG_TRACE("Enter"); LOG_TRACE("Exit"); } -void ExampleStage::handle_event(StageEvent *event) { +void ExampleStage::handle_event(StageEvent *event) +{ LOG_TRACE("Enter\n"); LOG_TRACE("Exit\n"); return; } -void ExampleStage::callback_event(StageEvent *event, CallbackContext *context) { +void ExampleStage::callback_event(StageEvent *event, CallbackContext *context) +{ LOG_TRACE("Enter\n"); LOG_TRACE("Exit\n"); diff --git a/deps/common/seda/example_stage.h b/deps/common/seda/example_stage.h index 50e34b35479d5cfb3ae95d0b367c021143cdb42e..41a7f5df7dcaea300099b9513439ce1e37ca6acd 100644 --- a/deps/common/seda/example_stage.h +++ b/deps/common/seda/example_stage.h @@ -33,7 +33,6 @@ protected: void cleanup(); void handle_event(StageEvent *event); void callback_event(StageEvent *event, CallbackContext *context); - }; -} // namespace common -#endif //__COMMON_SEDA_EXAMPLE_STAGE_H__ +} // namespace common +#endif //__COMMON_SEDA_EXAMPLE_STAGE_H__ diff --git a/deps/common/seda/init.cpp b/deps/common/seda/init.cpp index 0f169205ee2b56f6f44971d395e5e7629c0c7c88..8eb1d9371a57f9a052220e6f977eab5c5558b39c 100644 --- a/deps/common/seda/init.cpp +++ b/deps/common/seda/init.cpp @@ -37,17 +37,15 @@ See the Mulan PSL v2 for more details. 
*/ #include "common/seda/timer_stage.h" namespace common { - -int init_seda(ProcessParam *process_cfg) { +int init_seda(ProcessParam *process_cfg) +{ // Initialize the static data structures of threadpool Threadpool::create_pool_key(); // initialize class factory instances here - static StageFactory kill_thread_factory("KillThreads", - &KillThreadStage::make_stage); + static StageFactory kill_thread_factory("KillThreads", &KillThreadStage::make_stage); static StageFactory timer_factory("TimerStage", &TimerStage::make_stage); - static StageFactory seda_stats_factory("MetricsStage", - &MetricsStage::make_stage); + static StageFactory seda_stats_factory("MetricsStage", &MetricsStage::make_stage); // try to parse the seda configuration files SedaConfig *config = SedaConfig::get_instance(); @@ -55,16 +53,14 @@ int init_seda(ProcessParam *process_cfg) { config_stat = config->parse(); if (config_stat != SedaConfig::SUCCESS) { - LOG_ERROR("Error: unable to parse file %s", - process_cfg->get_process_name().c_str()); + LOG_ERROR("Error: unable to parse file %s", process_cfg->get_process_name().c_str()); return errno; } // Log a message to indicate that we are restarting, when looking // at a log we can see if mmon is restarting us because we keep // crashing. - LOG_INFO("(Re)Starting State: Pid: %u Time: %s", (unsigned int)getpid(), - DateTime::now().to_string_local().c_str()); + LOG_INFO("(Re)Starting State: Pid: %u Time: %s", (unsigned int)getpid(), DateTime::now().to_string_local().c_str()); LOG_INFO("The process Name is %s", process_cfg->get_process_name().c_str()); // try to initialize the seda configuration @@ -79,10 +75,11 @@ int init_seda(ProcessParam *process_cfg) { return 0; } -void cleanup_seda() { +void cleanup_seda() +{ SedaConfig *seda_config = SedaConfig::get_instance(); delete seda_config; SedaConfig::get_instance() = NULL; } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/seda/init.h b/deps/common/seda/init.h index 4b864024278f58bbc039037051d1f70a22feed02..640c04c9259715d6b211d9389910e8991ae88cd4 100644 --- a/deps/common/seda/init.h +++ b/deps/common/seda/init.h @@ -36,5 +36,5 @@ int init_seda(ProcessParam *process_cfg); void cleanup_seda(); -} //namespace common -#endif // __COMMON_SEDA_INIT_H__ +} // namespace common +#endif // __COMMON_SEDA_INIT_H__ diff --git a/deps/common/seda/kill_thread.cpp b/deps/common/seda/kill_thread.cpp index bb2f0d6e67839f3ea06e3e95e46b3f37cb1c383f..0e63ebb5128e12b73fbde6fdc075c3ddb1bf901e 100644 --- a/deps/common/seda/kill_thread.cpp +++ b/deps/common/seda/kill_thread.cpp @@ -19,14 +19,14 @@ See the Mulan PSL v2 for more details. */ #include "common/seda/thread_pool.h" namespace common { - /** * Notify the pool and kill the thread * @param[in] event Pointer to event that must be handled. * * @post Call never returns. Thread is killed. Pool is notified. 
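// Illustrative start-up flow tying the pieces above together (argv handling, config
// loading and error paths are simplified; boot_seda is a made-up wrapper name).
#include <string>
#include "common/os/process_param.h"
#include "common/seda/init.h"

int boot_seda(const char *prog)
{
  common::ProcessParam *param = common::the_process_param();
  std::string name(prog);
  param->init_default(name);            // fills std_out_/std_err_ defaults from the name
  if (common::init_seda(param) != 0) {  // registers factories, parses config, starts stages
    return -1;
  }
  // ... serve requests ...
  common::cleanup_seda();               // tears the SedaConfig instance down
  return 0;
}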
*/ -void KillThreadStage::handle_event(StageEvent *event) { +void KillThreadStage::handle_event(StageEvent *event) +{ get_pool()->thread_kill(); event->done(); this->release_event(); @@ -39,13 +39,15 @@ void KillThreadStage::handle_event(StageEvent *event) { * @post initializing the class members * @return the class object */ -Stage *KillThreadStage::make_stage(const std::string &tag) { +Stage *KillThreadStage::make_stage(const std::string &tag) +{ return new KillThreadStage(tag.c_str()); } -bool KillThreadStage::set_properties() { +bool KillThreadStage::set_properties() +{ // nothing to do return true; } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/seda/kill_thread.h b/deps/common/seda/kill_thread.h index 35a8a92e7d9c57efd2c0716ccea8dc2766e4b821..5bda3c002354abaf01096af46ad02a92c836faef 100644 --- a/deps/common/seda/kill_thread.h +++ b/deps/common/seda/kill_thread.h @@ -21,7 +21,6 @@ See the Mulan PSL v2 for more details. */ #include "common/seda/stage.h" namespace common { - /** * @file * @author Longda @@ -56,7 +55,8 @@ protected: * @post event queue is empty * @post stage is not connected */ - KillThreadStage(const char *tag) : Stage(tag) {} + KillThreadStage(const char *tag) : Stage(tag) + {} /** * Notify the pool and kill the thread @@ -70,7 +70,10 @@ protected: * Handle the callback * Nothing special for callbacks in this stage. */ - void callback_event(StageEvent *event, CallbackContext *context) { return; } + void callback_event(StageEvent *event, CallbackContext *context) + { + return; + } /** * Initialize stage params @@ -79,7 +82,10 @@ protected: * @pre Stage not connected * @return true */ - bool initialize() { return true; } + bool initialize() + { + return true; + } /** * set properties for this object @@ -92,5 +98,5 @@ protected: friend class Threadpool; }; -} //namespace common -#endif // __COMMON_SEDA_KILL_THREAD_H__ +} // namespace common +#endif // __COMMON_SEDA_KILL_THREAD_H__ diff --git a/deps/common/seda/metrics_report_event.h b/deps/common/seda/metrics_report_event.h index 4290b5982dddb89c46c1200d6507703b16fc20e4..d2fecb0a10bbe202e3869ac220095bd40d5da256 100644 --- a/deps/common/seda/metrics_report_event.h +++ b/deps/common/seda/metrics_report_event.h @@ -28,5 +28,5 @@ public: }; }; -} //namespace common -#endif //__COMMON_SEDA_METRICS_REPORT_EVENT_H__ +} // namespace common +#endif //__COMMON_SEDA_METRICS_REPORT_EVENT_H__ diff --git a/deps/common/seda/metrics_stage.cpp b/deps/common/seda/metrics_stage.cpp index cf59be8282a4c617419b1ca95e61c07b9ebe8964..bc3ec6cb7603b937f78d6e392cd2956c327e7bc7 100644 --- a/deps/common/seda/metrics_stage.cpp +++ b/deps/common/seda/metrics_stage.cpp @@ -29,20 +29,24 @@ See the Mulan PSL v2 for more details. 
*/ using namespace common; -MetricsRegistry &get_metric_registry() { +MetricsRegistry &get_metric_registry() +{ static MetricsRegistry metrics_registry; return metrics_registry; } // Constructor -MetricsStage::MetricsStage(const char *tag) : Stage(tag) {} +MetricsStage::MetricsStage(const char *tag) : Stage(tag) +{} // Destructor -MetricsStage::~MetricsStage() {} +MetricsStage::~MetricsStage() +{} // Parse properties, instantiate a stage object -Stage *MetricsStage::make_stage(const std::string &tag) { +Stage *MetricsStage::make_stage(const std::string &tag) +{ MetricsStage *stage = new MetricsStage(tag.c_str()); if (stage == NULL) { LOG_ERROR("new MetricsStage failed"); @@ -53,10 +57,10 @@ Stage *MetricsStage::make_stage(const std::string &tag) { } // Set properties for this object set in stage specific properties -bool MetricsStage::set_properties() { +bool MetricsStage::set_properties() +{ std::string stage_name_str(stage_name_); - std::map section = - get_properties()->get(stage_name_str); + std::map section = get_properties()->get(stage_name_str); metric_report_interval_ = DateTime::SECONDS_PER_MIN; @@ -70,7 +74,8 @@ bool MetricsStage::set_properties() { } // Initialize stage params and validate outputs -bool MetricsStage::initialize() { +bool MetricsStage::initialize() +{ LOG_TRACE("Enter"); std::list::iterator stgp = next_stage_list_.begin(); @@ -84,13 +89,15 @@ bool MetricsStage::initialize() { } // Cleanup after disconnection -void MetricsStage::cleanup() { +void MetricsStage::cleanup() +{ LOG_TRACE("Enter"); LOG_TRACE("Exit"); } -void MetricsStage::handle_event(StageEvent *event) { +void MetricsStage::handle_event(StageEvent *event) +{ LOG_TRACE("Enter\n"); CompletionCallback *cb = new CompletionCallback(this, NULL); @@ -102,8 +109,7 @@ void MetricsStage::handle_event(StageEvent *event) { return; } - TimerRegisterEvent *tm_event = - new TimerRegisterEvent(event, metric_report_interval_ * USEC_PER_SEC); + TimerRegisterEvent *tm_event = new TimerRegisterEvent(event, metric_report_interval_ * USEC_PER_SEC); if (tm_event == NULL) { LOG_ERROR("Failed to new TimerRegisterEvent"); @@ -121,7 +127,8 @@ void MetricsStage::handle_event(StageEvent *event) { return; } -void MetricsStage::callback_event(StageEvent *event, CallbackContext *context) { +void MetricsStage::callback_event(StageEvent *event, CallbackContext *context) +{ LOG_TRACE("Enter\n"); MetricsRegistry &metrics_registry = get_metrics_registry(); diff --git a/deps/common/seda/metrics_stage.h b/deps/common/seda/metrics_stage.h index 365c1bd6bbaefd06c3d806ad9b651f23f2e24bec..601cbe96a5d14642fccf6749da0209f0aa578fe9 100644 --- a/deps/common/seda/metrics_stage.h +++ b/deps/common/seda/metrics_stage.h @@ -37,8 +37,8 @@ protected: protected: private: Stage *timer_stage_ = nullptr; - //report metrics every @metric_report_interval_ seconds - int metric_report_interval_ = 10; + // report metrics every @metric_report_interval_ seconds + int metric_report_interval_ = 10; }; -} // namespace common -#endif //__COMMON_SEDA_METRICS_STAGE_H__ +} // namespace common +#endif //__COMMON_SEDA_METRICS_STAGE_H__ diff --git a/deps/common/seda/seda_config.cpp b/deps/common/seda/seda_config.cpp index adab1bc9d85320aed79ec305471e06974bba519a..c46a04487fa989dd76dee0c2ae1673920fea8514 100644 --- a/deps/common/seda/seda_config.cpp +++ b/deps/common/seda/seda_config.cpp @@ -28,9 +28,8 @@ namespace common { SedaConfig *SedaConfig::instance_ = NULL; - - -SedaConfig *&SedaConfig::get_instance() { +SedaConfig *&SedaConfig::get_instance() +{ if (instance_ == 
NULL) { instance_ = new SedaConfig(); ASSERT((instance_ != NULL), "failed to allocate SedaConfig"); @@ -39,12 +38,14 @@ SedaConfig *&SedaConfig::get_instance() { } // Constructor -SedaConfig::SedaConfig() : cfg_file_(), cfg_str_(), thread_pools_(), stages_() { +SedaConfig::SedaConfig() : cfg_file_(), cfg_str_(), thread_pools_(), stages_() +{ return; } // Destructor -SedaConfig::~SedaConfig() { +SedaConfig::~SedaConfig() +{ ASSERT(instance_, "Instance should not be null"); // check to see if clean-up is necessary if ((!thread_pools_.empty()) || (!stages_.empty())) { @@ -55,7 +56,8 @@ SedaConfig::~SedaConfig() { } // Set the file holding the configuration -void SedaConfig::set_cfg_filename(const char *filename) { +void SedaConfig::set_cfg_filename(const char *filename) +{ cfg_str_.clear(); cfg_file_.clear(); if (filename != NULL) { @@ -65,7 +67,8 @@ void SedaConfig::set_cfg_filename(const char *filename) { } // Set the string holding the configuration -void SedaConfig::set_cfg_string(const char *config_str) { +void SedaConfig::set_cfg_string(const char *config_str) +{ cfg_str_.clear(); cfg_file_.clear(); if (config_str != NULL) { @@ -75,7 +78,8 @@ void SedaConfig::set_cfg_string(const char *config_str) { } // Parse config file or string -SedaConfig::status_t SedaConfig::parse() { +SedaConfig::status_t SedaConfig::parse() +{ // first parse the config try { // skip parse in this implementation @@ -91,7 +95,8 @@ SedaConfig::status_t SedaConfig::parse() { } // instantiate the parsed SEDA configuration -SedaConfig::status_t SedaConfig::instantiate_cfg() { +SedaConfig::status_t SedaConfig::instantiate_cfg() +{ status_t stat = SUCCESS; // instantiate the configuration @@ -101,7 +106,8 @@ SedaConfig::status_t SedaConfig::instantiate_cfg() { } // start the configuration - puts the stages_ into action -SedaConfig::status_t SedaConfig::start() { +SedaConfig::status_t SedaConfig::start() +{ status_t stat = SUCCESS; ASSERT(thread_pools_.size(), "Configuration not yet instantiated"); @@ -127,7 +133,8 @@ SedaConfig::status_t SedaConfig::start() { } // Initialize the thread_pools_ and stages_ -SedaConfig::status_t SedaConfig::init() { +SedaConfig::status_t SedaConfig::init() +{ status_t stat = SUCCESS; // check the preconditions @@ -150,7 +157,8 @@ SedaConfig::status_t SedaConfig::init() { } // Clean-up the threadpool and stages_ -void SedaConfig::cleanup() { +void SedaConfig::cleanup() +{ // first disconnect all stages_ if (stages_.empty() == false) { std::map::iterator iter = stages_.begin(); @@ -171,9 +179,9 @@ void SedaConfig::cleanup() { clear_config(); } -void SedaConfig::init_event_history() { - std::map base_section = - get_properties()->get(SEDA_BASE_NAME); +void SedaConfig::init_event_history() +{ + std::map base_section = get_properties()->get(SEDA_BASE_NAME); std::map::iterator it; std::string key; @@ -198,16 +206,15 @@ void SedaConfig::init_event_history() { } get_max_event_hops() = max_event_hops; - LOG_INFO("Successfully init_event_history, EventHistory:%d, MaxEventHops:%u", - (int) ev_hist, max_event_hops); + LOG_INFO("Successfully init_event_history, EventHistory:%d, MaxEventHops:%u", (int)ev_hist, max_event_hops); return; } -SedaConfig::status_t SedaConfig::init_thread_pool() { +SedaConfig::status_t SedaConfig::init_thread_pool() +{ try { - std::map base_section = - get_properties()->get(SEDA_BASE_NAME); + std::map base_section = get_properties()->get(SEDA_BASE_NAME); std::map::iterator it; std::string key; @@ -239,8 +246,7 @@ SedaConfig::status_t SedaConfig::init_thread_pool() { int 
thread_count = 1; str_to_val(count_str, thread_count); if (thread_count < 1) { - LOG_INFO("Thread number of %s is %d, it is same as cpu's cores.", - thread_name.c_str(), cpu_num); + LOG_INFO("Thread number of %s is %d, it is same as cpu's cores.", thread_name.c_str(), cpu_num); thread_count = cpu_num; } const int max_thread_count = 1000000; @@ -249,7 +255,7 @@ SedaConfig::status_t SedaConfig::init_thread_pool() { return INITFAIL; } - Threadpool * thread_pool = new Threadpool(thread_count, thread_name); + Threadpool *thread_pool = new Threadpool(thread_count, thread_name); if (thread_pool == NULL) { LOG_ERROR("Failed to new %s threadpool\n", thread_name.c_str()); return INITFAIL; @@ -257,9 +263,8 @@ SedaConfig::status_t SedaConfig::init_thread_pool() { thread_pools_[thread_name] = thread_pool; } - if (thread_pools_.find(DEFAULT_THREAD_POOL) == thread_pools_.end()) { - LOG_ERROR("There is no default thread pool %s, please add it.", - DEFAULT_THREAD_POOL); + if (thread_pools_.find(DEFAULT_THREAD_POOL) == thread_pools_.end()) { + LOG_ERROR("There is no default thread pool %s, please add it.", DEFAULT_THREAD_POOL); return INITFAIL; } @@ -279,39 +284,42 @@ SedaConfig::status_t SedaConfig::init_thread_pool() { return SUCCESS; } -std::string SedaConfig::get_thread_pool(std::string &stage_name) { +std::string SedaConfig::get_thread_pool(std::string &stage_name) +{ std::string ret = DEFAULT_THREAD_POOL; // Get thread pool - std::map stage_section = - get_properties()->get(stage_name); + std::map stage_section = get_properties()->get(stage_name); std::map::iterator itt; std::string thread_pool_id = THREAD_POOL_ID; itt = stage_section.find(thread_pool_id); if (itt == stage_section.end()) { - LOG_INFO("Not set thread_pool_id for %s, use default threadpool %s", - stage_name.c_str(), DEFAULT_THREAD_POOL); + LOG_INFO("Not set thread_pool_id for %s, use default threadpool %s", stage_name.c_str(), DEFAULT_THREAD_POOL); return ret; } std::string thread_name = itt->second; if (thread_name.empty()) { LOG_ERROR("Failed to set %s of the %s, use the default threadpool %s", - thread_pool_id.c_str(), stage_name.c_str(), DEFAULT_THREAD_POOL); + thread_pool_id.c_str(), + stage_name.c_str(), + DEFAULT_THREAD_POOL); return ret; } if (thread_pools_.find(thread_name) == thread_pools_.end()) { LOG_ERROR("The stage %s's threadpool %s is invalid, use the default " "threadpool %s", - stage_name.c_str(), thread_name.c_str(), DEFAULT_THREAD_POOL); + stage_name.c_str(), + thread_name.c_str(), + DEFAULT_THREAD_POOL); } return ret; } -SedaConfig::status_t SedaConfig::init_stages() { +SedaConfig::status_t SedaConfig::init_stages() +{ try { - std::map base_section = - get_properties()->get(SEDA_BASE_NAME); + std::map base_section = get_properties()->get(SEDA_BASE_NAME); std::map::iterator it; std::string key; @@ -328,8 +336,7 @@ SedaConfig::status_t SedaConfig::init_stages() { split_tag.assign(1, Ini::CFG_DELIMIT_TAG); split_string(it->second, split_tag, stage_names_); - for (std::vector::iterator it = stage_names_.begin(); - it != stage_names_.end(); it++) { + for (std::vector::iterator it = stage_names_.begin(); it != stage_names_.end(); it++) { std::string stage_name(*it); std::string thread_name = get_thread_pool(stage_name); @@ -344,13 +351,11 @@ SedaConfig::status_t SedaConfig::init_stages() { stages_[stage_name] = stage; stage->set_pool(t); - LOG_INFO("Stage %s use threadpool %s.", - stage_name.c_str(), thread_name.c_str()); - } // end for stage + LOG_INFO("Stage %s use threadpool %s.", stage_name.c_str(), 
thread_name.c_str()); + } // end for stage } catch (std::exception &e) { - LOG_ERROR("Failed to parse stages information, please check, err:%s", - e.what()); + LOG_ERROR("Failed to parse stages information, please check, err:%s", e.what()); clear_config(); return INITFAIL; } @@ -364,16 +369,15 @@ SedaConfig::status_t SedaConfig::init_stages() { return SUCCESS; } -SedaConfig::status_t SedaConfig::gen_next_stages() { +SedaConfig::status_t SedaConfig::gen_next_stages() +{ try { - for (std::vector::iterator it_name = stage_names_.begin(); - it_name != stage_names_.end(); it_name++) { + for (std::vector::iterator it_name = stage_names_.begin(); it_name != stage_names_.end(); it_name++) { std::string stage_name(*it_name); Stage *stage = stages_[stage_name]; - std::map stage_section = - get_properties()->get(stage_name); + std::map stage_section = get_properties()->get(stage_name); std::map::iterator it; std::string next_stage_id = NEXT_STAGES; it = stage_section.find(next_stage_id); @@ -388,15 +392,15 @@ SedaConfig::status_t SedaConfig::gen_next_stages() { split_tag.assign(1, Ini::CFG_DELIMIT_TAG); split_string(next_stage_names, split_tag, next_stage_name_list); - for (std::vector::iterator next_it = - next_stage_name_list.begin(); - next_it != next_stage_name_list.end(); next_it++) { + for (std::vector::iterator next_it = next_stage_name_list.begin(); + next_it != next_stage_name_list.end(); + next_it++) { std::string &next_stage_name = *next_it; Stage *next_stage = stages_[next_stage_name]; stage->push_stage(next_stage); } - } // end for stage + } // end for stage } catch (std::exception &e) { LOG_ERROR("Failed to get next stages"); clear_config(); @@ -406,7 +410,8 @@ SedaConfig::status_t SedaConfig::gen_next_stages() { } // instantiate the thread_pools_ and stages_ -SedaConfig::status_t SedaConfig::instantiate() { +SedaConfig::status_t SedaConfig::instantiate() +{ init_event_history(); @@ -432,7 +437,8 @@ SedaConfig::status_t SedaConfig::instantiate() { } // delete all thread_pools_ and stages_ -void SedaConfig::clear_config() { +void SedaConfig::clear_config() +{ // delete stages_ std::map::iterator s_iter = stages_.begin(); std::map::iterator s_end = stages_.end(); @@ -441,8 +447,7 @@ void SedaConfig::clear_config() { Stage *stg = s_iter->second; LOG_INFO("Stage %s deleted.", stg->get_name()); - ASSERT((!stg->is_connected()), "%s%s", "Stage connected in clear_config ", - stg->get_name()); + ASSERT((!stg->is_connected()), "%s%s", "Stage connected in clear_config ", stg->get_name()); delete stg; s_iter->second = NULL; } @@ -466,23 +471,25 @@ void SedaConfig::clear_config() { LOG_INFO("Seda thread pools released"); } -void SedaConfig::get_stage_names(std::vector &names) const { +void SedaConfig::get_stage_names(std::vector &names) const +{ names = stage_names_; } -void SedaConfig::get_stage_queue_status(std::vector &stats) const { - for (std::map::const_iterator i = stages_.begin(); - i != stages_.end(); ++i) { +void SedaConfig::get_stage_queue_status(std::vector &stats) const +{ + for (std::map::const_iterator i = stages_.begin(); i != stages_.end(); ++i) { Stage *stg = (*i).second; stats.push_back(stg->qlen()); } } // Global seda config object -SedaConfig *&get_seda_config() { +SedaConfig *&get_seda_config() +{ static SedaConfig *seda_config = NULL; return seda_config; } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/seda/seda_config.h b/deps/common/seda/seda_config.h index 
ab20702bba3e904e8edaf121ec83156b45685a45..1d260911688ff134c75af71620e5ee5bb326c173 100644 --- a/deps/common/seda/seda_config.h +++ b/deps/common/seda/seda_config.h @@ -26,8 +26,7 @@ See the Mulan PSL v2 for more details. */ namespace common { -//keywords of sedaconfig - +// keywords of sedaconfig /** * A class to configure seda stages @@ -45,7 +44,7 @@ namespace common { class SedaConfig { - public: +public: typedef enum { SUCCESS = 0, INITFAIL, PARSEFAIL } status_t; static SedaConfig *&get_instance(); @@ -157,7 +156,7 @@ class SedaConfig { std::map::iterator begin(); std::map::iterator end(); - private: +private: // Constructor SedaConfig(); @@ -192,7 +191,7 @@ class SedaConfig { void init_event_history(); SedaConfig &operator=(const SedaConfig &cevtout); - + static SedaConfig *instance_; // In old logic, SedaConfig will parse seda configure file @@ -203,18 +202,20 @@ class SedaConfig { std::map thread_pools_; std::map stages_; std::vector stage_names_; - }; -inline std::map::iterator SedaConfig::begin() { +inline std::map::iterator SedaConfig::begin() +{ return stages_.begin(); } -inline std::map::iterator SedaConfig::end() { +inline std::map::iterator SedaConfig::end() +{ return stages_.end(); } -inline Stage *SedaConfig::get_stage(const char *stagename) { +inline Stage *SedaConfig::get_stage(const char *stagename) +{ if (stagename) { std::string sname(stagename); return stages_[stagename]; @@ -228,5 +229,5 @@ SedaConfig *&get_seda_config(); bool &get_event_history_flag(); u32_t &get_max_event_hops(); -} //namespace common -#endif //__COMMON_SEDA_SEDA_CONFIG_H__ +} // namespace common +#endif //__COMMON_SEDA_SEDA_CONFIG_H__ diff --git a/deps/common/seda/seda_defs.h b/deps/common/seda/seda_defs.h index c78cb9b96b9bc9410b75a1050f16b17c1e572112..045bff92f8d6d3ee76da63dde4de992b7c60ffa8 100644 --- a/deps/common/seda/seda_defs.h +++ b/deps/common/seda/seda_defs.h @@ -15,7 +15,7 @@ See the Mulan PSL v2 for more details. */ #ifndef __COMMON_SEDA_SEDA_DEFS_H__ #define __COMMON_SEDA_SEDA_DEFS_H__ -#define SEDA_BASE_NAME "SEDA_BASE" +#define SEDA_BASE_NAME "SEDA_BASE" #define THREAD_POOLS_NAME "ThreadPools" #define STAGES "STAGES" @@ -30,4 +30,4 @@ See the Mulan PSL v2 for more details. */ #define DEFAULT_THREAD_POOL "DefaultThreads" #define METRCS_REPORT_INTERVAL "MetricsReportInterval" -#endif //__COMMON_SEDA_SEDA_DEFS_H__ +#endif //__COMMON_SEDA_SEDA_DEFS_H__ diff --git a/deps/common/seda/stage.cpp b/deps/common/seda/stage.cpp index 42eb1e5897fa82cfe1a275e1367e3cc0e76ed77f..5ee48ef1648896b6ca2eb63a862d2910ff484678 100644 --- a/deps/common/seda/stage.cpp +++ b/deps/common/seda/stage.cpp @@ -26,7 +26,6 @@ See the Mulan PSL v2 for more details. */ #include "common/seda/thread_pool.h" namespace common { - /** * Constructor * @param[in] tag The label that identifies this stage. 
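// Sketch of driving SedaConfig by hand (illustrative; "observer.ini" and "MetricsStage"
// are example names, and error handling is reduced to early returns).
#include "common/seda/seda_config.h"
#include "common/seda/stage.h"

int load_pipeline()
{
  common::SedaConfig *config = common::SedaConfig::get_instance();
  config->set_cfg_filename("observer.ini");
  if (config->parse() != common::SedaConfig::SUCCESS) {
    return -1;
  }
  if (config->init() != common::SedaConfig::SUCCESS) {  // sets up thread pools and stages
    return -1;
  }
  common::Stage *metrics = config->get_stage("MetricsStage");
  (void)metrics;  // e.g. hand it to other components
  return 0;
}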
@@ -35,8 +34,8 @@ namespace common { * @post event queue is empty * @post stage is not connected */ -Stage::Stage(const char *tag) - : next_stage_list_(), event_list_(), connected_(false), event_ref_(0) { +Stage::Stage(const char *tag) : next_stage_list_(), event_list_(), connected_(false), event_ref_(0) +{ LOG_TRACE("%s", "enter"); assert(tag != NULL); @@ -52,7 +51,8 @@ Stage::Stage(const char *tag) * @pre stage is not connected * @post pending events are deleted and stage is destroyed */ -Stage::~Stage() { +Stage::~Stage() +{ LOG_TRACE("%s", "enter"); assert(!connected_); MUTEX_LOCK(&list_mutex_); @@ -85,7 +85,8 @@ Stage::~Stage() { * @post th_pool_ == pool * @return true if the connection succeeded, else false */ -bool Stage::connect() { +bool Stage::connect() +{ LOG_TRACE("%s%s", "enter", stage_name_); assert(!connected_); assert(th_pool_ != NULL); @@ -126,7 +127,8 @@ bool Stage::connect() { * @post th_pool_ NULL * @post stage is not connected */ -void Stage::disconnect() { +void Stage::disconnect() +{ assert(connected_ == true); LOG_TRACE("%s%s", "enter", stage_name_); @@ -151,7 +153,8 @@ void Stage::disconnect() { * @post event added to the end of event queue * @post event must not be de-referenced by caller after return */ -void Stage::add_event(StageEvent *event) { +void Stage::add_event(StageEvent *event) +{ assert(event != NULL); MUTEX_LOCK(&list_mutex_); @@ -174,7 +177,8 @@ void Stage::add_event(StageEvent *event) { * Query length of queue * @return length of event queue. */ -unsigned long Stage::qlen() const { +unsigned long Stage::qlen() const +{ unsigned long res; MUTEX_LOCK(&list_mutex_); @@ -187,7 +191,8 @@ unsigned long Stage::qlen() const { * Query whether the queue is empty * @return \c true if the queue is empty; \c false otherwise */ -bool Stage::qempty() const { +bool Stage::qempty() const +{ bool empty = false; MUTEX_LOCK(&list_mutex_); @@ -203,7 +208,8 @@ bool Stage::qempty() const { * @return first event on queue. * @post first event on queue is removed from queue. 
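// Illustrative hand-off into a stage's queue (next_stage is assumed to be a connected
// Stage* obtained elsewhere, e.g. from SedaConfig::get_stage(); ev is some StageEvent
// subclass instance owned by the caller until this call).
#include "common/seda/stage.h"
#include "common/seda/stage_event.h"

void enqueue_work(common::Stage *next_stage, common::StageEvent *ev)
{
  next_stage->add_event(ev);  // ownership passes to the stage; its threadpool calls handle_event(ev)
  // qlen()/qempty() can be polled to watch back-pressure on the queue:
  unsigned long backlog = next_stage->qlen();
  (void)backlog;
}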
*/ -StageEvent *Stage::remove_event() { +StageEvent *Stage::remove_event() +{ MUTEX_LOCK(&list_mutex_); assert(!event_list_.empty()); @@ -220,7 +226,8 @@ StageEvent *Stage::remove_event() { * * @post event ref count on stage is decremented */ -void Stage::release_event() { +void Stage::release_event() +{ MUTEX_LOCK(&list_mutex_); event_ref_--; if (!connected_ && event_ref_ == 0) { @@ -229,4 +236,4 @@ void Stage::release_event() { MUTEX_UNLOCK(&list_mutex_); } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/seda/stage.h b/deps/common/seda/stage.h index 3e075695d516b3779b565f888df257477e0c56c4..af478f01b7b2c2d4636ed0004123ab7b9922c345 100644 --- a/deps/common/seda/stage.h +++ b/deps/common/seda/stage.h @@ -89,7 +89,7 @@ class Stage { // public interface operations - public: +public: /** * Destructor * @pre stage is not connected @@ -109,7 +109,10 @@ class Stage { * Return the Threadpool object * @return reference to the Threadpool for this Stage */ - Threadpool *get_pool() { return th_pool_; } + Threadpool *get_pool() + { + return th_pool_; + } /** * Push stage to the list of the next stages @@ -195,7 +198,10 @@ class Stage { * Query whether stage is connected * @return true if stage is connected */ - bool is_connected() const { return connected_; } + bool is_connected() const + { + return connected_; + } /** * Perform Stage-specific processing for an event @@ -223,12 +229,13 @@ class Stage { * A stage only need to implement this interface if the down-stream * stages support event timeout detection. */ - virtual void timeout_event(StageEvent *event, CallbackContext *context) { + virtual void timeout_event(StageEvent *event, CallbackContext *context) + { LOG_INFO("get a timed out evnet in %s timeout_event\n", stage_name_); this->callback_event(event, context); } - protected: +protected: /** * Constructor * @param[in] tag The label that identifies this stage. @@ -264,7 +271,10 @@ class Stage { * @pre Stage not connected * @return TRUE if and only if outputs are valid and init succeeded. */ - virtual bool initialize() { return true; } + virtual bool initialize() + { + return true; + } /** * set properties for this object @@ -272,7 +282,10 @@ class Stage { * @post initializing the class members * @return Stage instantiated object */ - virtual bool set_properties() { return true; } + virtual bool set_properties() + { + return true; + } /** * Prepare to disconnect the stage. @@ -281,50 +294,56 @@ class Stage { * from the pipeline. Most stages will not need to implement * this function. */ - virtual void disconnect_prepare() { return; } + virtual void disconnect_prepare() + { + return; + } /** * Cleanup stage after disconnection * After disconnection is completed, cleanup any resources held by the * stage and prepare for destruction or re-initialization. */ - virtual void cleanup() { return; } + virtual void cleanup() + { + return; + } // pipeline state - std::list next_stage_list_; // next stage(s) in the pipeline + std::list next_stage_list_; // next stage(s) in the pipeline // implementation state - char *stage_name_; // name of stage + char *stage_name_; // name of stage friend class Threadpool; - private: - std::deque event_list_; // event queue - mutable pthread_mutex_t list_mutex_; // protects the event queue - pthread_cond_t disconnect_cond_; // wait here for disconnect - bool connected_; // is stage connected to pool? 
- unsigned long event_ref_; // # of outstanding events - Threadpool *th_pool_ = nullptr; // Threadpool for this stage - +private: + std::deque event_list_; // event queue + mutable pthread_mutex_t list_mutex_; // protects the event queue + pthread_cond_t disconnect_cond_; // wait here for disconnect + bool connected_; // is stage connected to pool? + unsigned long event_ref_; // # of outstanding events + Threadpool *th_pool_ = nullptr; // Threadpool for this stage }; -inline void Stage::set_pool(Threadpool *th) { - ASSERT((th != NULL), "threadpool not available for stage %s", - this->get_name()); - ASSERT(!connected_, "attempt to set threadpool while connected: %s", - this->get_name()); +inline void Stage::set_pool(Threadpool *th) +{ + ASSERT((th != NULL), "threadpool not available for stage %s", this->get_name()); + ASSERT(!connected_, "attempt to set threadpool while connected: %s", this->get_name()); th_pool_ = th; } -inline void Stage::push_stage(Stage *st) { - ASSERT((st != NULL), "next stage not available for stage %s", - this->get_name()); - ASSERT(!connected_, "attempt to set push stage while connected: %s", - this->get_name()); +inline void Stage::push_stage(Stage *st) +{ + ASSERT((st != NULL), "next stage not available for stage %s", this->get_name()); + ASSERT(!connected_, "attempt to set push stage while connected: %s", this->get_name()); next_stage_list_.push_back(st); } -inline const char *Stage::get_name() { return stage_name_; } +inline const char *Stage::get_name() +{ + return stage_name_; +} -} //namespace common -#endif // __COMMON_SEDA_STAGE_H__ +} // namespace common +#endif // __COMMON_SEDA_STAGE_H__ diff --git a/deps/common/seda/stage_event.cpp b/deps/common/seda/stage_event.cpp index ddaacf00c9a049a8d8697aa10fc8bb39670cbccb..1a92f8f7b49a8a2363a30549ba97516bb87702f1 100644 --- a/deps/common/seda/stage_event.cpp +++ b/deps/common/seda/stage_event.cpp @@ -26,12 +26,12 @@ See the Mulan PSL v2 for more details. 
*/ namespace common { // Constructor -StageEvent::StageEvent() - : comp_cb_(NULL), ud_(NULL), cb_flag_(false), history_(NULL), stage_hops_(0), - tm_info_(NULL) {} +StageEvent::StageEvent() : comp_cb_(NULL), ud_(NULL), cb_flag_(false), history_(NULL), stage_hops_(0), tm_info_(NULL) +{} // Destructor -StageEvent::~StageEvent() { +StageEvent::~StageEvent() +{ // clear all pending callbacks while (comp_cb_) { CompletionCallback *top = comp_cb_; @@ -52,7 +52,8 @@ StageEvent::~StageEvent() { } // Processing for this event is done; callbacks executed -void StageEvent::done() { +void StageEvent::done() +{ CompletionCallback *top; if (comp_cb_) { @@ -65,7 +66,8 @@ void StageEvent::done() { } // Processing for this event is done; callbacks executed immediately -void StageEvent::done_immediate() { +void StageEvent::done_immediate() +{ CompletionCallback *top; if (comp_cb_) { @@ -79,7 +81,8 @@ void StageEvent::done_immediate() { } } -void StageEvent::done_timeout() { +void StageEvent::done_timeout() +{ CompletionCallback *top; if (comp_cb_) { @@ -94,20 +97,26 @@ void StageEvent::done_timeout() { } // Push the completion callback onto the stack -void StageEvent::push_callback(CompletionCallback *cb) { +void StageEvent::push_callback(CompletionCallback *cb) +{ cb->push_callback(comp_cb_); comp_cb_ = cb; } -void StageEvent::set_user_data(UserData *u) { +void StageEvent::set_user_data(UserData *u) +{ ud_ = u; return; } -UserData *StageEvent::get_user_data() { return ud_; } +UserData *StageEvent::get_user_data() +{ + return ud_; +} // Add stage to list of stages which have handled this event -void StageEvent::save_stage(Stage *stg, HistType type) { +void StageEvent::save_stage(Stage *stg, HistType type) +{ if (!history_) { history_ = new std::list; } @@ -118,16 +127,19 @@ void StageEvent::save_stage(Stage *stg, HistType type) { } } -void StageEvent::set_timeout_info(time_t deadline) { +void StageEvent::set_timeout_info(time_t deadline) +{ TimeoutInfo *tmi = new TimeoutInfo(deadline); set_timeout_info(tmi); } -void StageEvent::set_timeout_info(const StageEvent &ev) { +void StageEvent::set_timeout_info(const StageEvent &ev) +{ set_timeout_info(ev.tm_info_); } -void StageEvent::set_timeout_info(TimeoutInfo *tmi) { +void StageEvent::set_timeout_info(TimeoutInfo *tmi) +{ // release the previous timeout info if (tm_info_) { tm_info_->detach(); @@ -139,7 +151,8 @@ void StageEvent::set_timeout_info(TimeoutInfo *tmi) { } } -bool StageEvent::has_timed_out() { +bool StageEvent::has_timed_out() +{ if (!tm_info_) { return false; } @@ -148,15 +161,17 @@ bool StageEvent::has_timed_out() { } // Accessor function which wraps value for max hops an event is allowed -u32_t &get_max_event_hops() { +u32_t &get_max_event_hops() +{ static u32_t max_event_hops = 0; return max_event_hops; } // Accessor function which wraps value for event history flag -bool &get_event_history_flag() { +bool &get_event_history_flag() +{ static bool event_history_flag = false; return event_history_flag; } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/seda/stage_event.h b/deps/common/seda/stage_event.h index 0ef50f85996256950ba60ae919f71482316d5908..16c152dac31183a0c4ed190ce7e4d439f6061944 100644 --- a/deps/common/seda/stage_event.h +++ b/deps/common/seda/stage_event.h @@ -58,7 +58,7 @@ class TimeoutInfo; class StageEvent { - public: +public: // Interface for collecting debugging information typedef enum { HANDLE_EV = 0, CALLBACK_EV, TIMEOUT_EV } HistType; @@ -106,7 
+106,10 @@ class StageEvent { UserData *get_user_data(); // True if event represents a callback - bool is_callback() { return cb_flag_; } + bool is_callback() + { + return cb_flag_; + } // Add stage to list of stages which have handled this event void save_stage(Stage *stg, HistType type); @@ -123,23 +126,28 @@ class StageEvent { // If the event has timed out (and should be dropped) bool has_timed_out(); - private: +private: typedef std::pair HistEntry; // Interface to allow callbacks to be run on target stage's threads - void mark_callback() { cb_flag_ = true; } - void clear_callback() { cb_flag_ = false; } - + void mark_callback() + { + cb_flag_ = true; + } + void clear_callback() + { + cb_flag_ = false; + } + // Set a timeout info into the event void set_timeout_info(TimeoutInfo *tmi); - CompletionCallback *comp_cb_; // completion callback stack for this event - UserData *ud_; // user data associated with event by caller - bool cb_flag_; // true if this event is a callback - std::list *history_; // List of stages which have handled ev + CompletionCallback *comp_cb_; // completion callback stack for this event + UserData *ud_; // user data associated with event by caller + bool cb_flag_; // true if this event is a callback + std::list *history_; // List of stages which have handled ev u32_t stage_hops_; // Number of stages which have handled ev - TimeoutInfo *tm_info_; // the timeout info for this event - + TimeoutInfo *tm_info_; // the timeout info for this event }; /** @@ -155,15 +163,18 @@ class StageEvent { * originating stage can access the \c UserData member to recover its state. */ class UserData { - public: +public: /** * \brief A virtual destructor to enable the use of dynamic casts. */ - virtual ~UserData() { return; } + virtual ~UserData() + { + return; + } }; bool &get_event_history_flag(); u32_t &get_max_event_hops(); -} //namespace common -#endif // __COMMON_SEDA_STAGE_EVENT_H__ +} // namespace common +#endif // __COMMON_SEDA_STAGE_EVENT_H__ diff --git a/deps/common/seda/stage_factory.h b/deps/common/seda/stage_factory.h index 6b42f0e359d2ed3645c6a67743cfb0347852a902..6ca5a57fdb990305f393e37d38111ab5490dde9b 100644 --- a/deps/common/seda/stage_factory.h +++ b/deps/common/seda/stage_factory.h @@ -19,10 +19,9 @@ See the Mulan PSL v2 for more details. 
*/ #include "common/seda/stage.h" namespace common { - class Stage; typedef ClassFactory StageFactory; -} //namespace common -#endif // __COMMON_SEDA_STAGE_FACTORY_H__ +} // namespace common +#endif // __COMMON_SEDA_STAGE_FACTORY_H__ diff --git a/deps/common/seda/thread_pool.cpp b/deps/common/seda/thread_pool.cpp index 75178d6594254da4f39d45047cdbaa7ad2436078..5652447b9c2441f6b43c13f5ba5205262e30cd54 100644 --- a/deps/common/seda/thread_pool.cpp +++ b/deps/common/seda/thread_pool.cpp @@ -31,8 +31,14 @@ extern bool &get_event_history_flag(); * @post thread pool has threads threads running */ Threadpool::Threadpool(unsigned int threads, const std::string &name) - : run_queue_(), eventhist_(get_event_history_flag()), nthreads_(0), - threads_to_kill_(0), n_idles_(0), killer_("KillThreads"), name_(name) { + : run_queue_(), + eventhist_(get_event_history_flag()), + nthreads_(0), + threads_to_kill_(0), + n_idles_(0), + killer_("KillThreads"), + name_(name) +{ LOG_TRACE("Enter, thread number:%d", threads); MUTEX_INIT(&run_mutex_, NULL); COND_INIT(&run_cond_, NULL); @@ -48,7 +54,8 @@ Threadpool::Threadpool(unsigned int threads, const std::string &name) * * @post all threads are destroyed and pool is destroyed */ -Threadpool::~Threadpool() { +Threadpool::~Threadpool() +{ LOG_TRACE("%s", "enter"); // kill all the remaining service threads kill_threads(nthreads_); @@ -65,7 +72,8 @@ Threadpool::~Threadpool() { * Query number of threads. * @return number of threads in the thread pool. */ -unsigned int Threadpool::num_threads() { +unsigned int Threadpool::num_threads() +{ MUTEX_LOCK(&thread_mutex_); unsigned int result = nthreads_; MUTEX_UNLOCK(&thread_mutex_); @@ -80,7 +88,8 @@ unsigned int Threadpool::num_threads() { * <= threads * @return number of thread successfully created */ -unsigned int Threadpool::add_threads(unsigned int threads) { +unsigned int Threadpool::add_threads(unsigned int threads) +{ unsigned int i; pthread_t pthread; pthread_attr_t pthread_attrs; @@ -93,8 +102,7 @@ unsigned int Threadpool::add_threads(unsigned int threads) { // attempt to start the requested number of threads for (i = 0; i < threads; i++) { - int stat = pthread_create(&pthread, &pthread_attrs, Threadpool::run_thread, - (void *) this); + int stat = pthread_create(&pthread, &pthread_attrs, Threadpool::run_thread, (void *)this); if (stat != 0) { LOG_WARN("Failed to create one thread\n"); break; @@ -117,7 +125,8 @@ unsigned int Threadpool::add_threads(unsigned int threads) { * <= threads * @return number of threads successfully killed. */ -unsigned int Threadpool::kill_threads(unsigned int threads) { +unsigned int Threadpool::kill_threads(unsigned int threads) +{ LOG_TRACE("%s%d", "enter - threads to kill", threads); MUTEX_LOCK(&thread_mutex_); @@ -156,7 +165,8 @@ unsigned int Threadpool::kill_threads(unsigned int threads) { * Reduces the count of active threads, and, if this is the last pending * kill, signals the waiting kill_threads method. 
*/ -void Threadpool::thread_kill() { +void Threadpool::thread_kill() +{ MUTEX_LOCK(&thread_mutex_); nthreads_--; @@ -178,7 +188,8 @@ void Threadpool::thread_kill() { * @pre to_kill <= current number of threads * @return number of kill thread events successfully scheduled */ -unsigned int Threadpool::gen_kill_thread_events(unsigned int to_kill) { +unsigned int Threadpool::gen_kill_thread_events(unsigned int to_kill) +{ LOG_TRACE("%s%d", "enter", to_kill); assert(MUTEX_TRYLOCK(&thread_mutex_) != 0); assert(to_kill <= nthreads_); @@ -206,7 +217,8 @@ unsigned int Threadpool::gen_kill_thread_events(unsigned int to_kill) { * @pre stage must have a non-empty queue. * @post stage is scheduled on the run queue. */ -void Threadpool::schedule(Stage *stage) { +void Threadpool::schedule(Stage *stage) +{ assert(!stage->qempty()); MUTEX_LOCK(&run_mutex_); @@ -224,23 +236,26 @@ void Threadpool::schedule(Stage *stage) { } // Get name of thread pool -const std::string &Threadpool::get_name() { return name_; } +const std::string &Threadpool::get_name() +{ + return name_; +} /** * Internal thread control function * Function which contains the control loop for each service thread. * Should not be called except when a thread is created. */ -void *Threadpool::run_thread(void *pool_ptr) { - Threadpool *pool = (Threadpool *) pool_ptr; +void *Threadpool::run_thread(void *pool_ptr) +{ + Threadpool *pool = (Threadpool *)pool_ptr; // save thread pool pointer set_thread_pool_ptr(pool); // this is not portable, but is easier to map to LWP s64_t threadid = gettid(); - LOG_INFO("threadid = %llx, threadname = %s\n", threadid, - pool->get_name().c_str()); + LOG_INFO("threadid = %llx, threadname = %s\n", threadid, pool->get_name().c_str()); // enter a loop where we continuously look for events from Stages on // the run_queue_ and handle the event. 
@@ -292,8 +307,7 @@ void *Threadpool::run_thread(void *pool_ptr) { run_stage->release_event(); } LOG_TRACE("exit %p", pool_ptr); - LOG_INFO("Begin to exit, threadid = %llx, threadname = %s", threadid, - pool->get_name().c_str()); + LOG_INFO("Begin to exit, threadid = %llx, threadname = %s", threadid, pool->get_name().c_str()); // the dummy compiler need this pthread_exit(NULL); @@ -301,20 +315,26 @@ void *Threadpool::run_thread(void *pool_ptr) { pthread_key_t Threadpool::pool_ptr_key_; -void Threadpool::create_pool_key() { +void Threadpool::create_pool_key() +{ // init the thread specific to store thread pool pointer // this is called in main thread, so no pthread_once is needed pthread_key_create(&pool_ptr_key_, NULL); } -void Threadpool::del_pool_key() { pthread_key_delete(pool_ptr_key_); } +void Threadpool::del_pool_key() +{ + pthread_key_delete(pool_ptr_key_); +} -void Threadpool::set_thread_pool_ptr(const Threadpool *thd_Pool) { +void Threadpool::set_thread_pool_ptr(const Threadpool *thd_Pool) +{ pthread_setspecific(pool_ptr_key_, thd_Pool); } -const Threadpool *Threadpool::get_thread_pool_ptr() { - return (const Threadpool *) pthread_getspecific(pool_ptr_key_); +const Threadpool *Threadpool::get_thread_pool_ptr() +{ + return (const Threadpool *)pthread_getspecific(pool_ptr_key_); } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/seda/thread_pool.h b/deps/common/seda/thread_pool.h index d19e0ee77a84bdc021b29e3da3f6e05265c21c06..c1be81036688cb1fefdd3fbe89e0435b0a6da168 100644 --- a/deps/common/seda/thread_pool.h +++ b/deps/common/seda/thread_pool.h @@ -42,7 +42,7 @@ class Stage; */ class Threadpool { - public: +public: // Initialize the static data structures of ThreadPool static void create_pool_key(); @@ -109,8 +109,7 @@ class Threadpool { // Get name of thread pool const std::string &get_name(); - - protected: +protected: /** * Internal thread kill. * Internal operation called only when a thread kill event is processed. @@ -130,7 +129,7 @@ class Threadpool { */ unsigned int gen_kill_thread_events(unsigned int to_kill); - private: +private: /** * Internal thread control function * Function which contains the control loop for each service thread. @@ -145,19 +144,19 @@ class Threadpool { static const Threadpool *get_thread_pool_ptr(); // run queue state - pthread_mutex_t run_mutex_; //< protects the run queue - pthread_cond_t run_cond_; //< wait here for stage to be scheduled - std::deque run_queue_; //< list of stages with work to do - bool eventhist_; //< is event history enabled? + pthread_mutex_t run_mutex_; //< protects the run queue + pthread_cond_t run_cond_; //< wait here for stage to be scheduled + std::deque run_queue_; //< list of stages with work to do + bool eventhist_; //< is event history enabled? 
// thread state - pthread_mutex_t thread_mutex_; //< protects thread state - pthread_cond_t thread_cond_; //< wait here when killing threads - unsigned int nthreads_; //< number of service threads + pthread_mutex_t thread_mutex_; //< protects thread state + pthread_cond_t thread_cond_; //< wait here when killing threads + unsigned int nthreads_; //< number of service threads unsigned int threads_to_kill_; //< number of pending kill events - unsigned int n_idles_; //< number of idle threads - KillThreadStage killer_; //< used to kill threads - std::string name_; //< name of threadpool + unsigned int n_idles_; //< number of idle threads + KillThreadStage killer_; //< used to kill threads + std::string name_; //< name of threadpool // key of thread specific to store thread pool pointer static pthread_key_t pool_ptr_key_; @@ -166,5 +165,5 @@ class Threadpool { friend class KillThreadStage; }; -} //namespace common -#endif // __COMMON_SEDA_THREAD_POOL_H__ +} // namespace common +#endif // __COMMON_SEDA_THREAD_POOL_H__ diff --git a/deps/common/seda/timer_stage.cpp b/deps/common/seda/timer_stage.cpp index 46508ef76033dd740a8151d944e581fb05d56e5c..c2c35562197165456e5ffc4c7977c03e0bf9ef37 100644 --- a/deps/common/seda/timer_stage.cpp +++ b/deps/common/seda/timer_stage.cpp @@ -26,13 +26,11 @@ See the Mulan PSL v2 for more details. */ #include "common/log/log.h" namespace common { -#define TIMEVAL_EQUAL(t1, t2) \ - ((t1.tv_sec == t2.tv_sec) && (t1.tv_usec == t2.tv_usec)) -#define TIMEVAL_LESS_THAN(t1, t2) \ - ((t1.tv_sec < t2.tv_sec) || \ - ((t1.tv_sec == t2.tv_sec) && (t1.tv_usec < t2.tv_usec))) +#define TIMEVAL_EQUAL(t1, t2) ((t1.tv_sec == t2.tv_sec) && (t1.tv_usec == t2.tv_usec)) +#define TIMEVAL_LESS_THAN(t1, t2) ((t1.tv_sec < t2.tv_sec) || ((t1.tv_sec == t2.tv_sec) && (t1.tv_usec < t2.tv_usec))) -struct timeval sub_timeval(const struct timeval *t1, const struct timeval *t2) { +struct timeval sub_timeval(const struct timeval *t1, const struct timeval *t2) +{ struct timeval result; result.tv_sec = t1->tv_sec - t2->tv_sec; result.tv_usec = t1->tv_usec - t2->tv_usec; @@ -43,7 +41,8 @@ struct timeval sub_timeval(const struct timeval *t1, const struct timeval *t2) { return result; } -struct timeval add_timeval(const struct timeval *t1, const struct timeval *t2) { +struct timeval add_timeval(const struct timeval *t1, const struct timeval *t2) +{ struct timeval result; result.tv_sec = t1->tv_sec + t2->tv_sec; result.tv_usec = t1->tv_usec + t2->tv_usec; @@ -54,8 +53,8 @@ struct timeval add_timeval(const struct timeval *t1, const struct timeval *t2) { return result; } -void realtime_to_monotonic(const struct timeval *time_RT, - struct timeval *time_Mono) { +void realtime_to_monotonic(const struct timeval *time_RT, struct timeval *time_Mono) +{ struct timeval time_now; gettimeofday(&time_now, NULL); @@ -74,7 +73,8 @@ void realtime_to_monotonic(const struct timeval *time_RT, time_Mono->tv_usec = time_temp.tv_usec; } -u64_t TimerToken::next_nonce() { +u64_t TimerToken::next_nonce() +{ static u64_t nonce_cntr = 0; static pthread_mutex_t tt_mutex = PTHREAD_MUTEX_INITIALIZER; @@ -85,7 +85,8 @@ u64_t TimerToken::next_nonce() { return n; } -TimerToken::TimerToken() { +TimerToken::TimerToken() +{ struct timeval t; memset(&t, 0, sizeof(struct timeval)); u64_t n = next_nonce(); @@ -93,28 +94,38 @@ TimerToken::TimerToken() { return; } -TimerToken::TimerToken(const struct timeval &t) { +TimerToken::TimerToken(const struct timeval &t) +{ u64_t n = next_nonce(); set(t, n); return; } -TimerToken::TimerToken(const 
TimerToken &tt) { +TimerToken::TimerToken(const TimerToken &tt) +{ set(tt.time, tt.nonce); return; } -void TimerToken::set(const struct timeval &t, u64_t n) { +void TimerToken::set(const struct timeval &t, u64_t n) +{ memcpy(&time, &t, sizeof(struct timeval)); nonce = n; return; } -const struct timeval &TimerToken::get_time() const { return time; } +const struct timeval &TimerToken::get_time() const +{ + return time; +} -u64_t TimerToken::get_nonce() const { return nonce; } +u64_t TimerToken::get_nonce() const +{ + return nonce; +} -bool TimerToken::operator<(const TimerToken &other) const { +bool TimerToken::operator<(const TimerToken &other) const +{ if (TIMEVAL_LESS_THAN(time, other.time)) return true; if (TIMEVAL_EQUAL(time, other.time)) @@ -122,20 +133,22 @@ bool TimerToken::operator<(const TimerToken &other) const { return false; } -TimerToken &TimerToken::operator=(const TimerToken &src) { +TimerToken &TimerToken::operator=(const TimerToken &src) +{ set(src.time, src.nonce); return *this; } -std::string TimerToken::to_string() const { +std::string TimerToken::to_string() const +{ std::string s; std::ostringstream ss(s); ss << time.tv_sec << ":" << time.tv_usec << "-" << nonce; return ss.str(); } -TimerRegisterEvent::TimerRegisterEvent(StageEvent *cb, u64_t time_relative_usec) - : TimerEvent(), timer_cb_(cb), token_() { +TimerRegisterEvent::TimerRegisterEvent(StageEvent *cb, u64_t time_relative_usec) : TimerEvent(), timer_cb_(cb), token_() +{ struct timespec timer_spec; clock_gettime(CLOCK_MONOTONIC, &timer_spec); @@ -151,55 +164,82 @@ TimerRegisterEvent::TimerRegisterEvent(StageEvent *cb, u64_t time_relative_usec) return; } -TimerRegisterEvent::TimerRegisterEvent(StageEvent *cb, - struct timeval &time_absolute) - : TimerEvent(), timer_cb_(cb), token_() { +TimerRegisterEvent::TimerRegisterEvent(StageEvent *cb, struct timeval &time_absolute) + : TimerEvent(), timer_cb_(cb), token_() +{ realtime_to_monotonic(&time_absolute, &timer_when_); return; } -TimerRegisterEvent::~TimerRegisterEvent() { return; } +TimerRegisterEvent::~TimerRegisterEvent() +{ + return; +} -const struct timeval &TimerRegisterEvent::get_time() { return timer_when_; } +const struct timeval &TimerRegisterEvent::get_time() +{ + return timer_when_; +} -StageEvent *TimerRegisterEvent::get_callback_event() { return timer_cb_; } +StageEvent *TimerRegisterEvent::get_callback_event() +{ + return timer_cb_; +} -StageEvent *TimerRegisterEvent::adopt_callback_event() { +StageEvent *TimerRegisterEvent::adopt_callback_event() +{ StageEvent *e = timer_cb_; timer_cb_ = NULL; return e; } -void TimerRegisterEvent::set_cancel_token(const TimerToken &t) { +void TimerRegisterEvent::set_cancel_token(const TimerToken &t) +{ token_ = t; return; } -std::unique_ptr TimerRegisterEvent::get_cancel_token() { +std::unique_ptr TimerRegisterEvent::get_cancel_token() +{ const TimerToken *token_cp = new TimerToken(token_); std::unique_ptr token_ptr(token_cp); return token_ptr; } TimerCancelEvent::TimerCancelEvent(const TimerToken &cancel_token) - : TimerEvent(), token_(cancel_token), cancelled_(false) { + : TimerEvent(), token_(cancel_token), cancelled_(false) +{ return; } -TimerCancelEvent::~TimerCancelEvent() { return; } +TimerCancelEvent::~TimerCancelEvent() +{ + return; +} -const TimerToken &TimerCancelEvent::get_token() { return token_; } +const TimerToken &TimerCancelEvent::get_token() +{ + return token_; +} -void TimerCancelEvent::set_success(bool s) { +void TimerCancelEvent::set_success(bool s) +{ cancelled_ = s; return; } -bool 
TimerCancelEvent::get_success() { return cancelled_; } +bool TimerCancelEvent::get_success() +{ + return cancelled_; +} TimerStage::TimerStage(const char *tag) - : Stage(tag), timer_queue_(&TimerStage::timer_token_less_than), shutdown_(false), - num_events_(0), timer_thread_id_(0) { + : Stage(tag), + timer_queue_(&TimerStage::timer_token_less_than), + shutdown_(false), + num_events_(0), + timer_thread_id_(0) +{ pthread_mutex_init(&timer_mutex_, NULL); pthread_condattr_t condattr; pthread_condattr_init(&condattr); @@ -211,9 +251,9 @@ TimerStage::TimerStage(const char *tag) return; } -TimerStage::~TimerStage() { - for (timer_queue_t::iterator i = timer_queue_.begin(); i != timer_queue_.end(); - ++i) { +TimerStage::~TimerStage() +{ + for (timer_queue_t::iterator i = timer_queue_.begin(); i != timer_queue_.end(); ++i) { delete i->second; } @@ -225,7 +265,8 @@ TimerStage::~TimerStage() { return; } -Stage *TimerStage::make_stage(const std::string &tag) { +Stage *TimerStage::make_stage(const std::string &tag) +{ TimerStage *s = new TimerStage(tag.c_str()); ASSERT(s != NULL, "Failed to instantiate stage."); if (!s->set_properties()) { @@ -237,29 +278,34 @@ Stage *TimerStage::make_stage(const std::string &tag) { return s; } -bool TimerStage::set_properties() { +bool TimerStage::set_properties() +{ // No configuration is stored in the system properties. return true; } -bool TimerStage::initialize() { +bool TimerStage::initialize() +{ // The TimerStage does not send messages to any other stage. ASSERT(next_stage_list_.size() == 0, "Invalid NextStages list."); // Start the thread to maintain the timer const pthread_attr_t *thread_attrs = NULL; - void *thread_args = (void *) this; - int status = pthread_create(&timer_thread_id_, thread_attrs, - &TimerStage::start_timer_thread, thread_args); + void *thread_args = (void *)this; + int status = pthread_create(&timer_thread_id_, thread_attrs, &TimerStage::start_timer_thread, thread_args); if (status != 0) LOG_ERROR("failed to create timer thread: status=%d\n", status); return (status == 0); } -u32_t TimerStage::get_num_events() { return num_events_; } +u32_t TimerStage::get_num_events() +{ + return num_events_; +} -void TimerStage::disconnect_prepare() { +void TimerStage::disconnect_prepare() +{ LOG_INFO("received signal to initiate shutdown_.\n"); pthread_mutex_lock(&timer_mutex_); shutdown_ = true; @@ -275,32 +321,36 @@ void TimerStage::disconnect_prepare() { return; } -void TimerStage::handle_event(StageEvent *event) { +void TimerStage::handle_event(StageEvent *event) +{ TimerEvent *e = dynamic_cast(event); if (e == NULL) { - LOG_WARN("received event of unexpected type: typeid=%s\n", - typeid(*event).name()); - return; // !!! EARLY EXIT !!! + LOG_WARN("received event of unexpected type: typeid=%s\n", typeid(*event).name()); + return; // !!! EARLY EXIT !!! } TimerRegisterEvent *register_ev = dynamic_cast(event); if (register_ev != NULL) { register_timer(*register_ev); - return; // !!! EARLY EXIT !!! + return; // !!! EARLY EXIT !!! } TimerCancelEvent *cancel_ev = dynamic_cast(event); if (cancel_ev != NULL) { cancel_timer(*cancel_ev); - return; // !!! EARLY EXIT !!! + return; // !!! EARLY EXIT !!! 
} return; } -void TimerStage::callback_event(StageEvent *e, CallbackContext *ctx) { return; } +void TimerStage::callback_event(StageEvent *e, CallbackContext *ctx) +{ + return; +} -void TimerStage::register_timer(TimerRegisterEvent ®_ev) { +void TimerStage::register_timer(TimerRegisterEvent ®_ev) +{ const TimerToken tt(reg_ev.get_time()); LOG_TRACE("registering event: token=%s\n", tt.to_string().c_str()); @@ -310,11 +360,10 @@ void TimerStage::register_timer(TimerRegisterEvent ®_ev) { // add the event to the timer queue StageEvent *timer_cb = reg_ev.adopt_callback_event(); - std::pair result = - timer_queue_.insert(std::make_pair(tt, timer_cb)); + std::pair result = timer_queue_.insert(std::make_pair(tt, timer_cb)); ASSERT(result.second, - "Internal error--" - "failed to register timer because token is not unique."); + "Internal error--" + "failed to register timer because token is not unique."); ++num_events_; // if event was added to the head of queue, schedule a timer check @@ -332,7 +381,8 @@ void TimerStage::register_timer(TimerRegisterEvent ®_ev) { return; } -void TimerStage::cancel_timer(TimerCancelEvent &cancel_ev) { +void TimerStage::cancel_timer(TimerCancelEvent &cancel_ev) +{ pthread_mutex_lock(&timer_mutex_); bool success = false; timer_queue_t::iterator it = timer_queue_.find(cancel_ev.get_token()); @@ -347,8 +397,7 @@ void TimerStage::cancel_timer(TimerCancelEvent &cancel_ev) { } pthread_mutex_unlock(&timer_mutex_); - LOG_DEBUG("cancelling event: token=%s, success=%d\n", - cancel_ev.get_token().to_string().c_str(), (int) success); + LOG_DEBUG("cancelling event: token=%s, success=%d\n", cancel_ev.get_token().to_string().c_str(), (int)success); cancel_ev.set_success(success); cancel_ev.done(); @@ -356,7 +405,8 @@ void TimerStage::cancel_timer(TimerCancelEvent &cancel_ev) { return; } -void TimerStage::trigger_timer_check() { +void TimerStage::trigger_timer_check() +{ LOG_TRACE("signaling timer thread to complete timer check\n"); pthread_mutex_lock(&timer_mutex_); @@ -366,14 +416,16 @@ void TimerStage::trigger_timer_check() { return; } -void *TimerStage::start_timer_thread(void *arg) { +void *TimerStage::start_timer_thread(void *arg) +{ TimerStage *tstage = static_cast(arg); ASSERT(tstage != NULL, "Internal error--failed to start timer thread."); tstage->check_timer(); return NULL; } -void TimerStage::check_timer() { +void TimerStage::check_timer() +{ pthread_mutex_lock(&timer_mutex_); while (true) { @@ -389,7 +441,7 @@ void TimerStage::check_timer() { // Trigger all events for which the trigger time has already passed. timer_queue_t::iterator first = timer_queue_.begin(); timer_queue_t::iterator last; - std::list < StageEvent * > done_events; + std::list done_events; for (last = first; last != timer_queue_.end(); ++last) { if (TIMEVAL_LESS_THAN(now, last->first.get_time())) break; @@ -400,10 +452,9 @@ void TimerStage::check_timer() { // It is ok to hold the mutex while executing this loop. // Triggering the events only enqueues the event on the // caller's queue--it does not perform any real work. 
- for (std::list::iterator i = done_events.begin(); - i != done_events.end(); ++i) { - LOG_TRACE("triggering timer event: sec=%ld, usec=%ld, typeid=%s\n", - now.tv_sec, now.tv_usec, typeid(**i).name()); + for (std::list::iterator i = done_events.begin(); i != done_events.end(); ++i) { + LOG_TRACE( + "triggering timer event: sec=%ld, usec=%ld, typeid=%s\n", now.tv_sec, now.tv_usec, typeid(**i).name()); (*i)->done(); --num_events_; } @@ -415,7 +466,7 @@ void TimerStage::check_timer() { // call to wait on the condition variable. if (shutdown_) { LOG_INFO("received shutdown signal, abandoning timer maintenance\n"); - break; // !!! EARLY EXIT !!! + break; // !!! EARLY EXIT !!! } // Sleep until the next service interval. @@ -433,8 +484,7 @@ void TimerStage::check_timer() { ts.tv_sec = first->first.get_time().tv_sec; ts.tv_nsec = first->first.get_time().tv_usec * NSEC_PER_USEC; - LOG_TRACE("sleeping until next deadline: sec=%ld, nsec=%ld\n", ts.tv_sec, - ts.tv_nsec); + LOG_TRACE("sleeping until next deadline: sec=%ld, nsec=%ld\n", ts.tv_sec, ts.tv_nsec); pthread_cond_timedwait(&timer_condv_, &timer_mutex_, &ts); } } @@ -444,9 +494,9 @@ void TimerStage::check_timer() { return; } -bool TimerStage::timer_token_less_than(const TimerToken &tt1, - const TimerToken &tt2) { +bool TimerStage::timer_token_less_than(const TimerToken &tt1, const TimerToken &tt2) +{ return (tt1 < tt2); } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/seda/timer_stage.h b/deps/common/seda/timer_stage.h index ca7514d7289e3856fd543f731c821714a369864d..11d5c318f68c61984e3b4b990630c5deb3244b11 100644 --- a/deps/common/seda/timer_stage.h +++ b/deps/common/seda/timer_stage.h @@ -41,7 +41,7 @@ namespace common { * TimerStage. */ class TimerToken { - public: +public: TimerToken(); TimerToken(const struct timeval &t); TimerToken(const TimerToken &tt); @@ -53,7 +53,7 @@ class TimerToken { friend bool timer_token_less_than(const TimerToken &tt1, const TimerToken &tt2); - private: +private: void set(const struct timeval &t, u64_t n); static u64_t next_nonce(); @@ -68,9 +68,15 @@ class TimerToken { * \brief An abstract base class for all timer-related events. */ class TimerEvent : public StageEvent { - public: - TimerEvent() : StageEvent() { return; } - virtual ~TimerEvent() { return; } +public: + TimerEvent() : StageEvent() + { + return; + } + virtual ~TimerEvent() + { + return; + } }; /** @@ -88,7 +94,7 @@ class TimerEvent : public StageEvent { * the requested time. */ class TimerRegisterEvent : public TimerEvent { - public: +public: /** * \brief Create an event to request the registration of a timer * callback using relative time. @@ -177,7 +183,7 @@ class TimerRegisterEvent : public TimerEvent { */ void set_cancel_token(const TimerToken &t); - private: +private: StageEvent *timer_cb_; struct timeval timer_when_; TimerToken token_; @@ -196,7 +202,7 @@ class TimerRegisterEvent : public TimerEvent { * the associated callback event. */ class TimerCancelEvent : public TimerEvent { - public: +public: /** * \brief Create an event to request the cancellation of a timer * callback that was previously set. @@ -237,7 +243,7 @@ class TimerCancelEvent : public TimerEvent { */ const TimerToken &get_token(); - private: +private: TimerToken token_; bool cancelled_; }; @@ -273,7 +279,7 @@ class TimerCancelEvent : public TimerEvent { * to maintain the timer. 
*/ class TimerStage : public Stage { - public: +public: ~TimerStage(); static Stage *make_stage(const std::string &tag); @@ -283,7 +289,7 @@ class TimerStage : public Stage { */ u32_t get_num_events(); - protected: +protected: TimerStage(const char *tag); bool set_properties(); bool initialize(); @@ -294,7 +300,7 @@ class TimerStage : public Stage { // For ordering the keys in the timer_queue_. static bool timer_token_less_than(const TimerToken &tt1, const TimerToken &tt2); - private: +private: void register_timer(TimerRegisterEvent ®_ev); void cancel_timer(TimerCancelEvent &cancel_ev); bool timeval_less_than(const struct timeval &t1, const struct timeval &t2); @@ -303,18 +309,16 @@ class TimerStage : public Stage { static void *start_timer_thread(void *arg); - typedef std::map - timer_queue_t; + typedef std::map timer_queue_t; timer_queue_t timer_queue_; pthread_mutex_t timer_mutex_; pthread_cond_t timer_condv_; - bool shutdown_; // true if stage has received the shutdown signal - u32_t num_events_; // the number of timer events currently outstanding - pthread_t timer_thread_id_; // thread id of the timer maintenance thread + bool shutdown_; // true if stage has received the shutdown signal + u32_t num_events_; // the number of timer events currently outstanding + pthread_t timer_thread_id_; // thread id of the timer maintenance thread }; -} //namespace common -#endif // __COMMON_SEDA_TIMER_STAGE_H__ +} // namespace common +#endif // __COMMON_SEDA_TIMER_STAGE_H__ diff --git a/deps/common/time/datetime.cpp b/deps/common/time/datetime.cpp index 55a175f8a3bd2f7c45b248d7277201b1537b97bb..69eef2bdfe6539b56233a504fcc388b8bdc4ac9b 100644 --- a/deps/common/time/datetime.cpp +++ b/deps/common/time/datetime.cpp @@ -22,30 +22,46 @@ See the Mulan PSL v2 for more details. 
*/ #include namespace common { -DateTime::DateTime(std::string &xml_str) { +DateTime::DateTime(std::string &xml_str) +{ tm tmp; - sscanf(xml_str.c_str(), "%04d-%02d-%02dT%02d:%02d:%02dZ", &tmp.tm_year, - &tmp.tm_mon, &tmp.tm_mday, &tmp.tm_hour, &tmp.tm_min, &tmp.tm_sec); + sscanf(xml_str.c_str(), + "%04d-%02d-%02dT%02d:%02d:%02dZ", + &tmp.tm_year, + &tmp.tm_mon, + &tmp.tm_mday, + &tmp.tm_hour, + &tmp.tm_min, + &tmp.tm_sec); m_date = julian_date(tmp.tm_year, tmp.tm_mon, tmp.tm_mday); m_time = make_hms(tmp.tm_hour, tmp.tm_min, tmp.tm_sec, 0); } -time_t DateTime::str_to_time_t(std::string &xml_str) { +time_t DateTime::str_to_time_t(std::string &xml_str) +{ tm tmp; - sscanf(xml_str.c_str(), "%04d-%02d-%02dT%02d:%02d:%02dZ", &tmp.tm_year, - &tmp.tm_mon, &tmp.tm_mday, &tmp.tm_hour, &tmp.tm_min, &tmp.tm_sec); + sscanf(xml_str.c_str(), + "%04d-%02d-%02dT%02d:%02d:%02dZ", + &tmp.tm_year, + &tmp.tm_mon, + &tmp.tm_mday, + &tmp.tm_hour, + &tmp.tm_min, + &tmp.tm_sec); m_date = julian_date(tmp.tm_year, tmp.tm_mon, tmp.tm_mday); m_time = make_hms(tmp.tm_hour, tmp.tm_min, tmp.tm_sec, 0); return to_time_t(); } -std::string DateTime::time_t_to_str(int timet) { +std::string DateTime::time_t_to_str(int timet) +{ std::ostringstream oss; oss << std::dec << std::setw(10) << timet; return oss.str(); } -std::string DateTime::time_t_to_xml_str(time_t timet) { +std::string DateTime::time_t_to_xml_str(time_t timet) +{ std::string ret_val; std::ostringstream oss; struct tm tmbuf; @@ -70,11 +86,18 @@ std::string DateTime::time_t_to_xml_str(time_t timet) { return ret_val; } -std::string DateTime::str_to_time_t_str(std::string &xml_str) { +std::string DateTime::str_to_time_t_str(std::string &xml_str) +{ tm tmp; std::ostringstream oss; - sscanf(xml_str.c_str(), "%04d-%02d-%02dT%02d:%02d:%02dZ", &tmp.tm_year, - &tmp.tm_mon, &tmp.tm_mday, &tmp.tm_hour, &tmp.tm_min, &tmp.tm_sec); + sscanf(xml_str.c_str(), + "%04d-%02d-%02dT%02d:%02d:%02dZ", + &tmp.tm_year, + &tmp.tm_mon, + &tmp.tm_mday, + &tmp.tm_hour, + &tmp.tm_min, + &tmp.tm_sec); m_date = julian_date(tmp.tm_year, tmp.tm_mon, tmp.tm_mday); m_time = make_hms(tmp.tm_hour, tmp.tm_min, tmp.tm_sec, 0); time_t timestamp = to_time_t(); @@ -82,20 +105,24 @@ std::string DateTime::str_to_time_t_str(std::string &xml_str) { return oss.str(); } -time_t DateTime::nowtimet() { +time_t DateTime::nowtimet() +{ struct timeval tv; gettimeofday(&tv, 0); - return tv.tv_sec;; + return tv.tv_sec; + ; } -DateTime DateTime::now() { +DateTime DateTime::now() +{ struct timeval tv; gettimeofday(&tv, 0); return from_time_t(tv.tv_sec, tv.tv_usec / 1000); } //! 
Return date and time as a string in Xml Schema date-time format -std::string DateTime::to_xml_date_time() { +std::string DateTime::to_xml_date_time() +{ std::string ret_val; tm tm_info; @@ -122,12 +149,14 @@ std::string DateTime::to_xml_date_time() { return ret_val; } -time_t DateTime::add_duration(std::string xml_duration) { +time_t DateTime::add_duration(std::string xml_duration) +{ add_duration_date_time(xml_duration); return to_time_t(); } -void DateTime::add_duration_date_time(std::string xml_duration) { +void DateTime::add_duration_date_time(std::string xml_duration) +{ // start datetime values int s_year, s_month, s_day; int s_hour, s_min, s_sec, s_millis = 0; @@ -200,21 +229,19 @@ void DateTime::add_duration_date_time(std::string xml_duration) { return; } -int DateTime::max_day_in_month_for(int yr, int month) { +int DateTime::max_day_in_month_for(int yr, int month) +{ int tmp_month = ((month - 1) % 12) + 1; int tmp_year = yr + ((tmp_month - 1) / 12); - if (tmp_month == MON_JAN || tmp_month == MON_MAR || tmp_month == MON_MAY || - tmp_month == MON_JUL || tmp_month == MON_AUG || tmp_month == MON_OCT || - tmp_month == MON_DEC) { + if (tmp_month == MON_JAN || tmp_month == MON_MAR || tmp_month == MON_MAY || tmp_month == MON_JUL || + tmp_month == MON_AUG || tmp_month == MON_OCT || tmp_month == MON_DEC) { return 31; } else { - if (tmp_month == MON_APR || tmp_month == MON_JUN || tmp_month == MON_SEP || - tmp_month == MON_NOV) + if (tmp_month == MON_APR || tmp_month == MON_JUN || tmp_month == MON_SEP || tmp_month == MON_NOV) return 30; else { - if (tmp_month == MON_FEB && ((0 == tmp_year % 400) || - ((0 != tmp_year % 100) && 0 == tmp_year % 4))) { + if (tmp_month == MON_FEB && ((0 == tmp_year % 400) || ((0 != tmp_year % 100) && 0 == tmp_year % 4))) { return 29; } else return 28; @@ -222,7 +249,8 @@ int DateTime::max_day_in_month_for(int yr, int month) { } } -void DateTime::parse_duration(std::string dur_str, struct tm &tm_t) { +void DateTime::parse_duration(std::string dur_str, struct tm &tm_t) +{ std::string::size_type index = 0; bzero(&tm_t, sizeof(tm_t)); if (dur_str[index] != 'P') { @@ -314,7 +342,8 @@ void DateTime::parse_duration(std::string dur_str, struct tm &tm_t) { // generate OBJ_ID_TIMESTMP_DIGITS types unique timestamp string // caller doesn't need get any lock #define OBJ_ID_TIMESTMP_DIGITS 14 -std::string Now::unique() { +std::string Now::unique() +{ struct timeval tv; u64_t temp; static u64_t last_unique = 0; @@ -324,7 +353,7 @@ std::string Now::unique() { static pthread_mutex_t mutex = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER; #endif gettimeofday(&tv, NULL); - temp = (((u64_t) tv.tv_sec) << 20) + tv.tv_usec; + temp = (((u64_t)tv.tv_sec) << 20) + tv.tv_usec; pthread_mutex_lock(&mutex); if (temp > last_unique) { // record last timeStamp @@ -343,12 +372,12 @@ std::string Now::unique() { // should not cover OBJ_ID_TIMESTMP_DIGITS, which is only // related with the object id. std::ostringstream oss; - oss << std::hex << std::setw(OBJ_ID_TIMESTMP_DIGITS) << std::setfill('0') - << temp; + oss << std::hex << std::setw(OBJ_ID_TIMESTMP_DIGITS) << std::setfill('0') << temp; return oss.str(); } -bool DateTime::is_valid_xml_datetime(const std::string &str) { +bool DateTime::is_valid_xml_datetime(const std::string &str) +{ // check length. 
20 is the length of a xml date if (str.length() != 20) return false; @@ -366,12 +395,17 @@ bool DateTime::is_valid_xml_datetime(const std::string &str) { // check month, date, hour, min, second is valid tm tmp; - int ret = - sscanf(str.c_str(), "%04d-%02d-%02dT%02d:%02d:%02dZ", &tmp.tm_year, - &tmp.tm_mon, &tmp.tm_mday, &tmp.tm_hour, &tmp.tm_min, &tmp.tm_sec); + int ret = sscanf(str.c_str(), + "%04d-%02d-%02dT%02d:%02d:%02dZ", + &tmp.tm_year, + &tmp.tm_mon, + &tmp.tm_mday, + &tmp.tm_hour, + &tmp.tm_min, + &tmp.tm_sec); if (ret != 6) - return false; // should have 6 match + return false; // should have 6 match if (tmp.tm_mon > 12 || tmp.tm_mon <= 0) return false; @@ -387,4 +421,4 @@ bool DateTime::is_valid_xml_datetime(const std::string &str) { return true; } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/time/datetime.h b/deps/common/time/datetime.h index fbfed7eed72c32692de68a921409ebccfe8c0bb1..a49391fc09fea7da9d26ceb1283d3ca1b9921f60 100644 --- a/deps/common/time/datetime.h +++ b/deps/common/time/datetime.h @@ -27,7 +27,6 @@ See the Mulan PSL v2 for more details. */ namespace common { - /* * \brief Date and time are represented as integer for ease of * calculation and comparison. @@ -80,14 +79,16 @@ struct DateTime { }; // Default constructor - initializes to zero - DateTime() : m_date(0), m_time(0) {} + DateTime() : m_date(0), m_time(0) + {} // Construct from a Julian day number and time in millis - DateTime(int date, int time) : m_date(date), m_time(time) {} + DateTime(int date, int time) : m_date(date), m_time(time) + {} // Construct from the specified components - DateTime(int year, int month, int day, int hour, int minute, int second, - int millis) { + DateTime(int year, int month, int day, int hour, int minute, int second, int millis) + { m_date = julian_date(year, month, day); m_time = make_hms(hour, minute, second, millis); } @@ -100,13 +101,15 @@ struct DateTime { // Load the referenced values with the year, month and day // portions of the date in a single operation - inline void get_ymd(int &year, int &month, int &day) const { + inline void get_ymd(int &year, int &month, int &day) const + { get_ymd(m_date, year, month, day); } // Load the referenced values with the hour, minute, second and // millisecond portions of the time in a single operation - inline void get_hms(int &hour, int &minute, int &second, int &millis) const { + inline void get_hms(int &hour, int &minute, int &second, int &millis) const + { int ticks = m_time / MILLIS_PER_SEC; hour = ticks / SECONDS_PER_HOUR; minute = (ticks / SECONDS_PER_MIN) % MINUTES_PER_HOUR; @@ -116,13 +119,14 @@ struct DateTime { // Convert the DateTime to a time_t. Note that this operation // can overflow on 32-bit platforms when we go beyond year 2038. 
- inline time_t to_time_t() const { - return (SECONDS_PER_DAY * (m_date - JULIAN_19700101) + - m_time / MILLIS_PER_SEC); + inline time_t to_time_t() const + { + return (SECONDS_PER_DAY * (m_date - JULIAN_19700101) + m_time / MILLIS_PER_SEC); } // Convert the DateTime to a struct tm which is in UTC - tm to_tm() const { + tm to_tm() const + { int year, month, day; int hour, minute, second, millis; tm result = {0}; @@ -142,35 +146,46 @@ struct DateTime { } // Set the date portion of the DateTime - void set_ymd(int year, int month, int day) { + void set_ymd(int year, int month, int day) + { m_date = julian_date(year, month, day); } // Set the time portion of the DateTime - void set_hms(int hour, int minute, int second, int millis) { + void set_hms(int hour, int minute, int second, int millis) + { m_time = make_hms(hour, minute, second, millis); } // Clear the date portion of the DateTime - void clear_date() { m_date = 0; } + void clear_date() + { + m_date = 0; + } // Clear the time portion of the DateTime - void clear_time() { m_time = 0; } + void clear_time() + { + m_time = 0; + } // Set the internal date and time members - void set(int date, int time) { + void set(int date, int time) + { m_date = date; m_time = time; } // Initialize from another DateTime - void set(const DateTime &other) { + void set(const DateTime &other) + { m_date = other.m_date; m_time = other.m_time; } // Add a number of seconds to this - void operator+=(int seconds) { + void operator+=(int seconds) + { int d = seconds / SECONDS_PER_DAY; int s = seconds % SECONDS_PER_DAY; @@ -203,10 +218,9 @@ struct DateTime { // Helper method to convert a broken down time to a number of // milliseconds since midnight - static int make_hms(int hour, int minute, int second, int millis) { - return MILLIS_PER_SEC * - (SECONDS_PER_HOUR * hour + SECONDS_PER_MIN * minute + second) + - millis; + static int make_hms(int hour, int minute, int second, int millis) + { + return MILLIS_PER_SEC * (SECONDS_PER_HOUR * hour + SECONDS_PER_MIN * minute + second) + millis; } // Return the current wall-clock time as a DateTime @@ -216,7 +230,8 @@ struct DateTime { time_t nowtimet(); // Convert a time_t and optional milliseconds to a DateTime - static DateTime from_time_t(time_t t, int millis = 0) { + static DateTime from_time_t(time_t t, int millis = 0) + { struct tm tmbuf; tm *tm = gmtime_r(&t, &tmbuf); return from_tm(*tm, millis); @@ -224,22 +239,24 @@ struct DateTime { // Convert a tm and optional milliseconds to a DateTime. \note // the tm structure is assumed to contain a date specified in UTC - static DateTime from_tm(const tm &tm, int millis = 0) { - return DateTime(julian_date(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday), - make_hms(tm.tm_hour, tm.tm_min, tm.tm_sec, millis)); + static DateTime from_tm(const tm &tm, int millis = 0) + { + return DateTime( + julian_date(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday), make_hms(tm.tm_hour, tm.tm_min, tm.tm_sec, millis)); } // Helper method to calculate a Julian day number. 
- static int julian_date(int year, int month, int day) { + static int julian_date(int year, int month, int day) + { int a = (14 - month) / 12; int y = year + 4800 - a; int m = month + 12 * a - 3; - return (day + int((153 * m + 2) / 5) + y * 365 + int(y / 4) - int(y / 100) + - int(y / 400) - 32045); + return (day + int((153 * m + 2) / 5) + y * 365 + int(y / 4) - int(y / 100) + int(y / 400) - 32045); } // Convert a Julian day number to a year, month and day - static void get_ymd(int jday, int &year, int &month, int &day) { + static void get_ymd(int jday, int &year, int &month, int &day) + { int a = jday + 32044; int b = (4 * a + 3) / 146097; int c = a - int((b * 146097) / 4); @@ -253,7 +270,8 @@ struct DateTime { // Return a human-friendly string representation of the timestamp, // expressed in terms of the local timezone - std::string to_string_local() { + std::string to_string_local() + { const time_t tt = to_time_t(); // 'man asctime' specifies that buffer must be at least 26 bytes char buffer[32]; @@ -265,7 +283,8 @@ struct DateTime { // Return a human-friendly string representation of the timestamp, // expressed in terms of Coordinated Universal Time (UTC) - std::string to_string_utc() { + std::string to_string_utc() + { const time_t tt = to_time_t(); // 'man asctime' specifies that buffer must be at least 26 bytes char buffer[32]; @@ -288,15 +307,18 @@ struct DateTime { void parse_duration(std::string dur_str, struct tm &tm_t); }; -inline bool operator==(const DateTime &lhs, const DateTime &rhs) { +inline bool operator==(const DateTime &lhs, const DateTime &rhs) +{ return lhs.m_date == rhs.m_date && lhs.m_time == rhs.m_time; } -inline bool operator!=(const DateTime &lhs, const DateTime &rhs) { +inline bool operator!=(const DateTime &lhs, const DateTime &rhs) +{ return !(lhs == rhs); } -inline bool operator<(const DateTime &lhs, const DateTime &rhs) { +inline bool operator<(const DateTime &lhs, const DateTime &rhs) +{ if (lhs.m_date < rhs.m_date) return true; else if (lhs.m_date > rhs.m_date) @@ -306,21 +328,25 @@ inline bool operator<(const DateTime &lhs, const DateTime &rhs) { return false; } -inline bool operator>(const DateTime &lhs, const DateTime &rhs) { +inline bool operator>(const DateTime &lhs, const DateTime &rhs) +{ return !(lhs == rhs || lhs < rhs); } -inline bool operator<=(const DateTime &lhs, const DateTime &rhs) { +inline bool operator<=(const DateTime &lhs, const DateTime &rhs) +{ return lhs == rhs || lhs < rhs; } -inline bool operator>=(const DateTime &lhs, const DateTime &rhs) { +inline bool operator>=(const DateTime &lhs, const DateTime &rhs) +{ return lhs == rhs || lhs > rhs; } // Calculate the difference between two DateTime values and return // the result as a number of seconds -inline int operator-(const DateTime &lhs, const DateTime &rhs) { +inline int operator-(const DateTime &lhs, const DateTime &rhs) +{ return (DateTime::SECONDS_PER_DAY * (lhs.m_date - rhs.m_date) + // Truncate the millis before subtracting lhs.m_time / 1000 - rhs.m_time / 1000); @@ -330,54 +356,67 @@ inline int operator-(const DateTime &lhs, const DateTime &rhs) { class TimeStamp : public DateTime { public: // Defaults to the current date and time - TimeStamp() : DateTime(DateTime::now()) {} + TimeStamp() : DateTime(DateTime::now()) + {} // Defaults to the current date - TimeStamp(int hour, int minute, int second, int millisecond = 0) - : DateTime(DateTime::now()) { + TimeStamp(int hour, int minute, int second, int millisecond = 0) : DateTime(DateTime::now()) + { set_hms(hour, minute, 
second, millisecond); } TimeStamp(int hour, int minute, int second, int date, int month, int year) - : DateTime(year, month, date, hour, minute, second, 0) {} + : DateTime(year, month, date, hour, minute, second, 0) + {} - TimeStamp(int hour, int minute, int second, int millisecond, int date, - int month, int year) - : DateTime(year, month, date, hour, minute, second, millisecond) {} + TimeStamp(int hour, int minute, int second, int millisecond, int date, int month, int year) + : DateTime(year, month, date, hour, minute, second, millisecond) + {} - TimeStamp(time_t time, int millisecond = 0) - : DateTime(from_time_t(time, millisecond)) {} + TimeStamp(time_t time, int millisecond = 0) : DateTime(from_time_t(time, millisecond)) + {} - TimeStamp(const tm *time, int millisecond = 0) - : DateTime(from_tm(*time, millisecond)) {} + TimeStamp(const tm *time, int millisecond = 0) : DateTime(from_tm(*time, millisecond)) + {} - void set_current() { set(DateTime::now()); } + void set_current() + { + set(DateTime::now()); + } }; // Time only represented in UTC. class Time : public DateTime { public: // Defaults to the current time - Time() { set_current(); } + Time() + { + set_current(); + } - Time(const DateTime &val) : DateTime(val) { clear_date(); } + Time(const DateTime &val) : DateTime(val) + { + clear_date(); + } - Time(int hour, int minute, int second, int millisecond = 0) { + Time(int hour, int minute, int second, int millisecond = 0) + { set_hms(hour, minute, second, millisecond); } - Time(time_t time, int millisecond = 0) - : DateTime(from_time_t(time, millisecond)) { + Time(time_t time, int millisecond = 0) : DateTime(from_time_t(time, millisecond)) + { clear_date(); } - Time(const tm *time, int millisecond = 0) - : DateTime(from_tm(*time, millisecond)) { + Time(const tm *time, int millisecond = 0) : DateTime(from_tm(*time, millisecond)) + { clear_date(); } // Set to the current time. - void set_current() { + void set_current() + { DateTime d = now(); m_time = d.m_time; } @@ -387,19 +426,30 @@ public: class Date : public DateTime { public: // Defaults to the current date - Date() { set_current(); } + Date() + { + set_current(); + } - Date(const DateTime &val) : DateTime(val) { clear_time(); } + Date(const DateTime &val) : DateTime(val) + { + clear_time(); + } - Date(int date, int month, int year) - : DateTime(year, month, date, 0, 0, 0, 0) {} + Date(int date, int month, int year) : DateTime(year, month, date, 0, 0, 0, 0) + {} - Date(long sec) : DateTime(sec / DateTime::SECONDS_PER_DAY, 0) {} + Date(long sec) : DateTime(sec / DateTime::SECONDS_PER_DAY, 0) + {} - Date(const tm *time) : DateTime(from_tm(*time)) { clear_time(); } + Date(const tm *time) : DateTime(from_tm(*time)) + { + clear_time(); + } // Set to the current time. 
- void set_current() { + void set_current() + { DateTime d = now(); m_date = d.m_date; } @@ -407,7 +457,8 @@ public: class Now { public: - static inline s64_t sec() { + static inline s64_t sec() + { struct timeval tv; gettimeofday(&tv, 0); time_t sec = tv.tv_sec; @@ -417,13 +468,15 @@ public: return sec; } - static inline s64_t usec() { + static inline s64_t usec() + { struct timeval tv; gettimeofday(&tv, 0); return (s64_t)tv.tv_sec * 1000000 + tv.tv_usec; } - static inline s64_t msec() { + static inline s64_t msec() + { struct timeval tv; gettimeofday(&tv, 0); s64_t msec = (s64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000; @@ -435,5 +488,5 @@ public: static std::string unique(); }; -} //namespace common -#endif //__COMMON_TIME_DATETIME_H__ +} // namespace common +#endif //__COMMON_TIME_DATETIME_H__ diff --git a/deps/common/time/timeout_info.cpp b/deps/common/time/timeout_info.cpp index c21209057aad57c80c25a0409cef980010c8c78c..85684337e8de1afaa3fdc973e0828645a167071e 100644 --- a/deps/common/time/timeout_info.cpp +++ b/deps/common/time/timeout_info.cpp @@ -17,25 +17,28 @@ See the Mulan PSL v2 for more details. */ #include namespace common { -TimeoutInfo::TimeoutInfo(time_t deadLine) - : deadline_(deadLine), is_timed_out_(false), ref_cnt_(0) { +TimeoutInfo::TimeoutInfo(time_t deadLine) : deadline_(deadLine), is_timed_out_(false), ref_cnt_(0) +{ MUTEX_INIT(&mutex_, NULL); } -TimeoutInfo::~TimeoutInfo() { +TimeoutInfo::~TimeoutInfo() +{ // unlock mutex_ as we locked it before 'delete this' MUTEX_UNLOCK(&mutex_); MUTEX_DESTROY(&mutex_); } -void TimeoutInfo::attach() { +void TimeoutInfo::attach() +{ MUTEX_LOCK(&mutex_); ref_cnt_++; MUTEX_UNLOCK(&mutex_); } -void TimeoutInfo::detach() { +void TimeoutInfo::detach() +{ MUTEX_LOCK(&mutex_); if (0 == --ref_cnt_) { delete this; @@ -44,7 +47,8 @@ void TimeoutInfo::detach() { MUTEX_UNLOCK(&mutex_); } -bool TimeoutInfo::has_timed_out() { +bool TimeoutInfo::has_timed_out() +{ MUTEX_LOCK(&mutex_); bool ret = is_timed_out_; if (!is_timed_out_) { @@ -58,4 +62,4 @@ bool TimeoutInfo::has_timed_out() { return ret; } -} //namespace common \ No newline at end of file +} // namespace common \ No newline at end of file diff --git a/deps/common/time/timeout_info.h b/deps/common/time/timeout_info.h index e619582651e111c250d6724680d27ad5fd8425d3..9e6fd57896aeb634791ecc6274afd09b7c248219 100644 --- a/deps/common/time/timeout_info.h +++ b/deps/common/time/timeout_info.h @@ -28,7 +28,7 @@ namespace common { */ class TimeoutInfo { - public: +public: /** * Constructor * @param[in] deadline_ deadline_ of this timeout @@ -44,7 +44,7 @@ class TimeoutInfo { // Check if it has timed out bool has_timed_out(); - private: +private: // Forbid copy ctor and =() to support ref count // Copy constructor. @@ -53,24 +53,24 @@ class TimeoutInfo { // Assignment operator. TimeoutInfo &operator=(const TimeoutInfo &ti); - protected: +protected: // Avoid heap-based \c TimeoutInfo // so it can easily associated with \c StageEvent // Destructor. 
~TimeoutInfo(); - private: - time_t deadline_; // when should this be timed out +private: + time_t deadline_; // when should this be timed out // used to predict timeout if now + reservedTime > deadline_ // time_t reservedTime; - bool is_timed_out_; // timeout flag + bool is_timed_out_; // timeout flag int ref_cnt_; // reference count of this object - pthread_mutex_t mutex_; // mutex_ to protect ref_cnt_ and flag + pthread_mutex_t mutex_; // mutex_ to protect ref_cnt_ and flag }; -} //namespace common -#endif // __COMMON_TIME_TIMEOUT_INFO_H__ +} // namespace common +#endif // __COMMON_TIME_TIMEOUT_INFO_H__ diff --git a/deps/common/version.h b/deps/common/version.h index ef1f94a265b7eb6d5755903121963134c5f8d95c..73e3e5fe30e92b45bd70b4581ae4bc0ff2cfc7f5 100644 --- a/deps/common/version.h +++ b/deps/common/version.h @@ -35,10 +35,8 @@ namespace common { #define STR1(R) #R #define STR2(R) STR1(R) -#define VERSION_STR \ - (STR2(MAIJOR_VER) "." STR2(MINOR_VER) "." STR2(PATCH_VER) "." STR2(OTHER_VER)) -#define VERSION_NUM \ - (MAIJOR_VER << 24 | MINOR_VER << 16 | PATCH_VER << 8 | OTHER_VER) +#define VERSION_STR (STR2(MAIJOR_VER) "." STR2(MINOR_VER) "." STR2(PATCH_VER) "." STR2(OTHER_VER)) +#define VERSION_NUM (MAIJOR_VER << 24 | MINOR_VER << 16 | PATCH_VER << 8 | OTHER_VER) -} //namespace common -#endif //__COMMON_VERSION_H__ +} // namespace common +#endif //__COMMON_VERSION_H__ diff --git a/docs/lectures/copyright.md b/docs/lectures/copyright.md new file mode 100644 index 0000000000000000000000000000000000000000..5207193a548d5c5c52f762081fc25222ad0279ed --- /dev/null +++ b/docs/lectures/copyright.md @@ -0,0 +1,6 @@ +# 版权声明 + +1. 本教材刊载的所有内容,包括但不限于文字报道、图片、视频、图表、标志标识、商标、版面设计、专栏目录与名称、内容分类标准等,均受《中华人民共和国著作权法》、《中华人民共和国商标法》、《中华人民共和国专利法》及适用之国际公约中有关著作权、商标权、专利权以及或其它财产所有权法律的保护,相应的版权或许可使用权均属华中科技大学谢美意老师、左琼老师所有。 +2. 凡未经华中科技大学谢美意老师、左琼老师授权,任何媒体、网站及个人不得转载、复制、重制、改动、展示或使用《数据库管理系统实现基础讲义》的局部或全部的内容。如果已转载,请自行删除。同时,我们保留进一步追究相关行为主体的法律责任的权利。 +3. 
本教材刊载的所有内容授权给北京奥星贝斯科技有限公司。 + diff --git a/docs/lectures/images/1-1.png b/docs/lectures/images/1-1.png new file mode 100644 index 0000000000000000000000000000000000000000..83f265517049c797bb065d4aa92fe0366639c6f2 Binary files /dev/null and b/docs/lectures/images/1-1.png differ diff --git a/docs/lectures/images/1.3.1.3-1.png b/docs/lectures/images/1.3.1.3-1.png new file mode 100644 index 0000000000000000000000000000000000000000..7ed8356afc25750eb0520b93e5caca3e1989cd8d Binary files /dev/null and b/docs/lectures/images/1.3.1.3-1.png differ diff --git a/docs/lectures/images/1.3.1.3-2.png b/docs/lectures/images/1.3.1.3-2.png new file mode 100644 index 0000000000000000000000000000000000000000..4b8e61e7d25fbee04de02f188d2f578f1fdd7a0a Binary files /dev/null and b/docs/lectures/images/1.3.1.3-2.png differ diff --git a/docs/lectures/images/2-1.png b/docs/lectures/images/2-1.png new file mode 100644 index 0000000000000000000000000000000000000000..9c37de67e5a58c072db3a5c848b1739040219889 Binary files /dev/null and b/docs/lectures/images/2-1.png differ diff --git a/docs/lectures/images/2-2.png b/docs/lectures/images/2-2.png new file mode 100644 index 0000000000000000000000000000000000000000..5cb9ca93f012488cb6a28a511829b0f61c1c3d33 Binary files /dev/null and b/docs/lectures/images/2-2.png differ diff --git a/docs/lectures/images/2-3.png b/docs/lectures/images/2-3.png new file mode 100644 index 0000000000000000000000000000000000000000..132ec9595dff682b4644f58c1a442ba0454f79ac Binary files /dev/null and b/docs/lectures/images/2-3.png differ diff --git a/docs/lectures/images/2-4.png b/docs/lectures/images/2-4.png new file mode 100644 index 0000000000000000000000000000000000000000..e4a01d79e77c2a0b26f3fd31356b3e6643f1f605 Binary files /dev/null and b/docs/lectures/images/2-4.png differ diff --git a/docs/lectures/images/2-5.png b/docs/lectures/images/2-5.png new file mode 100644 index 0000000000000000000000000000000000000000..4a89ae6d137d0853d64117f219a90eea40bb49b5 Binary files /dev/null and b/docs/lectures/images/2-5.png differ diff --git a/docs/lectures/images/2-6.png b/docs/lectures/images/2-6.png new file mode 100644 index 0000000000000000000000000000000000000000..96ade6e244a8fe16592b8936355b48794bbc5065 Binary files /dev/null and b/docs/lectures/images/2-6.png differ diff --git a/docs/lectures/images/2-7.png b/docs/lectures/images/2-7.png new file mode 100644 index 0000000000000000000000000000000000000000..0f4576949a2873a2299b4c43e2ec454f48e7c358 Binary files /dev/null and b/docs/lectures/images/2-7.png differ diff --git a/docs/lectures/images/2-8.png b/docs/lectures/images/2-8.png new file mode 100644 index 0000000000000000000000000000000000000000..cec31b80d1f1fa92e9c6f75386ce05f730e524a6 Binary files /dev/null and b/docs/lectures/images/2-8.png differ diff --git a/docs/lectures/images/3-1.png b/docs/lectures/images/3-1.png new file mode 100644 index 0000000000000000000000000000000000000000..1ab5abb6b31acee14447fa5b134d6e713253d02e Binary files /dev/null and b/docs/lectures/images/3-1.png differ diff --git a/docs/lectures/images/3-2-a.png b/docs/lectures/images/3-2-a.png new file mode 100644 index 0000000000000000000000000000000000000000..75427d5fa786557ebc1eed342340788c5492c06e Binary files /dev/null and b/docs/lectures/images/3-2-a.png differ diff --git a/docs/lectures/images/3-2-b.png b/docs/lectures/images/3-2-b.png new file mode 100644 index 0000000000000000000000000000000000000000..32836a76470373cf4b7490921b7dc7d17c9e7703 Binary files /dev/null and b/docs/lectures/images/3-2-b.png differ diff 
--git a/docs/lectures/images/3-2-c.png b/docs/lectures/images/3-2-c.png new file mode 100644 index 0000000000000000000000000000000000000000..73b9a8d2f10689719582c01bae07a6da1afceedb Binary files /dev/null and b/docs/lectures/images/3-2-c.png differ diff --git a/docs/lectures/images/3-3-a.png b/docs/lectures/images/3-3-a.png new file mode 100644 index 0000000000000000000000000000000000000000..ceb63d34facc4ee17673735b9fb5d0eaca7c3a63 Binary files /dev/null and b/docs/lectures/images/3-3-a.png differ diff --git a/docs/lectures/images/3-3-b.png b/docs/lectures/images/3-3-b.png new file mode 100644 index 0000000000000000000000000000000000000000..0b4f7b8b48441f6087ff2d2d5a5be39a45d18f4f Binary files /dev/null and b/docs/lectures/images/3-3-b.png differ diff --git a/docs/lectures/images/3-3-c.png b/docs/lectures/images/3-3-c.png new file mode 100644 index 0000000000000000000000000000000000000000..69897cf0837d10c2601cf0c52a7d494704102a66 Binary files /dev/null and b/docs/lectures/images/3-3-c.png differ diff --git a/docs/lectures/images/3-4.png b/docs/lectures/images/3-4.png new file mode 100644 index 0000000000000000000000000000000000000000..40e9647038dd8693c20b31d66fa8959d5f03f001 Binary files /dev/null and b/docs/lectures/images/3-4.png differ diff --git a/docs/lectures/images/3-5.png b/docs/lectures/images/3-5.png new file mode 100644 index 0000000000000000000000000000000000000000..287fa94b3a1b11bab8ba5cf5208fb8e5973477da Binary files /dev/null and b/docs/lectures/images/3-5.png differ diff --git a/docs/lectures/images/3-6-a.png b/docs/lectures/images/3-6-a.png new file mode 100644 index 0000000000000000000000000000000000000000..9403cc9fa55e8b674e0af1f55f3c09ca84c49776 Binary files /dev/null and b/docs/lectures/images/3-6-a.png differ diff --git a/docs/lectures/images/3-6-b.png b/docs/lectures/images/3-6-b.png new file mode 100644 index 0000000000000000000000000000000000000000..eafca32fefba585f9fb6466877286d1922eaf54f Binary files /dev/null and b/docs/lectures/images/3-6-b.png differ diff --git a/docs/lectures/images/3-7-a.png b/docs/lectures/images/3-7-a.png new file mode 100644 index 0000000000000000000000000000000000000000..674055dcaaefa9448e0f1e885292a0ad10287c41 Binary files /dev/null and b/docs/lectures/images/3-7-a.png differ diff --git a/docs/lectures/images/3-7-b.png b/docs/lectures/images/3-7-b.png new file mode 100644 index 0000000000000000000000000000000000000000..b2e388bf5138428e031fa015ffe493aacace434d Binary files /dev/null and b/docs/lectures/images/3-7-b.png differ diff --git a/docs/lectures/images/3-7-c.png b/docs/lectures/images/3-7-c.png new file mode 100644 index 0000000000000000000000000000000000000000..7ced26d5dd936a7821c7b70cf7f2b68af2a306f3 Binary files /dev/null and b/docs/lectures/images/3-7-c.png differ diff --git a/docs/lectures/images/4-1.png b/docs/lectures/images/4-1.png new file mode 100644 index 0000000000000000000000000000000000000000..e50c8c7da2f1b5ee10fca678ff5ff0fdfb51e625 Binary files /dev/null and b/docs/lectures/images/4-1.png differ diff --git a/docs/lectures/images/4-2.png b/docs/lectures/images/4-2.png new file mode 100644 index 0000000000000000000000000000000000000000..188e695434d52756cda59cede59354faf2bbcf89 Binary files /dev/null and b/docs/lectures/images/4-2.png differ diff --git a/docs/lectures/images/4-3.png b/docs/lectures/images/4-3.png new file mode 100644 index 0000000000000000000000000000000000000000..bc2a5b249bc849f081f2d0ec96df4744231f68db Binary files /dev/null and b/docs/lectures/images/4-3.png differ diff --git 
a/docs/lectures/images/4-4.png b/docs/lectures/images/4-4.png new file mode 100644 index 0000000000000000000000000000000000000000..7a83798e79487ae9070ddc3872cb7ad2c4710808 Binary files /dev/null and b/docs/lectures/images/4-4.png differ diff --git a/docs/lectures/images/4-5.png b/docs/lectures/images/4-5.png new file mode 100644 index 0000000000000000000000000000000000000000..978a6c282da33e63a25382c3d60d96f0a18c3678 Binary files /dev/null and b/docs/lectures/images/4-5.png differ diff --git a/docs/lectures/images/4-6.png b/docs/lectures/images/4-6.png new file mode 100644 index 0000000000000000000000000000000000000000..d75f1825fe7b513869261dbba3efc7de42c8ba1e Binary files /dev/null and b/docs/lectures/images/4-6.png differ diff --git a/docs/lectures/images/5-1.png b/docs/lectures/images/5-1.png new file mode 100644 index 0000000000000000000000000000000000000000..b6e0009268c73d848788160693e3484fb9056f60 Binary files /dev/null and b/docs/lectures/images/5-1.png differ diff --git a/docs/lectures/images/5.2.1.1-1.png b/docs/lectures/images/5.2.1.1-1.png new file mode 100644 index 0000000000000000000000000000000000000000..939370e7cd50972341e610cc08a9b23c75bcc97d Binary files /dev/null and b/docs/lectures/images/5.2.1.1-1.png differ diff --git a/docs/lectures/images/5.2.1.1-2.png b/docs/lectures/images/5.2.1.1-2.png new file mode 100644 index 0000000000000000000000000000000000000000..a70112e716155665721533ef5e9c80336ddda1b6 Binary files /dev/null and b/docs/lectures/images/5.2.1.1-2.png differ diff --git a/docs/lectures/images/5.2.1.1-3.png b/docs/lectures/images/5.2.1.1-3.png new file mode 100644 index 0000000000000000000000000000000000000000..678974c6c2588a16ba0a9953005dfc18a16991f7 Binary files /dev/null and b/docs/lectures/images/5.2.1.1-3.png differ diff --git a/docs/lectures/images/5.2.1.1-4.png b/docs/lectures/images/5.2.1.1-4.png new file mode 100644 index 0000000000000000000000000000000000000000..9058d64afe83499602e3152d6105ef9024101f41 Binary files /dev/null and b/docs/lectures/images/5.2.1.1-4.png differ diff --git a/docs/lectures/images/5.2.2.1-1.png b/docs/lectures/images/5.2.2.1-1.png new file mode 100644 index 0000000000000000000000000000000000000000..81af2738a32f376a907e932e2c159c45c1942d5f Binary files /dev/null and b/docs/lectures/images/5.2.2.1-1.png differ diff --git a/docs/lectures/images/5.3.1.1-4.png b/docs/lectures/images/5.3.1.1-4.png new file mode 100644 index 0000000000000000000000000000000000000000..6faf25e46c9c0bd107c67cf19e1751001ba77e19 Binary files /dev/null and b/docs/lectures/images/5.3.1.1-4.png differ diff --git a/docs/lectures/images/6-1.png b/docs/lectures/images/6-1.png new file mode 100644 index 0000000000000000000000000000000000000000..a09bc0fae3c4737b64ba2ed1d70d1f4cb6124b44 Binary files /dev/null and b/docs/lectures/images/6-1.png differ diff --git a/docs/lectures/images/6-2.png b/docs/lectures/images/6-2.png new file mode 100644 index 0000000000000000000000000000000000000000..cc047777facf204d9194ee2774fb03234b006e74 Binary files /dev/null and b/docs/lectures/images/6-2.png differ diff --git a/docs/lectures/images/6-3.png b/docs/lectures/images/6-3.png new file mode 100644 index 0000000000000000000000000000000000000000..169b997e3dbaac31363ce6a7bcedb376d787901c Binary files /dev/null and b/docs/lectures/images/6-3.png differ diff --git a/docs/lectures/images/6-4.png b/docs/lectures/images/6-4.png new file mode 100644 index 0000000000000000000000000000000000000000..26f9f000ef458fcff5234995b239ab668c6a2b46 Binary files /dev/null and 
b/docs/lectures/images/6-4.png differ diff --git a/docs/lectures/index.md b/docs/lectures/index.md new file mode 100644 index 0000000000000000000000000000000000000000..114728918a9f1bc18ac5760175128f77b325e770 --- /dev/null +++ b/docs/lectures/index.md @@ -0,0 +1,21 @@ +# 数据库管理系统实现基础讲义 + +作者 华中科技大学谢美意 左琼 + + + +[第1章 数据库管理系统概述](lecture-1.md) + +[第2章 数据库的存储结构](lecture-2.md) + +[第3章 索引结构](lecture-3.md) + +[第4章 查询处理](lecture-4.md) + +[第5章 查询优化](lecture-5.md) + +[第6章 事务处理](lecture-6.md) + +[参考资料](references.md) + +[版权声明](copyright.md) diff --git a/docs/lectures/lecture-1.md b/docs/lectures/lecture-1.md new file mode 100644 index 0000000000000000000000000000000000000000..992ba329007f64ca2c6b0c63c36c3ecc50419127 --- /dev/null +++ b/docs/lectures/lecture-1.md @@ -0,0 +1,144 @@ +# 第1章 数据库管理系统概述 + +## 1.1 课程简介 + +随着信息时代的发展,数据的重要性日益凸显,它是各级政府机构、科研部门、企事业单位的宝贵财富和资源,因此数据库系统的建设对于这些组织的生存和发展至关重要。作为数据库系统的核心和基础,数据库管理系统(Data Base Management System,DBMS)得到了越来越广泛的应用。DBMS帮助用户实现对共享数据的高效组织、存储、管理和存取,经过数十年的研究发展,已经成为继操作系统之后最复杂的系统软件。 + +对于DBMS的学习一般可分为两个阶段: + +第一个阶段是学习DBMS的使用,包括如何运用数据库语言创建、访问和管理数据库,如何利用DBMS设计开发数据库应用程序。在这个阶段,学习者只需掌握DBMS提供的功能,并不需要了解DBMS本身的工作原理。 + +第二个阶段是学习DBMS的内部结构和实现机制。通过学习DBMS的实现技术,学习者对数据库系统的工作原理会有更深入的理解,这有助于学习者分析数据库系统在复杂应用环境中可能出现的各种性能问题,设计开发出更高效的数据库应用程序,并为其从事数据库管理软件和工具的开发及改进工作打下基础。 + +在本教程中,我们从DBMS开发者的视角,讨论实现一个关系型DBMS需要考虑的一些关键问题,比如:数据库在存储介质上是如何组织和存储的?一条SQL语句是如何被正确地解析执行的?有哪些结构和方法可以用来快速定位数据库中的记录,提高存取效率?多用户共享数据库时,如何在避免并发错误的同时提高并发度?发生故障时,如何保证数据库能够恢复到正确的状态?在此基础上,学习者可以尝试自己从零开始开发一个简单的DBMS,并逐渐完善、增强它的功能,在这个过程中掌握各种计算机专业知识在DBMS这样的复杂系统软件设计中的应用,提高自己的系统综合能力。 + +作为学习本课程的前提条件,我们假设学习者已经具备了一定的计算机学科背景知识,包括关系代数、关系数据库语言SQL、数据结构、算法,以及操作系统及编译的相关知识。 + +## 1.2 数据库管理系统的组成 + +![图1-1 DBMS内部结构图](images/1-1.png) + +
图1-1 DBMS内部结构图
+ +DBMS允许用户创建数据库并对数据库中的数据进行查询和修改,同时提供故障时的数据恢复功能和多用户同时访问时的并发控制功能。图1-1是一个DBMS的内部结构示意图。其中单线框表示系统模块,双线框表示内存中的数据结构,实线表示控制流+数据流,虚线表示数据流。该图反映了DBMS的几大主要功能的处理流程,即数据定义、数据操纵和事务管理,这些功能均依赖底层的存储管理及缓冲区管理组件提供对磁盘中数据的访问支持。以下我们分别对这几个功能进行简要说明。 + +### 1.2.1 存储及缓冲区管理 + +数据库中的数据通常驻留在磁盘中,当系统需要对数据进行操作时,要先将其从磁盘读入内存。 + +存储管理器的任务是控制数据在磁盘上的放置和数据在磁盘与内存之间的交换。很多DBMS依赖底层操作系统的文件系统来管理磁盘中的数据,也有一些DBMS为了提高效率,直接控制数据在磁盘设备中的存储和访问。存储管理器登记了数据在磁盘上所处的位置,将上层模块提出的逻辑层面的页面访问请求映射为物理层面的磁盘访问命令。 + +缓冲区管理器将内存空间划分为与页面同等大小的帧,来缓存从磁盘读入的页面,并保证这些页面在内存和磁盘上的副本的一致性。DBMS中所有需要从磁盘获取信息的上层模块都需要与缓冲区管理器交互,通过缓冲区读写数据。这些信息包括以下类型: + +- 数据:数据库自身的内容。 +- 元数据:描述数据库的结构及其约束的数据库模式。 +- 日志记录:记录事务对数据库所做修改的信息,用于保证数据库的一致性和持久性。 +- 统计信息:DBMS收集和存储的关于表、索引等数据库对象的大小、 取值分布等信息,用于查询优化。 +- 索引:支持对数据进行高效存取的数据结构。 + +### 1.2.2 DDL命令的处理 + +DDL是指数据定义语言,这类命令一般由DBA等有特殊权限的用户执行,用于定义或修改数据库的模式,比如创建或者删除表、索引等。关于数据库模式的描述信息称为元数据。元数据与普通数据一样,也是以表(称为系统表)的形式存在的。DDL命令由DDL处理器解析其语义,然后调用记录管理器及索引管理器对相应的元数据进行修改。 + +### 1.2.3 DML命令的处理 + +DML是指数据操纵语言,这类命令一般由普通用户或应用程序执行。DML又可分为对数据库的修改操作(增、删、改)和对数据库的查询操作。 + +对DML命令的处理中最重要的部分是查询处理。查询处理的过程分为以下几步: + +- 查询分析及检查:先对查询语句的文本进行语法分析,将其转换为语法树,然后进行查询检查(例如,检查查询中所提到的关系是否确实存在),并将语法树中的某些结构转换成内部形式,形成查询树。查询树表示了一个关系代数表达式,即要在关系上执行的一系列操作。 +- 查询优化:查询优化器利用元数据和关于数据的统计信息来确定哪个操作序列可能是最快的,将最初的查询树等价转换为最高效的操作序列。 +- 查询执行:执行引擎负责查询计划的执行,它通过完成查询计划中的各个操作,得到最终的执行结果。在执行过程中,它需要与DBMS中很多其他组件进行交互。例如,调用记录管理器和索引管理器获取需要的数据,调用并发控制组件对缓冲区中的某条记录加锁以避免并发错误,或者调用日志组件登记对数据库所做的修改。 + +### 1.2.4 事务处理 + +事务是一组数据库操作,这组操作要么都做,要么都不做,不可分割。一个事务中包含哪些操作是由用户定义的,可以包含多个数据库操作,也可以只包含单个数据库操作。对事务的处理由事务管理器负责,它包括并发控制组件和日志及恢复组件,目的是保证事务的ACID特性,即原子性、一致性、隔离性和持久性。 + +事务管理器接收来自用户或应用程序的事务命令,从而得知什么时候事务开始、什么时候事务结束、以及事务的参数设置(例如事务的隔离级),然后在事务运行过程中执行下列任务: + +- 登记日志:为了保证一致性和持久性,事务对于数据库的每一个修改都在磁盘上记录日志,以保证不管在什么时候发生故障,日志及恢复组件都能根据日志将数据库恢复到某个一致的状态。日志一开始被写到缓冲区中,然后会在适当的时机从日志缓冲区写回到磁盘中。 +- 并发控制:事务的执行从表面上看必须是孤立的,但是在大多数系统中,实际上有许多事务在同时执行。因此,并发控制组件必须保证多个事务的各个动作以一种适当的顺序执行,从而使得最终的结果与这些事务串行执行的结果相同。常见的并发控制方式是封锁机制,通过加锁来防止两个事务以可能造成不良后果的方式存取同一数据。 + +## 1.3 关系模型和SQL + +本教程讨论的关系型DBMS是以关系模型为理论基础的。另一方面,SQL作为一种关系数据库标准语言,得到了几乎所有商用关系DBMS的广泛支持。要实现一个关系DBMS,我们需要考虑如何在系统中支持符合关系模型定义的数据结构、数据操作和数据约束,同时支持用户通过SQL命令来访问系统。本节将简单回顾关系模型和SQL中的一些重要概念,并讨论二者的关系。 + +### 1.3.1关系模型 + +1970年,E.F.Codd在他的论文《A Relation Model of Data for Large Shared Data Banks》中首次提出关系模型。关系模型相对于层次模型和网状模型的优势在于:它提供了一种只使用自然结构来描述数据的方法,而不需要为了方便机器表示而附加任何额外的结构。这样就为更高级的数据语言提供了基础,这种语言使得程序能够独立于数据的机器表示及组织方式,具有更好的数据独立性。 + +#### 1.3.1.1关系 + +关系模型采用的数据结构称为关系。在关系模型中,数据库中的全部数据及数据间的联系都用关系来表示。关系是一个无序的元组集合,每个元组由一组属性值构成,表示一个实体。一个有n个属性的关系称为n元关系。由于关系中的元组是无序的,因此DBMS可以采用任何它希望的方式存储它们,以便进行优化。 + +#### 1.3.1.2 主键和外键 + +主键和外键反映了关系模型的实体完整性约束和参照完整性约束。 + +主键可唯一地标识关系中的一个元组,以确保没有任何两个元组是完全一样的。如果用户没有定义主键,有些DBMS会自动创建内部主键。 + +外键指定一个关系中的属性在取值时必须与另一个关系中的某个元组相对应,不能随意取值。 + +#### 1.3.1.3 关系代数 + +关系代数是关系模型定义的一组运算符,用于检索和操作关系中的元组。每个运算符接受一个或多个关系作为输入,并输出一个新的关系。为了表示查询,可以将这些运算符连接在一起以创建更复杂的运算,称为关系代数表达式。 + +常见的关系代数运算符包括: + +- **选择(selection)**:选择运算是从关系R中选取满足给定条件的元组构成结果关系,记作σF(R)。 +- **投影(Projection)** :投影运算是从关系R中选取若干属性列A构成结果关系,记作 ΠA(R)。 +- **并( Union )** :两个关系R和S的并是由属于R或属于S的元组构成的集合,记为 R∪S。 +- **交( Intersection)** :两个关系R和S的交是由既属于R又属于S的元组构成的集合,记为 R ∩ S。 +- **差(Difference )** :两个关系R和S的差是由属于R但不属于S的元组构成的集合,记为 R-S。 +- **笛卡尔积( Cartesian Product)** :两个关系R和S的笛卡尔积是由这两个关系中元组拼接而成的所有可能的元组的集合,记为R×S。 +- **自然连接(Natural Join)** :两个关系R和S的自然连接是由这两个关系中在共同属性上取值相等的元组拼接而成的所有可能的元组的集合,记为R⋈S。 + +关系代数可以被视为一种过程化语言,因为一个关系代数表达式指定了查询的具体计算步骤。例如, ![1.3.1.3-1](images/1.3.1.3-1.png) 指定的计算步骤是先计算关系S和SC的自然连接,然后选择,而 ![1.3.1.3-2](images/1.3.1.3-2.png) 
指定的计算步骤则是先选择后连接。这两个表达式其实是等价的,它们的计算结果相同,但是计算速度却不同,后者明显更快。如果像这样由用户来指定查询的计算步骤,性能优化的压力就会落在用户身上,因为他们必须考虑如何写出更高效的查询表达式。所以更好的方法是DBMS提供一种非过程化语言,用户只指定需要什么数据,而不指定如何找到它。这正是SQL的成功之处。 + +### 1.3.2 SQL + +SQL 是关系数据库的标准语言,它是1974 年由Boyce和Chamberlin提出的,最初叫 Seque(Structured English Query Language), 并在IBM公司研发的关系数据库管理系统原型System R上实现,后改名为SQL(Structured Query Language)。SQL是一种通用的、功能极强的关系数据库语言,其功能不仅仅是查询,而是包括数据库模式创建、数据库数据的插入与修改、数据库安全性完整性定义与控制等一系列功能。但是,数据查询仍然是SQL中最重要、也最具特色的功能。 + +关系模型中的关系在SQL中被映射为表或视图。其中,表是指数据实际存储在数据库中的关系,视图是指不实际存储数据,但是需要时可以由实际存储的关系构造出来的关系。 + +需要指出的是,关系模型中的关系和SQL中的表和视图在概念上存在一些差异。前者是基于集合(set)的,即关系中的元组是不允许重复的;而后者是基于包(bag)的,允许表、视图或结果集中出现重复的元组。 + +SQL的查询通过SELECT语句来表达,它的基本语法如下: + +```sql + SELECT <列名或表达式序列> + FROM <表名或视图名序列> + [WHERE <行条件表达式>] + [GROUP BY <列名序列> + [HAVING <组条件表达式>] ] + [ORDER BY <排序列名>[ASC|DESC] [,...]] +``` + +以上语法成分中,只有SELECT和FROM子句是必不可少的。此外,SQL还提供了一个强大的特性,允许在WHERE、FROM或HAVING子句中嵌入子查询。子查询也是一个SELECT语句,在上述的WHERE、FROM或HAVING子句中可以使用子查询的返回结果来进行计算,这也是SQL之所以称为"结构化"查询语言的原因。 + +对于一条典型的查询语句,其结果可以这样计算: + +1. 读取FROM子句中基本表及视图的数据,并执行笛卡尔积操作; +2. 选取其中满足WHERE子句中条件表达式的元组; +3. 按GROUP BY子句中指定列的值分组; +4. 提取满足HAVING子句中组条件表达式的那些分组; +5. 按SELECT子句投影出结果关系; +6. 按ORDER BY子句对结果关系进行排序。 + +以上计算过程可以被看作是对一系列关系代数运算的执行。实际上一个SELECT语句在DBMS中就是被解析为一个关系代数表达式,再由执行引擎来对其进行计算的。但是对于同一条SELECT语句,可能存在多个等价的关系代数表达式。例如,对于以下语句: + + +```sql +SELECT 姓名 +FROM 学生, 选课 +WHERE 学生.学号=选课.学号 AND 课号=2 ; +``` + + +存在多个等价的关系代数表达式: + +1. ​ Π姓名学生.学号=选课.学号 ∧ 课号=2 (学生×选课)) +2. ​ Π姓名课号=2 (学生⋈选课)) +3. ​ Π姓名(学生⋈σ课号=2 (选课) + +这三个表达式的计算代价差异巨大,而DBMS的一个重要任务就是通过查询优化处理找到其中代价最小的那一个。SQL采用的这种非过程化语言形式,既简化了用户的表达,又为DBMS优化查询语句的执行性能提供了巨大的灵活性。 diff --git a/docs/lectures/lecture-2.md b/docs/lectures/lecture-2.md new file mode 100644 index 0000000000000000000000000000000000000000..34c5ab93114dbc80563d1d4f20c374a8b7a722fe --- /dev/null +++ b/docs/lectures/lecture-2.md @@ -0,0 +1,237 @@ + +# 第2章 数据库的存储结构 + +## 2.1 存储设备概述 + +大多数计算机系统中都存在多种数据存储类型,根据不同存储介质的速度和成本,可以把它们按层次结构组织起来,如图2-1所示。位于顶部的存储设备是最接近CPU的,其存取速度最快,但是容量最小,价格也最昂贵。离CPU越远,存储设备的容量就越大,不过速度也越慢,每比特的价格也越便宜。 + +![图2-1 存储设备层次结构图](images/2-1.png) + +
图2-1 存储设备层次结构图
+ +按其存储数据的持久性,可将存储设备分为易失性存储和非易失性存储两类。 + +- **易失性存储:** 易失性意味着当机器掉电时存储介质中的数据会丢失。易失性存储支持随机字节寻址方式,程序可以跳转到任意字节地址并获取数据。易失性存储通常指的是内存。 +- **非易失性存储:** 非易失性是指存储设备不需要通过连续供电来保证其存储的数据不丢失。非易失性存储设备是块寻址的,这意味着为了读取该设备中特定偏移位置上的一个值,必须先将包含这个值的一个块的数据加载到内存中。非易失性存储设备虽然也支持随机存取,但通常在顺序访问时(即同时读取多个连续块时)性能表现更好。目前常见的非易失性存储有固态硬盘(SSD)和机械硬盘(HDD),在本教程中不刻意区分,统称为磁盘。 + +除了上述存储设备,目前还有一种称为持久内存(persistent memory)的新型存储设备。持久内存既有内存的高速性,又有磁盘的持久性,兼具双重优势,不过这类设备不在本教程的讨论范围内。 + +## 2.2 面向磁盘的DBMS概述 + +根据数据库的主存储介质的不同,DBMS可分为面向磁盘(disk-oriented)和面向内存(memory-oriented)两种体系结构,本教程重点介绍经典的面向磁盘的体系结构。这种体系结构的特点是,为了保证在系统发生故障时的数据持久化,数据库使用非易失的磁盘作为主存储介质,但是由于系统不能直接操作磁盘上的数据,因此还需使用易失的内存作为缓存。众所周知,相对于内存,磁盘的访问速度非常慢,因此在面向磁盘的DBMS中,需要重点考虑的一个问题就是,如何在磁盘和内存之间交换数据才能减少磁盘I/O带来的性能延迟。 + +![图2-2 面向磁盘的DBMS](images/2-2.png) + +
图2-2 面向磁盘的DBMS
+ +面向磁盘的DBMS的存储架构如图2-2所示。DBMS将数据库映射到文件中,这些文件由底层操作系统维护,永久存储在磁盘上。因为文件存取是操作系统提供的基本功能,所以我们默认文件系统总是作为DBMS的基础而存在的。主流操作系统提供的通常为无结构的流文件,DBMS会将每个文件再划分为固定大小的数据块,称为页(page)。页是DBMS在磁盘和内存间交换数据的基本单元。 + +如果需要对数据库进行读写操作,DBMS需要先将数据从磁盘读取到内存中的缓冲池内,缓冲池管理器负责在磁盘和内存之间以页为单位进行数据交换。DBMS的执行引擎在语句处理过程中需要使用某个数据页时,会向缓冲池提出请求,缓冲池管理器负责将该页读入内存,并向执行引擎提供该页在内存中的指针。当执行引擎操作那部分内存时,缓冲池管理器必须确保该页面始终驻留在那片内存区域中。 + +## 2.3 文件的组织结构 + +### 2.3.1文件的分页 + +DBMS最常见的做法是将数据库以文件的形式存储在磁盘上。有些DBMS可能使用一组文件来存储数据库,有些DBMS可能只使用单个文件。 + +从操作系统的角度来看,一个文件就是一个字节流序列,操作系统并不关心和了解文件的内容以及文件之间的关联性。数据库文件的内容只有创建它的DBMS才知道如何解读,因为它是由DBMS以其特定的方式来组织的。 + +数据库文件的组织和管理由DBMS的存储管理器负责,它将文件划分为页面的集合,并且负责跟踪记录这些页面的使用情况,包括哪些页面存储了什么数据,哪些页面是空闲的等等。页面中可以存储不同类型的数据,比如记录、索引等,但是DBMS通常不会将不同类型的数据混合存储在同一个页面中。 + +### 2.3.2 页的标识 + +每个页面都有一个唯一的标识符。如果数据库是单个文件,那么页面ID可以直接映射为文件内的偏移量;如果数据库包含多个文件,则还需加上文件标识符来进行区分。大多数DBMS都有一个间接层,能够将页面ID映射为文件路径和偏移量。系统上层模块请求一个页面时,先给出页面ID,存储管理器将该页面ID转换为文件路径和偏移量,并由此定位到对应页面。 + +### 2.3.3 页的大小 + +大多数DBMS使用固定大小的页面,因为支持可变大小的页面会带来很多麻烦。例如,对于可变大小的页面,删除一个页面可能会在数据库文件中留下一个空缺,而由于页面的大小不等,这个空缺位置很难被一个新页填满,从而导致碎片问题。 + +大多数数据库默认使用4~8KB的页大小,但是许多数据库允许用户在创建数据库实例时自定义页的大小。 + +需要注意区分以下两个关于页的概念: + +- **硬件页:** 即磁盘块,大小通常为4 KB,是磁盘I/O的基本单位。 +- **数据库页:** 大小通常为磁盘块大小的整数倍,是DBMS在磁盘和缓冲池之间交换数据的基本单位。 + +二者的区别在于,对硬件页的写操作是原子的,但是对数据库页的写操作则不一定。换言之,如果硬件页的大小为4KB,那么当系统尝试向磁盘写入一个硬件页时,这4KB数据要么全部写入,要么全部不写入,这一点是由存储设备来保证的。但是,如果数据库页大于硬件页,那么DBMS对一个数据库页的写操作将被操作系统分解为对多个硬件页的写操作,此时DBMS必须采取额外措施来确保数据被安全地写入磁盘,因为系统可能会在将一个数据库页写入到磁盘的过程中发生崩溃,从而导致该数据库页的内容出现不一致性错误。 + +### 2.3.4 堆文件 + +关系是记录的集合,这些记录在数据库文件中可以有多种组织方式: + +- **堆文件组织( heap file organization)** :堆文件是页的无序集合,记录在页中以随机的顺序存储。即,一条记录可以放在文件中的任何地方,只要那里有足够的空间存放这条记录,记录间不用考虑先后顺序的。 通常每个关系使用一个单独的堆文件。 +- 顺序文件组织(sequential file organization):记录根据其"查找键"的值顺序存储。 +- **散列文件组织( hash file organization)** :在每条记录的某个/些属性上计算一个散列函数,根据散列的结果来确定将记录放到文件的哪个页面中。 + +在本节种,我们重点介绍堆文件的组织方式。由于这种组织方式并不关心记录间的顺序,因此DBMS只需要登记堆文件中哪些页面中是存储了数据的(数据页),哪些页面是空闲的(空闲页)。具体可以采用以下两种表示形式: + +- 链表:以链表的形式将文件中的空闲页和数据页分别勾连起来,并在文件的首页维护两个指针,分别指向空闲页链表和数据页链表的第一个页面,如图2-3所示。这种方式下,如果想要找到一个特定的数据页,需要从链首开始逐个扫描链表中的页面,直到找到为止,I/O开销较大。 +- 页目录:维护一种特殊的页面(目录页),在该页中记录每个数据页的位置以及该数据页中剩余的空闲空间大小,如图2-4所示。页目录将页面的状态信息集中存放在一起,可以提高查找特定页面的速度。 + +![图2-3 链表表示法](images/2-3.png) + +
图2-3 链表表示法
+ +![图2-4 页目录表示法](images/2-4.png) + +
图2-4 页目录表示法
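+
+为便于理解页目录的工作方式，下面给出一段 C++ 示意代码，演示"为一条新记录挑选有足够空闲空间的数据页"的查找逻辑。其中 DirEntry、find_page_for 等名称只是本文为说明而假设的，并非某个具体系统的实现。
+
+```cpp
+#include <cstdint>
+#include <vector>
+
+// 页目录中的一个表项：登记数据页编号及其剩余空闲空间（示意）
+struct DirEntry {
+  int32_t  page_id;     // 数据页编号
+  uint32_t free_space;  // 该页剩余的空闲字节数
+};
+
+// 在页目录中查找能容纳 record_size 字节新记录的数据页；
+// 找不到则返回 -1，由调用者申请新的空闲页并追加目录项
+int32_t find_page_for(const std::vector<DirEntry> &directory, uint32_t record_size)
+{
+  for (const DirEntry &entry : directory) {
+    if (entry.free_space >= record_size) {
+      return entry.page_id;
+    }
+  }
+  return -1;  // 没有合适的页
+}
+```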
+ +## 2.4 页的组织结构 + +一个页面的内部结构可以粗略的划分为两部分: + +- **页头** :页头登记了关于页面内容的元数据,如页面大小、校验和、DBMS版本、事务可见性、压缩信息等。有些系统(如Oracle)要求页面是自包含的,即关于该页的所有描述信息都可以在该页面中找到。 +- **数据区** :存放数据的区域。这里我们只讨论如何在数据区中存放记录。目前DBMS中最常用的方法是采用槽式页面。这种方法将数据区划分为一个个插槽(slot),每个插槽中放置一条记录。 + +注意,本节的讨论基于以下限制条件:(1)不存在整个数据区放不下单条记录的情况;(2)一条记录必须包含在单个页面中,换言之,没有哪条记录是一部分包含在一个页面中、一部分包含在另一个页面中的(第5节讨论的溢出页除外),这个限制可以简化并加速数据访问。 + +### 2.4.1 槽式页面 + +在槽式页面结构中,为了登记当前页面中有多少条记录以及每条记录的位置,必须在页头中维护以下信息: + +1. 本页中已使用的槽的数量; +2. 最后一个已使用的槽的起始位置; +3. 一个槽数组,登记本页中每个记录的起始位置。 + +如果允许记录是变长的,我们一开始并不能确定一个页面中能存放多少条记录,因此也就无法确定槽数组的最大长度,也就是说页头所占的区域大小是不确定的。因此比较合理的做法是,向页中插入记录时,槽数组从前向后增长,而被插入的记录数据则是从页尾向前增长。当槽数组和记录数据相遇时,则认为该页面是满页。槽式页面的布局示意图如图2-5所示。 + +![图2-5 槽式页面的布局](images/2-5.png) + +
图2-5 槽式页面的布局
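+
+结合图2-5，可以用如下示意代码勾勒槽式页面的页头布局和"页满"判断：槽数组从页头向后增长，记录数据从页尾向前增长，二者相遇即为满页。结构体字段的命名与宽度均为说明用的假设。
+
+```cpp
+#include <cstdint>
+
+// 槽式页面的页头（示意）：页头之后紧跟槽数组 slots[slot_count]，
+// 每个槽登记一条记录在页内的起始偏移；空页时 last_slot_off 等于页大小
+struct SlottedPageHeader {
+  uint16_t slot_count;     // 本页中已使用的槽的数量
+  uint16_t last_slot_off;  // 最后一个已使用的槽所指记录的起始位置（记录区前沿）
+};
+
+// 判断若再插入一条 record_size 字节的记录，槽数组与记录数据是否会相遇（即页满）
+bool page_is_full(const SlottedPageHeader *hdr, uint16_t record_size)
+{
+  // 新的槽项追加在槽数组末尾，新记录写在当前记录区前沿再向前 record_size 字节处
+  uint32_t slot_array_end = sizeof(SlottedPageHeader) + (hdr->slot_count + 1u) * sizeof(uint16_t);
+  return hdr->last_slot_off < slot_array_end + record_size;
+}
+```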
+ +### 2.4.2 插入记录 + +向关系中插入一条记录时,对于堆文件,只需要找到一个有足够空闲空间能放得下这条记录的页面,或当所有已分配页面中都没有足够空闲空间时,就申请一个新的空闲页,然后将记录放置在那里。 + +### 2.4.3 删除记录 + +从页中删除记录时,需要考虑如何回收该记录的空间。 + +一种方法是在页内滑动记录,使得记录间没有空隙,从而保证页面中未使用的区域一定位于槽数组和已使用区域之间,图2-5表示的就是这种方式。 + +如果不滑动记录,则需要在页头维护一个空闲区列表,以保证当向页中插入一条新记录时,我们能知道该页中的空闲区在哪里,有多大。当然,页头通常不必存储全部空闲区列表,只存列表的链头就够了,然后可以使用空闲区自身的空间存储下一个空闲区的信息。 + +### 2.4.4 修改记录 + +如果修改的是定长记录,对页面存储没有影响,因为修改后记录占用的空间与修改前完全相同。但是如果修改的是变长记录,就会碰到与插入和删除类似的问题。 + +如果修改后的记录比其旧版本长,则我们需要在当前页面中获得更多的空间,这个过程可能涉及记录的滑动。如果当前页面中的空闲区域不够,还需要将记录移动到其他页面。反之,如果记录由于修改而变短,我们可以像删除记录时那样回收其释放的空间。 + +## 2.5 记录的组织结构 + +记录本质上就是一个字节序列,如何将这些字节解释为属性类型和值是DBMS的工作。与页面结构类似,记录内部结构也可以分为两部分: + +- **记录头** :存放关于记录的元数据,例如DBMS并发控制协议的可见性信息(即哪个事务创建/修改了此记录的信息)、NULL值的位映射等。注意,关于数据库模式的元数据没有必要存储在记录头里。 +- **记录数据** :包含记录中各个属性的实际数值。如前所述,大多数DBMS不允许记录的长度超过页面的大小,且一个页面中一般只存放同一个关系的记录。 + +### 2.5.1 定长记录 + +定长记录全部由定长字段组成,是最简单的记录组织形式。定长记录的插入和删除是比较容易实现的,因为被删除的记录留出的可用空间恰好是插入新的记录所需要的空间。 + +定长记录在组织时需要注意的一个问题是内存对齐问题。很多处理器需要在数据的开始地址为4或8的倍数时才能实现更高效的内存读写,所以DBMS在组织记录数据时通常会根据情况使所有字段的起始地址是4或8的倍数。采用这种做法时,一个字段前可能会存在一些没有被上一个字段使用的空间,这些空间其实是被浪费掉了。但尽管如此,这样做还是有必要的。因为记录虽然是存放在磁盘而不是内存中,但是对记录的操作仍需在内存中进行,所以在组织记录时需要考虑如何让它在内存能够被高效访问。 + +### 2.5.2 变长记录 + +变长记录允许记录中存在一个或多个变长字段。由于变长字段在记录中的偏移位置是不确定的,因此记录中必须包含足够多的信息,让我们能够方便地提取记录的任何字段。变长记录的实现可以采用以下两种方法。 + +一种简单有效的实现方法,是将所有定长字段放在变长字段之前,然后在记录头写入以下信息:(1)记录长度;(2)除第一个变长字段之外的所有变长字段的偏移位置。之所以不需要存第一个变长字段的偏移位置,是因为我们知道第一个变长字段就紧跟在定长字段之后。一个变长记录的例子如图2-6所示,该记录共包含四个字段,其中有两个变长字段:name和address。 + +![图2-6 变长记录表示方法一示例](images/2-6.png) + +
图2-6 变长记录表示方法一示例
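+
+按照这种"定长字段在前、记录头存放记录长度和第2个及以后变长字段偏移"的布局，提取第 i 个变长字段的过程大致如下面的示意代码所示。其中头部字段的宽度（4字节记录长度、2字节偏移）以及 fixed_part_end 参数均为本文假设，仅用于说明思路。
+
+```cpp
+#include <cstdint>
+#include <cstring>
+#include <string>
+
+// 解析一条变长记录（示意）。假设记录头依次存放：记录总长度(4字节)、
+// 第2..n个变长字段的起始偏移(各2字节)；第1个变长字段紧跟在定长字段之后，
+// 其起始偏移 fixed_part_end 可由模式信息算出。
+std::string read_var_field(const char *record, uint16_t fixed_part_end,
+                           int num_var_fields, int idx /*从0开始*/)
+{
+  uint32_t record_len = 0;
+  std::memcpy(&record_len, record, sizeof(record_len));
+
+  const char *off_array = record + sizeof(record_len);  // 偏移数组：共 num_var_fields-1 项
+  auto offset_of = [&](int i) -> uint32_t {             // 第 i 个变长字段的起始偏移
+    if (i == 0) {
+      return fixed_part_end;
+    }
+    uint16_t off = 0;
+    std::memcpy(&off, off_array + (i - 1) * sizeof(uint16_t), sizeof(off));
+    return off;
+  };
+
+  uint32_t begin = offset_of(idx);
+  uint32_t end   = (idx + 1 < num_var_fields) ? offset_of(idx + 1) : record_len;
+  return std::string(record + begin, end - begin);
+}
+```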
+ +变长记录的另一种表示方法是保持记录定长,将变长部分放在另一个溢出页中,而在记录本身存储指向每一个变长字段开始位置的指针,如图2-7所示。 + +![图2-7 用溢出页存放变长字段](images/2-7.png) + +
图2-7 用溢出页存放变长字段
+ +这种方法的好处是可以保持记录定长,能够更有效地对记录进行搜索,记录也很容易在页内或页间移动。但是另一方面,将变长部分存储在另一个页中,增加了为检索一条记录的全部数据而需要进行的磁盘I/O次数。 + +溢出页不仅可以存储变长字段,还可以用于存储大值数据类型的字段,比如TEXT和BLOB字段,这些数据往往需要使用多个页面来存储。 + +## 2.6 缓冲池管理 + +面向磁盘的DBMS的一个主要目标就是尽量减少磁盘和内存之间传输的页面数量。减少磁盘访问次数的一种方法是在内存中保留尽可能多的页面,理想情况下,要访问的页面正好都已经在内存中了,这样就不再需要访问磁盘了。 + +但是在内存中保留所有的页面是不可能的,所以就需要有效地管理内存中用于缓存页面的空间,尽可能提高页面在内存中的命中率。用于缓存页面的那部分内存空间称为缓冲池,负责缓冲池空间分配的子系统称为缓冲池管理器。 + +### 2.6.1 缓冲池结构 + +缓冲池本质上是在DBMS内部分配的一大片内存区域,用于存储从磁盘获取的页面。这片内存空间被组织为一个数组,其中每个数组项被称为一个帧(frame),一个帧正好能放置一个页面。当一个页面被请求时,DBMS首先搜索缓冲池,如果在缓冲池中没有找到该页,就从磁盘获取该页的副本,并放置到缓冲池的一个帧中。缓冲池的组织结构如图2-8所示。 + +![图2-8 缓冲池组织结构](images/2-8.png) + +
图2-8 缓冲池组织结构
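+
+"请求页面→查页表→未命中则选一帧并从磁盘读入"的基本流程可以用下面的示意代码表示。这里的 BufferPool、fetch_page 等命名为本文假设；替换策略用简单轮转代替，真实系统应结合后文介绍的页表元数据（脏标志、引用计数）以及 LRU/CLOCK 等替换算法。
+
+```cpp
+#include <cstdint>
+#include <cstring>
+#include <unordered_map>
+#include <vector>
+
+using PageId  = int32_t;
+using FrameId = int32_t;
+
+// 缓冲池示意：frames_ 是一片按页大小划分的内存，page_table_ 登记已缓存页面所在的帧
+class BufferPool {
+public:
+  BufferPool(int frame_count, uint32_t page_size = 4096)
+      : page_size_(page_size), frames_(static_cast<size_t>(frame_count) * page_size) {}
+
+  char *fetch_page(PageId page_id)
+  {
+    auto it = page_table_.find(page_id);
+    if (it != page_table_.end()) {
+      return frame_addr(it->second);          // 命中：直接返回帧地址
+    }
+    FrameId frame_id = pick_victim_frame();   // 未命中：挑选一个帧
+    char *frame = frame_addr(frame_id);
+    read_page_from_disk(page_id, frame);      // 从磁盘读入页面副本
+    page_table_[page_id] = frame_id;          // 登记映射；被淘汰页的旧表项此处省略清理
+    return frame;
+  }
+
+private:
+  char *frame_addr(FrameId id) { return frames_.data() + static_cast<size_t>(id) * page_size_; }
+
+  FrameId pick_victim_frame()
+  {
+    FrameId victim = next_victim_;            // 示意用轮转；真实系统须跳过引用计数大于零的页，
+    next_victim_ = (next_victim_ + 1) % static_cast<FrameId>(frames_.size() / page_size_);
+    return victim;                            // 并在必要时先把脏页写回磁盘
+  }
+
+  void read_page_from_disk(PageId page_id, char *frame)
+  {
+    (void)page_id;                            // 占位：真实实现按页面ID换算文件偏移后发起磁盘读
+    std::memset(frame, 0, page_size_);
+  }
+
+  uint32_t page_size_;
+  std::vector<char> frames_;
+  std::unordered_map<PageId, FrameId> page_table_;
+  FrameId next_victim_ = 0;
+};
+```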
+ +为了有效和正确地使用缓冲池,缓冲池管理器必须维护一些元数据。 + +页表是一个内存哈希表,用于登记当前已经在内存中的页面的信息。页表将页面ID映射到缓冲池中一个帧的位置。因为缓冲池中页面的顺序不一定反映磁盘上的顺序,所以需要通过这个额外的数据结构来定位页面在缓冲池中的位置。 + +除了保存页面的内存地址,页表还为每个页面维护一个脏标志和一个引用计数器。 + +- 脏标志:脏标志由线程在修改页面时设置。如果一个页面被设置了脏标志,就意味着缓冲池管理器必须将该页写回磁盘,以保证磁盘上的页面副本包含最新的数据。 +- 引用计数:引用计数表示当前访问该页(读取或修改该页)的线程数。线程在访问该页之前必须增加引用计数。如果页的引用计数大于零,说明该页面正在被使用,此时不允许缓冲池管理器从内存中淘汰该页。 + +关于缓冲池中的内存空间如何分配的问题,缓冲池管理器可采取两种策略: + +- 全局策略:有利于当前整体工作负载的策略。全局策略综合考虑所有活动事务,以找到分配内存的最佳方案。 +- 本地策略:以保证单个查询或事务运行得更快为目标的策略。本地策略将一个帧分配给特定事务时,不考虑其他并发事务的行为,即使这样可能对整体工作负载不利。 + +### 2.6.2 缓冲池替换算法 + +与其他应用程序一样,DBMS对数据库文件的读写操作都需要通过调用操作系统的接口来实现。通常,为了优化I/O性能,操作系统自身也维护了一个缓冲区来缓存从磁盘读入的数据块。这个缓冲区和DBMS的缓冲池在功能上显然是重复的,会导致同一个数据库页面的数据在内存中的冗余存储,而且操作系统缓冲区的管理策略还使得DBMS难以控制内存与磁盘之间的页面交互。因此,大多数DBMS都使用直接I/O绕过操作系统的缓存。 + +当DBMS需要释放一个帧来为新的页面腾出空间时,它必须决定从缓冲池中淘汰哪个页面,这取决于DBMS采用的缓冲池替换算法。替换算法的目标是提高正确性、准确性、速度和元数据开销。需要注意的是,引用计数大于零的页面是不能淘汰的。 + +常用的替换算法有最近最少使用(LRU)算法和时钟(CLOCK)算法。 + +- LRU算法:LRU算法为每个页面维护其最后一次被访问的时间戳,这些时间戳可以存储在一个单独的数据结构(如队列)中,以便对其进行排序来提高效率。需要淘汰页面时,DBMS总是选择淘汰时间戳最早的页面。 +- CLOCK算法:CLOCK算法是一种近似LRU算法,它不需要每个页面都有单独的时间戳,而是为每个页面维护一个引用位。当某个页面被访问时,就将它的引用位的值置为1。想象页面被组织在循环缓冲区中,需要选择淘汰页面时,有一个"时钟指针"在循环缓冲区中扫描,检查页面的引用位是否为1。如果是,则将引用位重新置0并移动指针到下一个页面;否则,淘汰当前页面。 + +LRU算法和CLOCK算法应用于DBMS的缓冲池管理时存在许多问题。比如顺序扫描时,LRU和CLOCK容易使缓冲池的内容出现顺序溢出问题。因为顺序扫描会依次读取每个页面,所以读取页面的时间戳并不能反映我们实际想要哪些页面。换句话说,最近使用的页面实际上是最不需要的页面。 + +有三种解决方案可以解决LRU和CLOCK算法的缺点。 + +第一种解决方案是LRU-K,它会以时间戳的形式登记最后K次引用的历史,并计算连续引用之间的时间间隔,将此历史记录用于预测页面下一次被访问的时间。 + +第二种解决方案是对每个查询进行局部化,DBMS在每个查询的局部范围内选择要淘汰的页面,这样可以最小化每个查询对缓冲池的污染。 + +最后一种解决方案是优先级提示,它允许事务在查询执行期间根据每个页面的上下文,告诉缓冲池管理器该页面是否重要。 + +在淘汰页面时,对于脏页可以有两种处理方法:(1)总是优先淘汰缓冲池中的非脏页面;(2)先将脏页写回磁盘以确保其更改被持久化,然后再将其淘汰。后者会降低替换页面的速度;而前者虽然速度快,但是有可能将未来不会被再次访问的脏页留在缓冲池。 + +避免在淘汰页面时执行页面写出操作的一种方法是后台写。采用这种方法的DBMS会定期遍历页表并将脏页写入磁盘。当脏页被安全写入磁盘后,将该页面的脏标志重新置零。 + +### 2.6.3 缓冲池的优化 + +有许多方法来优化缓冲池,使其适合应用程序的工作负载。 + +(1)多缓冲池 + +DBMS可以维护多个用于不同目的的缓冲池,比如每个数据库使用一个缓冲池,每种页面类型使用一个缓冲池。然后针对其中存储的数据的特点,每个缓冲池可以采用量身定制的管理策略。 + +将所需页面映射到缓冲池有两种方法:对象ID和散列。对象ID这种方法需要扩展元数据,使其包含关于每个缓冲池正在管理哪些数据库对象的信息,然后通过对象ID,就可以实现从对象到特定缓冲池的映射。另一种方法是散列,DBMS散列页面ID以选择访问哪个缓冲池。 + +(2)预取 + +DBMS还可以根据查询计划通过预取页面来进行优化。然后,在处理第一组页面时,系统可以将第二组页面预取到缓冲池中。这种方法通常在顺序访问多个页面时使用。 + +(3)扫描共享 + +查询游标可以重用从磁盘读入的数据或操作符的计算结果。这种方法允许将多个查询附加到扫描表的单个游标上。当一个查询开始扫描时,如果已经有另一个查询在扫描,DBMS会将第一个查询附加到第二个查询的游标上。DBMS登记第二个查询加入时的位置,以便在到达数据结构末尾时结束扫描。 + +(4)缓冲池旁路 + +为了避免开销,顺序扫描操作符不会将获取的页存储在缓冲池中,而是使用正在运行的查询的本地内存。如果操作符需要读取磁盘上连续的大量页序列,那么这种方法可以很好地工作。缓冲池旁路也可以用于临时数据,如排序、连接。 + +### 2.6.4 其他内存池 + +除了元组和索引,DBMS还需要内存来存放其他东西。这些内存池中的内容可能并不总是来自磁盘或者需要写入磁盘,具体取决于实现。 + +- 排序+连接缓冲区 +- 查询缓存 +- 维护缓冲区 +- 日志缓冲区 +- 字典缓存 + diff --git a/docs/lectures/lecture-3.md b/docs/lectures/lecture-3.md new file mode 100644 index 0000000000000000000000000000000000000000..944944dc476a8854aff195e9f2132a313eee12d2 --- /dev/null +++ b/docs/lectures/lecture-3.md @@ -0,0 +1,281 @@ + +# 第3章 索引结构 + +## 3.1 索引结构概述 + +许多查询只涉及表中的少量记录。例如"查找学号为'U2021001'的学生的专业",这个查询最多只涉及学生表中的一条记录。如果系统为了找到学号为"U2021001"的记录而读取整个学生表,这样的操作方式显然是低效的。理想情况下,系统应该能够直接定位到这条记录。为了支持这种访问方式,需要额外设计一些与表相关联的附加结构,我们称之为索引。 + +索引是这样的数据结构:它以一个或多个属性的值为输入,并能快速地定位具有该值的记录的位置。建立索引的属性(组)称为查找键(search key)。与表一样,索引结构同样存储在数据库文件中。例如,我们可以用一个数据文件来存储一个表,用一个索引文件来存储一个索引。一个数据文件可能拥有一个或多个索引文件。 + +由于索引是表的附加结构,当表的内容发生变化时,DBMS必须同步更新该表的索引,以确保索引的内容与表的内容一致。由此可见,索引虽然有助于提高查询性能,但是索引本身也会带来存储和维护开销,因此在一个数据库应用中,具体创建什么索引、以及创建多少索引,用户是需要权衡的。不过在查询的执行过程中,是否需要使用索引、以及使用哪些索引,则是由DBMS来决定的,用户并不能干涉。如何恰当地利用索引来提高查询的执行效率,是DBMS的重要工作。 + +数据库系统中存在不同类型的索引结构,这些索引结构之间没有绝对的优劣之分,只能说某种索引结构在某种特定的场景下是最合适的。评价一种索引结构一般参考以下指标: + +- 查找类型:该索引结构能有效支持的查找类型,比如等值查找、范围查找等。 +- 查找时间:使用该索引结构找到一个特定索引项(集)所需的时间。 +- 插入时间:插入一个新的索引项所需的时间,包括找到插入这个新索引项的正确位置,以及更新索引结构所需的时间。 +- 
删除时间:删除一个索引项所需的时间,包括找到待删除项所需的时间, 以及更新索引结构所需的时间。 +- 空间开销:索引结构所占用的存储空间。 + +在本教程中,我们将介绍数据库系统中最常用的索引结构: B+树和散列表。 + +## 3.2 B+树 + +### 3.2.1 B+树的结构 + +B+树是一种平衡排序树,树中根结点到叶结点的每条路径的长度相同,并且保持键的有序排列。在B+树中进行搜索、顺序访问、插入和删除的时间复杂度均为O(log(n)),它是在数据插入和删除的情况下仍能保持其执行效率的几种使用最广泛的索引结构之一,几乎所有现代DBMS都使用B+树。 + +B+树可以定义为具有以下性质的m路搜索树: + +- 除非整棵树只有一个结点,否则根结点至少有两个子结点; +- 除根结点外的所有内结点至少是半满的,即有⌈m/2⌉到m个子结点; +- 所有叶结点的深度相等; +- 叶结点中键的数量必须大于等于 ⌈(m-1)/2⌉ 且小于等于 m-1 ; +- 每个有k个键的内结点都有k+1个非空子结点; +- 叶结点中包含所有查找键值。 + +![图3-1 B+树示意图](images/3-1.png) + +
图3-1 B+树示意图
+ +B+树的示意图如图3-1所示。树中的每个结点中都包含一个键/值对数组,这个数组是按键排序的。键/值对中的键来自索引的查找键,值则根据结点类型而有不同含义。如果结点是内结点,则值是指向子结点的指针。如果结点是叶结点,则结点中的值可能是记录ID,比如对于数据库中的非聚集索引,B+树中存放的就是指向记录位置的指针;叶结点中的值也可能是记录数据,比如对于聚集索引, B+树中存放的就是记录的实际数据。 + +在树的最底层,叶结点间通过兄弟指针链接起来,形成一个按所有键值大小排序的链表,以便更高效地支持范围查找等顺序处理。 + +图3-1中的B+树,其m的取值为4。在具体实现中,将B+树索引存储到磁盘文件中时,通常用一个页面来存储一个结点,在页面能够容纳的前提下,应该把m的值取得尽可能大,从而使得树的高度尽可能小。 + +### 3.2.2 B+树的查找 + +1. 等值查找 + +假设有一棵B+树,如果想找出键值为K的记录,则需要执行从根结点到叶结点的递归查找,查找过程为: + +1. 若当前结点为内结点,且结点中的键为**K1,K2,…,Kn**,则根据以下规则来决定下一步对此结点的哪一个子结点进行查找: + + 1. 如果**K1**,则下一个结点为第1个子结点; + 2. 如果**Ki≤Ki+1**,则下一个结点为第i+1个子结点; + 3. 如果**K≥Kn**,则下一个结点为第n+1个子结点。 + +递归执行此查找过程,直到查找到叶结点; + +1. 若当前结点为叶结点,在该结点的键值中查找,若第i个键值为K,则根据第i个值即可找到所需记录;否则查找失败。 + +1. 范围查找 + +如果想在B+树中找出在范围[a, b]之间的所有键值,先通过等值查找来查找键a,不论键a在B+树中是否存在,都会到达可能出现a的叶结点,然后在该叶结点中查找等于或大于a的那些键。只要在当前叶结点中不存在比b大的键,就根据兄弟指针找到下一个叶结点,继续查找[a, b]之间的所有键值。 + +上面的查找算法在查找范围只有上界或者只有下界时也有效: + +1. 当查找范围为[a,+∞)时,先找到键a可能出现的叶结点,然后从该结点中第一个等于或大于a的键开始,一直到最后一个叶结点的最后一个键。 +2. 当查找范围为(‐∞, b]时,则从B+树的第一个叶结点开始向后查找,直到遇到第一个超过b的键时停止查找。 + +### 3.2.3 B+树的插入 + +要向B+树中插入一个新索引项,必须遍历该树并使用内部结点来确定将键插入到哪个叶结点。在插入过程中,当结点太满时需要对其进行拆分,过程如下: + +1. 找到正确的叶结点L; +2. 将新索引项按顺序插入到L中: + + 1. 如果L有足够的空间,则执行插入操作,算法结束; + 2. 否则,将L平均拆分为L和L2两个结点,并复制L2的第一个键,将其插入到L的父结点中。 + +1. 如果父结点中有足够的空间,则执行插入操作,算法结束;否则拆分父结点,将该结点的中间键上移插入到其父结点,然后将剩余的索引项平均拆分为两个结点。递归执行此步骤直到算法结束。 + +![图3-2 B+树的插入过程示意图-a](images/3-2-a.png) + +
(a) 插入前
+ +![图3-2 B+树的插入过程示意图-b](images/3-2-b.png) + +
(b) 插入10后
+ +![图3-2 B+树的插入过程示意图-c](images/3-2-c.png) + +
(c) 插入2后
+ +
图3-2 B+树的插入过程示意图
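+
+结合3.2.1节的结点结构和3.2.2节的等值查找过程，下面给出一段内存中 B+ 树结点及自根向下查找的示意代码。结点字段（keys、children、rids 等）均为说明用的假设，与实际系统把结点存成页面的磁盘格式无关。
+
+```cpp
+#include <cstdint>
+#include <vector>
+
+// B+树结点（示意）：内结点的 children 指向子结点，叶结点的 rids 存放记录位置
+struct BPlusNode {
+  bool is_leaf = false;
+  std::vector<int64_t>     keys;       // 结点内按升序排列的键
+  std::vector<BPlusNode *> children;   // 内结点：children.size() == keys.size() + 1
+  std::vector<int64_t>     rids;       // 叶结点：与 keys 一一对应的记录位置
+  BPlusNode *next_leaf = nullptr;      // 叶结点间的兄弟指针，用于范围查找
+};
+
+// 等值查找：从根结点出发逐层选择子结点，直至叶结点；找到返回记录位置，否则返回 false
+bool bplus_search(const BPlusNode *node, int64_t key, int64_t &rid_out)
+{
+  while (!node->is_leaf) {
+    size_t i = 0;
+    while (i < node->keys.size() && key >= node->keys[i]) {
+      i++;                             // Ki <= K < Ki+1 时落入第 i+1 个子结点
+    }
+    node = node->children[i];
+  }
+  for (size_t i = 0; i < node->keys.size(); i++) {
+    if (node->keys[i] == key) {
+      rid_out = node->rids[i];
+      return true;
+    }
+  }
+  return false;
+}
+```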
+ +图3-2是向一棵4路B+树分别插入键值10和2的过程。可以看到,插入键值10后,原B+树中最右的叶结点发生了分裂,新增叶结点的第一个键值10被复制并插入到父结点中。插入键值2后,最左的叶结点发生了分裂,新增叶结点的第一个键值3被复制并插入到父结点中,而且还进一步导致了父结点的分裂,其中间键值7被上移并插入到新增的根结点中。 + +### 3.2.4 B+树的删除 + +在删除过程中,如果因删除索引项导致结点小于半满状态,则必须合并结点。过程如下: + +1. 找到待删除的索引项所在的叶结点L; +2. 从L中删除该索引项,删除后: + +1. 如果L不低于半满状态,则算法结束; +2. 否则,通过向兄弟结点借索引项来满足约束条件,如果能成功借到,则算法结束; +3. 如果兄弟结点也没有多余的索引项可借,则合并L和兄弟结点,删除父结点中指向被合并子结点的索引项。递归执行以上删除操作,直至算法结束。 + +![图3-3 B+树的删除过程示意图-a](images/3-3-a.png) + +
(a) 删除前
+ +![图3-3 B+树的删除过程示意图-b](images/3-3-b.png) + +
(b) 删除6后
+ +![图3-3 B+树的删除过程示意图-c](images/3-3-c.png) + +
(c) 删除1后
+ +
图3-3 B+树的删除过程示意图
+ +图3-3是从一棵5路B+树中先后删除键值6和1的过程。可以看到,删除键值6时,原B+树中第二个叶结点中的项数已经无法满足最低要求,因此向左边的兄弟结点借了1项来达到约束条件。删除键值1时,最左的叶结点中项数无法满足最低要求,而且兄弟结点也没有多余的项可借,因此只能对最左的两个结点进行合并。 + +### 3.2.5 非唯一查找键 + +基于某个查找键来构建索引时,假如表中存在两条或者多条记录在查找键属性上拥有相同的值,那么该查找键称为非唯一查找键。 + +非唯一查找键的一个问题在于影响记录删除的效率。假设某个查找键值出现了很多次,当表中拥有该查找键值的某条记录被删除时,为了维护索引与表数据的一致性,删除操作需要在B+树中查看很多个索引项,才能从中找出和被删除记录相对应的那个索引项并删除它,这个过程可能需要遍历多个叶结点。 + +解决以上问题的方法有两种: + +一种简单的解决方法是创建包含原始查找键和其他额外属性的复合查找键,确保该复合查找键对于所有记录是唯一的,这种方法通常被大多数数据库系统使用。这个额外属性也叫唯一化属性,它可以是记录ID,或者是在拥有相同查找键值的所有记录中取值唯一的任何其他属性。删除一条记录时,先计算该记录的复合查找键值,然后再用这个复合键值到索引中查找。因为复合查找键值是唯一的,所以不会影响记录删除的效率。在这种方法中,一个查找键值在记录中出现多少次,它在索引中就会被重复存储多少次。 + +另一种方法是,每个查找键值在B+树中只存储一次,并且为该查找键值维护一个记录指针的桶(或者列表)来解决非唯一问题。这种方法虽然没有存储冗余信息,但是索引维护和修改起来更加复杂。 + +## 3.3 散列表 + +散列表也叫哈希表,是一种常见的数据结构,它通过把键值映射到桶数组中的某个位置来加快查找记录的速度。散列表中包含两个关键元素: + +- **散列函数** :散列函数h以查找键(散列键)为参数并计算出一个介于0到B-1之间的整数。 +- **桶数组** :桶数组是一个编号从0到B-1、长度为B的数组,其中包含B个链表头,每个链表头对应一个桶,用于存储记录。 + +构造散列表时,如果一条记录的查找键为K,则将该记录链接到桶号为h(K)的桶中存储。 + +散列表在DBMS中被广泛运用,例如基于散列表来组织数据文件、基于散列表来构造索引文件、或者基于散列表进行连接运算等。当散列表的规模大到内存难以容纳时,或者出于数据持久化的目的,就需要将散列表存储在磁盘中。本教程主要讨论散列表在磁盘上的实现。 + +磁盘中的散列表与内存中的散列表存在一些区别。首先,桶数组是由页面组成,而不是由指向链表的指针组成;其次,散列到某个桶中的记录是存储在磁盘上的页面而非内存中。因此,磁盘上的散列表在设计时需要考虑访问磁盘的I/O代价以及表规模的扩展问题。 + +### 3.3.1 静态散列表 + +对于一个散列表,如果其桶数组的规模B(即桶的数量)一旦确定下来就不再允许改变,则称其为静态散列表。 + +#### 3.3.1.1散列函数 + +由于在设计时无法事先准确知道文件中将存储哪些搜索键值,因此我们希望选择一个具有下列特性的散列函数: + +- 函数的输出是确定的。相同的搜索键值应该总是生成相同的散列值。 +- 输出值的分布是随机且均匀的。散列函数应该表现为随机的,即散列值不应与搜索键的任何外部可见的排序相关,且不管搜索键值实际怎样分布,每个桶应分配到的记录数应该几乎相同。 +- 易于计算。散列函数的执行时间不能太长,因为它需要执行很多次。 + +理想的散列函数是能将搜索键值均匀地分布到所有桶中,使每个桶含有相同数目的记录,但是这样的函数往往需要非常长的时间来进行计算。因此,散列函数需要在冲突率和快速执行之间进行权衡。目前最先进的散列函数是Facebook XXHash3。 + +#### 3.3.1.2散列表的插入 + +当一个查找键为K的新记录需要被插入时,先计算h(K),找到桶号为h(K)的桶。如果桶内还有空间,我们就把该记录存放到此桶对应的页面中。如果该桶的页面中已经没有空间了,就增加一个新的溢出页,链接到该桶之后,并把新记录存入该页面。这种处理桶溢出问题的方式称为溢出链,如图3-4所示。 + +![图3-4 散列表的溢出链](images/3-4.png) + +
图3-4 散列表的溢出链
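+
+结合上图的溢出链，把查找键为 K 的记录插入静态散列表的过程可以写成如下示意代码：先用散列函数确定桶号，再沿该桶的页面链找到能放下记录的页，必要时追加溢出页。HashPage、StaticHashTable 等类型是本文为说明而假设的内存化简化，省略了溢出页的回收。
+
+```cpp
+#include <cstddef>
+#include <functional>
+#include <string>
+#include <vector>
+
+// 散列表的一页（示意）：records 为页内记录，overflow 指向溢出页
+struct HashPage {
+  static constexpr size_t capacity = 4;   // 一页能存放的记录数 f（示意取4）
+  std::vector<std::string> records;
+  HashPage *overflow = nullptr;
+};
+
+// 静态散列表：B 个桶，桶号由 h(K) 决定
+struct StaticHashTable {
+  std::vector<HashPage> buckets;
+  explicit StaticHashTable(size_t B) : buckets(B) {}
+
+  void insert(const std::string &key, const std::string &record)
+  {
+    size_t bucket_no = std::hash<std::string>{}(key) % buckets.size();  // 计算 h(K)
+    HashPage *page = &buckets[bucket_no];
+    while (page->records.size() >= HashPage::capacity) {   // 当前页已满
+      if (page->overflow == nullptr) {
+        page->overflow = new HashPage();                    // 增加一个新的溢出页，链到该桶之后
+      }
+      page = page->overflow;
+    }
+    page->records.push_back(record);                        // 把记录存入该页
+  }
+};
+```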
+ +#### 3.3.1.3散列表的删除 + +删除查找键值为K的记录与插入操作的方式类似。先找到桶号为h(K)的桶,由于不同的查找键值可能被映射到同一个桶中,因此还需要在桶内搜索,查找键值为K的记录,继而将找到的记录删除。删除记录后,如果允许记录在页面中移动,还可以选择合并同一桶链上的页面来减少链的长度。但是合并页面也有一定的风险,如果交替地往一个桶中插入和删除记录,可能导致页面被反复地创建和删除。 + +#### 3.3.1.4散列表的效率 + +如果希望达到最好的查找效率,理想情况是散列表中有足够的桶,每个桶只由单个页面组成。如果是这样,那么查询一条记录就只需一次磁盘I/O,且记录的插入和删除也只需两次磁盘I/O。 + +为了减少桶溢出的可能性,桶的数量B可选为 (_n_/_f_)\*(1+_d_),其中n是要存储的记录总数,f是一个桶中能存放的记录数,d表示避让因子,一般取值为0.2。这种做法会导致一定的浪费,平均每个桶有20%的空间是空的,好处则是减少了溢出的可能性。 + +但是,如果记录不断增长,而桶的数量固定不变,那么最终还是会出现很多桶都包含多个页面的情况。这种情况下,我们就需要在由多个页面构成的桶链中查找记录,每访问一个新的页面就增加一次磁盘I/O,这显然会严重影响散列表的查找效率。 + +### 3.3.2 动态散列表 + +静态散列表由于其桶的数量不能改变,因此当无法预知记录总数时,难以解决由于记录数不断增长而带来的性能问题。本节我们将讨论两种动态散列表,它们能够以不同的方式动态调整散列表的大小,既不需要重新构建整个表,又能保证每个桶大多只有一个页面,从而最大化读写效率。 + +#### 3.3.2.1 可扩展散列表 + +与静态散列表相比,可扩展散列表在结构上做了以下改变: + +- 增加了一个间接层,用一个指向页面的指针数组(桶地址表)而非页面数组来表示桶数组。 +- 指针数组能动态增长,且数组长度总是2的幂,因此数组每增长一次,桶的数量就翻倍。 +- 并非每个桶都单独拥有一个页面。如果多个桶的记录只需一个页面就能放下,那么这些桶可能共享一个页面,即多个桶指针指向同一个页面。 +- 散列函数h为每个键计算出一个长度为N的二进制序列,N的值足够大(比如32),但是在某一时刻,这个序列中只有前i位(i≤N)被使用,此时桶的数量为 2i个。 + +可扩展散列表的一般形式如图3-5所示。 + +![图3-5 可扩展散列表结构示意图](images/3-5.png) + +
图3-5 可扩展散列表结构示意图
+ +向可扩展散列表中插入键值为K的记录的方法如下: + +1. 计算h(K),取出该二进制序列的前i位,并找到桶数组中编号与之相等的项,定位到该项对应的页面,假设该页面的编号为j; + +2. 如果页面j中还有剩余空间,则将该记录插入该页面,操作结束; + +3. 如果页面j已满,则需要分裂该页面: + + a) 如果i=ij,说明在桶地址表中只有一个表项指向页面j,此时分裂该页,需要增加桶地址表的 大小,以容纳由于分裂而产生的两个桶指针。令i=i+1,使桶地址表的大小翻倍。桶地址表扩 展后,原表中的每个表项都被两个表项替代,且这两个表项都包含和原始表项一样的指针, 所以也应该有两个表项指向页面j。此时,分配一个新的页面n,并让第二个表项指向页面n。 将ij和in的值均置为当前的i值,并将原页面j中的各条记录重新散列,根据前i位来确定该记录 是放在页面j中还是页面n中,然后再次尝试插入新记录。极端情况下,新纪录要插入的页面 可能仍然是满的,说明原页面j中的所有记录在分裂后仍然被散列到了同一个页面中,此时需 要继续上述分裂过程,直至为新纪录找到可存放的空间。 + + b) 如果i> ij,说明在桶地址表中有多个表项指向页面j,此时不需要扩大桶地址表就能分裂页面 j。分配一个新的页面n,将ij和in置为原ij加1后的值;调整桶地址表中原来指向页面j的表项, 其中一半仍指向页面j,另一半则指向新创建的页面n;重新散列页面j中的各条记录,将其分 配到页面j或页面n中,并再次尝试插入新记录。与上一种情况一样,插入仍有可能失败,此 时需继续进行页面分裂的处理。 + +以下是一个可扩展散列表的例子。图3-6(a)所示为一个小型的可扩展散列表,假设其散列函数h能产生4位二进制序列,即N=4。散列表只使用了1位,即i=1。此时桶数组只有2项,一个编号为0,一个编号为1,分别指向两个页面。第一页存放所有散列值以0开头的记录,第二页存放所有散列值以1开头的记录。每个页面上都标注了一个数字,表示由散列函数得到的二进制序列中的前几位用于判定记录在该页面中的成员资格。目前两个页面都只用了1位。 + +接下来向表中插人一个散列值为1010序列的记录。因为第一位是1,所以该记录应放入第二个页面,但第二页已经满了,因此需要分裂该页。而此时i2=i=l,因此先要将桶数组翻倍,令i=2,将数组的长度扩展为4。 + +扩展桶数组后,以0开头的两个项都指向存放散列值以0开头的记录的第一页,且该页上标注数字仍然为1, 说明该页中记录的成员资格只由其散列值的第一位判定。而原本存放散列值以1开头的记录的页面则需要分裂,把这个页面中以10开头和11开头的记录分别存放到两个页面中。在这两个页面上方标注的数字是2,表示该页面中记录的成员资格需要使用散列值的前两位来判定。改变后的散列表如图3-6(b)所示。 + + + +![图3-6 可扩展散列表举例-a](images/3-6-a.png) + +
(a) 插入前
+ +![图3-6 可扩展散列表举例-b](images/3-6-b.png) + + + +
(b) 插入散列值为1010的记录后
+ +
图3-6 可扩展散列表举例
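+
+以图3-6为参照，可扩展散列表"取散列值前 i 位作为桶地址表下标"的定位步骤可以写成下面的小段示意代码（这里假设散列值为32位，directory、locate_page 等命名为本文假设）。
+
+```cpp
+#include <cstdint>
+#include <vector>
+
+struct ExtendiblePage;   // 页面的具体布局与前文的槽式页面类似，这里只通过指针引用
+
+// 取32位散列值的前 i 位作为桶地址表下标，再经由指针数组找到真正的页面
+ExtendiblePage *locate_page(const std::vector<ExtendiblePage *> &directory,
+                            uint32_t hash_value, int i)
+{
+  uint32_t index = (i == 0) ? 0 : (hash_value >> (32 - i));  // 前 i 位
+  return directory[index];   // 多个表项可能指向同一个页面（页面共享）
+}
+```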
+ +可扩展散列表的优点在于每个桶只有一个页面,所以如果桶地址表小到可以驻留在内存的话,查找一个记录最多只需要一次磁盘I/O。但是由于它是以桶数组翻倍的形式扩展的,所以也存在以下缺点: + +- 随着i的增大,每次桶数组翻倍时需要做的工作将越来越多,而且这些工作还会阻塞对散列表的并发访问,影响插入和并发操作的效率。 +- 随着i的增大,桶地址表会越来越大,可能无法全部驻留在内存,或者会挤占其他数据在内存中的空间,导致系统中的磁盘I/O操作增多。 + +#### 3.3.2.2 线性散列表 + +针对可扩展散列表存在的问题,下面介绍另一种动态散列表,称为线性散列表。相对于可扩展散列表,线性散列表中桶的增长较为缓慢,它有以下特点: + +- 桶数n的大小,要能使所有桶中的实际记录总数与其能容纳的记录总数之间的比值保持在一个指定的阈值之下(如80%),如果超过该阈值,则增加一个新桶。 +- 允许桶有溢出页,但是所有桶的平均溢出页数远小于1。 +- 若当前的桶数为n,则桶数组项编号的二进制位数i=⌈ log2n⌉。 + +令一个线性散列表当前桶数为n,桶数组项编号的二进制位数为i,向线性散列表中插入键值为K的记录的方法如下: + +1. 计算h(K),取出该二进制序列右端的i位,假设为a1a2…ai,令a1a2…ai对应的二进制整数为m。如果mi,说明编号为m的桶还不存在,则将记录存入编号为(m-2i-1)的桶中,即将a1a2…ai中的a1改为0时对应的桶。 +2. 如果要插入的桶中没有空间,则创建一个溢出页,将其链到该桶上,并将记录就存入该溢出块中。 +3. 插入记录后,计算 (当前实际记录总数r) / (n个桶能容纳的记录总数) 的值,并跟阈值相比,若超过阈值,则增加一个新桶到线性散列表中。注意,新增加的桶和之前发生插入的桶之间没有任何联系。如果新桶编号的二进制表示为la2a3…ai,则分裂桶号为0a2a3…ai的桶中的记录,根据这些记录的散列值的后i-1位分别散列到这两个桶中。 + +当n的值超过2i时,需要将i的值加1。理论上,对于现有的桶编号,要在它们的位序列前面增加一个0,来保证跟新的桶编号的位数一致,但是由于桶编号被解释成二进制整数,因此实际上它们只需要保持原样即可。 + +以下是一个线性散列表的例子。 + +图3-7(a)所示为一个桶数n=2 的线性散列表,桶编号所需要的二进制位数i = ⌈ log22⌉ = 1,表中的记录数r=3。图中两个桶的编号分别为0和1,每个桶包含一个页面,每个页面能存放两个记录。假设散列函数产生4位二进制序列,用记录散列值的末位来确定该记录所属的桶,所有散列值以0结尾的记录放入第一个桶,以1结尾的记录放入第二个桶。 + +在确定桶数n时,本例使用的阈值是85%,即桶的平均充满率不超过总容量的85%。 + +下面先插入散列值为0101的记录。因为0101以1结尾,所以记录应放入第二个桶。插入该记录后,两个桶中存放了四个记录,平均充满率为100%,超过了85%,因此需要增加一个新桶,即桶数n=3。i = ⌈log23⌉ = 2,即桶编号需要2位。新增的桶的编号为10。接着,分裂桶00(即原来的桶0),将散列值为0000 (末两位为00)的记录保留在桶00中,散列值为1010(末两位为10)的记录存入桶10中,改变后的散列表如图3-7(b)所示。 + +接下来再插入散列值为0001的记录。因为0001的末两位为01,所以应将该记录存入桶01中。不巧的是,该桶的页面已经装满,所以需要增加一个溢出页来提供存储空间。插入后,3个桶中有5条记录,平均充满率约83%,未超过85%,所以不需要创建新桶。改变后的散列表如图3-7(c)所示。 + +![图3-7 线性散列表举例-a](images/3-7-a.png) + +
(a) 插入前
+ +![图3-7 线性散列表举例-b](images/3-7-b.png) + +
(b) 插入散列值为0101的记录后
+ +![图3-7 线性散列表举例-c](images/3-7-c.png) + +
(c) 插入散列值为0001的记录后
+ +
图3-7 线性散列表举例
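+
+线性散列表"取散列值右端 i 位，若对应的桶尚未创建则把这 i 位的最高位清零"的定位规则，可以直接写成如下示意代码（函数与参数命名为本文假设）：
+
+```cpp
+#include <cstdint>
+
+// 线性散列：由散列值的低 i 位计算桶号；若该桶尚不存在（m >= n），
+// 则把这 i 位中的最高位清零，即存入编号为 m - 2^(i-1) 的桶
+int choose_bucket(uint32_t hash_value, int n /*当前桶数*/, int i /*桶编号位数*/)
+{
+  int m = static_cast<int>(hash_value & ((1u << i) - 1));   // 右端 i 位
+  if (m >= n) {
+    m -= (1 << (i - 1));
+  }
+  return m;
+}
+```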
+ diff --git a/docs/lectures/lecture-4.md b/docs/lectures/lecture-4.md new file mode 100644 index 0000000000000000000000000000000000000000..070a0b809c46cdf6e80fc3f80dce7b3dace2c264 --- /dev/null +++ b/docs/lectures/lecture-4.md @@ -0,0 +1,246 @@ + +# 第4章 查询处理 + +## 4.1查询处理概述 + +![图 4-1 关系数据库查询处理流程](images/4-1.png) + +
图 4-1 关系数据库查询处理流程
+ +关系数据库管理系统查询处理可以分为4个阶段:查询分析、查询检查、查询优化和查询执行。 + +1. **查询分析** :对用户提交的查询语句进行扫描、词法分析和语法分析,判断是否符合SQL语法规则,若没有语法错误,就会生成一棵语法树。 +2. **查询检查** :对语法树进行查询检查,首先根据数据字典中的模式信息检查语句中的数据对象,如关系名、属性名是否存在和有效;还要根据数据字典中的用户权限和完整性约束信息对用户的存取权限进行检查。若通过检查,则将数据库对象的外部名称转换成内部表示。这个过程实际上是对语法树进行语义解析的过程,最后语法树被解析为一个具有特定语义的关系代数表达式,其表示形式仍然是一棵树,称为查询树。 +3. **查询优化** :每个查询都会有多种可供选择的执行策略和操作算法,查询优化就是选择一个能高效执行的查询处理策略。一般将查询优化分为代数优化和物理优化。代数优化指对关系代数表达式进行等价变换,改变代数表达式中操作的次序和组合,使查询执行更高效;物理优化则是指存取路径和底层操作算法的选择,选择依据可以是基于规则、代价、语义的。查询优化之后,形成查询计划。 +4. **查询执行** :查询计划由一系列操作符构成,每一个操作符实现计划中的一步。查询执行阶段,系统将按照查询计划逐步执行相应的操作序列,得到最终的查询结果。 + +## 4.2 选择运算 + +选择操作的典型实现方法有全表扫描法和索引扫描法。 + +### 4.2.1 全表扫描法 + +对查询的基本表顺序扫描,逐一检查每个元组是否满足选择条件,把满足条件的元组作为结果输出。 + +假设可以使用的内存为M块,全表扫描的算法思想如下: + +1. 按物理次序读表T的M块到内存; +2. 检查内存的每个元组t,如果t满足选择条件,则输出t; +3. 如果表T还有其他块未被处理,重复(1)和(2)。 + +这种方法适合小表,对规模大的表要进行顺序扫描,当选择率(即满足条件的元组数占全表比例)较低时,此算法效率很低。 + +### 4.2.2 索引扫描法 + +当选择条件中的属性上有索引(例如B+树索引或Hash索引)时,通过索引先找到满足条件的元组指针,再通过元组指针直接在要查询的表中找到元组。 + +**[例1 ]** 等值查询:`select * from t1 where col=常量`,并且col上有索引(B+树索引或Hash索引均可) ,则使用索引得到col为该常量元组的指针,通过元组指针在表t1中检索到结果。 + +**[例2 ]** 范围查询: `select * from t1 where col > 常量`,并且col上有B+树索引,使用B+树索引找到col=常量的索引项,以此为入口点在B+树的顺序集上得到col \> 常量的所有元组指针, 通过这些元组指针到t1表中检索满足条件的元组。 + +**[例 3 ]** 合取条件查询:`select * from t1 where col1=常量a AND col2 >常量b`,如果 col1和 col1上有组合索引(col1,col2),则利用此组合索引进行查询筛选;否则,如果 col1和 col2上分别有索引,则: + +方法一:分别利用各自索引查找到满足部分条件的一组元组指针,求这2组指针的交集,再到t1表中检索得到结果。 + +方法二:只利用索引查找到满足该部分条件的一组元组指针,通过这些元组指针到t1表中检索,对得到的元组检查另一些选择条件是否满足,把满足条件的元组作为结果输出。 + +一般情况下,当选择率较低时,基于索引的选择算法要优于全表扫描。但在某些情况下,如选择率较高、或者要查找的元组均匀分散在表中,这时索引扫描法的性能可能还不如全表扫描法,因为还需要考虑扫描索引带来的额外开销。 + +## 4.3 排序运算 + +排序是数据库中的一个基本功能,用户通过Order by子句即能达到将指定的结果集排序的目的,而且不仅仅是Order by子句,Group by、Distinct等子句都会隐含使用排序操作。 + +### 4.3.1 利用索引避免排序 + +为了优化查询语句的排序性能,最好的情况是避免排序,合理利用索引是一个不错的方法。因为一些索引本身也是有序的,如B+树,如果在需要排序的字段上面建立了合适的索引,那么就可以跳过排序过程,提高查询速度。 + +例如:假设t1表存在B+树索引key1(key\_part1, key\_part2),则以下查询可以利用索引来避免排序: + +```sql + +​ SELECT * FROM t1 ORDER BY key_part1, key_part2; +​ SELECT * FROM t1 WHERE key_part1 = constant ORDER BY key_part2; +​ SELECT * FROM t1 WHERE key_part1 > constant ORDER BY key_part1; +​ SELECT * FROM t1 WHERE key_part1 = constant1 AND key_part2 > constant2 ORDER BY key_part2; +``` + +如果排序字段不在索引中,或者分别存在于多个索引中,或者排序键的字段顺序与组合索引中的字段顺序不一致,则无法利用索引来避免排序。 + +### 4.3.2 数据库内部排序方法 + +对于不能利用索引来避免排序的查询,DBMS必须自己实现排序功能以满足用户需求。实现排序的算法可以是文件排序,也可以是内存排序,具体要由排序缓冲区(sort buffer)的大小和结果集的大小来确定。 + +数据库内部排序的实现主要涉及3种经典排序算法:快速排序、归并排序和堆排序。对于不能全部放在内存中的关系,需要引入外排序,最常用的就是外部归并排序。外部归并排序分为两个阶段:Phase1 – Sorting,对主存中的数据块进行排序,然后将排序后的数据块写回磁盘;Phase2 – Merging,将已排序的子文件合并成一个较大的文件。 + +#### 4.3.2.1 常规排序法 + +一般情况下通用的常规排序方法如下: + +(1) 从表t中获取满足WHERE条件的记录; + +(2) 对于每条记录,将记录的主键+排序键(id,colp)取出放入sort buffer; + +(3) 如果sort buffer可以存放所有满足条件的(id,colp)对,则进行排序;否则sort buffer满后,进行排序并固化到临时文件中。(排序算法采用快速排序); + +(4) 若排序中产生了临时文件,需要利用归并排序算法,保证临时文件中记录是有序的; + +(5) 循环执行上述过程,直到所有满足条件的记录全部参与排序; + +(6) 扫描排好序的(id,colp)对,并利用id去取SELECT需要返回的目标列; + +(7) 将获取的结果集返回给用户。 + +从上述流程来看,是否使用文件排序主要看sort buffer是否能容下需要排序的(id,colp)对。此外一次排序涉及两次I/O:第一次是取(id,colp),第二次是取目标列。由于第一次返回的结果集是按colp排序,因此id是乱序的。通过乱序的id去取目标列时,会产生大量的随机I/O。因此,可以考虑对第二次I/O进行优化,即在取数据之前首先将id排序并放入缓冲区,然后按id顺序去取记录,从而将随机I/O转为顺序I/O。 + +为了避免第二次I/O,还可以考虑一次性取出(id,colp,目标列),当然这样对缓冲区的需求会更大。 + +#### 4.3.2.2 堆排序法 + +堆排序法适用于形如"order by limit m,n"的这类排序问题,即跳过m条数据,提取n条数据。这种情况下,虽然仍然需要所有元组参与排序,但是只需要m+n个元组的sort buffer空间即可,对于m和n很小的场景,基本不会出现因sort buffer不够而需要使用临时文件进行归并排序的问题。对于升序,采用大顶堆,最终堆中的元素组成了最小的n个元素;对于降序,则采用小顶堆,最终堆中的元素组成了最大的n的元素。 + +## 4.4 连接运算 + +连接操作是查询处理中最常用最耗时的操作之一。主要有4种实现方法:嵌套循环、排序-合并、索引连接和散列连接。 + +首先引入2个术语:外关系(outer relation)和内关系(inner 
relation)。外关系是左侧数据集,内关系是右侧数据集。例如:对于A JOIN B,A为外关系,B为内关系。多数情况下,A JOIN B 的成本跟 B JOIN A 的成本是不同的。假定外关系有n个元组,内关系有m个元组。 + +### 4.4.1 嵌套循环连接 + +嵌套循环连接是最简单且通用的连接算法,其执行步骤为:针对外关系的每一行,查看内关系里的所有行来寻找匹配的行。这是一个双重循环,时间复杂度为O(n\*m)。 + +![图 4-2 嵌套循环连接示意图](images/4-2.png) + +
图 4-2 嵌套循环连接示意图
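+
+上述双重循环可以用几行示意代码直接表达（这里把关系和元组都简化为内存中的向量，连接条件由谓词 theta 给出，均为说明用的假设）：
+
+```cpp
+#include <utility>
+#include <vector>
+
+// 嵌套循环连接（示意）：R 为外关系，S 为内关系，theta 给出连接条件
+template <typename Tuple, typename Pred>
+std::vector<std::pair<Tuple, Tuple>> nested_loop_join(const std::vector<Tuple> &R,
+                                                      const std::vector<Tuple> &S, Pred theta)
+{
+  std::vector<std::pair<Tuple, Tuple>> result;
+  for (const Tuple &r : R) {          // 针对外关系的每一行
+    for (const Tuple &s : S) {        // 查看内关系里的所有行
+      if (theta(r, s)) {
+        result.emplace_back(r, s);    // 满足连接条件则输出匹配的行
+      }
+    }
+  }
+  return result;
+}
+```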
+ +在磁盘 I/O 方面, 针对外关系的每一行,内部循环需要从内关系读取m行。这个算法需要从磁盘读取 n+ n\*m 行。但是,如果外关系足够小,我们可以把它先读入内存,那么就只需要读取 n+m 行。按照这个思路,外关系就应该选更小的那个关系,因为它有更大的机会装入内存。 + +当然,内关系如果可以由索引代替,对磁盘 I/O 将更有利。 + +当外关系太大无法装入内存时,采用块嵌套循环连接方式,对磁盘 I/O 更加有利。其基本思路是将逐行读取数据,改为以页(块)为单位读取数据。算法如下: + +(1) 从磁盘读取外关系的一个数据页到内存; + +(2) 从磁盘依次读取内关系的所有数据页到内存,与内存中外关系的数据进行比较,保留匹配的结果; + +(3) 从磁盘读取外关系的下一个数据页,并继续执行(2),直至外关系的最后一个页面。 + +与嵌套循环连接算法相比,块嵌套循环连接算法的时间复杂度没有变化,但降低了磁盘访问开销,变为M+M\*N。其中,M为外关系的页数,N为内关系的页数。 + +### 4.4.2 索引嵌套循环连接 + +在嵌套循环连接中,若在内关系的连接属性上有索引,则可以用索引查找替代文件扫描。对于外关系的每一个元组,可以利用索引查找内关系中与该元组满足连接条件的元组。这种连接方法称为索引嵌套循环连接,它可以在已有索引或者为了计算该连接而专门建立临时索引的情况下使用。 + +索引嵌套循环连接的代价可以如下计算。对于外关系的每一个元组,需要先在内关系的索引上进行查找,再检索相关元组。在最坏的情况下,缓冲区只能容纳外关系的一页和索引的一页。此时,读取外关系需M次I/O操作,这里的M指外关系的数据页数;对于外关系中的每个元组,在内关系上进行索引查找,假设索引查找带来的I/O开销为C,则总的I/O开销为:M+(m×C),其中m为外关系的元组数。 + +这个代价计算公式表明,如果两个关系上均有索引时, 一般把元组较少的关系作外关系时效果较好。 + +![图4-3 索引连接示意图](images/4-3.png) + +
图4-3 索引连接示意图
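+
+当内关系的连接列上有索引时，内层的全表扫描就被索引查找取代。下面的示意代码用一个多值哈希结构 std::unordered_multimap 来模拟这种索引，数据类型与命名均为本文假设：
+
+```cpp
+#include <cstdint>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+// 索引嵌套循环连接（示意）：inner_index 模拟内关系连接列上的索引（连接键 -> 整行数据）
+std::vector<std::pair<std::string, std::string>> index_nl_join(
+    const std::vector<std::pair<int64_t, std::string>> &outer,   // (连接键, 外关系的行)
+    const std::unordered_multimap<int64_t, std::string> &inner_index)
+{
+  std::vector<std::pair<std::string, std::string>> result;
+  for (const auto &r : outer) {
+    auto range = inner_index.equal_range(r.first);   // 用索引直接定位，代替内关系的全表扫描
+    for (auto it = range.first; it != range.second; ++it) {
+      result.emplace_back(r.second, it->second);
+    }
+  }
+  return result;
+}
+```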
+ +### 4.4.3 排序-合并连接 + +排序-合并连接算法常用于等值连接,尤其适合参与连接的表已经排好序的情况。其方法如下: + +第一步:如果参与连接的表没有排好序,则根据连接属性排序; + +第二步:sorted\_merge: + +(1) 初始化两个指针,分别指向两个关系的第一个元组; + +(2) 比较两个关系的当前元组(当前元组=指针指向的元组); + +(3) 如果匹配,保留匹配的结果,两个指针均后移一个位置; + +(4) 如果不匹配,就将指向较小元组的那个指针后移一个位置; + +(5) 重复步骤(2)、(3)、(4),直到其中一个关系的指针移动到末尾。 + +![图4-4 排序-合并连接示意图](images/4-4.png) + +
图4-4 排序-合并连接示意图
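+
+第二步的归并过程对应如下的双指针示意代码。这里假设两个输入已按连接键升序排序且键无重复；若存在重复键，匹配处还需做小范围的嵌套比较，代码中从略：
+
+```cpp
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+// 排序-合并连接的归并阶段（示意）：对应正文的步骤(1)~(5)
+std::vector<std::pair<int64_t, int64_t>> merge_join(const std::vector<int64_t> &R,
+                                                    const std::vector<int64_t> &S)
+{
+  std::vector<std::pair<int64_t, int64_t>> result;
+  size_t i = 0, j = 0;                      // (1) 两个指针分别指向两个关系的第一个元组
+  while (i < R.size() && j < S.size()) {    // (5) 任一关系扫描到末尾即结束
+    if (R[i] == S[j]) {                     // (2)(3) 匹配：保留结果，两个指针均后移
+      result.emplace_back(R[i], S[j]);
+      i++;
+      j++;
+    } else if (R[i] < S[j]) {               // (4) 不匹配：指向较小元组的指针后移
+      i++;
+    } else {
+      j++;
+    }
+  }
+  return result;
+}
+```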
+ +因为两个关系都是已排序的,不需要"回头去找",所以此方法的时间复杂度为O(n+m)。如果两个关系还需要排序,则还要考虑排序的成本:O(n\*Log(n) + m\*Log(m))。 + +很多情况下,参与连接的数据集已经排好序了,比如:表内部就是有序的,或者参与连接的是查询中已经排好序的中间结果,那么选用排序-合并算法是比较合适的。 + +### 4.4.4 散列连接 + +散列连接算法也是适用于等值连接的算法。 + +散列连接分成两个阶段:第一步,划分阶段,为较小的关系建立hash表,将连接属性作为hash码;第二步,试探阶段,对另一张表的连接属性用同样的hash函数进行散列,将其与相应桶中匹配的元组连接起来。 + +本算法要求内存足够大,小表的hash表如果能全部放进内存,则效果较好。 + +![图 4-5 散列连接示意图](images/4-5.png) + +
图 4-5 散列连接示意图
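+
+上述两个阶段可以写成如下示意代码：先为较小的关系建立散列表（建表阶段），再用另一个关系的连接属性逐行试探（试探阶段）。这里直接用 std::unordered_multimap 代替真实系统中按桶划分、可能落盘的散列结构，Row 等类型为本文假设：
+
+```cpp
+#include <cstdint>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+struct Row {
+  int64_t     join_key;
+  std::string payload;
+};
+
+// 散列连接（示意）：build 为较小的关系，probe 为另一个关系
+std::vector<std::pair<Row, Row>> hash_join(const std::vector<Row> &build,
+                                           const std::vector<Row> &probe)
+{
+  // 阶段一：以连接属性为散列码，为小表建立散列表
+  std::unordered_multimap<int64_t, Row> table;
+  for (const Row &b : build) {
+    table.emplace(b.join_key, b);
+  }
+  // 阶段二：对另一张表的连接属性做同样的散列，在相应桶中寻找匹配元组
+  std::vector<std::pair<Row, Row>> result;
+  for (const Row &p : probe) {
+    auto range = table.equal_range(p.join_key);
+    for (auto it = range.first; it != range.second; ++it) {
+      result.emplace_back(it->second, p);
+    }
+  }
+  return result;
+}
+```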
+ +在时间复杂度方面需要做些假设来简化问题: + +(1) 内关系被划分成 X 个散列桶。散列函数几乎均匀地分布每个关系内数据的散列值,即散列桶大小一致。 + +(2) 外关系的元素与散列桶内所有元素的匹配,成本是散列桶内元素的数量。 + +算法的开销包括创建散列表的成本(m) +散列函数的计算开销\*n + (m/X) \* n。如果散列函数创建的散列桶的规模足够小,则算法复杂度为O(m+n)。 + +### 4.4.5 连接算法的选择 + +具体情况下,应该选择以上哪种连接算法,有许多因素要考量: + +(1) 空闲内存:没有足够的内存就无法使用内存中的散列连接。 + +(2) 两个数据集的大小。比如,如果一个大表连接一个很小的表,那么嵌套循环连接就比散列连接快,因为后者有创建散列表的高昂成本;如果两个表都非常大,那么嵌套循环连接的CPU成本就很高。 + +(3) 是否有索引:如果连接属性上有两个B+树索引的话,合并连接会是很好的选择。 + +(4) 关系是否已经排序:这时候合并连接是最好的选择。 + +(5) 结果是否需要排序:即使参与连接的是未排序的数据集,也可以考虑使用成本较高的合并连接(带排序的),比如得到排序的结果后,我们还可以将它用于另一个合并联接,或者查询中存在ORDER BY/GROUP BY/DISTINCT等操作符,它们隐式或显式地要求一个排序结果。 + +(6) 连接的类型:是等值连接?还是内连接?外连接?笛卡尔积?或者自连接?有些连接算法在某些情况下是不适用的。 + +(7) 数据的分布:如果连接条件的数据是倾斜的,用散列连接不是好的选择,因为散列函数将产生分布极不均匀的散列桶。 + +(8) 多表连接:连接顺序的选择很重要。 + +另外,还可能考虑实现方式问题,比如连接操作使用多线程或多进程的代价考量。因此,DBMS需要通过查询优化器来选择恰当的执行计划。 + +## 4.5 表达式计算 + +如何计算包含多个运算步骤的关系代数表达式?有两种方法:物化计算和流水线计算。 + +### 4.5.1 物化计算 + +物化计算以适当的顺序每次执行一次操作;每次计算的结果被物化到一个临时关系以备后用。其缺点为:需要构造临时关系,而且这些临时关系必须写到磁盘上(除非很小)。 + +表达式的执行顺序可以依据表达式在查询树中的层次而定,从树的底部开始。 + +![图4-6 一棵查询树](images/4-6.png) + +
图4-6 一棵查询树
+ +如图4-6所示,此例中只有一个底层运算:department上的选择运算,底层运算的输入是数据库中的关系department。用前面提到的算法执行树中的运算,并将结果存储在临时关系中。在树的高一层中,使用这个临时关系来进行计算,这时输入的要么是临时关系,要么是一个数据库关系。通过重复这一过程,最终可以计算位于树的根节点的运算,从而得到表达式的最终结果。 + +由于运算的每个中间结果会被物化用于下一层的运算,此方法称为物化计算。物化计算的代价不仅是那些所涉及的运算代价的总和,还可能包括将中间结果写到磁盘的代价。 + +### 4.5.2 流水线计算 + +流水线计算可同时计算多个运算,运算的结果传递给下一个,而不必保存临时关系。这种方法通过减少查询执行中产生的临时文件的数量,来提高查询执行的效率。 + +如图4-6中,可以将选择、连接操作和投影操作组合起来,放入一条流水线,选择得到一个结果传给连接、连接产生一个结果元组马上传送给投影操作去做处理,避免中间结果的创建,从而直接产生最终结果。 + +创建一个操作的流水线可以带来的好处是: + +(1) 消除读和写临时关系的代价,从而减少查询计算代价。 + +(2) 流水线产生查询结果,边生成边输出给用户,提高响应时间。 + +流水线可按两种方式来执行: + +方式一:需求驱动方式,在操作树的顶端的将数据往上拉。 + +方式二:生产者驱动方式,将数据从操作树的底层往上推。 + +需求驱动的流水线方法比生产者驱动的流水线方法使用更广泛,因为它更容易实现。但流水线技术限制了能实现操作的可用算法。例如,若连接运算的左端输入来自流水线,则不能使用排序-合并连接,但可以用索引连接算法。由于这些限制,并非所有情况下流水线方法的代价都小于物化方法。 + diff --git a/docs/lectures/lecture-5.md b/docs/lectures/lecture-5.md new file mode 100644 index 0000000000000000000000000000000000000000..6951f35df093735dfe8293e8b8c2786e40814b14 --- /dev/null +++ b/docs/lectures/lecture-5.md @@ -0,0 +1,664 @@ + +# 第5章 查询优化 + +## 5.1 查询优化概述 + +查询优化即求解给定查询语句的高效执行计划的过程。它既是关系数据库管理系统实现的关键技术,又是关系系统的优点所在。由DBMS进行查询优化的好处在于:查询优化的优点不仅在于用户不必考虑如何最好的表达查询以获得较高的效率,而且在于系统可以比用户程序的"优化"做得更好。 + +查询计划,从形式上看是一颗二叉树,树叶是每个单表对象,两个树叶的父节点是一个连接操作符连接后的中间结果(另外还有一些其他节点如排序等也可以作为中间结果),这个结果是一个临时关系,这样直至根节点。 + +从一个查询计划看,涉及的主要"关系节点"包括: + +- 单表节点:考虑单表的获取方式(全表扫描,或索引获取,或索引定位再I/O到数据块获取数据)。这是一个物理存储到内存解析成逻辑字段的过程。 +- 两表节点:考虑两表以何种方式连接,代价有多大,连接路径有哪些等。表示内存中的元组如何进行元组间的连接。此时,元组通常已经存在于内存中。这是一个完整用户语义的逻辑操作,但只是局部操作,只涉及两个具体的关系。完成用户全部语义,需要配合多表的连接顺序的操作。 +- 多表中间节点:考虑多表连接顺序如何构成代价最少的"执行计划"。决定连接执行的顺序。 + +查询优化的总目标是选择有效的策略,求得给定关系表达式的值,使得查询代价较小。因为查询优化的搜索空间有时非常大,实际系统选择的策略不一定是最优的,而是较优的。 + +查询优化主要包括逻辑优化和物理优化。其中,逻辑优化又可包含语法级查询优化、基于规则的优化等;而物理优化主要指基于代价的优化。语法级优化是基于语法的等价转换;基于规则的优化(如依据关系代数的规则或依据经验的规则等)具有操作简单且能快速确定执行方式的优点,但这种方法只是排除了一部分不好的可能;基于代价的优化是在查询计划生成过程中,计算每条存取路径进行量化比较,从而得到开销最小的情况,但如果组合情况多则开销的判断时间就很多。查询优化器的实现,多是这两种优化策略的组合使用。 + +## 5.2 逻辑优化 + +查询优化器在逻辑优化阶段主要解决的问题是:如何找出SQL语句的等价变换形式,使SQL执行更高效。 + +### 5.2.1代数优化 + +代数优化是基于关系代数等价变换规则的优化方法。 + +代数优化策略是通过对关系代数表达式的等价变换来提高查询效率。所谓关系代数表达式的等价是指用相同的关系代替两个表达式中相应的关系所得到的结果是相同的。两个关系表达式E1和E2是等价的。 + +#### 5.2.1.1 关系代数表达式等价变换规则 + +常用的关系代数等价变换规则如下: + +1. **连接、笛卡尔积的交换律** + +设E1和E2为关系代数表达式,F为连接运算条件,则有: + +​ E1×E2 ≡ E2×E1 + +​ E1⋈E2 ≡ E2⋈E1 + +​ ![5.2.1.1-1](images/5.2.1.1-1.png) ≡ ![5.2.1.1-2](images/5.2.1.1-2.png) + +对于连接和笛卡尔积运算,可以交换前后位置,其结果不变。例如,两表连接算法中有嵌套循环连接算法,对外表和内表有要求,外表尽可能小则有利于做"基于块的嵌套循环连接",所以通过交换律可以将元组少的表作为外表。 + +1. **连接、笛卡尔积结合律** + +设E1、E2、E3为关系代数表达式,F1、F2为连接运算条件。则有: + +​ (E1×E2)×E3 ≡ E1×(E2×E3) + +​ (E1⋈E2)⋈E3 ≡ E1⋈(E2⋈E3) + +​ ![5.2.1.1-3](images/5.2.1.1-3.png) ≡ ![5.2.1.1-4](images/5.2.1.1-4.png) + +对于连接、笛卡尔积运算,如果新的结合有利于减少中间关系的大小,则可以优先处理。 + +1. **投影的串接定律** + +设E为关系代数表达式,Ai(i=1,2,3,…,n),Bj(j=1,2,3,…,m)是属性名,且{A1,A2,…,An}为{B1,B2,…,Bm}的子集。则有: + +​ ∏A1,A2,…,An(∏B1,B2,…,Bm(E)) ≡ ∏A1,A2,…,An (E) + +在同一个关系上,只需做一次投影运算,且一次投影时选择多列同时完成。所以许多数据库优化引擎会为一个关系收集齐该关系上的所有列,即目标列和WHERE、GROUP BY等子句中涉及到的所有该关系的列。 + +1. **选择的串接律** + +设E为关系代数表达式,F1、F2为选择条件。则有: + +​ σF1F2(E)) ≡ σF1F2(E) + +此变换规则对于优化的意义在于:选择条件可以合并,使得一次选择运算就可检查全部条件,而不必多次过滤元组,所以可以把同层的合取条件收集在一起,统一进行判断。 + +1. **选择和投影的交换律** + +设E为关系代数表达式,F为选择条件,Ai(i=1,2,3,…,n)是属性名。选择条件F只涉及属性A1,A2,…,An。则有: + +​ σF(∏A1,A2,…,An (E)) ≡∏A1,A2,…,AnF(E)) + +此变换规则对于优化的意义在于:先投影后选择可以改为先选择后投影,这对于以行为单位来存储关系的主流数据库而言,很有优化意义。按照这种存储方式,系统总是先获取元组,然后才能解析得到其中的列。 + +设E为关系代数表达式,F为选择条件,Ai(i=1,2,3…,n)是属性名,选择条件F中有不属于A1,A2,…,An的属性B1,B2,…,Bn。则有: + +​ ∏A1,A2,…,AnF(E)) ≡ ∏A1,A2,…,AnF(∏A1,A2,…,An,B1,B2,…,Bm(E))) + +此变换规则对于优化的意义在于:先选择后投影可以改为先做带有选择条件中的列的投影,然后选择,最后再完成最外层的投影。这样内层的选择和投影可以同时进行,不会增加过多的计算开销,但能减小中间结果集的规模。 + +1. 
**选择与笛卡尔积的交换律** + +设E1、E2为关系代数表达式,F为选择条件,F中涉及的属性都是E1中的属性,则有: + +​ σF(E1×E2) ≡ σF(E1)×E2 + +如果F=F1∧F2,且F1只涉及E1中的属性,F2只涉及E2中的属性,则有: + +​ σF(E1×E2) ≡ σF1(E1)×σF2(E2) + +此变换规则对于优化的意义在于:条件下推到相关的关系上,先做选择后做笛卡尔积运算,这样可以减小中间结果的大小。 + +1. **选择与并的分配律** + +如果E1和E2有相同的属性名,且E= E1∪E2,则有: + +​ σF(E1∪E2) ≡ σF(E1) ∪σF (E2) + +此变换规则对于优化的意义在于:条件下推到相关的关系上,先选择后做并运算,可以减小每个关系输出结果的大小。 + +1. **选择与差的分配律** + +如果E和E2有相同的属性名,则: + +​ σF(E1-E2) ≡ σF(E1)-σF(E2) + +此变换规则对于优化的意义在于:条件下推到相关的关系上,先选择后做差运算,可以减小每个关系输出结果的大小。 + +1. **投影与笛卡尔积的交换律** + +设A1,A2,…,An是E1的属性,B1,B2,…,Bm是E2的属性,则有: + +​ ∏A1,A2,…,An,B1,B2,…,Bm(E1×E2) ≡ ∏A1,A2,…,An(E1)×∏B1,B2,…,Bm(E2) + +此变换规则对于优化的意义在于:先投影后做笛卡尔积,可减少做笛卡尔积前每个元组的长度,使得计算后得到的新元组的长度也变短。 + +1. **投影与并的交换律** + +如果E1和E2有相同的属性名,则有: + +​ ∏A1,A2,…,An (E1∪E2) ≡ ∏A1,A2,…,An (E1)∪∏A1,A2,…,An (E2) + +此变换规则对于优化的意义在于:先投影后做并运算,可减少做并运算前每个元组的长度。 + +#### 5.2.1.2 针对不同运算符的优化规则 + +针对不同运算符的优化规则如表5-1~5-3所示。 + +
表5-1 运算符主导的优化
+
+| 运算符 | 子类型 | 根据特点可得到的优化规则 | 可优化的原因 |
+| --- | --- | --- | --- |
+| 选择 |  | 对同一个表的同样选择条件，作一次即可。 | 幂等性：多次应用同一个选择有同样效果；交换性：应用选择的次序在最终结果中没有影响；选择可有效减少在它的操作数中的元组数的运算（元组个数减少）。 |
+| 选择 | 分解有复杂条件的选择 | 合取：合并多个选择为更少的需要求值的选择，多个等式则可以合并①。 | 合取的选择等价于针对这些单独条件的一系列选择。 |
+| 选择 | 分解有复杂条件的选择 | 析取：分解它们使得其成员选择可以被移动或单独优化②。 | 析取的选择等价于选择的并集。 |
+| 选择和笛卡尔积 |  | 尽可能先做选择。 | 运算关系分别有N和M行，先做积运算将包含N×M行。先做选择运算减少N和M，则可避免不满足条件的元组参与积运算，节约时间同时减少结果集的大小。 |
+| 选择和笛卡尔积 |  | 尽可能下推选择。 | 如果积运算后面没有跟随选择运算，可以尝试使用其它规则从表达式树更高层下推选择。 |
+| 选择和集合运算 |  | 选择下推到集合运算中，如表5-2中的3种情况。 | 选择在差集、交集和并集算子上满足分配律。 |
+| 选择和投影 |  | 在投影之前进行选择。 | 如果选择条件中引用的列是投影中的列的子集，则选择与投影满足交换性。 |
+| 投影 | 基本投影性质 | 尽可能先做投影。 | 投影是幂等的；投影可以减少元组大小。 |
+| 投影和集合运算 |  | 投影下推到集合的运算中，如表5-3中的情况。 | 投影在差集、交集和并集算子上满足分配律。 |
+ + +1. 如WHERE A.a=B.b AND B.b=C.c可以合并为={A.a,B.b,C.c}而不是两个等式={A.a,B.b}和={B.b,C.c}。 +1. 如WHERE A.a=3 OR A.b\>8,如果A.a、A.b列上分别有索引,也许SELECT \* FROM A WHERE A.a=3 UNION SELECT \* FROM A WHERE A.b>8可以分别利用各自的索引提高查询效率。 + +表5-2 选择下推到集合的运算 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| 初始式 | 优化后的等价表达式一 | 优化后的等价表达式二 | 优化后的等价表达式三 |
+| --- | --- | --- | --- |
+| σA(R-S) | σA(R)-σA(S) | σA(R)-S |  |
+| σA(R∪S) | σA(R)∪σA(S) |  |  |
+| σA(R∩S) | σA(R)∩σA(S) | σA(R)∩S | R∩σA(S) |
+ +表5-3 投影下推到集合的运算 + +| **初始式** | **优化后的等价表达式** | +| --- | --- | +| ∏A1,A2,…,An(R-S) | ∏A1,A2,…,An(R)- ∏A1,A2,…,An(S) | +| ∏A1,A2,…,An(R∪S) | ∏A1,A2,…,An(R) ∪∏A1,A2,…,An(S) | +| ∏A1,A2,…,An(R∩S) | ∏A1,A2,…,An(R) ∩∏A1,A2,…,An(S) | + +#### 5.2.1.3 查询树启发式规则 + +包括: + +1. 选择运算应尽可能先做。 +2. 把投影运算和选择运算同时进行。如有若干投影和选择运算,并且它们都对同一个关系操作,则可以在扫描次关系的同时完成所有这些运算以避免重复扫描关系。 +3. 把投影同其前或后的双目运算结合起来,没有必要为了去掉某些字段而扫描一遍关系。 +4. 把某些选择同在它前面要执行的笛卡尔积结合起来称为一个连接运算。连接(特别是等值连接)运算比笛卡尔积性能高很多。 +5. 找出公共子表达式,将其计算结果缓存起来,避免重复计算。 + +### 5.2.2 语法级查询优化 + +语法级优化要解决的主要问题是找出SQL语句的等价变换形式,使得SQL执行更高效,包括: + +- 子句局部优化。如等价谓词重写、where和having条件简化等。 +- 关联优化。如子查询优化、连接消除、视图重写等。 +- 形式变化优化。如嵌套连接消除等。 + +以下介绍几种常见的优化方法。 + +#### 5.2.2.1 子查询优化 + +早期的查询优化器对子查询都采用嵌套执行的方式,即对父查询中的每一行都执行一次子查询,这样效率很低,因此对其进行优化很有必要。例如,将子查询转为连接操作之后,有如下好处: + +- 子查询不用多次执行; +- 优化器可以根据统计信息来选择不同的连接方法和不同的连接顺序; +- 子查询中的连接条件、过滤条件分别变成了父查询的连接条件和过滤条件,优化器可以对这些条件进行下推,以提高执行效率。 + +1. **常见子查询优化技术** + +**(1)** **子查询合并** + +在语义等价条件下,多个子查询可以合并成一个子查询,这样多次表扫描,多次连接减少为单次表扫描和单次连接。例如: + +```sql +SELECT * +FROM t1 +WHERE a1<10 AND ( +EXISTS (SELECT a2 FROM t2 WHERE t2.a2<5 AND t2.b2=1) OR +EXISTS (SELECT a2 FROM t2 WHERE t2.a2<5 AND t2.b2=2) +); +``` + +可优化为: + +```sql +SELECT * +FROM t1 +WHERE a1<10 AND ( +EXISTS (SELECT a2 FROM t2 WHERE t2.a2<5 AND (t2.b2=1 OR t2.b2=2) +); +``` + +此例中,两个EXISTS子查询合并为一个子查询,查询条件也进行了合并。 + +**(2)** **子查询展开** + +子查询展开又称子查询反嵌套,子查询上拉。实质是把某些子查询重写为等价的多表连接操作。带来好处是,有关的访问路径、连接方法和连接顺序可能被有效使用,使得查询语句的层次尽可能地减少。常见的IN / ANY / SOME / ALL / EXISTS依据情况转为半连接(SEMI JOIN)。例如: + +```sql +SELECT * +FROM t1, (SELECT * FROM t2 WHERE t2.a2>10) v_t2 +WHERE t1.a1<10 AND v_t2.a2<20; +``` + +可优化为: + +```sql +SELECT * +FROM t1, t2 +WHERE t1.a1<10 AND t2.a2<20 AND t2.a2>10; +``` + +此例中,原本的子查询变为了t1、t2表的连接操作,相当于把t2表从子查询中上拉了一层。 + +子查询展开是一种最常用的子查询优化技术,如果子查询是只包含选择、投影、连接操作的简单语句,没有聚集函数或者group子句,则可以上拉,前提是上拉后的结果不能带来多余元组,需遵循以下规则: + +- 如果上层查询结果没有重复(select包含主键),则可以展开子查询,并且展开后的查询的select子句前应加上distinct标志; +- 如果上层查询的select语句中有distinct标志,则可以直接子查询展开; +- 如果内层查询结果没有重复元组,则可以展开。 + +子查询展开的具体步骤如下: + +1. 将子查询和上层查询的from子句连接为同一个from子句,并且修改相应的运行参数; +2. 将子查询的谓词符号进行相应修改(如IN修改为=ANY); +3. 将子查询的where条件作为一个整体与上层查询的where条件进行合并,并用and连接,从而保证新生成的谓词与原谓词的语义相同,成为一个整体。 + +**(3)** **聚集子查询消除** + +这种方法将聚集子查询的计算上推,使得子查询只需计算一次,并与父查询的部分或全表做左外连接。例如: + +```sql +SELECT * +FROM t1 +WHERE t1.a1 > (SELECT avg(t2.a2) FROM t2); +``` + +可优化为: + +```sql +SELECT t1.* +FROM t1, (SELECT avg(t2.a2) FROM t2) as tm(avg_a2) ) +WHERE t1.a1 ? tm.avg_a2; +``` + +**(4)** **其他** + +此外还有利用窗口函数消除子查询、子查询推进等技术,本文不再细述。 + +1. **针对不同类型子查询的优化方法** + +**(1) IN类型子查询** + +IN类型有3种格式: + +格式一: + +```sql +outer_expr [not] in (select inner_expr from ... where subquery_where) +``` + +格式二: + +```sql +outer_expr = any (select inner_expr from ... where subquery_where) +``` + +格式三: + +```sql +(oe_1, ..., oe_N) [not] in (select ie_1, ..., ie_N from ... where subquery_where) +``` + +对于in类型子查询的优化,如表5-4所示。 + +
表5-4 IN类型子查询优化的几种情况
+ +![5.2.2.1-1](images/5.2.2.1-1.png) + +情况一:outer\_expr和inner\_expr均为非NULL值。 + +优化后的表达式为: + +```sql +exists (select 1 from ... where subquery_where and outer_expr=inner_expr) +``` + +子查询优化需要满足2个条件: + +- outer\_expr和inner\_expr不能为NULL; + +- 不需要从结果为FALSE的子查询中区分NULL。 + +情况二:outer\_expr是非空值。 + +优化后的表达式为: + +```sql +exists (select 1 from ... where subquery_where and +(outer_expr=inner_expr or inner_expr IS NULL); +``` + +情况三:outer\_expr为空值。 + +则原表达式等价为: + +```sql +NULL in (select inner_expr FROM ... where subquery_where) +``` + +当outer\_expr为空时,如果子查询结果为: + +- NULL,select语句产生任意行数据; +- FALSE,select语句不产生数据。 + +对上面的等价形式,还有2点需说明: + +- 谓词IN等价于=ANY。如:以下2条SQL语句是等价的。 + +```sql +select col1 from t1 where col1 =ANY (select col1 from t2); +select col1 from t1 where col1 IN (select col1 from t2); +``` + +- 带有IN谓词的子查询,如果满足上述3种情况,可做等价变换,把外层条件下推到子查询中,变形为EXISTS类型的逻辑表达式判断。而EXISTS子查询可以被半连接算法实现优化。 + +**(2) ALL/ANY/SOME类型子查询** + +ALL/ANY/SOME子查询格式如下: + +```sql +outer_expr operator ALL (subquery) +outer_expr operator ANY (subquery) +outer_expr operator SOME (subquery) +``` + +其中,operator是操作符,可以是>、>=、=、<、<=中任何一个。其中, + +- =ANY与IN含义相同,可采用IN子查询优化方法; +- SOME与ANY含义相同; +- NOT IN 与 <>ALL含义相同; + +如果子查询中没有group by子句,也没有聚集函数,则以下表达式可以使用聚集函数MAX/MIN做等价转换: + +- `val>=ALL (select ...)` 等价变换为:`val>= (select MAX...)` +- `val<=ALL (select ...)` 等价变换为:`val<= (select MAX...)` +- `val>=ANY (select ...)` 等价变换为:`val>= (select MIN...)` +- `val>=ANY (select ...)` 等价变换为:`val>= (select MAX...)` + +**(3) EXISTS类型子查询** + +存在谓词子查询格式为:[NOT] EXISTS (subquery) + +需要注意几点: + +- EXISTS(subquery)值为TRUE/FALSE,不关心subquery返回的内容。 +- EXISTS(subquery)自身有"半连接"的语义,部分DBMS用半连接来实现它;NOT EXISTS通常会被标识为"反半连接"处理。 +- IN(subquery)等子查询可以被转换为EXISTS(subquery)格式。 + +所谓半连接(Semi Join),是一种特殊的连接类型。如果用"t1.x semi= t2.y"来表示表T1和表T2做半连接,则其含义是:只要在表T2中找到一条记录满足t1.x=t2.y,则马上停止搜索表T2,并直接返回表T1中满足条件t1.x=t2.y的记录,因此半连接的执行效率高于普通的内连接。 + +#### 5.2.2.2 等价谓词重写 + +等价谓词重写包括:LIKE规则、BETWEEN-AND规则、IN转换OR规则、IN转换ANY规则、OR转换ANY规则、ALL/ANY转换集函数规则、NOT规则等,相关原理比较简单,有兴趣的同学可以自行查找相关查询重写规则。 + +#### 5.2.2.3 条件化简 + +WHERE、HAVING和ON条件由许多表达式组成,而这些表达式在某些时候彼此间存在一定的联系。利用等式和不等式性质,可将WHERE、HAVING和ON条件简化,但不同数据库的实现可能不完全相同。 + +将WHERE、HAVING和ON条件简化的方式通常包括如下几个: + +1. 去除表达式中冗余的括号:以减少语法分析时产生的AND和OR树的层次; + +2. 常量传递:对不同关系可使用条件分离后有效实施"选择下推",从而减小中间关系的规模。如: + + `col1=col2 AND col2=3` 可化简为:`col1=3 AND col2=3` + + 操作符=、<、>、<=、>=、<>、LIKE中的任何一个,在`col1<操作符>col2`条件中都会发生常量传递 + +3. 消除死码。化简条件,将不必要的条件去除。如: + + `WHERE (0>1 AND s1=5)`, `0>1`使得`AND`为恒假,去除即可。 + +4. 表达式变换。化简条件(如反转关系操作符的操作数顺序),从而改变某些表的访问路径。如:-a=3可化简为a=-3,若a上有索引,则可利用。 +5. 不等式变换。化简条件,将不必要的重复条件去除。如: + + `a>10 AND b=6 AND a>2` 可化简为:`a>10 AND b=6`。 + +6. 布尔表达式变换。包括: + +- 谓词传递闭包。如:`a>b AND b>2`可推导出`a>2`,减少a、b比较元组数。 +- 任何一个布尔表达式都能被转换为一个等价的合取范式。一个合取项为假,则整个表达式为假。 + +## 5.3 物理优化 + +代数优化改变查询语句中操作的次序和组合,但不涉及底层的存取路径。物理优化就是要选择高效合理的操作算法或存取路径,求得优化的查询计划,达到查询优化的目标。 + +查询优化器在物理优化阶段,主要解决的问题是: + +- 从可选的单表扫描方式中,挑选什么样的单表扫描方式最优? +- 对于两表连接,如何连接最优? +- 对于多表连接,哪种连接顺序最优? +- 对于多表连接,是否需要对每种连接顺序都探索?如果不全部探索,如何找到一种最优组合? + +选择的方法可以是: + +1. 基于规则的启发式优化。 +2. 基于代价估算的优化。 +3. 
两者结合的优化方法。常常先使用启发式规则选取若干个较优的候选方案,减少代价估算的工作量,然后分别计算这些候选方案的执行代价,较快地选出最终的优化方法。 + +启发式规则优化是定性的选择,比较粗糙,但是实现简单而且优化本身的代价较小,适合解释执行的系统。因为解释执行的系统,其优开销包含在查询总开销之中,在编译执行的系统中,一次编译优化,多次执行,查询优化和查询执行是分开的,因此,可以用精细复杂一些的基于代价的优化方法。 + +### 5.3.1 基于代价的优化 + +#### 5.3.1.1 查询代价估算 + +查询代价估算基于CPU代价和I/O代价,计算公式如下: + +``` +总代价 = I/O代价 + CPU代价 +COST = P * a_page_cpu_time + W * T +``` + +其中: + +P是计划运行时访问的页面数,a\_page\_cpu\_time是每个页面读取的时间开销,其乘积反映了I/O开销。 + +T为访问的元组数,如果是索引扫描,还要考虑索引读取的开销,反映了数据读取到内存的CPU开销。 + +W为权重因子,表明I/O到CPU的相关性,又称选择率(selectivity),用于表示在关系R中,满足条件“A a”的元组数与R的所有元组数N的比值。 + +选择率在代价估算模型中占有重要地位,其精确程度直接影响最优计划的选取。选择率计算常用方法如下: + +1. 无参数方法:使用ad hoc(点对点)数据结构或直方图维护属性值的分布,直方图最常用; +2. 参数法:使用具有一些自由统计参数(参数是预先估计出来的)的数学分布函数逼近真实分布; +3. 曲线拟合法:为克服参数法的不灵活性,用一般多项式来标准最小方差来逼近属性值的分布; +4. 抽样法:从数据库中抽取部分样本元组,针对这些样本进行查询,然后收集统计数据; +5. 综合法:将以上几种方法结合起来,如抽样法和直方图法结合。 + +由于其中I/O代价占比最大,通常以I/O代价为主来进行代价估算。 + +1. 全表扫描算法的代价估算公式 + +- 如果基本表大小为 B 块,全表扫描算法的代价 cost = B; +- 如果选择条件是"码=值",则平均搜索代价 cost = B/2。 + +​ 2. 索引扫描算法的代价估算公式 + +- 如果选择条件为"码=值",则采用该表的主索引,若为B+树,设索引层数为L,需要存取B+树中从根节点到叶节点L块,再加上基本表中该元组所在的那一块,cost=L+1。 +- 如果选择条件涉及非码属性,若为B+树索引,选择条件是相等比较,S为索引选择基数(有S个元组满足条件),假设满足条件的元组保存在不同块上,则最坏情况下cost=L+S。 +- l 若比较条件为>,>=,<,<=,假设有一半元组满足条件,则需要存取一半的叶节点,并通过索引访问一半的表存储块,cost=L+Y/2+B/2。若可以获得更准确的选择基数,可进一步修正Y/2与B/2。 + +​ 3.嵌套循环连接算法的代价估算公式 + +- 嵌套循环连接算法的代价为:cost=Br+BrBs/(K-1), 且Kr+BrBs/(K-1) + (Frs\*Nr\*Ns)/Mrs。Frs为连接选择率,表示连接结果数的比例,Mrs为块因子,表示每块中可以存放的结果元组数目。 + +​ 4.排序合并连接算法的代价估算公式 + +- 如 果 连 接 表 已 经 按 照 连 接 属 性 排 好 序 , 则 cost =Br+Bs+(Frs\*Nr\*Ns)/Mrs。 +- 如果必须对文件排序,需要在代价函数中加上排序的代价对 于 包 含 B 个 块 的 文 件 排 序 的 代 价 大 约 是:cost =(2\*B)+(2\*B\*log2B)。 + +#### 5.3.1.2 基于代价的连接顺序选择 + +多表连接算法实现的是在查询路径生成的过程中,根据代价估算,从各种可能的候选路径中找出最优的路径。它需要解决两个问题: + +- 多表连接的顺序 +- 多表连接的搜索空间:N个表的连接可能有N!种连接组合,这可能构成一个巨大的搜索空间。如何将搜索空间限制在一个可接受的范围内,并高效生成查询执行计划将成为一个难点。 + +多表间的连接顺序表示了查询计划树的基本形态。在1990年,Schneder等人在研究查询树模型时提出了左深树,右深树和紧密树3种形态,如图5-1所示。 + +图5-1 三种树的形态 + +
图5-1 三种树的形态
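+
+在讨论连接顺序的枚举之前,先把上文5.3.1.1节中两个连接算法的代价公式用一小段C++代码直接翻译出来,便于对照理解。其中的结构体与变量命名只是示意,并非本仓库observer中的实际实现:
+
+```cpp
+#include <cmath>
+
+// 连接代价估算的输入参数(与5.3.1.1节公式中的符号一一对应)
+struct JoinCostInput {
+  double Br;   // 外表R占用的块数
+  double Bs;   // 内表S占用的块数
+  double Nr;   // 表R的元组数
+  double Ns;   // 表S的元组数
+  double K;    // 可用的内存缓冲块数
+  double Frs;  // 连接选择率
+  double Mrs;  // 块因子:每块可存放的连接结果元组数
+};
+
+// 嵌套循环连接:cost = Br + Br*Bs/(K-1),若结果需写回磁盘再加 (Frs*Nr*Ns)/Mrs
+double nested_loop_join_cost(const JoinCostInput &in, bool write_result) {
+  double cost = in.Br + in.Br * in.Bs / (in.K - 1);
+  if (write_result) {
+    cost += in.Frs * in.Nr * in.Ns / in.Mrs;
+  }
+  return cost;
+}
+
+// 排序合并连接:两表已按连接属性排好序时 cost = Br + Bs + (Frs*Nr*Ns)/Mrs;
+// 若某个表还需要排序,再加上约 2*B + 2*B*log2(B) 的排序代价
+double sort_merge_join_cost(const JoinCostInput &in, bool r_sorted, bool s_sorted) {
+  double cost = in.Br + in.Bs + in.Frs * in.Nr * in.Ns / in.Mrs;
+  if (!r_sorted) {
+    cost += 2 * in.Br + 2 * in.Br * std::log2(in.Br);
+  }
+  if (!s_sorted) {
+    cost += 2 * in.Bs + 2 * in.Bs * std::log2(in.Bs);
+  }
+  return cost;
+}
+```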
+ +即使是同一种树的生成方式,也有细节需要考虑。如图5-1-a中{A,B}和{B,A}两种连接方式开销可能不同。比如最终连接结果{A,B,C}则需要验证比较6种连接方式,找出最优的一种作为下次和其他表连接的依据。 + +多表连接搜索最优查询树,有很多算法,如启发式、分枝界定计划枚举、贪心、动态规划、爬山法、System R优化方法等。其中,常用算法如下。 + +1. **动态规划** + + 在数据库领域,动态规划算法主要解决多表连接的问题。它是自底向上进行的,即从叶子开始做第一层,然后开始对每层的关系做两两连接(如果满足内连接进行两两连接,不满足则不可对全部表进行两两连接),构造出上层,逐次递推到树根。以下介绍具体步骤: + + 初始状态:构造第一层关系,即叶子结点,每个叶子对应一个单表,为每一个待连接的关系计算最优路径(单表的最优路径就是单表的最佳访问方式,通过评估不同的单表的数据扫描方式代价,找出代价最小的作为每个单表的局部最优路径) + + 归纳:当第1层到第n-1层的关系已经生成,那么求解第n层的关系方法为:将第n-1层的关系与第一层中的每个关系连接,生成新的关系(对新关系的大小进行估算),放于第n层,且每一个新关系,均求解最优路径。每层路径的生成都是基于下层生成的最优路径,这满足最优化原理的要求。 + + 还有的改进算法,在生成第n层的时候,除了通过第n-1层和第一层连接外,还可以通过第n-2层和第二层连接...。 + + PostgreSQL查询优化器求解多表连接时,采用了这种算法。 + +2. 启发式算法 + + 启发式算法是相对最优化算法提出的,是一个基于直观或者经验构造的算法,不能保证找到最好的查询计划。在数据库的查询优化器中,启发式一直贯穿于整个查询优化阶段,在逻辑查询优化阶段和物理查询优化阶段,都有一些启发式规则可用。PostgreSQL,MySQL,Oracle等数据库在实现查询优化器时,采用了启发式和其他方式相结合的方式。 + + 物理查询优化阶段常用启发式规则如下: + + - 关系R在列X上建立索引,且对R的选择操作发生在列X上,则采用索引扫描方式; + - R连接S,其中一个关系上的连接列存在索引,则采用索引连接且此关系作为内表; + - R连接S,其中一个关系上的连接列是排序的,则采用排序连接比hash连接好。 + +3. 贪心算法 + + 贪心算法最后得到的是局部最优解,不一定全局最优,其实现步骤如下: + + (1) 初始,算法选出的候选对象集合为空; + + (2) 根据选择函数,从剩余候选对象中选出最有可能构成解的对象; + + (3) 如果集合中加上该对象后不可行,那么该对象就被丢弃并不再考虑; + + (4) 如果集合中加上该对象后可行,就加到集合里; + + (5) 扩充集合,检查该集合是否构成解; + + (6) 如果贪心算法正确工作,那么找到的第一个解通常都是最优的,可以终止算法; + + (7) 继续执行第二步。 + + MySQL查询优化器求解多表连接时采用了这种算法。 + +4. **System-R算法** + +对自底向上的动态规划算法进行了改进,主要思想是把子树的查询计划的最优查询计划和次优查询计划保留,用于上层的查询计划生成,以便使得查询计划总体上最优。 + +
表5-5 多表连接常用算法比较
+ +| **算法名称** | **特点与适用范围** | **缺点** | +| ------------ | ------------------------------------------------------------ | ---------------------------------- | +| 启发式算法 | 适用于任何范围,与其它算法结合,能有效提高整体效率 | 不知道得到的解是否最优 | +| 贪婪算法 | 非穷举类型的算法。适合解决较多关系的搜索 | 得到局部最优解 | +| 爬山法 | 适合查询中包含较多关系的搜索,基于贪婪算法 | 随机性强,得到局部最优解 | +| 遗传算法 | 非穷举类型的算法。适合解决较多关系的搜索 | 得到局部最优解 | +| 动态规划算法 | 穷举类型的算法。适合查询中包含较少关系的搜索,可得到全局最优解 | 搜索空间随关系个数增长呈指数增长 | +| System R优化 | 基于自底向上的动态规划算法,为上层提供更多可能的备选路径,可得到全局最优解 | 搜索空间可能比动态规划算法更大一些 | + +### 5.3.2 基于规则的优化 + +基于代价优化的一个缺点是优化本身的代价。因此,查询优化器使用启发式方法来减少优化代价。 + +- 选择操作的启发式规则: + +1) 对于小关系,全表扫描; + +2) 对于大关系: + +(1) 若选择条件是主码,则可以选择主码索引,因为主码索引一般是被自动建立的; + +(2) 若选择条件是非主属性的等职查询,并且选择列上有索引,如果选择比例较小(10%)可以使用索引扫描,否则全表扫描; + +(3) 若选择条件是属性上的非等值查询或者范围查询,同上; + +(4) 对于用and连接的合取选择条件,若有组合索引,优先用组合索引方法;如果某些属性上有一般索引,则用索引扫描,否则全表扫描; + +(5) 对于用OR连接的析取选择条件,全表扫描。 + +- 连接操作的启发式规则 + +1) 若两个表都已经按连接属性排序,则选用排序-合并算法; + +2) 若一个表在连接属性上有索引,则使用索引连接方法; + +3) 若其中一个表较小,则选用hash join; + +4) 最后可以使用嵌套循环,小表作为外表。 + +还有嵌套子查询优化、物化视图等多种优化手段,这里不再展开。 + diff --git a/docs/lectures/lecture-6.md b/docs/lectures/lecture-6.md new file mode 100644 index 0000000000000000000000000000000000000000..0389380a4fd34ccbd41c6324dbdb789db2ae34e7 --- /dev/null +++ b/docs/lectures/lecture-6.md @@ -0,0 +1,318 @@ + +# 第6章 事务处理 + +## 6.1 事务概念 + +在数据库系统中,事务是指由一系列数据库操作组成的一个完整的逻辑过程。数据库提供了增、删、改、查等几种基础操作,用户可以灵活地组合这几种操作来实现复杂的语义。在很多场景下,用户希望一组操作可以做为一个整体一起生效,这就是事务的产生背景。 + +例如,一个银行转帐业务,在数据库中需要通过两个修改操作来实现:1. 从账户A扣除指定金额;2. 向账户B添加指定金额。这两个操作构成了一个完整的逻辑过程,不可拆分。如果第一个操作成功而第二个操作失败,说明转账没有成功。在这种情况下,对于银行来说,数据库中的账户数据是处于一种不正确的状态的,必须撤销掉第一个操作对数据库的修改,让账户数据恢复到转账前的状态。由此例可见,事务是数据库状态变更的基本单元,在事务将数据库从一个正确状态变更到另一个正确状态的过程中,数据库的那些中间状态,既不应该被其他事务看到或干扰,也不应该在事务结束后依然保留。 + +根据以上描述的事务概念,事务应具有四个特性,称为事务的ACID特性。它们分别是: + +- **原子性** (Atomicity):一个事务中的所有操作,要么全做,要么全不做。事务如果在执行过程中发生错误,该事务修改过的数据应该被恢复到事务开始前的状态,就像这个事务从来没有执行过一样。 +- **一致性** (Consistency):当数据库只包含成功事务提交的结果时,称数据库处于一致性状态。事务执行的结果必须使数据库从一个一致性状态变到另一个一致性状态。由此可见,一致性与原子性是密切相关的。 +- **隔离性** (Isolation):一个事务的执行不能被其他事务干扰。DBMS允许多个并发事务同时执行,隔离性可以防止多个事务并发执行时由于相互干扰而导致数据的不一致。 +- **持久性** (Durability):事务处理结束后,对数据的修改就是永久的,即便系统故障也不会丢失。 + +在SQL中,开始和结束事务的语句如下: + +- BEGIN TRANSACTION:开始一个事务。除了用该语句显式地开始一个事务,DBMS也允许隐式的开始一个事务。隐式开始事务时无需执行任何语句,每当用户连接成功,即开始一个事务,前一个事务结束时,即自动开始下一个事务。 +- COMMIT:提交一个事务。此语句表示事务正常结束,DBMS应永久保存该事务对数据库的修改。 +- ROLLBACK:回滚一个事务。此语句表示事务异常结束,DBMS应撤销该事务对数据库的所有修改。需要注意的是,当事务发生故障时,即使用户没有显式执行ROLLBACK语句,DBMS也应自动回滚事务。 + +一个支持事务的DBMS必须能保证事务的ACID特性,这部分工作是由事务处理机制来负责的。事务处理机制又分为并发控制机制和故障恢复机制两部分,以下分别介绍。 + +## 6.2 并发控制 + +所谓并发操作,是指在多用户共享的数据库中,多个事务可能同时对同一数据进行操作。如果对这些操作不加控制,则可能导致数据的不一致问题。因此,为了保证事务的一致性和隔离性,DBMS需要对并发操作进行正确调度。这就是并发控制机制的任务。 + +### 6.2.1 并发错误 + +并发操作带来的数据不一致性包括丢失修改、读脏和不可重复读。 + +1. 丢失修改 + + 两个以上事务从数据库中读入同一数据并修改,其中一个事务(后提交的事务)的提交结果破坏了另一事务(先提交的事务)的提交结果,导致先提交的事务对数据库的修改被丢失。 + +2. 读脏 + + 事务读取了被其他事务修改且未提交的数据,即从数据库中读到了临时性数据。 + +3. 
不可重复读 + + 一个事务读取数据后,该数据又被另一事务修改,导致前一事务无法再现前一次的读取结果。 + + 不可重复读又可分为两种情况:一种情况是第一次读到的数据的值在第二次读取时发生了变化;还有一种情况是事务第二次按相同条件读取数据时,返回结果中多了或者少了一些记录。后者又被称为幻读。 + +### 6.2.2 并发控制的正确性标准 + +并发控制机制的任务就是对并发事务进行正确的调度,但是什么样的调度才是正确的呢?我们需要一个正确性的判断标准。 + +#### 6.2.2.1 可串行化 + +串行调度是指多个事务依序串行执行,仅当一个事务的所有操作执行完后才执行另一个事务。这种调度方式下,不可能出现多个事务同时访问同一数据的问题,自然也就不可能出现并发错误。串行调度显然是正确的,但是串行调度无法充分利用系统资源,因此其效率显然也是用户难以接受的。 + +并发调度是指在数据库系统中同时执行多个事务。DBMS对多个并发事务进行调度时,可能产生多个不同的调度序列,从而得到不同的执行结果。如何判断某个调度是不是正确呢?如果这些并发事务的执行结果与它们按某一次序串行执行的结果相同,则认为该并发调度是正确的,我们称之为可串行化调度。 + +#### 6.2.2.2 冲突可串行化 + +可串行化是并发控制的正确性准则。但是按照可串行化的定义,如果想要判断一个并发调度是不是可串行化调度,需要知道这批事务所有可能的串行调度的结果,然后将该并发调度的结果与这些结果进行比较,这显然是难以实施的。因此,我们需要一种可操作的判断标准,即冲突可串行化。 + +冲突可串行化是可串行化的充分条件。如果一个并发调度是冲突可串行化的,那么它一定是可串行化的。在定义冲突可串行化之前,需要先了解什么是冲突操作。 + +冲突操作是指不同的事务对同一个数据的读写操作或写写操作。例如,事务1对数据A的读操作"r1(A)"与事务2对数据A的写操作"w2(A)"就是一对冲突操作。 + +我们规定,不同事务的冲突操作和同一事务的两个操作是不能交换的。因为如果改变冲突操作的次序,则最后的数据库状态会发生变化。按照这个规定,在保证一个并发调度中的冲突操作次序不变的情况下,如果通过交换两个事务的非冲突操作,能够得到一个串行调度,则称该并发调度是冲突可串行化的。 + +例如,对于以下两个并发调度序列: + +SC1:r1(A) w1(B) r2(B) w1(C) w2(B) + +SC2:r1(B) r2(A) w1(A) w2(B) + +SC1就是冲突可串行化的,因为可以通过交换非冲突操作3和4得到一个串行调度序列。而SC2则是非冲突可串行化的,因为操作2和3是冲突操作,无法交换。 + +### 6.2.3 事务隔离级别 + +可串行化是一个很严格的正确性标准。在实际应用中,有时候可能会希望降低这个标准,通过牺牲一定的正确性,达到提高并发度的目的。为此,SQL标准将事务的隔离程度划分为四个等级,允许用户根据需要自己指定事务的隔离级。这四种隔离级包括读未提交(Read Uncommitted)、读提交(Read Committed)、可重复读(Repeatable Read)和可串行化(Serializable)。 + +1. 读未提交:在该隔离级别,事务可以看到其他未提交事务的执行结果,即允许读脏数据。 +2. 读提交:这是大多数DBMS的默认隔离级别,它要求事务只能看见已提交事务所做的修改,因此可以避免读脏数据。但是由于在某个事务的执行期间,同一个数据可能被另一个事务修改并提交,所以该事务对该数据的两次读取可能会返回不同的值,即出现不可重复读错误。 +3. 可重复读:在该隔离级别,同一事务多次读取同一数据时,总是会读到同样的值。不过理论上,该隔离级不能避免幻读,即使用相同条件多次读取时,满足读取条件的数据的数量可能有变化,比如多出一些满足条件的数据。 +4. 可串行化:这是最高的隔离级别,能够避免所有并发错误。可串行化的概念前面已经介绍过,此处不再赘述。 + +## 6.3 封锁机制 + +### 6.3.1什么是封锁 + +封锁机制是一种常用的并发控制手段,它包括三个环节:第一个环节是申请加锁,即事务在操作前对它要使用的数据提出加锁请求;第二个环节是获得锁,即当条件满足时,系统允许事务对数据加锁,使事务获得数据的控制权;第三个环节是释放锁,即完成操作后事务放弃数据的控制权。为了达到并发控制的目的,在使用时事务应选择合适的锁,并遵从一定的封锁协议。 + +基本的封锁类型有两种:排它锁(Exclusive Locks,简称X锁)和共享锁(Share Locks,简称S锁)。 + +1. 排它锁 + + 排它锁也称为独占锁或写锁。一旦事务T对数据对象A加上了排它锁(X锁),则其他任何事务不能再对A加任何类型的锁,直到T释放A上的锁为止。 + +2. 共享锁 + + 共享锁又称读锁。如果事务T对数据对象A加上了共享锁(S锁),其他事务对A就只能加S锁而不能加X锁,直到事务T释放A上的S锁为止。 + +### 6.3.2 封锁协议 + +简单地对数据加X锁和S锁并不能保证数据库的一致性。在对数据对象加锁时,还需要约定一些规则,包括何时申请锁、申请什么类型的锁、何时释放锁等,这些规则称为封锁协议。不同的规则形成了各种不同的封锁协议。封锁协议分三级,它们对并发操作带来的丢失修改、读脏和不可重复读等并发错误,可以在不同程度上予以解决。 + +1. 一级封锁协议 + + 一级封锁协议是指事务T在修改数据之前必须先对其加X锁,直到事务结束才释放。 + + 一级封锁协议可有效地防止丢失修改,并能够保证事务T的可恢复性。但是,由于一级封锁没有要求对读数据进行加锁,所以不能防止读脏和不可重复读。遵循一级封锁协议的事务可以达到读未提交的事务隔离级。 + +2. 二级封锁协议 + + 二级封锁协议是指事务T在修改数据之前必须先加X锁,直到事务结束才释放X锁;在读取数据之前必须先加S锁,读完后即可释放S锁。 + + 二级封锁协议不但能够防止丢失修改,还可进一步防止读脏。遵循二级封锁协议的事务可以达到读提交的事务隔离级。 + +3. 三级封锁协议 + + 三级封锁协议是事务T在读取数据之前必须先对其加S锁,在修改数据之前必须先对其加X锁,直到事务结束后才释放所有锁。 + + 由于三级封锁协议强调即使事务读完数据A之后也不释放S锁,从而使得别的事务无法更改数据A,所以三级封锁协议不但能够防止丢失修改和读脏,而且能够防止不可重复读。遵循三级封锁协议的事务至少可以达到可重复读的事务隔离级,至于是否能到达可串行化级别,则取决于S锁的粒度。比如,如果只对要读取的记录加锁,则无法避免幻读问题;但如果是对整个表加锁,则幻读问题可以避免,代价是并发度的下降。 + +### 6.3.3 封锁的实现 + +锁管理器可以实现为一个进程或线程,它从事务接受请求消息并反馈结果消息。对于事务的加锁请求消息,锁管理器返回授予锁消息,或者要求事务回滚的消息(发生死锁时);对于事务的解锁请求消息,只需返回一个确认消息,但可能触发锁管理器向正在等待该事务解锁的其他事务发送授予锁消息。 + +锁管理器使用以下数据结构: + +- 为目前已加锁的每个数据对象维护一个链表,链表中的每个结点代表一个加锁请求,按请求到达的顺序排序。一个加锁请求包含的信息有:提出请求的事务ID,请求的锁的类型,以及该请求是否已被授予锁。 +- 使用一个以数据对象ID为索引的散列表来查找数据对象(如果有的话),这个散列表叫做锁表。 + +图6-1是一个锁表的示例图,该表包含5个不同的数据对象14、17、123、144和1912的锁。锁表采用溢出链表示法,因此对于锁表的每一个表项都有一个数据对象的链表。每一个数据对象都有一个已授予锁或等待授予锁的事务请求列表,已授予锁的请求用深色阴影方块表示,等待授予锁的请求则用浅色阴影方块表示。 例如,事务T23在数据对象17和1912上已被授予锁,并且正在等待对数据对象14加锁。 + +![图6-1 一个锁表的示例图](images/6-1.png) + +
图6-1 一个锁表的示例图
+ +虽然图6-1没有标示出来,但对锁表还应当维护一个基于事务标识符的索引,这样它可以快速确定一个给定事务持有的锁的集合。 + +锁管理器这样处理请求: + +- 当一条加锁请求消息到达时,如果锁表中存在相应数据对象的链表,则在该链表末尾增加一个请求;否则,新建一个仅包含该请求的链表。对于当前没有加锁的数据对象,总是满足事务对其的第一次加锁请求,但当事务向已被加锁的数据对象申请加锁时,只有当该请求与当前持有的锁相容、并且所有之前的请求都已授予锁的条件下,锁管理器才为该请求授予锁,否则,该请求只能等待。 +- 当锁管理器收到一个事务的解锁消息时,它先找到对应的数据对象链表,删除其中该事务的请求,然后检查其后的请求,如果有,则看该请求能否被满足,如果能,锁管理器授权该请求,再按相同的方式处理后续的请求。 +- 如果一个事务被中止,锁管理器首先删除该事务产生的正在等待加锁的所有请求;当系统采取适当动作撤销了该事务后,该中止事务持有的所有锁也将被释放。 + +这个算法保证了锁请求无饿死现象,因为在先接收到的请求正在等待加锁时,后来的请求不可能获得授权。 + +为了避免消息传递的开销,在许多DBMS中,事务通过直接更新锁表来实现封锁,而不是向锁管理器发送请求消息。事务加锁和解锁的操作逻辑与上述锁管理器的处理方法类似,但是有两个明显的区别: + +- 由于多个事务可以同时访问锁表,因此必须确保对锁表的互斥访问。 +- 如果因为锁冲突而不能立刻获得锁,加锁事务需要知道自己何时可以被授予锁,解锁事务需要标记出那些可以被授予锁的事务并通知它们。这个功能可以通过操作系统的信号量机制来实现。 + +### 6.3.4 死锁处理 + +封锁机制有可能导致死锁,DBMS必须妥善地解决死锁问题,才能保障系统的正常运行。 + +如果事务T1和T2都需要修改数据Rl和R2,并发执行时Tl封锁了数据R1,T2封锁了数据R2;然后T1又请求封锁R2,T2又请求封锁Rl;因T2已封锁了R2,故T1等待T2释放R2上的锁。同理,因T1已封锁了R1,故T2等待T1释放R1上的锁。由于Tl和T2都没有获得全部需要的数据,所以它们不会结束,只能继续等待。这种多事务交错等待的僵持局面称为死锁。 + +一般来讲,死锁是不可避免的。DBMS的并发控制子系统一旦检测到系统中存在死锁,就要设法解除。通常采用的方法是选择一个处理死锁代价最小的事务,将其中止,释放此事务持有的所有的锁,使其他事务得以继续运行下去。当然,被中止的事务已经执行的所有数据修改操作都必须被撤销。 + +数据库中解决死锁问题主要有两类方法:一类方法是允许发生死锁,然后采用一定手段定期诊断系统中有无死锁,若有则解除之,称为死锁检测;另一类方法是采用一定措施来预防死锁的发生,称为死锁预防。 + +#### 6.3.4.1 死锁检测 + +锁管理器通过waits-for图记录事务的等待关系,如图6-2所示。其中结点代表事务,有向边代表事务在等待另一个事务解锁。当waits-for图出现环路时,就说明出现了死锁。锁管理器会定时检测waits-for图,如果发现环路,则需要选择一个合适的事务中止它。 + +![图6-2 waits-for图示例图](images/6-2.png) + +
图6-2 waits-for图示例图
+ +#### 6.3.4.2 死锁避免 + +当事务请求的锁与其他事务出现锁冲突时,系统为防止死锁,杀死其中一个事务。选择要杀死的事务时,一般持续越久的事务,保留的优先级越高。这种防患于未然的方法不需要waits-for图,但提高了事务被杀死的比率。 + +### 6.3.7 封锁粒度 + +封锁粒度是指封锁对象的大小。封锁对象可以是逻辑单元,也可以是物理单元。以关系数据库为例,封锁对象可以是属性值、属性值的集合、记录、表、直至整个数据库;也可以是一些物理单元,例如页(数据页或索引页)、块等。封锁粒度与系统的并发度及并发控制的开销密切相关。封锁的粒度越小,并发度越高,系统开销也越大;封锁的粒度越大,并发度越低,系统开销也越小。 + +如果一个DBMS能够同时支持多种封锁粒度供不同的事务选择,这种封锁方法称为多粒度封锁。选择封锁粒度时应该综合考虑封锁开销和并发度两个因素,选择适当的封锁粒度以求得最优的效果。通常,需要处理一个表中大量记录的事务可以以表为封锁粒度;需要处理多个表中大量记录的事务可以以数据库为封锁粒度;而对于只处理少量记录的事务,则以记录为封锁粒度比较合适。 + +## 6.4 故障恢复 + +故障恢复机制是在数据库发生故障时确保数据库一致性、事务原子性和持久性的技术。当崩溃发生时,内存中未提交到磁盘的所有数据都有丢失的风险。故障恢复的作用是防止崩溃后的信息丢失。 + +故障恢复机制包含两个部分: + +- 为了确保DBMS能从故障中恢复,在正常事务处理过程中需要执行的操作,如登记日志、备份数据等。 +- 发生故障后,将数据库恢复到原子性、一致性和持久性状态的操作。 + +### 6.4.1 故障分类 + +由于DBMS根据底层存储设备被划分为不同的组件,因此DBMS需要处理许多不同类型的故障。 + +1. 事务故障 + + 一个事务出现错误且必须中止,称其为事务故障。可能导致事务失败的两种错误是逻辑错误和内部状态错误。逻辑错误是指事务由于某些内部条件无法继续正常执行,如非法输入、找不到数据、溢出等;内部状态错误是指系统进入一种不良状态,使当前事务无法继续正常执行,如死锁。 + +2. 系统故障 + + 系统故障是指导致系统停止运转、需要重新启动的事件。系统故障可能由软件或硬件的问题引起。软件问题是指由于DBMS的实现问题(如未捕获的除零异常)导致系统不得不停止;硬件问题是指DBMS所在的计算机出现崩溃,如系统突然掉电、CPU故障等。发生系统故障时,内存中的数据会丢失,但外存数据不受影响。 + +3. 介质故障 + + 介质故障是指当物理存储损坏时发生的不可修复的故障,如磁盘损坏、磁头碰撞、强磁场干扰等。当存储介质失效时,DBMS必须通过备份版本进行恢复。 + + + +### 6.4.2 缓冲池管理策略 + +缓冲池管理策略是指,对于已提交和未提交的事务,它们在内存缓冲池中修改的数据页被写出到磁盘的时机。 + +对于已提交事务,存在两种策略: + +- FORCE:事务提交时必须强制将其修改的数据页写盘; +- NOFORCE:允许在事务提交后延迟执行写盘操作。 + +对于未提交事务,也存在两种策略: + +- STEAL:允许在事务提交前就将其修改的数据页写盘; +- NOSTEAL:不允许在事务提交前执行写盘操作。 + +对于恢复来说,FORCE+ NOSTEAL是最简单的策略,但是这种策略的一个缺点是要求内存能放下事务需要修改的所有数据,否则该事务将无法执行,因为DBMS不允许在事务提交之前将脏页写入磁盘。 + +从高效利用内存和降低磁盘I/O开销的角度出发,NOFORCE+ STEAL策略是最灵活的,这也是很多DBMS采用的策略。在这种策略下,一旦发生故障,恢复机制可能需要执行以下操作: + +- UNDO:发生故障时,尚未完成的事务的结果可能已写入磁盘,为保证数据一致性,需要清除这些事务对数据库的修改。 +- REDO:发生故障时,已完成事务提交的结果可能尚未写回到磁盘,故障使得这些事务对数据库的修改丢失,这也会使数据库处于不一致状态,因此应将这些事务已提交的结果重新写入磁盘。 + +为了保证在恢复时能够得到足够的信息进行UNDO和REDO,DBMS在事务正常执行期间需要登记事务对数据库所做的修改,这就是日志机制。 + +### 6.4.3 日志 + +#### 6.4.3.1 日志的原理 + +日志是由日志记录构成的文件,几乎所有DBMS都采用基于日志的恢复机制。它的基本思路是:DBMS在对磁盘页面进行修改之前,先将其对数据库所做的所有更改记录到磁盘上的日志文件中,日志文件包含足够的信息来执行必要的UNDO和REDO操作,以便在故障后恢复数据库。DBMS必须先将对数据库对象所做修改的日志记录写入日志文件,然后才能将该对象刷新到磁盘,这一过程称为WAL(Write Ahead Log)。WAL的执行过程如图6-3所示。事务开始后,所有对数据库的修改在发送到缓冲池之前都被记录在内存中的WAL缓冲区中。事务提交时,必须把WAL缓冲区刷新到磁盘。一旦WAL缓冲区被安全地写进磁盘,事务的修改结果就也可以写盘了。 + +![图6-3 WAL过程示意图](images/6-3.png) + +
图6-3 WAL过程示意图
+ +日志文件中应该记录以下信息: + +- l 事务开始时,向日志中写入一条该事务的开始记录。 +- l 事务结束时,向日志中写入一条该事务的结束记录,结束记录包括两类:正常结束记录,和异常结束记录。 +- 事务对每个数据对象的修改操作对应一条日志记录,其中包含以下信息: + - 事务ID + - 对象ID + - 修改前的值(用于UNDO) + - 修改后的值(用于REDO) + +将日志记录从日志缓冲区写入磁盘的时机有这样几个: + +- 接收到提交事务的命令后,在返回提交成功的消息之前,DBMS必须将该事务的所有日志记录写入磁盘。系统可以使用"组提交"的方式来批处理多个事务的提交,以降低I/O开销。 +- 日志缓冲区空间不足的时候,需要将缓冲区中的日子记录写入磁盘。 +- 在将一个脏数据页写入磁盘之前,与更新该页有关的所有日志记录都必须先被写入磁盘。 + +需要注意的是,登记日志时必须严格按事务的操作顺序记录,并且写到磁盘中的日志记录顺序必须与写入日志缓冲区的顺序完全一致。 + +#### 6.4.3.2 日志的类型 + +根据实现时采用的恢复方法的不同,日志中记录的内容也不一样,分为以下几类。 + +1. 物理日志:物理日志中记录的是事务对数据库中特定位置的字节级更改。例如,日志中记录的是事务对指定数据页中从指定位置开始的若干字节的修改。 +2. 逻辑日志:逻辑日志中记录的是事务执行的逻辑操作。例如,日志中记录的是事务执行的UPDATE、DELETE和INSERT语句。与物理日志相比,逻辑日志需要写的数据更少,因为每条日志记录可以在多个页面上更新多个元组。然而,当系统中存在并发事务时,通过逻辑日志实现恢复很困难。 +3. 混合日志:日志中记录的是事务对指定页面中指定槽号内元组的更改,而不是对页中指定偏移位置的更改。 + +### 6.4.4 恢复算法 + +#### 6.4.4.1 事务故障的恢复 + +事务故障是指事务在运行至正常终止点前被终止,这时恢复子系统应利用日志文件UNDO此事务己对数据库进行的修改。事务故障的恢复应由DBMS自动完成,对用户完全透明。恢复步骤如下: + +1. 反向扫描日志文件,查找该事务的更新日志记录。 +2. 对该事务的更新操作执行逆操作, 即将日志记录中 "更新前的值" 写入数据库。如果记录中是插入操作,则逆操作相当于做删除操作:若记录中是删除操作,则逆操作相当于做插入操作;若是修改操作,则逆操作相当于用修改前的值代替修改后的值。 +3. 继续反向扫描日志文件,查找该事务的其他更新日志记录并做相同处理,直至读到此事务的开始标记。 + +#### 6.4.4.2 系统故障的恢复 + +系统故障导致数据库处于不一致状态的原因,一方面是未提交事务对数据库的更新已经被写入数据库,另一方面则是已提交事务对数据库的更新没有被完全写入数据库。因此对于系统故障的恢复操作,就是要UNDO故障发生时未提交的事务,REDO已提交的事务。系统故障也是由DBMS在重启时自动完成,对用户完全透明。恢复步骤如下: + +1. 正向扫描日志文件,通过事务开始记录和COMMIT记录找出在故障发生前已提交的事务集合和未提交的事务集合。已提交的事务既有开始记录也有COMMIT记录,未提交的事务则只有开始记录,没有相应的COMMIT记录。将已提交的事务加入重做队列(REDO-LIST),未提交的事务加入撤销队列(UNDO-LIST)。 +2. 反向扫描日志文件,对UNDO-LIST中的各个事务进行UNDO处理。 +3. 正向扫描日志文件,对REDO-LIST中的各个事务进行REDO处理。 + +#### 6.4.4.3 介质故障的恢复 + +发生介质故障后,磁盘上的物理数据和日志文件被破坏,这是最严重的一种故障,恢复方法是重装数据库,然后重做已完成的事务。介质故障的恢复需要用户人工介入,由DBA装入最新的数据库备份及日志文件备份,然后执行系统提供的恢复命令。 + +DBA装入相关备份文件后,系统执行的恢复过程与系统故障的恢复过程类似,也是通过扫描日志文件构造REDO-LIST和UNDO-LIST,然后对REDO-LIST和UNDO-LIST中的事务分别进行REDO和UNDO处理,这样就可以将数据库恢复到最近一次备份时的一致性状态。 + +### 6.4.5 检查点 + +以上讨论的基于日志的恢复算法存在两个问题:1. 构造REDO-LIST和UNDO-LIST需要搜索整个日志文件,耗费大量的时间;2.处理REDO-LIST时,很多事务的修改实际上已经写入了磁盘,但是仍然不得不进行REDO处理,浪费大量时间。为了解决上述问题,提高恢复效率,很多DBMS都采用了检查点技术,通过周期性地对日志做检查点来避免故障恢复时检查整个日志。 + +检查点技术的基本思路是:在日志文件中增加一类记录——检查点记录,并增加一个文件——重新开始文件。恢复子系统周期性地执行以下操作: + +1. 将日志缓冲区中的日志记录全部写入磁盘中的日志文件; +2. 在日志文件中写入一个检查点记录; +3. 将数据缓冲区中的数据写入磁盘; +4. 将检查点记录在日志文件中的地址写入重新开始文件。 + +其中,检查点记录中包含以下信息: + +- 检查点时刻,当前所有正在执行的事务清单 +- 清单中每个事务最近一个日志记录的地址 + +![图6-4 带检查点的日志文件和重新开始文件](images/6-4.png) + +
图6-4 带检查点的日志文件和重新开始文件
+ +由检查点时刻系统执行的操作可知,如果一个事务在一个检查点之前已经提交了,那么它对数据库所做的修改一定都被写入了磁盘,因此在进行恢复处理时,就没有必要再对该事务执行REDO操作了。 + +增加了检查点之后,基于日志的恢复步骤如下: + +1. 从重新开始文件中找到最后一个检查点记录在日志文件中的地址,根据该地址在日志文件中找到最后一个检查点记录。 +2. 由该检查点记录得到检查点时刻正在执行的事务清单ACTIVE-LIST。初始化两个事务队列UNDO-LIST和REDO-LIST,令UNDO-LIST = ACTIVE-LIST,令REDO队列为空。 +3. 从检查点开始正向扫描日志文件直到日志文件结束,如有新开始的事务,则将其放入UNDO-LIST,如有提交的事务,则将其从UNDO-LIST队列移到REDO-LIST队列。 +4. 对UNDO-LIST和REDO-LIST中的每个事务,分别执行UNDO和REDO操作。 + diff --git a/docs/lectures/references.md b/docs/lectures/references.md new file mode 100644 index 0000000000000000000000000000000000000000..6c9754463100119d906cb78c93491898527b5194 --- /dev/null +++ b/docs/lectures/references.md @@ -0,0 +1,8 @@ +# 参考资料 + +1. 王珊, 萨师煊. 数据库系统概论(第5版). 北京: 高等教育出版社, 2014 +2. Hector Garcia-Mlina, Jeffrey D. Ullman, Jennifer Widom. 杨冬青 等译. 数据库系统实现. 北京: 机械工业出版社, 2010 +3. [Abraham](http://search.dangdang.com/?key2=Abraham&medium=01&category_path=01.00.00.00.00.00) [Silberschatz](http://search.dangdang.com/?key2=Silberschatz&medium=01&category_path=01.00.00.00.00.00), [Henry](http://search.dangdang.com/?key2=Henry&medium=01&category_path=01.00.00.00.00.00) [F.Korth](http://search.dangdang.com/?key2=F.Korth&medium=01&category_path=01.00.00.00.00.00), S. Sudarshan. 杨冬青 等译. 数据库系统概念(第6版). 北京: 机械工业出版社, 2012 +4. 李海翔. 数据库查询优化器的艺术原理解析与SQL性能优化. 北京: 机械工业出版社, 2014 +5. [https://15445.courses.cs.cmu.edu/fall2020/schedule.html](https://15445.courses.cs.cmu.edu/fall2020/schedule.html) + diff --git a/src/obclient/client.cpp b/src/obclient/client.cpp index aaffa43c70f0a65c997310922793b729b097249d..b6372eac9f3dc0a117113259dbc33a1963df3606 100644 --- a/src/obclient/client.cpp +++ b/src/obclient/client.cpp @@ -66,7 +66,8 @@ bool is_exit_command(const char *cmd) { 0 == strncasecmp("bye", cmd, 3); } -int init_unix_sock(const char *unix_sock_path) { +int init_unix_sock(const char *unix_sock_path) +{ int sockfd = socket(PF_UNIX, SOCK_STREAM, 0); if (sockfd < 0) { fprintf(stderr, "failed to create unix socket. %s", strerror(errno)); @@ -79,15 +80,15 @@ int init_unix_sock(const char *unix_sock_path) { snprintf(sockaddr.sun_path, sizeof(sockaddr.sun_path), "%s", unix_sock_path); if (connect(sockfd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)) < 0) { - fprintf(stderr, "failed to connect to server. unix socket path '%s'. error %s", - sockaddr.sun_path, strerror(errno)); + fprintf(stderr, "failed to connect to server. unix socket path '%s'. error %s", sockaddr.sun_path, strerror(errno)); close(sockfd); return -1; } return sockfd; } -int init_tcp_sock(const char *server_host, int server_port) { +int init_tcp_sock(const char *server_host, int server_port) +{ struct hostent *host; struct sockaddr_in serv_addr; @@ -107,8 +108,7 @@ int init_tcp_sock(const char *server_host, int server_port) { serv_addr.sin_addr = *((struct in_addr *)host->h_addr); bzero(&(serv_addr.sin_zero), 8); - if (connect(sockfd, (struct sockaddr *)&serv_addr, sizeof(struct sockaddr)) == - -1) { + if (connect(sockfd, (struct sockaddr *)&serv_addr, sizeof(struct sockaddr)) == -1) { fprintf(stderr, "Failed to connect. 
errmsg=%d:%s\n", errno, strerror(errno)); close(sockfd); return -1; @@ -116,7 +116,8 @@ int init_tcp_sock(const char *server_host, int server_port) { return sockfd; } -int main(int argc, char *argv[]) { +int main(int argc, char *argv[]) +{ const char *unix_socket_path = nullptr; const char *server_host = "127.0.0.1"; int server_port = PORT_DEFAULT; @@ -124,15 +125,15 @@ int main(int argc, char *argv[]) { extern char *optarg; while ((opt = getopt(argc, argv, "s:h:p:")) > 0) { switch (opt) { - case 's': - unix_socket_path = optarg; - break; - case 'p': - server_port = atoi(optarg); - break; - case 'h': - server_host = optarg; - break; + case 's': + unix_socket_path = optarg; + break; + case 'p': + server_port = atoi(optarg); + break; + case 'h': + server_host = optarg; + break; } } @@ -171,13 +172,13 @@ int main(int argc, char *argv[]) { memset(send_buf, 0, sizeof(send_buf)); int len = 0; - while((len = recv(sockfd, send_buf, MAX_MEM_BUFFER_SIZE, 0)) > 0){ + while ((len = recv(sockfd, send_buf, MAX_MEM_BUFFER_SIZE, 0)) > 0) { bool msg_end = false; for (int i = 0; i < len; i++) { if (0 == send_buf[i]) { msg_end = true; break; - } + } printf("%c", send_buf[i]); } if (msg_end) { diff --git a/src/observer/event/execution_plan_event.cpp b/src/observer/event/execution_plan_event.cpp index d6197718cb51a80f4e505b9ac9b8e61b10d55f0b..e0c8a7d2040351082d625a4d6ce8c47011960c2b 100644 --- a/src/observer/event/execution_plan_event.cpp +++ b/src/observer/event/execution_plan_event.cpp @@ -15,9 +15,10 @@ See the Mulan PSL v2 for more details. */ #include "event/execution_plan_event.h" #include "event/sql_event.h" -ExecutionPlanEvent::ExecutionPlanEvent(SQLStageEvent *sql_event, Query *sqls) : sql_event_(sql_event), sqls_(sqls) { -} -ExecutionPlanEvent::~ExecutionPlanEvent() { +ExecutionPlanEvent::ExecutionPlanEvent(SQLStageEvent *sql_event, Query *sqls) : sql_event_(sql_event), sqls_(sqls) +{} +ExecutionPlanEvent::~ExecutionPlanEvent() +{ sql_event_ = nullptr; // if (sql_event_) { // sql_event_->doneImmediate(); @@ -26,4 +27,3 @@ ExecutionPlanEvent::~ExecutionPlanEvent() { query_destroy(sqls_); sqls_ = nullptr; } - diff --git a/src/observer/event/execution_plan_event.h b/src/observer/event/execution_plan_event.h index 764a91681e805af25a67a1cf4035f7636dc72d26..67b33d7d9f359869d9edbe18ff999bfdd49c85d1 100644 --- a/src/observer/event/execution_plan_event.h +++ b/src/observer/event/execution_plan_event.h @@ -25,16 +25,19 @@ public: ExecutionPlanEvent(SQLStageEvent *sql_event, Query *sqls); virtual ~ExecutionPlanEvent(); - Query * sqls() const { + Query *sqls() const + { return sqls_; } - SQLStageEvent * sql_event() const { + SQLStageEvent *sql_event() const + { return sql_event_; } + private: - SQLStageEvent * sql_event_; - Query * sqls_; + SQLStageEvent *sql_event_; + Query *sqls_; }; -#endif // __OBSERVER_EVENT_EXECUTION_PLAN_EVENT_H__ \ No newline at end of file +#endif // __OBSERVER_EVENT_EXECUTION_PLAN_EVENT_H__ \ No newline at end of file diff --git a/src/observer/event/session_event.cpp b/src/observer/event/session_event.cpp index f58a1b4a9cff45ef740e5733486dd476147d91dc..14aef424dbf78a847b8fb122ce9589018afe8f47 100644 --- a/src/observer/event/session_event.cpp +++ b/src/observer/event/session_event.cpp @@ -14,33 +14,48 @@ See the Mulan PSL v2 for more details. 
*/ #include "session_event.h" -SessionEvent::SessionEvent(ConnectionContext *client) : client_(client) { -} +SessionEvent::SessionEvent(ConnectionContext *client) : client_(client) +{} -SessionEvent::~SessionEvent() { +SessionEvent::~SessionEvent() +{} +ConnectionContext *SessionEvent::get_client() const +{ + return client_; } -ConnectionContext *SessionEvent::get_client() const { return client_; } - -const char *SessionEvent::get_response() const { - return response_.c_str(); +const char *SessionEvent::get_response() const +{ + return response_.c_str(); } -void SessionEvent::set_response(const char *response) { +void SessionEvent::set_response(const char *response) +{ set_response(response, strlen(response)); } -void SessionEvent::set_response(const char *response, int len) { +void SessionEvent::set_response(const char *response, int len) +{ response_.assign(response, len); } -void SessionEvent::set_response(std::string &&response) { +void SessionEvent::set_response(std::string &&response) +{ response_ = std::move(response); } -int SessionEvent::get_response_len() const { return response_.size(); } +int SessionEvent::get_response_len() const +{ + return response_.size(); +} -char *SessionEvent::get_request_buf() { return client_->buf; } +char *SessionEvent::get_request_buf() +{ + return client_->buf; +} -int SessionEvent::get_request_buf_len() { return SOCKET_BUFFER_SIZE; } \ No newline at end of file +int SessionEvent::get_request_buf_len() +{ + return SOCKET_BUFFER_SIZE; +} \ No newline at end of file diff --git a/src/observer/event/session_event.h b/src/observer/event/session_event.h index c40639844f5bb41c48687337f775f06dde3c827d..67e9b757306113b3f0705a61d86e4e34e56652fc 100644 --- a/src/observer/event/session_event.h +++ b/src/observer/event/session_event.h @@ -42,4 +42,4 @@ private: std::string response_; }; -#endif //__OBSERVER_SESSION_SESSIONEVENT_H__ +#endif //__OBSERVER_SESSION_SESSIONEVENT_H__ diff --git a/src/observer/event/sql_event.cpp b/src/observer/event/sql_event.cpp index 38d479b2c5a1d9eb53b3020847752a677f2f9818..d157970cce9a7e7cd456042000e31e29b06bb260 100644 --- a/src/observer/event/sql_event.cpp +++ b/src/observer/event/sql_event.cpp @@ -15,11 +15,11 @@ See the Mulan PSL v2 for more details. 
*/ #include "event/sql_event.h" #include "event/session_event.h" -SQLStageEvent::SQLStageEvent(SessionEvent *event, std::string &sql) : - session_event_(event), sql_(sql) { -} +SQLStageEvent::SQLStageEvent(SessionEvent *event, std::string &sql) : session_event_(event), sql_(sql) +{} -SQLStageEvent::~SQLStageEvent() noexcept { +SQLStageEvent::~SQLStageEvent() noexcept +{ if (session_event_ != nullptr) { session_event_ = nullptr; // SessionEvent *session_event = session_event_; diff --git a/src/observer/event/sql_event.h b/src/observer/event/sql_event.h index a9d1cc6657716834e1368aa35b03f8179feed2b5..9f2090ae4972920363d7cbca39223f79930a8b2c 100644 --- a/src/observer/event/sql_event.h +++ b/src/observer/event/sql_event.h @@ -25,17 +25,20 @@ public: SQLStageEvent(SessionEvent *event, std::string &sql); virtual ~SQLStageEvent() noexcept; - const std::string &get_sql() const { + const std::string &get_sql() const + { return sql_; } - SessionEvent * session_event() const { + SessionEvent *session_event() const + { return session_event_; } + private: SessionEvent *session_event_; - std::string & sql_; + std::string &sql_; // void *context_; }; -#endif //__SRC_OBSERVER_SQL_EVENT_SQLEVENT_H__ +#endif //__SRC_OBSERVER_SQL_EVENT_SQLEVENT_H__ diff --git a/src/observer/event/storage_event.cpp b/src/observer/event/storage_event.cpp index 85c65b9ab0cd67f29f4cab687fcae15db187d0c2..605c4d735448f944012f40c11f6395acfc825fa9 100644 --- a/src/observer/event/storage_event.cpp +++ b/src/observer/event/storage_event.cpp @@ -15,10 +15,11 @@ See the Mulan PSL v2 for more details. */ #include "event/storage_event.h" #include "event/execution_plan_event.h" -StorageEvent::StorageEvent(ExecutionPlanEvent *exe_event) - : exe_event_(exe_event) {} +StorageEvent::StorageEvent(ExecutionPlanEvent *exe_event) : exe_event_(exe_event) +{} -StorageEvent::~StorageEvent() { +StorageEvent::~StorageEvent() +{ exe_event_ = nullptr; // if (exe_event_ != nullptr) { // ExecutionPlanEvent *exe_event = exe_event_; diff --git a/src/observer/event/storage_event.h b/src/observer/event/storage_event.h index c6d2fd51cc1316e1ce738079d81d5cd53a5f9c87..53564f9b6210bf42409f8a6bec0ee25e83067466 100644 --- a/src/observer/event/storage_event.h +++ b/src/observer/event/storage_event.h @@ -24,11 +24,13 @@ public: StorageEvent(ExecutionPlanEvent *exe_event); virtual ~StorageEvent(); - ExecutionPlanEvent * exe_event() const { + ExecutionPlanEvent *exe_event() const + { return exe_event_; } + private: ExecutionPlanEvent *exe_event_; }; -#endif //__OBSERVER_SQL_EVENT_STORAGEEVENT_H__ +#endif //__OBSERVER_SQL_EVENT_STORAGEEVENT_H__ diff --git a/src/observer/handler/handler.h b/src/observer/handler/handler.h index fd54a6a344c6e03553984f6249c50d9ac3d8e668..edab0d215938108090f88b291fca356914781ae0 100644 --- a/src/observer/handler/handler.h +++ b/src/observer/handler/handler.h @@ -9,7 +9,8 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Longda on 2021/4/14. +// Created by Meiyi +// Rewritten by Longda on 2021/4/14. // #ifndef __OBSERVER_HANDLER_HANDLER_H__ @@ -28,17 +29,17 @@ See the Mulan PSL v2 for more details. 
*/ //属性结构体 typedef struct { - char *relName; // relation name (may be NULL) 表名 - char *attrName; // attribute name 属性名 + char *relName; // relation name (may be NULL) 表名 + char *attrName; // attribute name 属性名 } RelAttr; typedef enum { - EQual, //"=" 0 - LEqual, //"<=" 1 - NEqual, //"<>" 2 - LessT, //"<" 3 - GEqual, //">=" 4 - GreatT, //">" 5 + EQual, //"=" 0 + LEqual, //"<=" 1 + NEqual, //"<>" 2 + LessT, //"<" 3 + GEqual, //">=" 4 + GreatT, //">" 5 NO_OP } CompOp; @@ -47,85 +48,85 @@ typedef enum { chars, ints, floats } AttrType; //属性值 typedef struct _Value Value; struct _Value { - AttrType type; // type of value - void *data; // value + AttrType type; // type of value + void *data; // value }; typedef struct _Condition { - int bLhsIsAttr; // TRUE if left-hand side is an attribute - // 1时,操作符右边是属性,0时,是属性值 - Value lhsValue; // left-hand side value if bLhsIsAttr = FALSE - RelAttr lhsAttr; // left-hand side attribute - CompOp op; // comparison operator - int bRhsIsAttr; // TRUE if right-hand side is an attribute - // 1时,操作符右边是属性,0时,是属性值 + int bLhsIsAttr; // TRUE if left-hand side is an attribute + // 1时,操作符右边是属性,0时,是属性值 + Value lhsValue; // left-hand side value if bLhsIsAttr = FALSE + RelAttr lhsAttr; // left-hand side attribute + CompOp op; // comparison operator + int bRhsIsAttr; // TRUE if right-hand side is an attribute + // 1时,操作符右边是属性,0时,是属性值 // and not a value - RelAttr rhsAttr; // right-hand side attribute if bRhsIsAttr = TRUE 右边的属性 - Value rhsValue; // right-hand side value if bRhsIsAttr = FALSE -}Condition; + RelAttr rhsAttr; // right-hand side attribute if bRhsIsAttr = TRUE 右边的属性 + Value rhsValue; // right-hand side value if bRhsIsAttr = FALSE +} Condition; // struct of select typedef struct { - int nSelAttrs; // Length of attrs in Select clause - RelAttr *selAttrs[MAX_NUM]; // attrs in Select clause - int nRelations; // Length of relations in Fro clause - char *relations[MAX_NUM]; // relations in From clause - int nConditions; // Length of conditions in Where clause - Condition conditions[MAX_NUM]; // conditions in Where clause + int nSelAttrs; // Length of attrs in Select clause + RelAttr *selAttrs[MAX_NUM]; // attrs in Select clause + int nRelations; // Length of relations in Fro clause + char *relations[MAX_NUM]; // relations in From clause + int nConditions; // Length of conditions in Where clause + Condition conditions[MAX_NUM]; // conditions in Where clause } Selects; // struct of insert typedef struct { - char *relName; // Relation to insert into - int nValues; // Length of values - Value values[MAX_NUM]; // values to insert + char *relName; // Relation to insert into + int nValues; // Length of values + Value values[MAX_NUM]; // values to insert } Inserts; // struct of delete typedef struct { - char *relName; // Relation to delete from - int nConditions; // Length of conditions in Where clause - Condition conditions[MAX_NUM]; // conditions in Where clause + char *relName; // Relation to delete from + int nConditions; // Length of conditions in Where clause + Condition conditions[MAX_NUM]; // conditions in Where clause } Deletes; // struct of update typedef struct { - char *relName; // Relation to update - char *attrName; // Attribute to update - Value value; // update value - int nConditions; // Length of conditions in Where clause - Condition conditions[MAX_NUM]; // conditions in Where clause + char *relName; // Relation to update + char *attrName; // Attribute to update + Value value; // update value + int nConditions; // Length of conditions in Where clause + Condition 
conditions[MAX_NUM]; // conditions in Where clause } Updates; // struct of AttrInfo typedef struct _AttrInfo AttrInfo; struct _AttrInfo { - char *attrName; // Attribute name - AttrType attrType; // Type of attribute - int attrLength; // Length of attribute + char *attrName; // Attribute name + AttrType attrType; // Type of attribute + int attrLength; // Length of attribute }; // struct of craete_table typedef struct { - char *relName; // Relation name - int attrCount; // Length of attribute - AttrInfo attributes[MAX_NUM]; // attributes + char *relName; // Relation name + int attrCount; // Length of attribute + AttrInfo attributes[MAX_NUM]; // attributes } CreateTable; // struct of drop_table typedef struct { - char *relName; // Relation name + char *relName; // Relation name } DropTable; // struct of create_index typedef struct { - char *indexName; // Index name - char *relName; // Relation name - char *attrName; // Attribute name + char *indexName; // Index name + char *relName; // Relation name + char *attrName; // Attribute name } CreateIndex; // struct of drop_index typedef struct { - char *indexName; // Index name + char *indexName; // Index name } DropIndex; @@ -269,7 +270,6 @@ RC deleteRecord(char *relName, int nConditions, Condition *conditions); * @param conditions * @return */ -RC updateRecord(char *relName, char *attrName, Value *value, int nConditions, - Condition *conditions); +RC updateRecord(char *relName, char *attrName, Value *value, int nConditions, Condition *conditions); -#endif //__OBSERVER_HANDLER_HANDLER_H__ +#endif //__OBSERVER_HANDLER_HANDLER_H__ diff --git a/src/observer/ini_setting.h b/src/observer/ini_setting.h index 77cb8144163ca1e7f3b66308f44e05227c116b67..ebbfe8d7975a59a708e5af92e243ff8e40b7489a 100644 --- a/src/observer/ini_setting.h +++ b/src/observer/ini_setting.h @@ -26,4 +26,4 @@ See the Mulan PSL v2 for more details. */ #define SOCKET_BUFFER_SIZE 8192 #define SESSION_STAGE_NAME "SessionStage" -#endif //__SRC_OBSERVER_INI_SETTING_H__ +#endif //__SRC_OBSERVER_INI_SETTING_H__ diff --git a/src/observer/init.cpp b/src/observer/init.cpp index a4c6f59bb91cc3982dc522c8efc383949ad7d056..788e8f59cb60864676e726a432150d52f0b4c954 100644 --- a/src/observer/init.cpp +++ b/src/observer/init.cpp @@ -12,7 +12,6 @@ See the Mulan PSL v2 for more details. */ // Created by Longda on 2021/5/3. // - #include "init.h" #include "ini_setting.h" @@ -40,20 +39,26 @@ See the Mulan PSL v2 for more details. */ using namespace common; -bool *&_get_init() { +bool *&_get_init() +{ static bool util_init = false; static bool *util_init_p = &util_init; return util_init_p; } -bool get_init() { return *_get_init(); } +bool get_init() +{ + return *_get_init(); +} -void set_init(bool value) { +void set_init(bool value) +{ *_get_init() = value; return; } -void sig_handler(int sig) { +void sig_handler(int sig) +{ // Signal handler will be add in the next step. 
// Add action to shutdown @@ -62,7 +67,8 @@ void sig_handler(int sig) { return; } -int init_log(ProcessParam *process_cfg, Ini &properties) { +int init_log(ProcessParam *process_cfg, Ini &properties) +{ const std::string &proc_name = process_cfg->get_process_name(); try { // we had better alloc one lock to do so, but simplify the logic @@ -71,8 +77,7 @@ int init_log(ProcessParam *process_cfg, Ini &properties) { } const std::string log_section_name = "LOG"; - std::map log_section = - properties.get(log_section_name); + std::map log_section = properties.get(log_section_name); std::string log_file_name; @@ -81,8 +86,7 @@ int init_log(ProcessParam *process_cfg, Ini &properties) { std::map::iterator it = log_section.find(key); if (it == log_section.end()) { log_file_name = proc_name + ".log"; - std::cout << "Not set log file name, use default " << log_file_name - << std::endl; + std::cout << "Not set log file name, use default " << log_file_name << std::endl; } else { log_file_name = it->second; } @@ -121,15 +125,15 @@ int init_log(ProcessParam *process_cfg, Ini &properties) { return 0; } catch (std::exception &e) { - std::cerr << "Failed to init log for " << proc_name << SYS_OUTPUT_FILE_POS - << SYS_OUTPUT_ERROR << std::endl; + std::cerr << "Failed to init log for " << proc_name << SYS_OUTPUT_FILE_POS << SYS_OUTPUT_ERROR << std::endl; return errno; } return 0; } -void cleanup_log() { +void cleanup_log() +{ if (g_log) { delete g_log; @@ -138,27 +142,22 @@ void cleanup_log() { return; } -int prepare_init_seda() { - static StageFactory session_stage_factory("SessionStage", - &SessionStage::make_stage); - static StageFactory resolve_stage_factory("ResolveStage", - &ResolveStage::make_stage); - static StageFactory query_cache_stage_factory("QueryCacheStage", - &QueryCacheStage::make_stage); +int prepare_init_seda() +{ + static StageFactory session_stage_factory("SessionStage", &SessionStage::make_stage); + static StageFactory resolve_stage_factory("ResolveStage", &ResolveStage::make_stage); + static StageFactory query_cache_stage_factory("QueryCacheStage", &QueryCacheStage::make_stage); static StageFactory parse_stage_factory("ParseStage", &ParseStage::make_stage); - static StageFactory plan_cache_factory("PlanCacheStage", - &PlanCacheStage::make_stage); - static StageFactory optimize_factory("OptimizeStage", - &OptimizeStage::make_stage); + static StageFactory plan_cache_factory("PlanCacheStage", &PlanCacheStage::make_stage); + static StageFactory optimize_factory("OptimizeStage", &OptimizeStage::make_stage); static StageFactory execute_factory("ExecuteStage", &ExecuteStage::make_stage); - static StageFactory default_storage_factory("DefaultStorageStage", - &DefaultStorageStage::make_stage); - static StageFactory mem_storage_factory("MemStorageStage", - &MemStorageStage::make_stage); + static StageFactory default_storage_factory("DefaultStorageStage", &DefaultStorageStage::make_stage); + static StageFactory mem_storage_factory("MemStorageStage", &MemStorageStage::make_stage); return 0; } -int init(ProcessParam *process_param) { +int init(ProcessParam *process_param) +{ if (get_init()) { @@ -170,11 +169,9 @@ int init(ProcessParam *process_param) { // Run as daemon if daemonization requested int rc = STATUS_SUCCESS; if (process_param->is_demon()) { - rc = daemonize_service(process_param->get_std_out().c_str(), - process_param->get_std_err().c_str()); + rc = daemonize_service(process_param->get_std_out().c_str(), process_param->get_std_err().c_str()); if (rc != 0) { - std::cerr << "Shutdown due to 
failed to daemon current process!" - << std::endl; + std::cerr << "Shutdown due to failed to daemon current process!" << std::endl; return rc; } } @@ -230,7 +227,8 @@ int init(ProcessParam *process_param) { return STATUS_SUCCESS; } -void cleanup_util() { +void cleanup_util() +{ if (nullptr != get_properties()) { delete get_properties(); @@ -246,4 +244,5 @@ void cleanup_util() { return; } -void cleanup() {} +void cleanup() +{} diff --git a/src/observer/init.h b/src/observer/init.h index f78e98a9dce66e4f0e5b450acee2bdffce88e2cd..553f8c5ed459414e9183d95dfe586f48d7921ec9 100644 --- a/src/observer/init.h +++ b/src/observer/init.h @@ -21,4 +21,4 @@ See the Mulan PSL v2 for more details. */ int init(common::ProcessParam *processParam); void cleanup(); -#endif //__OBSERVER_INIT_H__ +#endif //__OBSERVER_INIT_H__ diff --git a/src/observer/main.cpp b/src/observer/main.cpp index 51379b3a8b29f7fac8cb6a2f2ef8231406c29a07..4fff6dabafcb2e033e0149b145f6b2de212730c0 100644 --- a/src/observer/main.cpp +++ b/src/observer/main.cpp @@ -15,7 +15,6 @@ See the Mulan PSL v2 for more details. */ * Author: Longda Feng */ - #include #include #include @@ -32,7 +31,8 @@ using namespace common; static Server *g_server = nullptr; -void usage() { +void usage() +{ std::cout << "Useage " << std::endl; std::cout << "-p: server port. if not specified, the item in the config file will be used" << std::endl; std::cout << "-f: path of config file." << std::endl; @@ -40,7 +40,8 @@ void usage() { exit(0); } -void parse_parameter(int argc, char **argv) { +void parse_parameter(int argc, char **argv) +{ std::string process_name = get_process_name(argv[0]); ProcessParam *process_param = the_process_param(); @@ -52,35 +53,35 @@ void parse_parameter(int argc, char **argv) { extern char *optarg; while ((opt = getopt(argc, argv, "dp:s:f:o:e:h")) > 0) { switch (opt) { - case 's': - process_param->set_unix_socket_path(optarg); - break; - case 'p': - process_param->set_server_port(atoi(optarg)); - break; - case 'f': - process_param->set_conf(optarg); - break; - case 'o': - process_param->set_std_out(optarg); - break; - case 'e': - process_param->set_std_err(optarg); - break; - case 'd': - process_param->set_demon(true); - break; - case 'h': - default: - usage(); - return; + case 's': + process_param->set_unix_socket_path(optarg); + break; + case 'p': + process_param->set_server_port(atoi(optarg)); + break; + case 'f': + process_param->set_conf(optarg); + break; + case 'o': + process_param->set_std_out(optarg); + break; + case 'e': + process_param->set_std_err(optarg); + break; + case 'd': + process_param->set_demon(true); + break; + case 'h': + default: + usage(); + return; } } } -Server *init_server() { - std::map net_section = - get_properties()->get(NET); +Server *init_server() +{ + std::map net_section = get_properties()->get(NET); ProcessParam *process_param = the_process_param(); @@ -130,7 +131,8 @@ Server *init_server() { * 那么直接在signal_handler里面处理的话,可能会导致死锁 * 所以这里单独创建一个线程 */ -void *quit_thread_func(void *_signum) { +void *quit_thread_func(void *_signum) +{ intptr_t signum = (intptr_t)_signum; LOG_INFO("Receive signal: %ld", signum); if (g_server) { @@ -140,12 +142,14 @@ void *quit_thread_func(void *_signum) { } return nullptr; } -void quit_signal_handle(int signum) { +void quit_signal_handle(int signum) +{ pthread_t tid; pthread_create(&tid, nullptr, quit_thread_func, (void *)(intptr_t)signum); } -int main(int argc, char **argv) { +int main(int argc, char **argv) +{ setSignalHandler(quit_signal_handle); parse_parameter(argc, argv); 
diff --git a/src/observer/net/connection_context.h b/src/observer/net/connection_context.h index 03e62e8fda9ca35a71b81734680317688813a51f..1f83afa64b23c9af061d95fd1b33208262611c58 100644 --- a/src/observer/net/connection_context.h +++ b/src/observer/net/connection_context.h @@ -29,4 +29,4 @@ typedef struct _ConnectionContext { char buf[SOCKET_BUFFER_SIZE]; } ConnectionContext; -#endif //__SRC_OBSERVER_NET_CONNECTION_CONTEXT_H__ +#endif //__SRC_OBSERVER_NET_CONNECTION_CONTEXT_H__ diff --git a/src/observer/net/server.cpp b/src/observer/net/server.cpp index 8497a743f56b4b62f40a8fcadb5e284eea0773f4..e87ad0df02f8af3daad8f90945cbc427d33547a6 100644 --- a/src/observer/net/server.cpp +++ b/src/observer/net/server.cpp @@ -43,26 +43,30 @@ Stage *Server::session_stage_ = nullptr; common::SimpleTimer *Server::read_socket_metric_ = nullptr; common::SimpleTimer *Server::write_socket_metric_ = nullptr; -ServerParam::ServerParam() { +ServerParam::ServerParam() +{ listen_addr = INADDR_ANY; max_connection_num = MAX_CONNECTION_NUM_DEFAULT; port = PORT_DEFAULT; } -Server::Server(ServerParam input_server_param) : server_param_(input_server_param) { +Server::Server(ServerParam input_server_param) : server_param_(input_server_param) +{ started_ = false; server_socket_ = 0; event_base_ = nullptr; listen_ev_ = nullptr; } -Server::~Server() { +Server::~Server() +{ if (started_) { shutdown(); } } -void Server::init(){ +void Server::init() +{ session_stage_ = get_seda_config()->get_stage(SESSION_STAGE_NAME); MetricsRegistry &metricsRegistry = get_metrics_registry(); @@ -74,10 +78,11 @@ void Server::init(){ if (Server::write_socket_metric_ == nullptr) { Server::write_socket_metric_ = new SimpleTimer(); metricsRegistry.register_metric(WRITE_SOCKET_METRIC_TAG, Server::write_socket_metric_); - } + } } -int Server::set_non_block(int fd) { +int Server::set_non_block(int fd) +{ int flags = fcntl(fd, F_GETFL); if (flags == -1) { @@ -93,7 +98,8 @@ int Server::set_non_block(int fd) { return 0; } -void Server::close_connection(ConnectionContext *client_context) { +void Server::close_connection(ConnectionContext *client_context) +{ LOG_INFO("Close connection of %s.", client_context->addr); event_del(&client_context->read_event); ::close(client_context->fd); @@ -102,9 +108,10 @@ void Server::close_connection(ConnectionContext *client_context) { delete client_context; } -void Server::recv(int fd, short ev, void *arg) { +void Server::recv(int fd, short ev, void *arg) +{ ConnectionContext *client = (ConnectionContext *)arg; - //Server::send(sev->getClient(), sev->getRequestBuf(), strlen(sev->getRequestBuf())); + // Server::send(sev->getClient(), sev->getRequestBuf(), strlen(sev->getRequestBuf())); int data_len = 0; int read_len = 0; @@ -113,7 +120,7 @@ void Server::recv(int fd, short ev, void *arg) { TimerStat timer_stat(*read_socket_metric_); MUTEX_LOCK(&client->mutex); - // 持续接收消息,直到遇到'\0'。将'\0'遇到的后续数据直接丢弃没有处理,因为目前仅支持一收一发的模式 + // 持续接收消息,直到遇到'\0'。将'\0'遇到的后续数据直接丢弃没有处理,因为目前仅支持一收一发的模式 while (true) { read_len = ::read(client->fd, client->buf + data_len, buf_size - data_len); if (read_len < 0) { @@ -131,9 +138,8 @@ void Server::recv(int fd, short ev, void *arg) { break; } - bool msg_end = false; - for(int i = 0; i < read_len; i++) { + for (int i = 0; i < read_len; i++) { if (client->buf[data_len + i] == 0) { data_len += i + 1; msg_end = true; @@ -151,7 +157,7 @@ void Server::recv(int fd, short ev, void *arg) { MUTEX_UNLOCK(&client->mutex); timer_stat.end(); - if(data_len > buf_size) { + if (data_len > buf_size) { LOG_WARN("The length of 
sql exceeds the limitation %d\n", buf_size); close_connection(client); return; @@ -161,8 +167,7 @@ void Server::recv(int fd, short ev, void *arg) { close_connection(client); return; } else if (read_len < 0) { - LOG_ERROR("Failed to read socket of %s, %s\n", client->addr, - strerror(errno)); + LOG_ERROR("Failed to read socket of %s, %s\n", client->addr, strerror(errno)); close_connection(client); return; } @@ -173,7 +178,8 @@ void Server::recv(int fd, short ev, void *arg) { } // 这个函数仅负责发送数据,至于是否是一个完整的消息,由调用者控制 -int Server::send(ConnectionContext *client, const char *buf, int data_len) { +int Server::send(ConnectionContext *client, const char *buf, int data_len) +{ if (buf == nullptr || data_len == 0) { return 0; } @@ -201,7 +207,8 @@ int Server::send(ConnectionContext *client, const char *buf, int data_len) { return 0; } -void Server::accept(int fd, short ev, void *arg) { +void Server::accept(int fd, short ev, void *arg) +{ Server *instance = (Server *)arg; struct sockaddr_in addr; socklen_t addrlen = sizeof(addr); @@ -226,8 +233,7 @@ void Server::accept(int fd, short ev, void *arg) { ret = instance->set_non_block(client_fd); if (ret < 0) { - LOG_ERROR("Failed to set socket of %s as non blocking, %s", addr_str.c_str(), - strerror(errno)); + LOG_ERROR("Failed to set socket of %s as non blocking, %s", addr_str.c_str(), strerror(errno)); ::close(client_fd); return; } @@ -237,8 +243,7 @@ void Server::accept(int fd, short ev, void *arg) { int yes = 1; ret = setsockopt(client_fd, IPPROTO_TCP, TCP_NODELAY, &yes, sizeof(yes)); if (ret < 0) { - LOG_ERROR("Failed to set socket of %s option as : TCP_NODELAY %s\n", - addr_str.c_str(), strerror(errno)); + LOG_ERROR("Failed to set socket of %s option as : TCP_NODELAY %s\n", addr_str.c_str(), strerror(errno)); ::close(client_fd); return; } @@ -250,14 +255,12 @@ void Server::accept(int fd, short ev, void *arg) { snprintf(client_context->addr, sizeof(client_context->addr), "%s", addr_str.c_str()); pthread_mutex_init(&client_context->mutex, nullptr); - event_set(&client_context->read_event, client_context->fd, EV_READ | EV_PERSIST, - recv, client_context); + event_set(&client_context->read_event, client_context->fd, EV_READ | EV_PERSIST, recv, client_context); ret = event_base_set(instance->event_base_, &client_context->read_event); if (ret < 0) { LOG_ERROR( - "Failed to do event_base_set for read event of %s into libevent, %s", - client_context->addr, strerror(errno)); + "Failed to do event_base_set for read event of %s into libevent, %s", client_context->addr, strerror(errno)); delete client_context; ::close(instance->server_socket_); return; @@ -265,8 +268,7 @@ void Server::accept(int fd, short ev, void *arg) { ret = event_add(&client_context->read_event, nullptr); if (ret < 0) { - LOG_ERROR("Failed to event_add for read event of %s into libevent, %s", - client_context->addr, strerror(errno)); + LOG_ERROR("Failed to event_add for read event of %s into libevent, %s", client_context->addr, strerror(errno)); delete client_context; ::close(instance->server_socket_); return; @@ -276,14 +278,16 @@ void Server::accept(int fd, short ev, void *arg) { LOG_INFO("Accepted connection from %s\n", client_context->addr); } -int Server::start() { +int Server::start() +{ if (server_param_.use_unix_socket) { return start_unix_socket_server(); } else { return start_tcp_server(); } } -int Server::start_tcp_server() { +int Server::start_tcp_server() +{ int ret = 0; struct sockaddr_in sa; @@ -296,8 +300,7 @@ int Server::start_tcp_server() { int yes = 1; ret = 
setsockopt(server_socket_, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)); if (ret < 0) { - LOG_ERROR("Failed to set socket option of reuse address: %s.", - strerror(errno)); + LOG_ERROR("Failed to set socket option of reuse address: %s.", strerror(errno)); ::close(server_socket_); return -1; } @@ -338,8 +341,7 @@ int Server::start_tcp_server() { ret = event_add(listen_ev_, nullptr); if (ret < 0) { - LOG_ERROR("event_add(): can not add accept event into libevent, %s", - strerror(errno)); + LOG_ERROR("event_add(): can not add accept event into libevent, %s", strerror(errno)); ::close(server_socket_); return -1; } @@ -349,7 +351,8 @@ int Server::start_tcp_server() { return 0; } -int Server::start_unix_socket_server() { +int Server::start_unix_socket_server() +{ int ret = 0; server_socket_ = socket(PF_UNIX, SOCK_STREAM, 0); @@ -396,8 +399,7 @@ int Server::start_unix_socket_server() { ret = event_add(listen_ev_, nullptr); if (ret < 0) { - LOG_ERROR("event_add(): can not add accept event into libevent, %s", - strerror(errno)); + LOG_ERROR("event_add(): can not add accept event into libevent, %s", strerror(errno)); ::close(server_socket_); return -1; } @@ -407,7 +409,8 @@ int Server::start_unix_socket_server() { return 0; } -int Server::serve() { +int Server::serve() +{ event_base_ = event_base_new(); if (event_base_ == nullptr) { LOG_ERROR("Failed to create event base, %s.", strerror(errno)); @@ -425,7 +428,8 @@ int Server::serve() { return 0; } -void Server::shutdown() { +void Server::shutdown() +{ LOG_INFO("Server shutting down"); // cleanup diff --git a/src/observer/net/server.h b/src/observer/net/server.h index b18d85b2057beef942705cc7ae7054495b301155..82efbd5f7eb78dc4d073f7d81881dff983fd5143 100644 --- a/src/observer/net/server.h +++ b/src/observer/net/server.h @@ -68,4 +68,4 @@ public: virtual int stop() = 0; }; -#endif //__OBSERVER_NET_SERVER_H__ +#endif //__OBSERVER_NET_SERVER_H__ diff --git a/src/observer/net/server_param.h b/src/observer/net/server_param.h index 8e06b0f39a7b089d2248222500aa7a2a9f05a8f2..aa0d0121bb9045d4b327dacb5c773226ab5a5ec2 100644 --- a/src/observer/net/server_param.h +++ b/src/observer/net/server_param.h @@ -38,4 +38,4 @@ public: bool use_unix_socket = false; }; -#endif //__SRC_OBSERVER_NET_SERVER_PARAM_H__ +#endif //__SRC_OBSERVER_NET_SERVER_PARAM_H__ diff --git a/src/observer/rc.cpp b/src/observer/rc.cpp index d974d99284ee2d36ab0f995258ac91db052bb873..db3f053d698c76097195d36c0f87b2d7f9007823 100644 --- a/src/observer/rc.cpp +++ b/src/observer/rc.cpp @@ -14,9 +14,12 @@ See the Mulan PSL v2 for more details. 
*/ #include "rc.h" -#define RC_CASE_STRING(rc) case rc : return #rc +#define RC_CASE_STRING(rc) \ + case rc: \ + return #rc -const char *strrc(RC rc) { +const char *strrc(RC rc) +{ switch (rc) { RC_CASE_STRING(SUCCESS); RC_CASE_STRING(GENERIC_ERROR); diff --git a/src/observer/rc.h b/src/observer/rc.h index 70c57b3a9cc45179156495d44e8966beb4c6bf53..e6240a723669558ead9dcbb9611e8cf1d6f3f76a 100644 --- a/src/observer/rc.h +++ b/src/observer/rc.h @@ -69,9 +69,7 @@ enum RCSchema { INDEX_NAME_ILLEGAL, }; -enum RCSQL { - SQL_SELECT = 1 -}; +enum RCSQL { SQL_SELECT = 1 }; enum RCIOError { READ = 1, @@ -129,11 +127,7 @@ enum RCCantOpen { SYMLINK, }; -enum RCCorrupt { - CORRUPT_VIRT = 1, - CORRUPT_SEQUENCE, - CORRUPT_INDEX -}; +enum RCCorrupt { CORRUPT_VIRT = 1, CORRUPT_SEQUENCE, CORRUPT_INDEX }; enum RCReadonly { RO_RECOVERY = 1, @@ -176,37 +170,37 @@ enum RC { SUCCESS = 0, /* Successful result */ /* beginning-of-error-codes */ - GENERIC_ERROR, /* Generic error */ - INVALID_ARGUMENT,/* Invalid argument */ - SQL_SYNTAX, /* SQL Syntax error */ - BUFFERPOOL, /* Buffer pool error*/ - RECORD, /* Record error */ - INTERNAL, /* Internal logic error in SQLite */ - PERM, /* Access permission denied */ - ABORT, /* Callback routine requested an abort */ - BUSY, /* The database file is locked */ - LOCKED, /* A table in the database is locked */ - NOMEM, /* A malloc() failed */ - READONLY, /* Attempt to write a readonly database */ - INTERRUPT, /* Operation terminated by interrupt()*/ - IOERR, /* Some kind of disk I/O error occurred */ - CORRUPT, /* The database disk image is malformed */ - NOTFOUND, /* Unknown opcode in file_control() */ - FULL, /* Insertion failed because database is full */ - CANTOPEN, /* Unable to open the database file */ - PROTOCOL, /* Database lock protocol error */ - EMPTY, /* Internal use only */ - SCHEMA, /* The database schema error */ - TOOBIG, /* String or BLOB exceeds size limit */ - CONSTRAINT, /* Abort due to constraint violation */ - MISMATCH, /* Data type mismatch */ - MISUSE, /* Library used incorrectly */ - NOLFS, /* Uses OS features not supported on host */ - AUTH, /* Authorization denied */ - FORMAT, /* Not used */ - RANGE, /* 2nd parameter to bind out of range */ - NOTADB, /* File opened that is not a database file */ - NOTICE = 100, /* Notifications from log() */ + GENERIC_ERROR, /* Generic error */ + INVALID_ARGUMENT, /* Invalid argument */ + SQL_SYNTAX, /* SQL Syntax error */ + BUFFERPOOL, /* Buffer pool error*/ + RECORD, /* Record error */ + INTERNAL, /* Internal logic error in SQLite */ + PERM, /* Access permission denied */ + ABORT, /* Callback routine requested an abort */ + BUSY, /* The database file is locked */ + LOCKED, /* A table in the database is locked */ + NOMEM, /* A malloc() failed */ + READONLY, /* Attempt to write a readonly database */ + INTERRUPT, /* Operation terminated by interrupt()*/ + IOERR, /* Some kind of disk I/O error occurred */ + CORRUPT, /* The database disk image is malformed */ + NOTFOUND, /* Unknown opcode in file_control() */ + FULL, /* Insertion failed because database is full */ + CANTOPEN, /* Unable to open the database file */ + PROTOCOL, /* Database lock protocol error */ + EMPTY, /* Internal use only */ + SCHEMA, /* The database schema error */ + TOOBIG, /* String or BLOB exceeds size limit */ + CONSTRAINT, /* Abort due to constraint violation */ + MISMATCH, /* Data type mismatch */ + MISUSE, /* Library used incorrectly */ + NOLFS, /* Uses OS features not supported on host */ + AUTH, /* Authorization denied */ + FORMAT, /* Not 
used */ + RANGE, /* 2nd parameter to bind out of range */ + NOTADB, /* File opened that is not a database file */ + NOTICE = 100, /* Notifications from log() */ /* buffer pool part */ BUFFERPOOL_EXIST = (BUFFERPOOL | (RCBufferPool::BP_EXIST << 8)), @@ -242,7 +236,7 @@ enum RC { /* schema part */ SCHEMA_DB_EXIST = (SCHEMA | (RCSchema::DB_EXIST << 8)), SCHEMA_DB_NOT_EXIST = (SCHEMA | (RCSchema::DB_NOT_EXIST << 8)), - SCHEMA_DB_NOT_OPENED = (SCHEMA | (RCSchema::DB_NOT_OPENED<< 8)), + SCHEMA_DB_NOT_OPENED = (SCHEMA | (RCSchema::DB_NOT_OPENED << 8)), SCHEMA_TABLE_NOT_EXIST = (SCHEMA | (RCSchema::TABLE_NOT_EXIST << 8)), SCHEMA_TABLE_EXIST = (SCHEMA | (RCSchema::TABLE_EXIST << 8)), SCHEMA_TABLE_NAME_ILLEGAL = (SCHEMA | (RCSchema::TABLE_NAME_ILLEGAL << 8)), @@ -309,7 +303,7 @@ enum RC { CANTOPEN_DIRTYWAL = (CANTOPEN | (RCCantOpen::DIRTYWAL << 8)), CANTOPEN_SYMLINK = (CANTOPEN | (RCCantOpen::SYMLINK << 8)), - /* corrupt part */ // compile error + /* corrupt part */ // compile error // CORRUPT_VIRT = (CORRUPT | (RCCorrupt::CORRUPT_VIRT << 8)), // CORRUPT_SEQUENCE = (CORRUPT | (RCCorrupt::CORRUPT_SEQUENCE << 8)), // CORRUPT_INDEX = (CORRUPT | (RCCorrupt::CORRUPT_INDEX << 8)), @@ -348,4 +342,4 @@ enum RC { extern const char *strrc(RC rc); -#endif //__OBSERVER_RC_H__ +#endif //__OBSERVER_RC_H__ diff --git a/src/observer/session/session.cpp b/src/observer/session/session.cpp index 5c2ac6398c64a1ad2283ea7c2066ec1ccb6fe043..94a46f1ee1a4dacf480cd8f6e79d127c99cd639a 100644 --- a/src/observer/session/session.cpp +++ b/src/observer/session/session.cpp @@ -15,35 +15,42 @@ See the Mulan PSL v2 for more details. */ #include "session/session.h" #include "storage/trx/trx.h" -Session &Session::default_session() { +Session &Session::default_session() +{ static Session session; return session; } -Session::Session(const Session &other) : current_db_(other.current_db_){ -} +Session::Session(const Session &other) : current_db_(other.current_db_) +{} -Session::~Session() { +Session::~Session() +{ delete trx_; trx_ = nullptr; } -const std::string &Session::get_current_db() const { +const std::string &Session::get_current_db() const +{ return current_db_; } -void Session::set_current_db(const std::string &dbname) { +void Session::set_current_db(const std::string &dbname) +{ current_db_ = dbname; } -void Session::set_trx_multi_operation_mode(bool multi_operation_mode) { +void Session::set_trx_multi_operation_mode(bool multi_operation_mode) +{ trx_multi_operation_mode_ = multi_operation_mode; } -bool Session::is_trx_multi_operation_mode() const { +bool Session::is_trx_multi_operation_mode() const +{ return trx_multi_operation_mode_; } -Trx *Session::current_trx() { +Trx *Session::current_trx() +{ if (trx_ == nullptr) { trx_ = new Trx; } diff --git a/src/observer/session/session.h b/src/observer/session/session.h index 4d76903ae73445719112d0821329f94a4d9b9c8d..83767828ae62e812eb7a48646330227dc850c506 100644 --- a/src/observer/session/session.h +++ b/src/observer/session/session.h @@ -28,8 +28,8 @@ public: Session() = default; ~Session(); - Session(const Session & other); - void operator =(Session &) = delete; + Session(const Session &other); + void operator=(Session &) = delete; const std::string &get_current_db() const; void set_current_db(const std::string &dbname); @@ -37,12 +37,12 @@ public: void set_trx_multi_operation_mode(bool multi_operation_mode); bool is_trx_multi_operation_mode() const; - Trx * current_trx(); + Trx *current_trx(); private: - std::string current_db_; - Trx *trx_ = nullptr; - bool 
trx_multi_operation_mode_ = false; // 当前事务的模式,是否多语句模式. 单语句模式自动提交 + std::string current_db_; + Trx *trx_ = nullptr; + bool trx_multi_operation_mode_ = false; // 当前事务的模式,是否多语句模式. 单语句模式自动提交 }; -#endif // __OBSERVER_SESSION_SESSION_H__ \ No newline at end of file +#endif // __OBSERVER_SESSION_SESSION_H__ \ No newline at end of file diff --git a/src/observer/session/session_stage.cpp b/src/observer/session/session_stage.cpp index 9a115c638cb0d659893b40d43425c5c693993e55..6a7c051144018f89ea32e788586db187959f6ebd 100644 --- a/src/observer/session/session_stage.cpp +++ b/src/observer/session/session_stage.cpp @@ -34,14 +34,16 @@ using namespace common; const std::string SessionStage::SQL_METRIC_TAG = "SessionStage.sql"; // Constructor -SessionStage::SessionStage(const char *tag) - : Stage(tag), resolve_stage_(nullptr), sql_metric_(nullptr) {} +SessionStage::SessionStage(const char *tag) : Stage(tag), resolve_stage_(nullptr), sql_metric_(nullptr) +{} // Destructor -SessionStage::~SessionStage() {} +SessionStage::~SessionStage() +{} // Parse properties, instantiate a stage object -Stage *SessionStage::make_stage(const std::string &tag) { +Stage *SessionStage::make_stage(const std::string &tag) +{ SessionStage *stage = new (std::nothrow) SessionStage(tag.c_str()); if (stage == nullptr) { LOG_ERROR("new ExecutorStage failed"); @@ -52,7 +54,8 @@ Stage *SessionStage::make_stage(const std::string &tag) { } // Set properties for this object set in stage specific properties -bool SessionStage::set_properties() { +bool SessionStage::set_properties() +{ // std::string stageNameStr(stage_name_); // std::map section = g_properties()->get( // stageNameStr); @@ -65,7 +68,8 @@ bool SessionStage::set_properties() { } // Initialize stage params and validate outputs -bool SessionStage::initialize() { +bool SessionStage::initialize() +{ LOG_TRACE("Enter"); std::list::iterator stgp = next_stage_list_.begin(); @@ -79,7 +83,8 @@ bool SessionStage::initialize() { } // Cleanup after disconnection -void SessionStage::cleanup() { +void SessionStage::cleanup() +{ LOG_TRACE("Enter"); MetricsRegistry &metricsRegistry = get_metrics_registry(); @@ -92,7 +97,8 @@ void SessionStage::cleanup() { LOG_TRACE("Exit"); } -void SessionStage::handle_event(StageEvent *event) { +void SessionStage::handle_event(StageEvent *event) +{ LOG_TRACE("Enter\n"); // right now, we just support only one event. 
@@ -102,7 +108,8 @@ void SessionStage::handle_event(StageEvent *event) { return; } -void SessionStage::callback_event(StageEvent *event, CallbackContext *context) { +void SessionStage::callback_event(StageEvent *event, CallbackContext *context) +{ LOG_TRACE("Enter\n"); SessionEvent *sev = dynamic_cast(event); @@ -118,18 +125,19 @@ void SessionStage::callback_event(StageEvent *event, CallbackContext *context) { len = strlen(response) + 1; } Server::send(sev->get_client(), response, len); - if ('\0' != response[len - 1]) { - // 这里强制性的给发送一个消息终结符,如果需要发送多条消息,需要调整 - char end = 0; - Server::send(sev->get_client(), &end, 1); - } + if ('\0' != response[len - 1]) { + // 这里强制性的给发送一个消息终结符,如果需要发送多条消息,需要调整 + char end = 0; + Server::send(sev->get_client(), &end, 1); + } // sev->done(); LOG_TRACE("Exit\n"); return; } -void SessionStage::handle_request(StageEvent *event) { +void SessionStage::handle_request(StageEvent *event) +{ SessionEvent *sev = dynamic_cast(event); if (nullptr == sev) { @@ -141,7 +149,7 @@ void SessionStage::handle_request(StageEvent *event) { if (nullptr == sev->get_request_buf()) { LOG_ERROR("Invalid request buffer."); sev->done_immediate(); - return ; + return; } std::string sql = sev->get_request_buf(); diff --git a/src/observer/session/session_stage.h b/src/observer/session/session_stage.h index e4059877d66b2982f61fcc924eb64d0c5953c5e1..39639adc57642e9c4b2540b23e28a1b95470b36f 100644 --- a/src/observer/session/session_stage.h +++ b/src/observer/session/session_stage.h @@ -39,20 +39,17 @@ protected: bool initialize() override; void cleanup() override; void handle_event(common::StageEvent *event) override; - void callback_event(common::StageEvent *event, - common::CallbackContext *context) override; + void callback_event(common::StageEvent *event, common::CallbackContext *context) override; protected: void handle_input(common::StageEvent *event); - void handle_request(common::StageEvent *event); private: Stage *resolve_stage_; common::SimpleTimer *sql_metric_; static const std::string SQL_METRIC_TAG; - }; -#endif //__OBSERVER_SESSION_SESSIONSTAGE_H__ +#endif //__OBSERVER_SESSION_SESSIONSTAGE_H__ diff --git a/src/observer/sql/executor/execute_stage.cpp b/src/observer/sql/executor/execute_stage.cpp index c6bd44aeaeabc0bfe9f6ef94faa4f761cca58550..ce18b014385b2b2aa1b8bb74e7918d505293a833 100644 --- a/src/observer/sql/executor/execute_stage.cpp +++ b/src/observer/sql/executor/execute_stage.cpp @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Longda on 2021/4/13. +// Created by Meiyi & Longda on 2021/4/13. // #include @@ -35,16 +35,20 @@ See the Mulan PSL v2 for more details. */ using namespace common; -RC create_selection_executor(Trx *trx, const Selects &selects, const char *db, const char *table_name, SelectExeNode &select_node); +RC create_selection_executor( + Trx *trx, const Selects &selects, const char *db, const char *table_name, SelectExeNode &select_node); //! Constructor -ExecuteStage::ExecuteStage(const char *tag) : Stage(tag) {} +ExecuteStage::ExecuteStage(const char *tag) : Stage(tag) +{} //! Destructor -ExecuteStage::~ExecuteStage() {} +ExecuteStage::~ExecuteStage() +{} //! 
Parse properties, instantiate a stage object -Stage *ExecuteStage::make_stage(const std::string &tag) { +Stage *ExecuteStage::make_stage(const std::string &tag) +{ ExecuteStage *stage = new (std::nothrow) ExecuteStage(tag.c_str()); if (stage == nullptr) { LOG_ERROR("new ExecuteStage failed"); @@ -55,7 +59,8 @@ Stage *ExecuteStage::make_stage(const std::string &tag) { } //! Set properties for this object set in stage specific properties -bool ExecuteStage::set_properties() { +bool ExecuteStage::set_properties() +{ // std::string stageNameStr(stageName); // std::map section = theGlobalProperties()->get( // stageNameStr); @@ -68,7 +73,8 @@ bool ExecuteStage::set_properties() { } //! Initialize stage params and validate outputs -bool ExecuteStage::initialize() { +bool ExecuteStage::initialize() +{ LOG_TRACE("Enter"); std::list::iterator stgp = next_stage_list_.begin(); @@ -80,13 +86,15 @@ bool ExecuteStage::initialize() { } //! Cleanup after disconnection -void ExecuteStage::cleanup() { +void ExecuteStage::cleanup() +{ LOG_TRACE("Enter"); LOG_TRACE("Exit"); } -void ExecuteStage::handle_event(StageEvent *event) { +void ExecuteStage::handle_event(StageEvent *event) +{ LOG_TRACE("Enter\n"); handle_request(event); @@ -95,7 +103,8 @@ void ExecuteStage::handle_event(StageEvent *event) { return; } -void ExecuteStage::callback_event(StageEvent *event, CallbackContext *context) { +void ExecuteStage::callback_event(StageEvent *event, CallbackContext *context) +{ LOG_TRACE("Enter\n"); // here finish read all data from disk or network, but do nothing here. @@ -107,7 +116,8 @@ void ExecuteStage::callback_event(StageEvent *event, CallbackContext *context) { return; } -void ExecuteStage::handle_request(common::StageEvent *event) { +void ExecuteStage::handle_request(common::StageEvent *event) +{ ExecutionPlanEvent *exe_event = static_cast(event); SessionEvent *session_event = exe_event->sql_event()->session_event(); Query *sql = exe_event->sqls(); @@ -122,11 +132,10 @@ void ExecuteStage::handle_request(common::StageEvent *event) { exe_event->push_callback(cb); switch (sql->flag) { - case SCF_SELECT: { // select + case SCF_SELECT: { // select do_select(current_db, sql, exe_event->sql_event()->session_event()); exe_event->done_immediate(); - } - break; + } break; case SCF_INSERT: case SCF_UPDATE: @@ -136,7 +145,7 @@ void ExecuteStage::handle_request(common::StageEvent *event) { case SCF_DESC_TABLE: case SCF_DROP_TABLE: case SCF_CREATE_INDEX: - case SCF_DROP_INDEX: + case SCF_DROP_INDEX: case SCF_LOAD_DATA: { StorageEvent *storage_event = new (std::nothrow) StorageEvent(exe_event); if (storage_event == nullptr) { @@ -146,56 +155,49 @@ void ExecuteStage::handle_request(common::StageEvent *event) { } default_storage_stage_->handle_event(storage_event); - } - break; + } break; case SCF_SYNC: { RC rc = DefaultHandler::get_default().sync(); session_event->set_response(strrc(rc)); exe_event->done_immediate(); - } - break; + } break; case SCF_BEGIN: { session_event->get_client()->session->set_trx_multi_operation_mode(true); session_event->set_response(strrc(RC::SUCCESS)); exe_event->done_immediate(); - } - break; + } break; case SCF_COMMIT: { Trx *trx = session_event->get_client()->session->current_trx(); RC rc = trx->commit(); session_event->get_client()->session->set_trx_multi_operation_mode(false); session_event->set_response(strrc(rc)); exe_event->done_immediate(); - } - break; + } break; case SCF_ROLLBACK: { Trx *trx = session_event->get_client()->session->current_trx(); RC rc = trx->rollback(); 
session_event->get_client()->session->set_trx_multi_operation_mode(false); session_event->set_response(strrc(rc)); exe_event->done_immediate(); - } - break; + } break; case SCF_HELP: { const char *response = "show tables;\n" - "desc `table name`;\n" - "create table `table name` (`column name` `column type`, ...);\n" - "create index `index name` on `table` (`column`);\n" - "insert into `table` values(`value1`,`value2`);\n" - "update `table` set column=value [where `column`=`value`];\n" - "delete from `table` [where `column`=`value`];\n" - "select [ * | `columns` ] from `table`;\n"; + "desc `table name`;\n" + "create table `table name` (`column name` `column type`, ...);\n" + "create index `index name` on `table` (`column`);\n" + "insert into `table` values(`value1`,`value2`);\n" + "update `table` set column=value [where `column`=`value`];\n" + "delete from `table` [where `column`=`value`];\n" + "select [ * | `columns` ] from `table`;\n"; session_event->set_response(response); exe_event->done_immediate(); - } - break; + } break; case SCF_EXIT: { // do nothing const char *response = "Unsupported\n"; session_event->set_response(response); exe_event->done_immediate(); - } - break; + } break; default: { exe_event->done_immediate(); LOG_ERROR("Unsupported command=%d\n", sql->flag); @@ -203,7 +205,8 @@ void ExecuteStage::handle_request(common::StageEvent *event) { } } -void end_trx_if_need(Session *session, Trx *trx, bool all_right) { +void end_trx_if_need(Session *session, Trx *trx, bool all_right) +{ if (!session->is_trx_multi_operation_mode()) { if (all_right) { trx->commit(); @@ -215,7 +218,8 @@ void end_trx_if_need(Session *session, Trx *trx, bool all_right) { // 这里没有对输入的某些信息做合法性校验,比如查询的列名、where条件中的列名等,没有做必要的合法性校验 // 需要补充上这一部分. 校验部分也可以放在resolve,不过跟execution放一起也没有关系 -RC ExecuteStage::do_select(const char *db, Query *sql, SessionEvent *session_event) { +RC ExecuteStage::do_select(const char *db, Query *sql, SessionEvent *session_event) +{ RC rc = RC::SUCCESS; Session *session = session_event->get_client()->session; @@ -229,7 +233,7 @@ RC ExecuteStage::do_select(const char *db, Query *sql, SessionEvent *session_eve rc = create_selection_executor(trx, selects, db, table_name, *select_node); if (rc != RC::SUCCESS) { delete select_node; - for (SelectExeNode *& tmp_node: select_nodes) { + for (SelectExeNode *&tmp_node : select_nodes) { delete tmp_node; } end_trx_if_need(session, trx, false); @@ -245,11 +249,11 @@ RC ExecuteStage::do_select(const char *db, Query *sql, SessionEvent *session_eve } std::vector tuple_sets; - for (SelectExeNode *&node: select_nodes) { + for (SelectExeNode *&node : select_nodes) { TupleSet tuple_set; rc = node->execute(tuple_set); if (rc != RC::SUCCESS) { - for (SelectExeNode *& tmp_node: select_nodes) { + for (SelectExeNode *&tmp_node : select_nodes) { delete tmp_node; } end_trx_if_need(session, trx, false); @@ -267,7 +271,7 @@ RC ExecuteStage::do_select(const char *db, Query *sql, SessionEvent *session_eve tuple_sets.front().print(ss); } - for (SelectExeNode *& tmp_node: select_nodes) { + for (SelectExeNode *&tmp_node : select_nodes) { delete tmp_node; } session_event->set_response(ss.str()); @@ -275,7 +279,8 @@ RC ExecuteStage::do_select(const char *db, Query *sql, SessionEvent *session_eve return rc; } -bool match_table(const Selects &selects, const char *table_name_in_condition, const char *table_name_to_match) { +bool match_table(const Selects &selects, const char *table_name_in_condition, const char *table_name_to_match) +{ if (table_name_in_condition != nullptr) { 
return 0 == strcmp(table_name_in_condition, table_name_to_match); } @@ -283,7 +288,8 @@ bool match_table(const Selects &selects, const char *table_name_in_condition, co return selects.relation_num == 1; } -static RC schema_add_field(Table *table, const char *field_name, TupleSchema &schema) { +static RC schema_add_field(Table *table, const char *field_name, TupleSchema &schema) +{ const FieldMeta *field_meta = table->table_meta().field(field_name); if (nullptr == field_meta) { LOG_WARN("No such field. %s.%s", table->name(), field_name); @@ -295,10 +301,12 @@ static RC schema_add_field(Table *table, const char *field_name, TupleSchema &sc } // 把所有的表和只跟这张表关联的condition都拿出来,生成最底层的select 执行节点 -RC create_selection_executor(Trx *trx, const Selects &selects, const char *db, const char *table_name, SelectExeNode &select_node) { +RC create_selection_executor( + Trx *trx, const Selects &selects, const char *db, const char *table_name, SelectExeNode &select_node) +{ // 列出跟这张表关联的Attr TupleSchema schema; - Table * table = DefaultHandler::get_default().find_table(db, table_name); + Table *table = DefaultHandler::get_default().find_table(db, table_name); if (nullptr == table) { LOG_WARN("No such table [%s] in db [%s]", table_name, db); return RC::SCHEMA_TABLE_NOT_EXIST; @@ -310,7 +318,7 @@ RC create_selection_executor(Trx *trx, const Selects &selects, const char *db, c if (0 == strcmp("*", attr.attribute_name)) { // 列出这张表所有字段 TupleSchema::from_table(table, schema); - break; // 没有校验,给出* 之后,再写字段的错误 + break; // 没有校验,给出* 之后,再写字段的错误 } else { // 列出这张表相关字段 RC rc = schema_add_field(table, attr.attribute_name, schema); @@ -325,17 +333,20 @@ RC create_selection_executor(Trx *trx, const Selects &selects, const char *db, c std::vector condition_filters; for (size_t i = 0; i < selects.condition_num; i++) { const Condition &condition = selects.conditions[i]; - if ((condition.left_is_attr == 0 && condition.right_is_attr == 0) || // 两边都是值 - (condition.left_is_attr == 1 && condition.right_is_attr == 0 && match_table(selects, condition.left_attr.relation_name, table_name)) || // 左边是属性右边是值 - (condition.left_is_attr == 0 && condition.right_is_attr == 1 && match_table(selects, condition.right_attr.relation_name, table_name)) || // 左边是值,右边是属性名 + if ((condition.left_is_attr == 0 && condition.right_is_attr == 0) || // 两边都是值 + (condition.left_is_attr == 1 && condition.right_is_attr == 0 && + match_table(selects, condition.left_attr.relation_name, table_name)) || // 左边是属性右边是值 + (condition.left_is_attr == 0 && condition.right_is_attr == 1 && + match_table(selects, condition.right_attr.relation_name, table_name)) || // 左边是值,右边是属性名 (condition.left_is_attr == 1 && condition.right_is_attr == 1 && - match_table(selects, condition.left_attr.relation_name, table_name) && match_table(selects, condition.right_attr.relation_name, table_name)) // 左右都是属性名,并且表名都符合 - ) { + match_table(selects, condition.left_attr.relation_name, table_name) && + match_table(selects, condition.right_attr.relation_name, table_name)) // 左右都是属性名,并且表名都符合 + ) { DefaultConditionFilter *condition_filter = new DefaultConditionFilter(); RC rc = condition_filter->init(*table, condition); if (rc != RC::SUCCESS) { delete condition_filter; - for (DefaultConditionFilter * &filter : condition_filters) { + for (DefaultConditionFilter *&filter : condition_filters) { delete filter; } return rc; diff --git a/src/observer/sql/executor/execute_stage.h b/src/observer/sql/executor/execute_stage.h index 5266f07b7ac11d1c518ac734caf2828df04eb17a..12d977d4e5653a689fbb82e994b972e4ed98a4b4 
100644 --- a/src/observer/sql/executor/execute_stage.h +++ b/src/observer/sql/executor/execute_stage.h @@ -34,15 +34,15 @@ protected: bool initialize() override; void cleanup() override; void handle_event(common::StageEvent *event) override; - void callback_event(common::StageEvent *event, - common::CallbackContext *context) override; + void callback_event(common::StageEvent *event, common::CallbackContext *context) override; void handle_request(common::StageEvent *event); RC do_select(const char *db, Query *sql, SessionEvent *session_event); + protected: private: Stage *default_storage_stage_ = nullptr; Stage *mem_storage_stage_ = nullptr; }; -#endif //__OBSERVER_SQL_EXECUTE_STAGE_H__ +#endif //__OBSERVER_SQL_EXECUTE_STAGE_H__ diff --git a/src/observer/sql/executor/execution_node.cpp b/src/observer/sql/executor/execution_node.cpp index 83003f826d6da16061fbe77ae9b8e5570fca038f..4533b6014db89da7d52cd8a3592d9eac2b806d23 100644 --- a/src/observer/sql/executor/execution_node.cpp +++ b/src/observer/sql/executor/execution_node.cpp @@ -9,25 +9,27 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/14. +// Created by Meiyi & Wangyunlai on 2021/5/14. // #include "sql/executor/execution_node.h" #include "storage/common/table.h" #include "common/log/log.h" -SelectExeNode::SelectExeNode() : table_(nullptr) { -} +SelectExeNode::SelectExeNode() : table_(nullptr) +{} -SelectExeNode::~SelectExeNode() { - for (DefaultConditionFilter * &filter : condition_filters_) { +SelectExeNode::~SelectExeNode() +{ + for (DefaultConditionFilter *&filter : condition_filters_) { delete filter; } condition_filters_.clear(); } -RC -SelectExeNode::init(Trx *trx, Table *table, TupleSchema &&tuple_schema, std::vector &&condition_filters) { +RC SelectExeNode::init( + Trx *trx, Table *table, TupleSchema &&tuple_schema, std::vector &&condition_filters) +{ trx_ = trx; table_ = table; tuple_schema_ = tuple_schema; @@ -35,11 +37,13 @@ SelectExeNode::init(Trx *trx, Table *table, TupleSchema &&tuple_schema, std::vec return RC::SUCCESS; } -void record_reader(const char *data, void *context) { +void record_reader(const char *data, void *context) +{ TupleRecordConverter *converter = (TupleRecordConverter *)context; converter->add_record(data); } -RC SelectExeNode::execute(TupleSet &tuple_set) { +RC SelectExeNode::execute(TupleSet &tuple_set) +{ CompositeConditionFilter condition_filter; condition_filter.init((const ConditionFilter **)condition_filters_.data(), condition_filters_.size()); diff --git a/src/observer/sql/executor/execution_node.h b/src/observer/sql/executor/execution_node.h index 00e41a7077e7025330fd7a0dcdf9a13511cb36aa..49362a57204676694fa772ec8b6c480110cfcdba 100644 --- a/src/observer/sql/executor/execution_node.h +++ b/src/observer/sql/executor/execution_node.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/13. +// Created by Meiyi & Wangyunlai on 2021/5/13. 
// #ifndef __OBSERVER_SQL_EXECUTOR_EXECUTION_NODE_H_ @@ -35,14 +35,16 @@ public: SelectExeNode(); virtual ~SelectExeNode(); - RC init(Trx *trx, Table *table, TupleSchema && tuple_schema, std::vector &&condition_filters); + RC init( + Trx *trx, Table *table, TupleSchema &&tuple_schema, std::vector &&condition_filters); RC execute(TupleSet &tuple_set) override; + private: Trx *trx_ = nullptr; - Table * table_; - TupleSchema tuple_schema_; + Table *table_; + TupleSchema tuple_schema_; std::vector condition_filters_; }; -#endif //__OBSERVER_SQL_EXECUTOR_EXECUTION_NODE_H_ +#endif //__OBSERVER_SQL_EXECUTOR_EXECUTION_NODE_H_ diff --git a/src/observer/sql/executor/tuple.cpp b/src/observer/sql/executor/tuple.cpp index 574d657ceea88d59fc5192df2294266110a0f27c..19e8a3e1180843c522cbfa5460e6dbc1f48652a3 100644 --- a/src/observer/sql/executor/tuple.cpp +++ b/src/observer/sql/executor/tuple.cpp @@ -9,22 +9,24 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/14. +// Created by Meiyi & Wangyunlai on 2021/5/14. // #include "sql/executor/tuple.h" #include "storage/common/table.h" #include "common/log/log.h" -Tuple::Tuple(const Tuple &other) { +Tuple::Tuple(const Tuple &other) +{ LOG_PANIC("Copy constructor of tuple is not supported"); exit(1); } -Tuple::Tuple(Tuple &&other) noexcept : values_(std::move(other.values_)) { -} +Tuple::Tuple(Tuple &&other) noexcept : values_(std::move(other.values_)) +{} -Tuple & Tuple::operator=(Tuple &&other) noexcept { +Tuple &Tuple::operator=(Tuple &&other) noexcept +{ if (&other == this) { return *this; } @@ -34,36 +36,43 @@ Tuple & Tuple::operator=(Tuple &&other) noexcept { return *this; } -Tuple::~Tuple() { -} +Tuple::~Tuple() +{} // add (Value && value) -void Tuple::add(TupleValue *value) { +void Tuple::add(TupleValue *value) +{ values_.emplace_back(value); } -void Tuple::add(const std::shared_ptr &other) { +void Tuple::add(const std::shared_ptr &other) +{ values_.emplace_back(other); } -void Tuple::add(int value) { +void Tuple::add(int value) +{ add(new IntValue(value)); } -void Tuple::add(float value) { +void Tuple::add(float value) +{ add(new FloatValue(value)); } -void Tuple::add(const char *s, int len) { +void Tuple::add(const char *s, int len) +{ add(new StringValue(s, len)); } //////////////////////////////////////////////////////////////////////////////// -std::string TupleField::to_string() const { +std::string TupleField::to_string() const +{ return std::string(table_name_) + "." 
+ field_name_ + std::to_string(type_); } //////////////////////////////////////////////////////////////////////////////// -void TupleSchema::from_table(const Table *table, TupleSchema &schema) { +void TupleSchema::from_table(const Table *table, TupleSchema &schema) +{ const char *table_name = table->name(); const TableMeta &table_meta = table->table_meta(); const int field_num = table_meta.field_num(); @@ -75,14 +84,15 @@ void TupleSchema::from_table(const Table *table, TupleSchema &schema) { } } -void TupleSchema::add(AttrType type, const char *table_name, const char *field_name) { +void TupleSchema::add(AttrType type, const char *table_name, const char *field_name) +{ fields_.emplace_back(type, table_name, field_name); } -void TupleSchema::add_if_not_exists(AttrType type, const char *table_name, const char *field_name) { - for (const auto &field: fields_) { - if (0 == strcmp(field.table_name(), table_name) && - 0 == strcmp(field.field_name(), field_name)) { +void TupleSchema::add_if_not_exists(AttrType type, const char *table_name, const char *field_name) +{ + for (const auto &field : fields_) { + if (0 == strcmp(field.table_name(), table_name) && 0 == strcmp(field.field_name(), field_name)) { return; } } @@ -90,14 +100,16 @@ void TupleSchema::add_if_not_exists(AttrType type, const char *table_name, const add(type, table_name, field_name); } -void TupleSchema::append(const TupleSchema &other) { +void TupleSchema::append(const TupleSchema &other) +{ fields_.reserve(fields_.size() + other.fields_.size()); - for (const auto &field: other.fields_) { + for (const auto &field : other.fields_) { fields_.emplace_back(field); } } -int TupleSchema::index_of_field(const char *table_name, const char *field_name) const { +int TupleSchema::index_of_field(const char *table_name, const char *field_name) const +{ const int size = fields_.size(); for (int i = 0; i < size; i++) { const TupleField &field = fields_[i]; @@ -108,7 +120,8 @@ int TupleSchema::index_of_field(const char *table_name, const char *field_name) return -1; } -void TupleSchema::print(std::ostream &os) const { +void TupleSchema::print(std::ostream &os) const +{ if (fields_.empty()) { os << "No schema"; return; @@ -116,12 +129,11 @@ void TupleSchema::print(std::ostream &os) const { // 判断有多张表还是只有一张表 std::set table_names; - for (const auto &field: fields_) { + for (const auto &field : fields_) { table_names.insert(field.table_name()); } - for (std::vector::const_iterator iter = fields_.begin(), end = --fields_.end(); - iter != end; ++iter) { + for (std::vector::const_iterator iter = fields_.begin(), end = --fields_.end(); iter != end; ++iter) { if (table_names.size() > 1) { os << iter->table_name() << "."; } @@ -135,11 +147,13 @@ void TupleSchema::print(std::ostream &os) const { } ///////////////////////////////////////////////////////////////////////////// -TupleSet::TupleSet(TupleSet &&other) : tuples_(std::move(other.tuples_)), schema_(other.schema_){ +TupleSet::TupleSet(TupleSet &&other) : tuples_(std::move(other.tuples_)), schema_(other.schema_) +{ other.schema_.clear(); } -TupleSet &TupleSet::operator=(TupleSet &&other) { +TupleSet &TupleSet::operator=(TupleSet &&other) +{ if (this == &other) { return *this; } @@ -153,16 +167,19 @@ TupleSet &TupleSet::operator=(TupleSet &&other) { return *this; } -void TupleSet::add(Tuple &&tuple) { +void TupleSet::add(Tuple &&tuple) +{ tuples_.emplace_back(std::move(tuple)); } -void TupleSet::clear() { +void TupleSet::clear() +{ tuples_.clear(); schema_.clear(); } -void TupleSet::print(std::ostream 
&os) const { +void TupleSet::print(std::ostream &os) const +{ if (schema_.fields().empty()) { LOG_WARN("Got empty schema"); return; @@ -173,7 +190,8 @@ void TupleSet::print(std::ostream &os) const { for (const Tuple &item : tuples_) { const std::vector> &values = item.values(); for (std::vector>::const_iterator iter = values.begin(), end = --values.end(); - iter != end; ++iter) { + iter != end; + ++iter) { (*iter)->to_string(os); os << " | "; } @@ -182,36 +200,42 @@ void TupleSet::print(std::ostream &os) const { } } -void TupleSet::set_schema(const TupleSchema &schema) { +void TupleSet::set_schema(const TupleSchema &schema) +{ schema_ = schema; } -const TupleSchema &TupleSet::get_schema() const { +const TupleSchema &TupleSet::get_schema() const +{ return schema_; } -bool TupleSet::is_empty() const { +bool TupleSet::is_empty() const +{ return tuples_.empty(); } -int TupleSet::size() const { +int TupleSet::size() const +{ return tuples_.size(); } -const Tuple &TupleSet::get(int index) const { +const Tuple &TupleSet::get(int index) const +{ return tuples_[index]; } -const std::vector &TupleSet::tuples() const { +const std::vector &TupleSet::tuples() const +{ return tuples_; } ///////////////////////////////////////////////////////////////////////////// -TupleRecordConverter::TupleRecordConverter(Table *table, TupleSet &tuple_set) : - table_(table), tuple_set_(tuple_set){ -} +TupleRecordConverter::TupleRecordConverter(Table *table, TupleSet &tuple_set) : table_(table), tuple_set_(tuple_set) +{} -void TupleRecordConverter::add_record(const char *record) { +void TupleRecordConverter::add_record(const char *record) +{ const TupleSchema &schema = tuple_set_.schema(); Tuple tuple; const TableMeta &table_meta = table_->table_meta(); @@ -220,20 +244,17 @@ void TupleRecordConverter::add_record(const char *record) { assert(field_meta != nullptr); switch (field_meta->type()) { case INTS: { - int value = *(int*)(record + field_meta->offset()); + int value = *(int *)(record + field_meta->offset()); tuple.add(value); - } - break; + } break; case FLOATS: { float value = *(float *)(record + field_meta->offset()); tuple.add(value); - } - break; + } break; case CHARS: { const char *s = record + field_meta->offset(); // 现在当做Cstring来处理 tuple.add(s, strlen(s)); - } - break; + } break; default: { LOG_PANIC("Unsupported field type. type=%d", field_meta->type()); } @@ -242,5 +263,3 @@ void TupleRecordConverter::add_record(const char *record) { tuple_set_.add(std::move(tuple)); } - - diff --git a/src/observer/sql/executor/tuple.h b/src/observer/sql/executor/tuple.h index 2b43d3cf123c1f3d64dacf644be271831de8193d..2a21b8b64af4e42841817b298bcdaebabf5ffefd 100644 --- a/src/observer/sql/executor/tuple.h +++ b/src/observer/sql/executor/tuple.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/14. +// Created by Meiyi & Wangyunlai on 2021/5/14. 
// #ifndef __OBSERVER_SQL_EXECUTOR_TUPLE_H_ @@ -31,8 +31,8 @@ public: ~Tuple(); - Tuple(Tuple &&other) noexcept ; - Tuple & operator=(Tuple &&other) noexcept ; + Tuple(Tuple &&other) noexcept; + Tuple &operator=(Tuple &&other) noexcept; void add(TupleValue *value); void add(const std::shared_ptr &other); @@ -40,46 +40,54 @@ public: void add(float value); void add(const char *s, int len); - const std::vector> &values() const { + const std::vector> &values() const + { return values_; } - int size() const { + int size() const + { return values_.size(); } - const TupleValue &get(int index) const { + const TupleValue &get(int index) const + { return *values_[index]; } - const std::shared_ptr &get_pointer(int index) const { + const std::shared_ptr &get_pointer(int index) const + { return values_[index]; } private: - std::vector> values_; + std::vector> values_; }; class TupleField { public: - TupleField(AttrType type, const char *table_name, const char *field_name) : - type_(type), table_name_(table_name), field_name_(field_name){ - } + TupleField(AttrType type, const char *table_name, const char *field_name) + : type_(type), table_name_(table_name), field_name_(field_name) + {} - AttrType type() const{ + AttrType type() const + { return type_; } - const char *table_name() const { + const char *table_name() const + { return table_name_.c_str(); } - const char *field_name() const { + const char *field_name() const + { return field_name_.c_str(); } std::string to_string() const; + private: - AttrType type_; + AttrType type_; std::string table_name_; std::string field_name_; }; @@ -94,22 +102,27 @@ public: // void merge(const TupleSchema &other); void append(const TupleSchema &other); - const std::vector &fields() const { + const std::vector &fields() const + { return fields_; } - const TupleField &field(int index) const { + const TupleField &field(int index) const + { return fields_[index]; } int index_of_field(const char *table_name, const char *field_name) const; - void clear() { + void clear() + { fields_.clear(); } void print(std::ostream &os) const; + public: static void from_table(const Table *table, TupleSchema &schema); + private: std::vector fields_; }; @@ -118,9 +131,9 @@ class TupleSet { public: TupleSet() = default; TupleSet(TupleSet &&other); - explicit TupleSet(const TupleSchema &schema) : schema_(schema) { - } - TupleSet &operator =(TupleSet &&other); + explicit TupleSet(const TupleSchema &schema) : schema_(schema) + {} + TupleSet &operator=(TupleSet &&other); ~TupleSet() = default; @@ -128,7 +141,7 @@ public: const TupleSchema &get_schema() const; - void add(Tuple && tuple); + void add(Tuple &&tuple); void clear(); @@ -138,10 +151,13 @@ public: const std::vector &tuples() const; void print(std::ostream &os) const; + public: - const TupleSchema &schema() const { + const TupleSchema &schema() const + { return schema_; } + private: std::vector tuples_; TupleSchema schema_; @@ -152,9 +168,10 @@ public: TupleRecordConverter(Table *table, TupleSet &tuple_set); void add_record(const char *record); + private: Table *table_; TupleSet &tuple_set_; }; -#endif //__OBSERVER_SQL_EXECUTOR_TUPLE_H_ +#endif //__OBSERVER_SQL_EXECUTOR_TUPLE_H_ diff --git a/src/observer/sql/executor/value.h b/src/observer/sql/executor/value.h index e3c54f3e2b8ed89de68f77afa5e273e29d3abce7..88e8f077ba35cd4db1e780716f7fb95fc7714e86 100644 --- a/src/observer/sql/executor/value.h +++ b/src/observer/sql/executor/value.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. 
*/ // -// Created by Wangyunlai on 2021/5/14. +// Created by Meiyi & Wangyunlai on 2021/5/14. // #ifndef __OBSERVER_SQL_EXECUTOR_VALUE_H_ @@ -27,20 +27,23 @@ public: virtual void to_string(std::ostream &os) const = 0; virtual int compare(const TupleValue &other) const = 0; + private: }; class IntValue : public TupleValue { public: - explicit IntValue(int value) : value_(value) { - } + explicit IntValue(int value) : value_(value) + {} - void to_string(std::ostream &os) const override { + void to_string(std::ostream &os) const override + { os << value_; } - int compare(const TupleValue &other) const override { - const IntValue & int_other = (const IntValue &)other; + int compare(const TupleValue &other) const override + { + const IntValue &int_other = (const IntValue &)other; return value_ - int_other.value_; } @@ -50,17 +53,19 @@ private: class FloatValue : public TupleValue { public: - explicit FloatValue(float value) : value_(value) { - } + explicit FloatValue(float value) : value_(value) + {} - void to_string(std::ostream &os) const override { + void to_string(std::ostream &os) const override + { os << value_; } - int compare(const TupleValue &other) const override { - const FloatValue & float_other = (const FloatValue &)other; + int compare(const TupleValue &other) const override + { + const FloatValue &float_other = (const FloatValue &)other; float result = value_ - float_other.value_; - if (result > 0) { // 浮点数没有考虑精度问题 + if (result > 0) { // 浮点数没有考虑精度问题 return 1; } if (result < 0) { @@ -68,28 +73,31 @@ public: } return 0; } + private: float value_; }; class StringValue : public TupleValue { public: - StringValue(const char *value, int len) : value_(value, len){ - } - explicit StringValue(const char *value) : value_(value) { - } + StringValue(const char *value, int len) : value_(value, len) + {} + explicit StringValue(const char *value) : value_(value) + {} - void to_string(std::ostream &os) const override { + void to_string(std::ostream &os) const override + { os << value_; } - int compare(const TupleValue &other) const override { + int compare(const TupleValue &other) const override + { const StringValue &string_other = (const StringValue &)other; return strcmp(value_.c_str(), string_other.value_.c_str()); } + private: std::string value_; }; - -#endif //__OBSERVER_SQL_EXECUTOR_VALUE_H_ +#endif //__OBSERVER_SQL_EXECUTOR_VALUE_H_ diff --git a/src/observer/sql/optimizer/optimize_stage.cpp b/src/observer/sql/optimizer/optimize_stage.cpp index f063918d0c352d60bf2e67685d08c93ad979295a..4ce4f840ef7ad0391541a90134faa5e9d05c4baa 100644 --- a/src/observer/sql/optimizer/optimize_stage.cpp +++ b/src/observer/sql/optimizer/optimize_stage.cpp @@ -26,13 +26,16 @@ See the Mulan PSL v2 for more details. */ using namespace common; //! Constructor -OptimizeStage::OptimizeStage(const char *tag) : Stage(tag) {} +OptimizeStage::OptimizeStage(const char *tag) : Stage(tag) +{} //! Destructor -OptimizeStage::~OptimizeStage() {} +OptimizeStage::~OptimizeStage() +{} //! Parse properties, instantiate a stage object -Stage *OptimizeStage::make_stage(const std::string &tag) { +Stage *OptimizeStage::make_stage(const std::string &tag) +{ OptimizeStage *stage = new (std::nothrow) OptimizeStage(tag.c_str()); if (stage == nullptr) { LOG_ERROR("new OptimizeStage failed"); @@ -43,7 +46,8 @@ Stage *OptimizeStage::make_stage(const std::string &tag) { } //! 
Set properties for this object set in stage specific properties -bool OptimizeStage::set_properties() { +bool OptimizeStage::set_properties() +{ // std::string stageNameStr(stage_name_); // std::map section = g_properties()->get( // stageNameStr); @@ -56,7 +60,8 @@ bool OptimizeStage::set_properties() { } //! Initialize stage params and validate outputs -bool OptimizeStage::initialize() { +bool OptimizeStage::initialize() +{ LOG_TRACE("Enter"); std::list::iterator stgp = next_stage_list_.begin(); @@ -67,13 +72,15 @@ bool OptimizeStage::initialize() { } //! Cleanup after disconnection -void OptimizeStage::cleanup() { +void OptimizeStage::cleanup() +{ LOG_TRACE("Enter"); LOG_TRACE("Exit"); } -void OptimizeStage::handle_event(StageEvent *event) { +void OptimizeStage::handle_event(StageEvent *event) +{ LOG_TRACE("Enter\n"); // optimize sql plan, here just pass the event to the next stage @@ -83,7 +90,8 @@ void OptimizeStage::handle_event(StageEvent *event) { return; } -void OptimizeStage::callback_event(StageEvent *event, CallbackContext *context) { +void OptimizeStage::callback_event(StageEvent *event, CallbackContext *context) +{ LOG_TRACE("Enter\n"); LOG_TRACE("Exit\n"); diff --git a/src/observer/sql/optimizer/optimize_stage.h b/src/observer/sql/optimizer/optimize_stage.h index 1398d6c13b4ffdf0c8a8836247d2de880c59bf0d..47df973e43c6f1aa5691bda9565e9ec7d05d8e1a 100644 --- a/src/observer/sql/optimizer/optimize_stage.h +++ b/src/observer/sql/optimizer/optimize_stage.h @@ -30,12 +30,11 @@ protected: bool initialize(); void cleanup(); void handle_event(common::StageEvent *event); - void callback_event(common::StageEvent *event, - common::CallbackContext *context); + void callback_event(common::StageEvent *event, common::CallbackContext *context); protected: private: Stage *execute_stage = nullptr; }; -#endif //__OBSERVER_SQL_OPTIMIZE_STAGE_H__ +#endif //__OBSERVER_SQL_OPTIMIZE_STAGE_H__ diff --git a/src/observer/sql/parser/lex.yy.c b/src/observer/sql/parser/lex.yy.c index fad402837e8143e97cfbf0c16602a031c45b0cf6..00c4ea2718ed00b2223efd74772014de9be5dd29 100644 --- a/src/observer/sql/parser/lex.yy.c +++ b/src/observer/sql/parser/lex.yy.c @@ -51,38 +51,38 @@ typedef uint64_t flex_uint64_t; typedef signed char flex_int8_t; typedef short int flex_int16_t; typedef int flex_int32_t; -typedef unsigned char flex_uint8_t; +typedef unsigned char flex_uint8_t; typedef unsigned short int flex_uint16_t; typedef unsigned int flex_uint32_t; #endif /* ! C99 */ /* Limits of integral types. */ #ifndef INT8_MIN -#define INT8_MIN (-128) +#define INT8_MIN (-128) #endif #ifndef INT16_MIN -#define INT16_MIN (-32767-1) +#define INT16_MIN (-32767 - 1) #endif #ifndef INT32_MIN -#define INT32_MIN (-2147483647-1) +#define INT32_MIN (-2147483647 - 1) #endif #ifndef INT8_MAX -#define INT8_MAX (127) +#define INT8_MAX (127) #endif #ifndef INT16_MAX -#define INT16_MAX (32767) +#define INT16_MAX (32767) #endif #ifndef INT32_MAX -#define INT32_MAX (2147483647) +#define INT32_MAX (2147483647) #endif #ifndef UINT8_MAX -#define UINT8_MAX (255U) +#define UINT8_MAX (255U) #endif #ifndef UINT16_MAX -#define UINT16_MAX (65535U) +#define UINT16_MAX (65535U) #endif #ifndef UINT32_MAX -#define UINT32_MAX (4294967295U) +#define UINT32_MAX (4294967295U) #endif #endif /* ! FLEXINT_H */ @@ -92,15 +92,15 @@ typedef unsigned int flex_uint32_t; /* The "const" storage-class-modifier is valid. */ #define YY_USE_CONST -#else /* ! __cplusplus */ +#else /* ! __cplusplus */ /* C99 requires __STDC__ to be defined as 1. 
*/ -#if defined (__STDC__) +#if defined(__STDC__) #define YY_USE_CONST -#endif /* defined (__STDC__) */ -#endif /* ! __cplusplus */ +#endif /* defined (__STDC__) */ +#endif /* ! __cplusplus */ #ifdef YY_USE_CONST #define yyconst const @@ -116,12 +116,12 @@ typedef unsigned int flex_uint32_t; * we want to instead treat it as an 8-bit unsigned char, hence the * double cast. */ -#define YY_SC_TO_UI(c) ((unsigned int) (unsigned char) c) +#define YY_SC_TO_UI(c) ((unsigned int)(unsigned char)c) /* An opaque pointer. */ #ifndef YY_TYPEDEF_YY_SCANNER_T #define YY_TYPEDEF_YY_SCANNER_T -typedef void* yyscan_t; +typedef void *yyscan_t; #endif /* For convenience, these vars (plus the bison vars far below) @@ -152,7 +152,7 @@ typedef void* yyscan_t; #define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1) /* Special action meaning "start processing a new file". */ -#define YY_NEW_FILE yyrestart(yyin ,yyscanner ) +#define YY_NEW_FILE yyrestart(yyin, yyscanner) #define YY_END_OF_BUFFER_CHAR 0 @@ -163,7 +163,7 @@ typedef void* yyscan_t; /* The state buf must be large enough to hold one state per character in the main buffer. */ -#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type)) +#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type)) #ifndef YY_TYPEDEF_YY_BUFFER_STATE #define YY_TYPEDEF_YY_BUFFER_STATE @@ -179,87 +179,83 @@ typedef size_t yy_size_t; #define EOB_ACT_END_OF_FILE 1 #define EOB_ACT_LAST_MATCH 2 - #define YY_LESS_LINENO(n) - +#define YY_LESS_LINENO(n) + /* Return all but the first "n" matched characters back to the input stream. */ -#define yyless(n) \ - do \ - { \ - /* Undo effects of setting up yytext. */ \ - int yyless_macro_arg = (n); \ - YY_LESS_LINENO(yyless_macro_arg);\ - *yy_cp = yyg->yy_hold_char; \ - YY_RESTORE_YY_MORE_OFFSET \ - yyg->yy_c_buf_p = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \ - YY_DO_BEFORE_ACTION; /* set up yytext again */ \ - } \ - while ( 0 ) - -#define unput(c) yyunput( c, yyg->yytext_ptr , yyscanner ) +#define yyless(n) \ + do { \ + /* Undo effects of setting up yytext. */ \ + int yyless_macro_arg = (n); \ + YY_LESS_LINENO(yyless_macro_arg); \ + *yy_cp = yyg->yy_hold_char; \ + YY_RESTORE_YY_MORE_OFFSET \ + yyg->yy_c_buf_p = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \ + YY_DO_BEFORE_ACTION; /* set up yytext again */ \ + } while (0) + +#define unput(c) yyunput(c, yyg->yytext_ptr, yyscanner) #ifndef YY_STRUCT_YY_BUFFER_STATE #define YY_STRUCT_YY_BUFFER_STATE -struct yy_buffer_state - { - FILE *yy_input_file; - - char *yy_ch_buf; /* input buffer */ - char *yy_buf_pos; /* current position in input buffer */ - - /* Size of input buffer in bytes, not including room for EOB - * characters. - */ - yy_size_t yy_buf_size; - - /* Number of characters read into yy_ch_buf, not including EOB - * characters. - */ - yy_size_t yy_n_chars; - - /* Whether we "own" the buffer - i.e., we know we created it, - * and can realloc() it to grow it, and should free() it to - * delete it. - */ - int yy_is_our_buffer; - - /* Whether this is an "interactive" input source; if so, and - * if we're using stdio for input, then we want to use getc() - * instead of fread(), to make sure we stop fetching input after - * each newline. - */ - int yy_is_interactive; - - /* Whether we're considered to be at the beginning of a line. - * If so, '^' rules will be active on the next match, otherwise - * not. - */ - int yy_at_bol; - - int yy_bs_lineno; /**< The line count. */ - int yy_bs_column; /**< The column count. 
*/ - - /* Whether to try to fill the input buffer when we reach the - * end of it. - */ - int yy_fill_buffer; - - int yy_buffer_status; +struct yy_buffer_state { + FILE *yy_input_file; + + char *yy_ch_buf; /* input buffer */ + char *yy_buf_pos; /* current position in input buffer */ + + /* Size of input buffer in bytes, not including room for EOB + * characters. + */ + yy_size_t yy_buf_size; + + /* Number of characters read into yy_ch_buf, not including EOB + * characters. + */ + yy_size_t yy_n_chars; + + /* Whether we "own" the buffer - i.e., we know we created it, + * and can realloc() it to grow it, and should free() it to + * delete it. + */ + int yy_is_our_buffer; + + /* Whether this is an "interactive" input source; if so, and + * if we're using stdio for input, then we want to use getc() + * instead of fread(), to make sure we stop fetching input after + * each newline. + */ + int yy_is_interactive; + + /* Whether we're considered to be at the beginning of a line. + * If so, '^' rules will be active on the next match, otherwise + * not. + */ + int yy_at_bol; + + int yy_bs_lineno; /**< The line count. */ + int yy_bs_column; /**< The column count. */ + + /* Whether to try to fill the input buffer when we reach the + * end of it. + */ + int yy_fill_buffer; + + int yy_buffer_status; #define YY_BUFFER_NEW 0 #define YY_BUFFER_NORMAL 1 - /* When an EOF's been seen but there's still some text to process - * then we mark the buffer as YY_EOF_PENDING, to indicate that we - * shouldn't try reading from the input source any more. We might - * still have a bunch of tokens to match, though, because of - * possible backing-up. - * - * When we actually see the EOF, we change the status to "new" - * (via yyrestart()), so that the user can continue scanning by - * just pointing yyin at a new input file. - */ + /* When an EOF's been seen but there's still some text to process + * then we mark the buffer as YY_EOF_PENDING, to indicate that we + * shouldn't try reading from the input source any more. We might + * still have a bunch of tokens to match, though, because of + * possible backing-up. + * + * When we actually see the EOF, we change the status to "new" + * (via yyrestart()), so that the user can continue scanning by + * just pointing yyin at a new input file. + */ #define YY_BUFFER_EOF_PENDING 2 - - }; +}; #endif /* !YY_STRUCT_YY_BUFFER_STATE */ /* We provide macros for accessing buffer states in case in the @@ -268,58 +264,54 @@ struct yy_buffer_state * * Returns the top of the stack, or NULL. */ -#define YY_CURRENT_BUFFER ( yyg->yy_buffer_stack \ - ? yyg->yy_buffer_stack[yyg->yy_buffer_stack_top] \ - : NULL) +#define YY_CURRENT_BUFFER (yyg->yy_buffer_stack ? yyg->yy_buffer_stack[yyg->yy_buffer_stack_top] : NULL) /* Same as previous macro, but useful when we know that the buffer stack is not * NULL or when we need an lvalue. For internal use only. 
*/ #define YY_CURRENT_BUFFER_LVALUE yyg->yy_buffer_stack[yyg->yy_buffer_stack_top] -void yyrestart (FILE *input_file ,yyscan_t yyscanner ); -void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ,yyscan_t yyscanner ); -YY_BUFFER_STATE yy_create_buffer (FILE *file,int size ,yyscan_t yyscanner ); -void yy_delete_buffer (YY_BUFFER_STATE b ,yyscan_t yyscanner ); -void yy_flush_buffer (YY_BUFFER_STATE b ,yyscan_t yyscanner ); -void yypush_buffer_state (YY_BUFFER_STATE new_buffer ,yyscan_t yyscanner ); -void yypop_buffer_state (yyscan_t yyscanner ); +void yyrestart(FILE *input_file, yyscan_t yyscanner); +void yy_switch_to_buffer(YY_BUFFER_STATE new_buffer, yyscan_t yyscanner); +YY_BUFFER_STATE yy_create_buffer(FILE *file, int size, yyscan_t yyscanner); +void yy_delete_buffer(YY_BUFFER_STATE b, yyscan_t yyscanner); +void yy_flush_buffer(YY_BUFFER_STATE b, yyscan_t yyscanner); +void yypush_buffer_state(YY_BUFFER_STATE new_buffer, yyscan_t yyscanner); +void yypop_buffer_state(yyscan_t yyscanner); -static void yyensure_buffer_stack (yyscan_t yyscanner ); -static void yy_load_buffer_state (yyscan_t yyscanner ); -static void yy_init_buffer (YY_BUFFER_STATE b,FILE *file ,yyscan_t yyscanner ); +static void yyensure_buffer_stack(yyscan_t yyscanner); +static void yy_load_buffer_state(yyscan_t yyscanner); +static void yy_init_buffer(YY_BUFFER_STATE b, FILE *file, yyscan_t yyscanner); -#define YY_FLUSH_BUFFER yy_flush_buffer(YY_CURRENT_BUFFER ,yyscanner) +#define YY_FLUSH_BUFFER yy_flush_buffer(YY_CURRENT_BUFFER, yyscanner) -YY_BUFFER_STATE yy_scan_buffer (char *base,yy_size_t size ,yyscan_t yyscanner ); -YY_BUFFER_STATE yy_scan_string (yyconst char *yy_str ,yyscan_t yyscanner ); -YY_BUFFER_STATE yy_scan_bytes (yyconst char *bytes,yy_size_t len ,yyscan_t yyscanner ); +YY_BUFFER_STATE yy_scan_buffer(char *base, yy_size_t size, yyscan_t yyscanner); +YY_BUFFER_STATE yy_scan_string(yyconst char *yy_str, yyscan_t yyscanner); +YY_BUFFER_STATE yy_scan_bytes(yyconst char *bytes, yy_size_t len, yyscan_t yyscanner); -void *yyalloc (yy_size_t ,yyscan_t yyscanner ); -void *yyrealloc (void *,yy_size_t ,yyscan_t yyscanner ); -void yyfree (void * ,yyscan_t yyscanner ); +void *yyalloc(yy_size_t, yyscan_t yyscanner); +void *yyrealloc(void *, yy_size_t, yyscan_t yyscanner); +void yyfree(void *, yyscan_t yyscanner); #define yy_new_buffer yy_create_buffer -#define yy_set_interactive(is_interactive) \ - { \ - if ( ! YY_CURRENT_BUFFER ){ \ - yyensure_buffer_stack (yyscanner); \ - YY_CURRENT_BUFFER_LVALUE = \ - yy_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); \ - } \ - YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \ - } - -#define yy_set_bol(at_bol) \ - { \ - if ( ! 
YY_CURRENT_BUFFER ){\ - yyensure_buffer_stack (yyscanner); \ - YY_CURRENT_BUFFER_LVALUE = \ - yy_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); \ - } \ - YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \ - } +#define yy_set_interactive(is_interactive) \ + { \ + if (!YY_CURRENT_BUFFER) { \ + yyensure_buffer_stack(yyscanner); \ + YY_CURRENT_BUFFER_LVALUE = yy_create_buffer(yyin, YY_BUF_SIZE, yyscanner); \ + } \ + YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \ + } + +#define yy_set_bol(at_bol) \ + { \ + if (!YY_CURRENT_BUFFER) { \ + yyensure_buffer_stack(yyscanner); \ + YY_CURRENT_BUFFER_LVALUE = yy_create_buffer(yyin, YY_BUF_SIZE, yyscanner); \ + } \ + YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \ + } #define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol) @@ -334,30 +326,29 @@ typedef int yy_state_type; #define yytext_ptr yytext_r -static yy_state_type yy_get_previous_state (yyscan_t yyscanner ); -static yy_state_type yy_try_NUL_trans (yy_state_type current_state ,yyscan_t yyscanner); -static int yy_get_next_buffer (yyscan_t yyscanner ); -static void yy_fatal_error (yyconst char msg[] ,yyscan_t yyscanner ); +static yy_state_type yy_get_previous_state(yyscan_t yyscanner); +static yy_state_type yy_try_NUL_trans(yy_state_type current_state, yyscan_t yyscanner); +static int yy_get_next_buffer(yyscan_t yyscanner); +static void yy_fatal_error(yyconst char msg[], yyscan_t yyscanner); /* Done after the current pattern has been matched and before the * corresponding action - sets up yytext. */ -#define YY_DO_BEFORE_ACTION \ - yyg->yytext_ptr = yy_bp; \ - yyleng = (yy_size_t) (yy_cp - yy_bp); \ - yyg->yy_hold_char = *yy_cp; \ - *yy_cp = '\0'; \ - yyg->yy_c_buf_p = yy_cp; +#define YY_DO_BEFORE_ACTION \ + yyg->yytext_ptr = yy_bp; \ + yyleng = (yy_size_t)(yy_cp - yy_bp); \ + yyg->yy_hold_char = *yy_cp; \ + *yy_cp = '\0'; \ + yyg->yy_c_buf_p = yy_cp; #define YY_NUM_RULES 50 #define YY_END_OF_BUFFER 51 /* This struct is not used in this scanner, but its presence is necessary. */ -struct yy_trans_info - { - flex_int32_t yy_verify; - flex_int32_t yy_nxt; - }; +struct yy_trans_info { + flex_int32_t yy_verify; + flex_int32_t yy_nxt; +}; static yyconst flex_int16_t yy_accept[151] = { 0, 0, 0, 0, 0, 51, 49, 1, 2, 49, 39, @@ -550,8 +541,8 @@ static yyconst flex_int16_t yy_chk[325] = #define YY_RESTORE_YY_MORE_OFFSET #line 1 "lex_sql.l" #line 2 "lex_sql.l" -#include -#include +#include +#include struct ParserContext; @@ -560,12 +551,14 @@ extern int atoi(); extern double atof(); #if YYDEBUG > 0 -#define debug_printf printf +#define debug_printf printf #else #define debug_printf(...) -#endif // YYDEBUG +#endif // YYDEBUG -#define RETURN_TOKEN(token) debug_printf("%s\n",#token);return token +#define RETURN_TOKEN(token) \ + debug_printf("%s\n", #token); \ + return token /* Prevent the need for linking with -lfl */ #line 572 "lex.yy.c" @@ -586,83 +579,82 @@ extern double atof(); #endif /* Holds the entire state of the reentrant scanner. */ -struct yyguts_t - { - - /* User-defined. Not touched by flex. */ - YY_EXTRA_TYPE yyextra_r; - - /* The rest are the same as the globals declared in the non-reentrant scanner. */ - FILE *yyin_r, *yyout_r; - size_t yy_buffer_stack_top; /**< index of top of stack. */ - size_t yy_buffer_stack_max; /**< capacity of stack. */ - YY_BUFFER_STATE * yy_buffer_stack; /**< Stack as an array. 
*/ - char yy_hold_char; - yy_size_t yy_n_chars; - yy_size_t yyleng_r; - char *yy_c_buf_p; - int yy_init; - int yy_start; - int yy_did_buffer_switch_on_eof; - int yy_start_stack_ptr; - int yy_start_stack_depth; - int *yy_start_stack; - yy_state_type yy_last_accepting_state; - char* yy_last_accepting_cpos; - - int yylineno_r; - int yy_flex_debug_r; - - char *yytext_r; - int yy_more_flag; - int yy_more_len; - - YYSTYPE * yylval_r; - - }; /* end struct yyguts_t */ - -static int yy_init_globals (yyscan_t yyscanner ); - - /* This must go here because YYSTYPE and YYLTYPE are included - * from bison output in section 1.*/ - # define yylval yyg->yylval_r - -int yylex_init (yyscan_t* scanner); - -int yylex_init_extra (YY_EXTRA_TYPE user_defined,yyscan_t* scanner); +struct yyguts_t { + + /* User-defined. Not touched by flex. */ + YY_EXTRA_TYPE yyextra_r; + + /* The rest are the same as the globals declared in the non-reentrant scanner. */ + FILE *yyin_r, *yyout_r; + size_t yy_buffer_stack_top; /**< index of top of stack. */ + size_t yy_buffer_stack_max; /**< capacity of stack. */ + YY_BUFFER_STATE *yy_buffer_stack; /**< Stack as an array. */ + char yy_hold_char; + yy_size_t yy_n_chars; + yy_size_t yyleng_r; + char *yy_c_buf_p; + int yy_init; + int yy_start; + int yy_did_buffer_switch_on_eof; + int yy_start_stack_ptr; + int yy_start_stack_depth; + int *yy_start_stack; + yy_state_type yy_last_accepting_state; + char *yy_last_accepting_cpos; + + int yylineno_r; + int yy_flex_debug_r; + + char *yytext_r; + int yy_more_flag; + int yy_more_len; + + YYSTYPE *yylval_r; + +}; /* end struct yyguts_t */ + +static int yy_init_globals(yyscan_t yyscanner); + +/* This must go here because YYSTYPE and YYLTYPE are included + * from bison output in section 1.*/ +#define yylval yyg->yylval_r + +int yylex_init(yyscan_t *scanner); + +int yylex_init_extra(YY_EXTRA_TYPE user_defined, yyscan_t *scanner); /* Accessor methods to globals. These are made visible to non-reentrant scanners for convenience. */ -int yylex_destroy (yyscan_t yyscanner ); +int yylex_destroy(yyscan_t yyscanner); -int yyget_debug (yyscan_t yyscanner ); +int yyget_debug(yyscan_t yyscanner); -void yyset_debug (int debug_flag ,yyscan_t yyscanner ); +void yyset_debug(int debug_flag, yyscan_t yyscanner); -YY_EXTRA_TYPE yyget_extra (yyscan_t yyscanner ); +YY_EXTRA_TYPE yyget_extra(yyscan_t yyscanner); -void yyset_extra (YY_EXTRA_TYPE user_defined ,yyscan_t yyscanner ); +void yyset_extra(YY_EXTRA_TYPE user_defined, yyscan_t yyscanner); -FILE *yyget_in (yyscan_t yyscanner ); +FILE *yyget_in(yyscan_t yyscanner); -void yyset_in (FILE * in_str ,yyscan_t yyscanner ); +void yyset_in(FILE *in_str, yyscan_t yyscanner); -FILE *yyget_out (yyscan_t yyscanner ); +FILE *yyget_out(yyscan_t yyscanner); -void yyset_out (FILE * out_str ,yyscan_t yyscanner ); +void yyset_out(FILE *out_str, yyscan_t yyscanner); -yy_size_t yyget_leng (yyscan_t yyscanner ); +yy_size_t yyget_leng(yyscan_t yyscanner); -char *yyget_text (yyscan_t yyscanner ); +char *yyget_text(yyscan_t yyscanner); -int yyget_lineno (yyscan_t yyscanner ); +int yyget_lineno(yyscan_t yyscanner); -void yyset_lineno (int line_number ,yyscan_t yyscanner ); +void yyset_lineno(int line_number, yyscan_t yyscanner); -YYSTYPE * yyget_lval (yyscan_t yyscanner ); +YYSTYPE *yyget_lval(yyscan_t yyscanner); -void yyset_lval (YYSTYPE * yylval_param ,yyscan_t yyscanner ); +void yyset_lval(YYSTYPE *yylval_param, yyscan_t yyscanner); /* Macros after this point can all be overridden by user definitions in * section 1. 
@@ -670,28 +662,28 @@ void yyset_lval (YYSTYPE * yylval_param ,yyscan_t yyscanner ); #ifndef YY_SKIP_YYWRAP #ifdef __cplusplus -extern "C" int yywrap (yyscan_t yyscanner ); +extern "C" int yywrap(yyscan_t yyscanner); #else -extern int yywrap (yyscan_t yyscanner ); +extern int yywrap(yyscan_t yyscanner); #endif #endif - static void yyunput (int c,char *buf_ptr ,yyscan_t yyscanner); - +static void yyunput(int c, char *buf_ptr, yyscan_t yyscanner); + #ifndef yytext_ptr -static void yy_flex_strncpy (char *,yyconst char *,int ,yyscan_t yyscanner); +static void yy_flex_strncpy(char *, yyconst char *, int, yyscan_t yyscanner); #endif #ifdef YY_NEED_STRLEN -static int yy_flex_strlen (yyconst char * ,yyscan_t yyscanner); +static int yy_flex_strlen(yyconst char *, yyscan_t yyscanner); #endif #ifndef YY_NO_INPUT #ifdef __cplusplus -static int yyinput (yyscan_t yyscanner ); +static int yyinput(yyscan_t yyscanner); #else -static int input (yyscan_t yyscanner ); +static int input(yyscan_t yyscanner); #endif #endif @@ -706,42 +698,35 @@ static int input (yyscan_t yyscanner ); /* This used to be an fputs(), but since the string might contain NUL's, * we now use fwrite(). */ -#define ECHO fwrite( yytext, yyleng, 1, yyout ) +#define ECHO fwrite(yytext, yyleng, 1, yyout) #endif /* Gets input and stuffs it into "buf". number of characters read, or YY_NULL, * is returned in "result". */ #ifndef YY_INPUT -#define YY_INPUT(buf,result,max_size) \ - if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \ - { \ - int c = '*'; \ - yy_size_t n; \ - for ( n = 0; n < max_size && \ - (c = getc( yyin )) != EOF && c != '\n'; ++n ) \ - buf[n] = (char) c; \ - if ( c == '\n' ) \ - buf[n++] = (char) c; \ - if ( c == EOF && ferror( yyin ) ) \ - YY_FATAL_ERROR( "input in flex scanner failed" ); \ - result = n; \ - } \ - else \ - { \ - errno=0; \ - while ( (result = fread(buf, 1, max_size, yyin))==0 && ferror(yyin)) \ - { \ - if( errno != EINTR) \ - { \ - YY_FATAL_ERROR( "input in flex scanner failed" ); \ - break; \ - } \ - errno=0; \ - clearerr(yyin); \ - } \ - }\ -\ +#define YY_INPUT(buf, result, max_size) \ + if (YY_CURRENT_BUFFER_LVALUE->yy_is_interactive) { \ + int c = '*'; \ + yy_size_t n; \ + for (n = 0; n < max_size && (c = getc(yyin)) != EOF && c != '\n'; ++n) \ + buf[n] = (char)c; \ + if (c == '\n') \ + buf[n++] = (char)c; \ + if (c == EOF && ferror(yyin)) \ + YY_FATAL_ERROR("input in flex scanner failed"); \ + result = n; \ + } else { \ + errno = 0; \ + while ((result = fread(buf, 1, max_size, yyin)) == 0 && ferror(yyin)) { \ + if (errno != EINTR) { \ + YY_FATAL_ERROR("input in flex scanner failed"); \ + break; \ + } \ + errno = 0; \ + clearerr(yyin); \ + } \ + } #endif @@ -760,7 +745,7 @@ static int input (yyscan_t yyscanner ); /* Report a fatal error. 
*/ #ifndef YY_FATAL_ERROR -#define YY_FATAL_ERROR(msg) yy_fatal_error( msg , yyscanner) +#define YY_FATAL_ERROR(msg) yy_fatal_error(msg, yyscanner) #endif /* end tables serialization structures and prototypes */ @@ -771,11 +756,9 @@ static int input (yyscan_t yyscanner ); #ifndef YY_DECL #define YY_DECL_IS_OURS 1 -extern int yylex \ - (YYSTYPE * yylval_param ,yyscan_t yyscanner); +extern int yylex(YYSTYPE *yylval_param, yyscan_t yyscanner); -#define YY_DECL int yylex \ - (YYSTYPE * yylval_param , yyscan_t yyscanner) +#define YY_DECL int yylex(YYSTYPE *yylval_param, yyscan_t yyscanner) #endif /* !YY_DECL */ /* Code executed at the beginning of each rule, after yytext and yyleng @@ -790,489 +773,473 @@ extern int yylex \ #define YY_BREAK break; #endif -#define YY_RULE_SETUP \ - YY_USER_ACTION +#define YY_RULE_SETUP YY_USER_ACTION /** The main scanner function which does all the work. */ YY_DECL { - register yy_state_type yy_current_state; - register char *yy_cp, *yy_bp; - register int yy_act; - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + register yy_state_type yy_current_state; + register char *yy_cp, *yy_bp; + register int yy_act; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; #line 33 "lex_sql.l" - #line 809 "lex.yy.c" - yylval = yylval_param; + yylval = yylval_param; - if ( !yyg->yy_init ) - { - yyg->yy_init = 1; + if (!yyg->yy_init) { + yyg->yy_init = 1; #ifdef YY_USER_INIT - YY_USER_INIT; + YY_USER_INIT; #endif - if ( ! yyg->yy_start ) - yyg->yy_start = 1; /* first start state */ - - if ( ! yyin ) - yyin = stdin; - - if ( ! yyout ) - yyout = stdout; - - if ( ! YY_CURRENT_BUFFER ) { - yyensure_buffer_stack (yyscanner); - YY_CURRENT_BUFFER_LVALUE = - yy_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); - } - - yy_load_buffer_state(yyscanner ); - } - - while ( 1 ) /* loops until end-of-file is reached */ - { - yy_cp = yyg->yy_c_buf_p; - - /* Support of yytext. */ - *yy_cp = yyg->yy_hold_char; - - /* yy_bp points to the position in yy_ch_buf of the start of - * the current run. - */ - yy_bp = yy_cp; - - yy_current_state = yyg->yy_start; -yy_match: - do - { - register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)]; - if ( yy_accept[yy_current_state] ) - { - yyg->yy_last_accepting_state = yy_current_state; - yyg->yy_last_accepting_cpos = yy_cp; - } - while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) - { - yy_current_state = (int) yy_def[yy_current_state]; - if ( yy_current_state >= 151 ) - yy_c = yy_meta[(unsigned int) yy_c]; - } - yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; - ++yy_cp; - } - while ( yy_base[yy_current_state] != 283 ); - -yy_find_action: - yy_act = yy_accept[yy_current_state]; - if ( yy_act == 0 ) - { /* have to back up */ - yy_cp = yyg->yy_last_accepting_cpos; - yy_current_state = yyg->yy_last_accepting_state; - yy_act = yy_accept[yy_current_state]; - } - - YY_DO_BEFORE_ACTION; - -do_action: /* This label is used only to access EOF actions. 
*/ - - switch ( yy_act ) - { /* beginning of action switch */ - case 0: /* must back up */ - /* undo the effects of YY_DO_BEFORE_ACTION */ - *yy_cp = yyg->yy_hold_char; - yy_cp = yyg->yy_last_accepting_cpos; - yy_current_state = yyg->yy_last_accepting_state; - goto yy_find_action; - -case 1: -YY_RULE_SETUP + if (!yyg->yy_start) + yyg->yy_start = 1; /* first start state */ + + if (!yyin) + yyin = stdin; + + if (!yyout) + yyout = stdout; + + if (!YY_CURRENT_BUFFER) { + yyensure_buffer_stack(yyscanner); + YY_CURRENT_BUFFER_LVALUE = yy_create_buffer(yyin, YY_BUF_SIZE, yyscanner); + } + + yy_load_buffer_state(yyscanner); + } + + while (1) /* loops until end-of-file is reached */ + { + yy_cp = yyg->yy_c_buf_p; + + /* Support of yytext. */ + *yy_cp = yyg->yy_hold_char; + + /* yy_bp points to the position in yy_ch_buf of the start of + * the current run. + */ + yy_bp = yy_cp; + + yy_current_state = yyg->yy_start; + yy_match: + do { + register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)]; + if (yy_accept[yy_current_state]) { + yyg->yy_last_accepting_state = yy_current_state; + yyg->yy_last_accepting_cpos = yy_cp; + } + while (yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state) { + yy_current_state = (int)yy_def[yy_current_state]; + if (yy_current_state >= 151) + yy_c = yy_meta[(unsigned int)yy_c]; + } + yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int)yy_c]; + ++yy_cp; + } while (yy_base[yy_current_state] != 283); + + yy_find_action: + yy_act = yy_accept[yy_current_state]; + if (yy_act == 0) { /* have to back up */ + yy_cp = yyg->yy_last_accepting_cpos; + yy_current_state = yyg->yy_last_accepting_state; + yy_act = yy_accept[yy_current_state]; + } + + YY_DO_BEFORE_ACTION; + + do_action: /* This label is used only to access EOF actions. 
*/ + + switch (yy_act) { /* beginning of action switch */ + case 0: /* must back up */ + /* undo the effects of YY_DO_BEFORE_ACTION */ + *yy_cp = yyg->yy_hold_char; + yy_cp = yyg->yy_last_accepting_cpos; + yy_current_state = yyg->yy_last_accepting_state; + goto yy_find_action; + + case 1: + YY_RULE_SETUP #line 35 "lex_sql.l" -// ignore whitespace - YY_BREAK -case 2: -/* rule 2 can match eol */ -YY_RULE_SETUP + // ignore whitespace + YY_BREAK + case 2: + /* rule 2 can match eol */ + YY_RULE_SETUP #line 36 "lex_sql.l" -; - YY_BREAK -case 3: -YY_RULE_SETUP + ; + YY_BREAK + case 3: + YY_RULE_SETUP #line 38 "lex_sql.l" -yylval->number=atoi(yytext); RETURN_TOKEN(NUMBER); - YY_BREAK -case 4: -YY_RULE_SETUP + yylval->number = atoi(yytext); + RETURN_TOKEN(NUMBER); + YY_BREAK + case 4: + YY_RULE_SETUP #line 39 "lex_sql.l" -yylval->floats=(float)(atof(yytext)); RETURN_TOKEN(FLOAT); - YY_BREAK -case 5: -YY_RULE_SETUP + yylval->floats = (float)(atof(yytext)); + RETURN_TOKEN(FLOAT); + YY_BREAK + case 5: + YY_RULE_SETUP #line 41 "lex_sql.l" -RETURN_TOKEN(SEMICOLON); - YY_BREAK -case 6: -YY_RULE_SETUP + RETURN_TOKEN(SEMICOLON); + YY_BREAK + case 6: + YY_RULE_SETUP #line 42 "lex_sql.l" -RETURN_TOKEN(DOT); - YY_BREAK -case 7: -YY_RULE_SETUP + RETURN_TOKEN(DOT); + YY_BREAK + case 7: + YY_RULE_SETUP #line 43 "lex_sql.l" -RETURN_TOKEN(STAR); - YY_BREAK -case 8: -YY_RULE_SETUP + RETURN_TOKEN(STAR); + YY_BREAK + case 8: + YY_RULE_SETUP #line 44 "lex_sql.l" -RETURN_TOKEN(EXIT); - YY_BREAK -case 9: -YY_RULE_SETUP + RETURN_TOKEN(EXIT); + YY_BREAK + case 9: + YY_RULE_SETUP #line 45 "lex_sql.l" -RETURN_TOKEN(HELP); - YY_BREAK -case 10: -YY_RULE_SETUP + RETURN_TOKEN(HELP); + YY_BREAK + case 10: + YY_RULE_SETUP #line 46 "lex_sql.l" -RETURN_TOKEN(DESC); - YY_BREAK -case 11: -YY_RULE_SETUP + RETURN_TOKEN(DESC); + YY_BREAK + case 11: + YY_RULE_SETUP #line 47 "lex_sql.l" -RETURN_TOKEN(CREATE); - YY_BREAK -case 12: -YY_RULE_SETUP + RETURN_TOKEN(CREATE); + YY_BREAK + case 12: + YY_RULE_SETUP #line 48 "lex_sql.l" -RETURN_TOKEN(DROP); - YY_BREAK -case 13: -YY_RULE_SETUP + RETURN_TOKEN(DROP); + YY_BREAK + case 13: + YY_RULE_SETUP #line 49 "lex_sql.l" -RETURN_TOKEN(TABLE); - YY_BREAK -case 14: -YY_RULE_SETUP + RETURN_TOKEN(TABLE); + YY_BREAK + case 14: + YY_RULE_SETUP #line 50 "lex_sql.l" -RETURN_TOKEN(TABLES); - YY_BREAK -case 15: -YY_RULE_SETUP + RETURN_TOKEN(TABLES); + YY_BREAK + case 15: + YY_RULE_SETUP #line 51 "lex_sql.l" -RETURN_TOKEN(INDEX); - YY_BREAK -case 16: -YY_RULE_SETUP + RETURN_TOKEN(INDEX); + YY_BREAK + case 16: + YY_RULE_SETUP #line 52 "lex_sql.l" -RETURN_TOKEN(ON); - YY_BREAK -case 17: -YY_RULE_SETUP + RETURN_TOKEN(ON); + YY_BREAK + case 17: + YY_RULE_SETUP #line 53 "lex_sql.l" -RETURN_TOKEN(SHOW); - YY_BREAK -case 18: -YY_RULE_SETUP + RETURN_TOKEN(SHOW); + YY_BREAK + case 18: + YY_RULE_SETUP #line 54 "lex_sql.l" -RETURN_TOKEN(SYNC); - YY_BREAK -case 19: -YY_RULE_SETUP + RETURN_TOKEN(SYNC); + YY_BREAK + case 19: + YY_RULE_SETUP #line 55 "lex_sql.l" -RETURN_TOKEN(SELECT); - YY_BREAK -case 20: -YY_RULE_SETUP + RETURN_TOKEN(SELECT); + YY_BREAK + case 20: + YY_RULE_SETUP #line 56 "lex_sql.l" -RETURN_TOKEN(FROM); - YY_BREAK -case 21: -YY_RULE_SETUP + RETURN_TOKEN(FROM); + YY_BREAK + case 21: + YY_RULE_SETUP #line 57 "lex_sql.l" -RETURN_TOKEN(WHERE); - YY_BREAK -case 22: -YY_RULE_SETUP + RETURN_TOKEN(WHERE); + YY_BREAK + case 22: + YY_RULE_SETUP #line 58 "lex_sql.l" -RETURN_TOKEN(AND); - YY_BREAK -case 23: -YY_RULE_SETUP + RETURN_TOKEN(AND); + YY_BREAK + case 23: + YY_RULE_SETUP #line 59 "lex_sql.l" 
-RETURN_TOKEN(INSERT); - YY_BREAK -case 24: -YY_RULE_SETUP + RETURN_TOKEN(INSERT); + YY_BREAK + case 24: + YY_RULE_SETUP #line 60 "lex_sql.l" -RETURN_TOKEN(INTO); - YY_BREAK -case 25: -YY_RULE_SETUP + RETURN_TOKEN(INTO); + YY_BREAK + case 25: + YY_RULE_SETUP #line 61 "lex_sql.l" -RETURN_TOKEN(VALUES); - YY_BREAK -case 26: -YY_RULE_SETUP + RETURN_TOKEN(VALUES); + YY_BREAK + case 26: + YY_RULE_SETUP #line 62 "lex_sql.l" -RETURN_TOKEN(DELETE); - YY_BREAK -case 27: -YY_RULE_SETUP + RETURN_TOKEN(DELETE); + YY_BREAK + case 27: + YY_RULE_SETUP #line 63 "lex_sql.l" -RETURN_TOKEN(UPDATE); - YY_BREAK -case 28: -YY_RULE_SETUP + RETURN_TOKEN(UPDATE); + YY_BREAK + case 28: + YY_RULE_SETUP #line 64 "lex_sql.l" -RETURN_TOKEN(SET); - YY_BREAK -case 29: -YY_RULE_SETUP + RETURN_TOKEN(SET); + YY_BREAK + case 29: + YY_RULE_SETUP #line 65 "lex_sql.l" -RETURN_TOKEN(TRX_BEGIN); - YY_BREAK -case 30: -YY_RULE_SETUP + RETURN_TOKEN(TRX_BEGIN); + YY_BREAK + case 30: + YY_RULE_SETUP #line 66 "lex_sql.l" -RETURN_TOKEN(TRX_COMMIT); - YY_BREAK -case 31: -YY_RULE_SETUP + RETURN_TOKEN(TRX_COMMIT); + YY_BREAK + case 31: + YY_RULE_SETUP #line 67 "lex_sql.l" -RETURN_TOKEN(TRX_ROLLBACK); - YY_BREAK -case 32: -YY_RULE_SETUP + RETURN_TOKEN(TRX_ROLLBACK); + YY_BREAK + case 32: + YY_RULE_SETUP #line 68 "lex_sql.l" -RETURN_TOKEN(INT_T); - YY_BREAK -case 33: -YY_RULE_SETUP + RETURN_TOKEN(INT_T); + YY_BREAK + case 33: + YY_RULE_SETUP #line 69 "lex_sql.l" -RETURN_TOKEN(STRING_T); - YY_BREAK -case 34: -YY_RULE_SETUP + RETURN_TOKEN(STRING_T); + YY_BREAK + case 34: + YY_RULE_SETUP #line 70 "lex_sql.l" -RETURN_TOKEN(FLOAT_T); - YY_BREAK -case 35: -YY_RULE_SETUP + RETURN_TOKEN(FLOAT_T); + YY_BREAK + case 35: + YY_RULE_SETUP #line 71 "lex_sql.l" -RETURN_TOKEN(LOAD); - YY_BREAK -case 36: -YY_RULE_SETUP + RETURN_TOKEN(LOAD); + YY_BREAK + case 36: + YY_RULE_SETUP #line 72 "lex_sql.l" -RETURN_TOKEN(DATA); - YY_BREAK -case 37: -YY_RULE_SETUP + RETURN_TOKEN(DATA); + YY_BREAK + case 37: + YY_RULE_SETUP #line 73 "lex_sql.l" -RETURN_TOKEN(INFILE); - YY_BREAK -case 38: -YY_RULE_SETUP + RETURN_TOKEN(INFILE); + YY_BREAK + case 38: + YY_RULE_SETUP #line 74 "lex_sql.l" -yylval->string=strdup(yytext); RETURN_TOKEN(ID); - YY_BREAK -case 39: -YY_RULE_SETUP + yylval->string = strdup(yytext); + RETURN_TOKEN(ID); + YY_BREAK + case 39: + YY_RULE_SETUP #line 75 "lex_sql.l" -RETURN_TOKEN(LBRACE); - YY_BREAK -case 40: -YY_RULE_SETUP + RETURN_TOKEN(LBRACE); + YY_BREAK + case 40: + YY_RULE_SETUP #line 76 "lex_sql.l" -RETURN_TOKEN(RBRACE); - YY_BREAK -case 41: -YY_RULE_SETUP + RETURN_TOKEN(RBRACE); + YY_BREAK + case 41: + YY_RULE_SETUP #line 78 "lex_sql.l" -RETURN_TOKEN(COMMA); - YY_BREAK -case 42: -YY_RULE_SETUP + RETURN_TOKEN(COMMA); + YY_BREAK + case 42: + YY_RULE_SETUP #line 79 "lex_sql.l" -RETURN_TOKEN(EQ); - YY_BREAK -case 43: -YY_RULE_SETUP + RETURN_TOKEN(EQ); + YY_BREAK + case 43: + YY_RULE_SETUP #line 80 "lex_sql.l" -RETURN_TOKEN(LE); - YY_BREAK -case 44: -YY_RULE_SETUP + RETURN_TOKEN(LE); + YY_BREAK + case 44: + YY_RULE_SETUP #line 81 "lex_sql.l" -RETURN_TOKEN(NE); - YY_BREAK -case 45: -YY_RULE_SETUP + RETURN_TOKEN(NE); + YY_BREAK + case 45: + YY_RULE_SETUP #line 82 "lex_sql.l" -RETURN_TOKEN(LT); - YY_BREAK -case 46: -YY_RULE_SETUP + RETURN_TOKEN(LT); + YY_BREAK + case 46: + YY_RULE_SETUP #line 83 "lex_sql.l" -RETURN_TOKEN(GE); - YY_BREAK -case 47: -YY_RULE_SETUP + RETURN_TOKEN(GE); + YY_BREAK + case 47: + YY_RULE_SETUP #line 84 "lex_sql.l" -RETURN_TOKEN(GT); - YY_BREAK -case 48: -YY_RULE_SETUP + RETURN_TOKEN(GT); + YY_BREAK + case 48: + YY_RULE_SETUP #line 
85 "lex_sql.l" -yylval->string=strdup(yytext); RETURN_TOKEN(SSS); - YY_BREAK -case 49: -YY_RULE_SETUP + yylval->string = strdup(yytext); + RETURN_TOKEN(SSS); + YY_BREAK + case 49: + YY_RULE_SETUP #line 87 "lex_sql.l" -printf("Unknown character [%c]\n",yytext[0]); return yytext[0]; - YY_BREAK -case 50: -YY_RULE_SETUP + printf("Unknown character [%c]\n", yytext[0]); + return yytext[0]; + YY_BREAK + case 50: + YY_RULE_SETUP #line 88 "lex_sql.l" -ECHO; - YY_BREAK + ECHO; + YY_BREAK #line 1145 "lex.yy.c" -case YY_STATE_EOF(INITIAL): -case YY_STATE_EOF(STR): - yyterminate(); - - case YY_END_OF_BUFFER: - { - /* Amount of text matched not including the EOB char. */ - int yy_amount_of_matched_text = (int) (yy_cp - yyg->yytext_ptr) - 1; - - /* Undo the effects of YY_DO_BEFORE_ACTION. */ - *yy_cp = yyg->yy_hold_char; - YY_RESTORE_YY_MORE_OFFSET - - if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW ) - { - /* We're scanning a new file or input source. It's - * possible that this happened because the user - * just pointed yyin at a new source and called - * yylex(). If so, then we have to assure - * consistency between YY_CURRENT_BUFFER and our - * globals. Here is the right place to do so, because - * this is the first action (other than possibly a - * back-up) that will match for the new input source. - */ - yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; - YY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin; - YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL; - } - - /* Note that here we test for yy_c_buf_p "<=" to the position - * of the first EOB in the buffer, since yy_c_buf_p will - * already have been incremented past the NUL character - * (since all states make transitions on EOB to the - * end-of-buffer state). Contrast this with the test - * in input(). - */ - if ( yyg->yy_c_buf_p <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] ) - { /* This was really a NUL. */ - yy_state_type yy_next_state; - - yyg->yy_c_buf_p = yyg->yytext_ptr + yy_amount_of_matched_text; - - yy_current_state = yy_get_previous_state( yyscanner ); - - /* Okay, we're now positioned to make the NUL - * transition. We couldn't have - * yy_get_previous_state() go ahead and do it - * for us because it doesn't know how to deal - * with the possibility of jamming (and we don't - * want to build jamming into it because then it - * will run more slowly). - */ - - yy_next_state = yy_try_NUL_trans( yy_current_state , yyscanner); - - yy_bp = yyg->yytext_ptr + YY_MORE_ADJ; - - if ( yy_next_state ) - { - /* Consume the NUL. */ - yy_cp = ++yyg->yy_c_buf_p; - yy_current_state = yy_next_state; - goto yy_match; - } - - else - { - yy_cp = yyg->yy_c_buf_p; - goto yy_find_action; - } - } - - else switch ( yy_get_next_buffer( yyscanner ) ) - { - case EOB_ACT_END_OF_FILE: - { - yyg->yy_did_buffer_switch_on_eof = 0; - - if ( yywrap(yyscanner ) ) - { - /* Note: because we've taken care in - * yy_get_next_buffer() to have set up - * yytext, we can now set up - * yy_c_buf_p so that if some total - * hoser (like flex itself) wants to - * call the scanner after we return the - * YY_NULL, it'll still work - another - * YY_NULL will get returned. - */ - yyg->yy_c_buf_p = yyg->yytext_ptr + YY_MORE_ADJ; - - yy_act = YY_STATE_EOF(YY_START); - goto do_action; - } - - else - { - if ( ! 
yyg->yy_did_buffer_switch_on_eof ) - YY_NEW_FILE; - } - break; - } - - case EOB_ACT_CONTINUE_SCAN: - yyg->yy_c_buf_p = - yyg->yytext_ptr + yy_amount_of_matched_text; - - yy_current_state = yy_get_previous_state( yyscanner ); - - yy_cp = yyg->yy_c_buf_p; - yy_bp = yyg->yytext_ptr + YY_MORE_ADJ; - goto yy_match; - - case EOB_ACT_LAST_MATCH: - yyg->yy_c_buf_p = - &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars]; - - yy_current_state = yy_get_previous_state( yyscanner ); - - yy_cp = yyg->yy_c_buf_p; - yy_bp = yyg->yytext_ptr + YY_MORE_ADJ; - goto yy_find_action; - } - break; - } - - default: - YY_FATAL_ERROR( - "fatal flex scanner internal error--no action found" ); - } /* end of action switch */ - } /* end of scanning one token */ + case YY_STATE_EOF(INITIAL): + case YY_STATE_EOF(STR): + yyterminate(); + + case YY_END_OF_BUFFER: { + /* Amount of text matched not including the EOB char. */ + int yy_amount_of_matched_text = (int)(yy_cp - yyg->yytext_ptr) - 1; + + /* Undo the effects of YY_DO_BEFORE_ACTION. */ + *yy_cp = yyg->yy_hold_char; + YY_RESTORE_YY_MORE_OFFSET + + if (YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW) { + /* We're scanning a new file or input source. It's + * possible that this happened because the user + * just pointed yyin at a new source and called + * yylex(). If so, then we have to assure + * consistency between YY_CURRENT_BUFFER and our + * globals. Here is the right place to do so, because + * this is the first action (other than possibly a + * back-up) that will match for the new input source. + */ + yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; + YY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin; + YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL; + } + + /* Note that here we test for yy_c_buf_p "<=" to the position + * of the first EOB in the buffer, since yy_c_buf_p will + * already have been incremented past the NUL character + * (since all states make transitions on EOB to the + * end-of-buffer state). Contrast this with the test + * in input(). + */ + if (yyg->yy_c_buf_p <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars]) { /* This was really a NUL. */ + yy_state_type yy_next_state; + + yyg->yy_c_buf_p = yyg->yytext_ptr + yy_amount_of_matched_text; + + yy_current_state = yy_get_previous_state(yyscanner); + + /* Okay, we're now positioned to make the NUL + * transition. We couldn't have + * yy_get_previous_state() go ahead and do it + * for us because it doesn't know how to deal + * with the possibility of jamming (and we don't + * want to build jamming into it because then it + * will run more slowly). + */ + + yy_next_state = yy_try_NUL_trans(yy_current_state, yyscanner); + + yy_bp = yyg->yytext_ptr + YY_MORE_ADJ; + + if (yy_next_state) { + /* Consume the NUL. */ + yy_cp = ++yyg->yy_c_buf_p; + yy_current_state = yy_next_state; + goto yy_match; + } + + else { + yy_cp = yyg->yy_c_buf_p; + goto yy_find_action; + } + } + + else + switch (yy_get_next_buffer(yyscanner)) { + case EOB_ACT_END_OF_FILE: { + yyg->yy_did_buffer_switch_on_eof = 0; + + if (yywrap(yyscanner)) { + /* Note: because we've taken care in + * yy_get_next_buffer() to have set up + * yytext, we can now set up + * yy_c_buf_p so that if some total + * hoser (like flex itself) wants to + * call the scanner after we return the + * YY_NULL, it'll still work - another + * YY_NULL will get returned. 
+ */ + yyg->yy_c_buf_p = yyg->yytext_ptr + YY_MORE_ADJ; + + yy_act = YY_STATE_EOF(YY_START); + goto do_action; + } + + else { + if (!yyg->yy_did_buffer_switch_on_eof) + YY_NEW_FILE; + } + break; + } + + case EOB_ACT_CONTINUE_SCAN: + yyg->yy_c_buf_p = yyg->yytext_ptr + yy_amount_of_matched_text; + + yy_current_state = yy_get_previous_state(yyscanner); + + yy_cp = yyg->yy_c_buf_p; + yy_bp = yyg->yytext_ptr + YY_MORE_ADJ; + goto yy_match; + + case EOB_ACT_LAST_MATCH: + yyg->yy_c_buf_p = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars]; + + yy_current_state = yy_get_previous_state(yyscanner); + + yy_cp = yyg->yy_c_buf_p; + yy_bp = yyg->yytext_ptr + YY_MORE_ADJ; + goto yy_find_action; + } + break; + } + + default: + YY_FATAL_ERROR("fatal flex scanner internal error--no action found"); + } /* end of action switch */ + } /* end of scanning one token */ } /* end of yylex */ /* yy_get_next_buffer - try to read in a new buffer @@ -1282,167 +1249,147 @@ case YY_STATE_EOF(STR): * EOB_ACT_CONTINUE_SCAN - continue scanning from current position * EOB_ACT_END_OF_FILE - end of file */ -static int yy_get_next_buffer (yyscan_t yyscanner) +static int yy_get_next_buffer(yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf; - register char *source = yyg->yytext_ptr; - register int number_to_move, i; - int ret_val; - - if ( yyg->yy_c_buf_p > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars + 1] ) - YY_FATAL_ERROR( - "fatal flex scanner internal error--end of buffer missed" ); - - if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 ) - { /* Don't try to fill the buffer, so this is an EOF. */ - if ( yyg->yy_c_buf_p - yyg->yytext_ptr - YY_MORE_ADJ == 1 ) - { - /* We matched a single character, the EOB, so - * treat this as a final EOF. - */ - return EOB_ACT_END_OF_FILE; - } - - else - { - /* We matched some text prior to the EOB, first - * process it. - */ - return EOB_ACT_LAST_MATCH; - } - } - - /* Try to read more data. */ - - /* First move last chars to start of buffer. */ - number_to_move = (int) (yyg->yy_c_buf_p - yyg->yytext_ptr) - 1; - - for ( i = 0; i < number_to_move; ++i ) - *(dest++) = *(source++); - - if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING ) - /* don't do the read, it's not guaranteed to return an EOF, - * just force an EOF - */ - YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars = 0; - - else - { - yy_size_t num_to_read = - YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1; - - while ( num_to_read <= 0 ) - { /* Not enough room in the buffer - grow it. */ - - /* just a shorter name for the current buffer */ - YY_BUFFER_STATE b = YY_CURRENT_BUFFER; - - int yy_c_buf_p_offset = - (int) (yyg->yy_c_buf_p - b->yy_ch_buf); - - if ( b->yy_is_our_buffer ) - { - yy_size_t new_size = b->yy_buf_size * 2; - - if ( new_size <= 0 ) - b->yy_buf_size += b->yy_buf_size / 8; - else - b->yy_buf_size *= 2; - - b->yy_ch_buf = (char *) - /* Include room in for 2 EOB chars. */ - yyrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 ,yyscanner ); - } - else - /* Can't grow it, we don't own it. */ - b->yy_ch_buf = 0; - - if ( ! b->yy_ch_buf ) - YY_FATAL_ERROR( - "fatal error - scanner input buffer overflow" ); - - yyg->yy_c_buf_p = &b->yy_ch_buf[yy_c_buf_p_offset]; - - num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size - - number_to_move - 1; - - } - - if ( num_to_read > YY_READ_BUF_SIZE ) - num_to_read = YY_READ_BUF_SIZE; - - /* Read in more data. 
*/ - YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]), - yyg->yy_n_chars, num_to_read ); - - YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars; - } - - if ( yyg->yy_n_chars == 0 ) - { - if ( number_to_move == YY_MORE_ADJ ) - { - ret_val = EOB_ACT_END_OF_FILE; - yyrestart(yyin ,yyscanner); - } - - else - { - ret_val = EOB_ACT_LAST_MATCH; - YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = - YY_BUFFER_EOF_PENDING; - } - } - - else - ret_val = EOB_ACT_CONTINUE_SCAN; - - if ((yy_size_t) (yyg->yy_n_chars + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) { - /* Extend the array by 50%, plus the number we really need. */ - yy_size_t new_size = yyg->yy_n_chars + number_to_move + (yyg->yy_n_chars >> 1); - YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) yyrealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size ,yyscanner ); - if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf ) - YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" ); - } - - yyg->yy_n_chars += number_to_move; - YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] = YY_END_OF_BUFFER_CHAR; - YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars + 1] = YY_END_OF_BUFFER_CHAR; - - yyg->yytext_ptr = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0]; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf; + register char *source = yyg->yytext_ptr; + register int number_to_move, i; + int ret_val; + + if (yyg->yy_c_buf_p > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars + 1]) + YY_FATAL_ERROR("fatal flex scanner internal error--end of buffer missed"); + + if (YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0) { /* Don't try to fill the buffer, so this is an EOF. */ + if (yyg->yy_c_buf_p - yyg->yytext_ptr - YY_MORE_ADJ == 1) { + /* We matched a single character, the EOB, so + * treat this as a final EOF. + */ + return EOB_ACT_END_OF_FILE; + } - return ret_val; + else { + /* We matched some text prior to the EOB, first + * process it. + */ + return EOB_ACT_LAST_MATCH; + } + } + + /* Try to read more data. */ + + /* First move last chars to start of buffer. */ + number_to_move = (int)(yyg->yy_c_buf_p - yyg->yytext_ptr) - 1; + + for (i = 0; i < number_to_move; ++i) + *(dest++) = *(source++); + + if (YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING) + /* don't do the read, it's not guaranteed to return an EOF, + * just force an EOF + */ + YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars = 0; + + else { + yy_size_t num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1; + + while (num_to_read <= 0) { /* Not enough room in the buffer - grow it. */ + + /* just a shorter name for the current buffer */ + YY_BUFFER_STATE b = YY_CURRENT_BUFFER; + + int yy_c_buf_p_offset = (int)(yyg->yy_c_buf_p - b->yy_ch_buf); + + if (b->yy_is_our_buffer) { + yy_size_t new_size = b->yy_buf_size * 2; + + if (new_size <= 0) + b->yy_buf_size += b->yy_buf_size / 8; + else + b->yy_buf_size *= 2; + + b->yy_ch_buf = (char *) + /* Include room in for 2 EOB chars. */ + yyrealloc((void *)b->yy_ch_buf, b->yy_buf_size + 2, yyscanner); + } else + /* Can't grow it, we don't own it. */ + b->yy_ch_buf = 0; + + if (!b->yy_ch_buf) + YY_FATAL_ERROR("fatal error - scanner input buffer overflow"); + + yyg->yy_c_buf_p = &b->yy_ch_buf[yy_c_buf_p_offset]; + + num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1; + } + + if (num_to_read > YY_READ_BUF_SIZE) + num_to_read = YY_READ_BUF_SIZE; + + /* Read in more data. 
*/ + YY_INPUT((&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]), yyg->yy_n_chars, num_to_read); + + YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars; + } + + if (yyg->yy_n_chars == 0) { + if (number_to_move == YY_MORE_ADJ) { + ret_val = EOB_ACT_END_OF_FILE; + yyrestart(yyin, yyscanner); + } + + else { + ret_val = EOB_ACT_LAST_MATCH; + YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_EOF_PENDING; + } + } + + else + ret_val = EOB_ACT_CONTINUE_SCAN; + + if ((yy_size_t)(yyg->yy_n_chars + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) { + /* Extend the array by 50%, plus the number we really need. */ + yy_size_t new_size = yyg->yy_n_chars + number_to_move + (yyg->yy_n_chars >> 1); + YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = + (char *)yyrealloc((void *)YY_CURRENT_BUFFER_LVALUE->yy_ch_buf, new_size, yyscanner); + if (!YY_CURRENT_BUFFER_LVALUE->yy_ch_buf) + YY_FATAL_ERROR("out of dynamic memory in yy_get_next_buffer()"); + } + + yyg->yy_n_chars += number_to_move; + YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] = YY_END_OF_BUFFER_CHAR; + YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars + 1] = YY_END_OF_BUFFER_CHAR; + + yyg->yytext_ptr = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0]; + + return ret_val; } /* yy_get_previous_state - get the state just before the EOB char was reached */ - static yy_state_type yy_get_previous_state (yyscan_t yyscanner) +static yy_state_type yy_get_previous_state(yyscan_t yyscanner) { - register yy_state_type yy_current_state; - register char *yy_cp; - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - - yy_current_state = yyg->yy_start; - - for ( yy_cp = yyg->yytext_ptr + YY_MORE_ADJ; yy_cp < yyg->yy_c_buf_p; ++yy_cp ) - { - register YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1); - if ( yy_accept[yy_current_state] ) - { - yyg->yy_last_accepting_state = yy_current_state; - yyg->yy_last_accepting_cpos = yy_cp; - } - while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) - { - yy_current_state = (int) yy_def[yy_current_state]; - if ( yy_current_state >= 151 ) - yy_c = yy_meta[(unsigned int) yy_c]; - } - yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; - } - - return yy_current_state; + register yy_state_type yy_current_state; + register char *yy_cp; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + + yy_current_state = yyg->yy_start; + + for (yy_cp = yyg->yytext_ptr + YY_MORE_ADJ; yy_cp < yyg->yy_c_buf_p; ++yy_cp) { + register YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1); + if (yy_accept[yy_current_state]) { + yyg->yy_last_accepting_state = yy_current_state; + yyg->yy_last_accepting_cpos = yy_cp; + } + while (yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state) { + yy_current_state = (int)yy_def[yy_current_state]; + if (yy_current_state >= 151) + yy_c = yy_meta[(unsigned int)yy_c]; + } + yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int)yy_c]; + } + + return yy_current_state; } /* yy_try_NUL_trans - try to make a transition on the NUL character @@ -1450,205 +1397,193 @@ static int yy_get_next_buffer (yyscan_t yyscanner) * synopsis * next_state = yy_try_NUL_trans( current_state ); */ - static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state , yyscan_t yyscanner) +static yy_state_type yy_try_NUL_trans(yy_state_type yy_current_state, yyscan_t yyscanner) { - register int yy_is_jam; - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; /* This var may be unused depending upon options. 
*/ - register char *yy_cp = yyg->yy_c_buf_p; - - register YY_CHAR yy_c = 1; - if ( yy_accept[yy_current_state] ) - { - yyg->yy_last_accepting_state = yy_current_state; - yyg->yy_last_accepting_cpos = yy_cp; - } - while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) - { - yy_current_state = (int) yy_def[yy_current_state]; - if ( yy_current_state >= 151 ) - yy_c = yy_meta[(unsigned int) yy_c]; - } - yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; - yy_is_jam = (yy_current_state == 150); - - return yy_is_jam ? 0 : yy_current_state; + register int yy_is_jam; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; /* This var may be unused depending upon options. */ + register char *yy_cp = yyg->yy_c_buf_p; + + register YY_CHAR yy_c = 1; + if (yy_accept[yy_current_state]) { + yyg->yy_last_accepting_state = yy_current_state; + yyg->yy_last_accepting_cpos = yy_cp; + } + while (yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state) { + yy_current_state = (int)yy_def[yy_current_state]; + if (yy_current_state >= 151) + yy_c = yy_meta[(unsigned int)yy_c]; + } + yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int)yy_c]; + yy_is_jam = (yy_current_state == 150); + + return yy_is_jam ? 0 : yy_current_state; } - static void yyunput (int c, register char * yy_bp , yyscan_t yyscanner) +static void yyunput(int c, register char *yy_bp, yyscan_t yyscanner) { - register char *yy_cp; - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + register char *yy_cp; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; - yy_cp = yyg->yy_c_buf_p; + yy_cp = yyg->yy_c_buf_p; - /* undo effects of setting up yytext */ - *yy_cp = yyg->yy_hold_char; + /* undo effects of setting up yytext */ + *yy_cp = yyg->yy_hold_char; - if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 ) - { /* need to shift things up to make room */ - /* +2 for EOB chars. */ - register yy_size_t number_to_move = yyg->yy_n_chars + 2; - register char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[ - YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2]; - register char *source = - &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]; + if (yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2) { /* need to shift things up to make room */ + /* +2 for EOB chars. 
*/ + register yy_size_t number_to_move = yyg->yy_n_chars + 2; + register char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2]; + register char *source = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]; - while ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf ) - *--dest = *--source; + while (source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf) + *--dest = *--source; - yy_cp += (int) (dest - source); - yy_bp += (int) (dest - source); - YY_CURRENT_BUFFER_LVALUE->yy_n_chars = - yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_buf_size; + yy_cp += (int)(dest - source); + yy_bp += (int)(dest - source); + YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_buf_size; - if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 ) - YY_FATAL_ERROR( "flex scanner push-back overflow" ); - } + if (yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2) + YY_FATAL_ERROR("flex scanner push-back overflow"); + } - *--yy_cp = (char) c; + *--yy_cp = (char)c; - yyg->yytext_ptr = yy_bp; - yyg->yy_hold_char = *yy_cp; - yyg->yy_c_buf_p = yy_cp; + yyg->yytext_ptr = yy_bp; + yyg->yy_hold_char = *yy_cp; + yyg->yy_c_buf_p = yy_cp; } #ifndef YY_NO_INPUT #ifdef __cplusplus - static int yyinput (yyscan_t yyscanner) +static int yyinput(yyscan_t yyscanner) #else - static int input (yyscan_t yyscanner) +static int input(yyscan_t yyscanner) #endif { - int c; - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - - *yyg->yy_c_buf_p = yyg->yy_hold_char; - - if ( *yyg->yy_c_buf_p == YY_END_OF_BUFFER_CHAR ) - { - /* yy_c_buf_p now points to the character we want to return. - * If this occurs *before* the EOB characters, then it's a - * valid NUL; if not, then we've hit the end of the buffer. - */ - if ( yyg->yy_c_buf_p < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] ) - /* This was really a NUL. */ - *yyg->yy_c_buf_p = '\0'; - - else - { /* need more input */ - yy_size_t offset = yyg->yy_c_buf_p - yyg->yytext_ptr; - ++yyg->yy_c_buf_p; - - switch ( yy_get_next_buffer( yyscanner ) ) - { - case EOB_ACT_LAST_MATCH: - /* This happens because yy_g_n_b() - * sees that we've accumulated a - * token and flags that we need to - * try matching the token before - * proceeding. But for input(), - * there's no matching to consider. - * So convert the EOB_ACT_LAST_MATCH - * to EOB_ACT_END_OF_FILE. - */ - - /* Reset buffer status. */ - yyrestart(yyin ,yyscanner); - - /*FALLTHROUGH*/ - - case EOB_ACT_END_OF_FILE: - { - if ( yywrap(yyscanner ) ) - return 0; - - if ( ! yyg->yy_did_buffer_switch_on_eof ) - YY_NEW_FILE; + int c; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + + *yyg->yy_c_buf_p = yyg->yy_hold_char; + + if (*yyg->yy_c_buf_p == YY_END_OF_BUFFER_CHAR) { + /* yy_c_buf_p now points to the character we want to return. + * If this occurs *before* the EOB characters, then it's a + * valid NUL; if not, then we've hit the end of the buffer. + */ + if (yyg->yy_c_buf_p < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars]) + /* This was really a NUL. */ + *yyg->yy_c_buf_p = '\0'; + + else { /* need more input */ + yy_size_t offset = yyg->yy_c_buf_p - yyg->yytext_ptr; + ++yyg->yy_c_buf_p; + + switch (yy_get_next_buffer(yyscanner)) { + case EOB_ACT_LAST_MATCH: + /* This happens because yy_g_n_b() + * sees that we've accumulated a + * token and flags that we need to + * try matching the token before + * proceeding. But for input(), + * there's no matching to consider. + * So convert the EOB_ACT_LAST_MATCH + * to EOB_ACT_END_OF_FILE. + */ + + /* Reset buffer status. 
*/ + yyrestart(yyin, yyscanner); + + /*FALLTHROUGH*/ + + case EOB_ACT_END_OF_FILE: { + if (yywrap(yyscanner)) + return 0; + + if (!yyg->yy_did_buffer_switch_on_eof) + YY_NEW_FILE; #ifdef __cplusplus - return yyinput(yyscanner); + return yyinput(yyscanner); #else - return input(yyscanner); + return input(yyscanner); #endif - } + } - case EOB_ACT_CONTINUE_SCAN: - yyg->yy_c_buf_p = yyg->yytext_ptr + offset; - break; - } - } - } + case EOB_ACT_CONTINUE_SCAN: + yyg->yy_c_buf_p = yyg->yytext_ptr + offset; + break; + } + } + } - c = *(unsigned char *) yyg->yy_c_buf_p; /* cast for 8-bit char's */ - *yyg->yy_c_buf_p = '\0'; /* preserve yytext */ - yyg->yy_hold_char = *++yyg->yy_c_buf_p; + c = *(unsigned char *)yyg->yy_c_buf_p; /* cast for 8-bit char's */ + *yyg->yy_c_buf_p = '\0'; /* preserve yytext */ + yyg->yy_hold_char = *++yyg->yy_c_buf_p; - return c; + return c; } -#endif /* ifndef YY_NO_INPUT */ +#endif /* ifndef YY_NO_INPUT */ /** Immediately switch to a different input stream. * @param input_file A readable stream. * @param yyscanner The scanner object. * @note This function does not reset the start condition to @c INITIAL . */ - void yyrestart (FILE * input_file , yyscan_t yyscanner) +void yyrestart(FILE *input_file, yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; - if ( ! YY_CURRENT_BUFFER ){ - yyensure_buffer_stack (yyscanner); - YY_CURRENT_BUFFER_LVALUE = - yy_create_buffer(yyin,YY_BUF_SIZE ,yyscanner); - } + if (!YY_CURRENT_BUFFER) { + yyensure_buffer_stack(yyscanner); + YY_CURRENT_BUFFER_LVALUE = yy_create_buffer(yyin, YY_BUF_SIZE, yyscanner); + } - yy_init_buffer(YY_CURRENT_BUFFER,input_file ,yyscanner); - yy_load_buffer_state(yyscanner ); + yy_init_buffer(YY_CURRENT_BUFFER, input_file, yyscanner); + yy_load_buffer_state(yyscanner); } /** Switch to a different input buffer. * @param new_buffer The new input buffer. * @param yyscanner The scanner object. */ - void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner) +void yy_switch_to_buffer(YY_BUFFER_STATE new_buffer, yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - - /* TODO. We should be able to replace this entire function body - * with - * yypop_buffer_state(); - * yypush_buffer_state(new_buffer); - */ - yyensure_buffer_stack (yyscanner); - if ( YY_CURRENT_BUFFER == new_buffer ) - return; - - if ( YY_CURRENT_BUFFER ) - { - /* Flush out information for old buffer. */ - *yyg->yy_c_buf_p = yyg->yy_hold_char; - YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = yyg->yy_c_buf_p; - YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars; - } - - YY_CURRENT_BUFFER_LVALUE = new_buffer; - yy_load_buffer_state(yyscanner ); - - /* We don't actually know whether we did this switch during - * EOF (yywrap()) processing, but the only time this flag - * is looked at is after yywrap() is called, so it's safe - * to go ahead and always set it. - */ - yyg->yy_did_buffer_switch_on_eof = 1; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + + /* TODO. We should be able to replace this entire function body + * with + * yypop_buffer_state(); + * yypush_buffer_state(new_buffer); + */ + yyensure_buffer_stack(yyscanner); + if (YY_CURRENT_BUFFER == new_buffer) + return; + + if (YY_CURRENT_BUFFER) { + /* Flush out information for old buffer. 
*/ + *yyg->yy_c_buf_p = yyg->yy_hold_char; + YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = yyg->yy_c_buf_p; + YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars; + } + + YY_CURRENT_BUFFER_LVALUE = new_buffer; + yy_load_buffer_state(yyscanner); + + /* We don't actually know whether we did this switch during + * EOF (yywrap()) processing, but the only time this flag + * is looked at is after yywrap() is called, so it's safe + * to go ahead and always set it. + */ + yyg->yy_did_buffer_switch_on_eof = 1; } -static void yy_load_buffer_state (yyscan_t yyscanner) +static void yy_load_buffer_state(yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; - yyg->yytext_ptr = yyg->yy_c_buf_p = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos; - yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file; - yyg->yy_hold_char = *yyg->yy_c_buf_p; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars; + yyg->yytext_ptr = yyg->yy_c_buf_p = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos; + yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file; + yyg->yy_hold_char = *yyg->yy_c_buf_p; } /** Allocate and initialize an input buffer state. @@ -1657,109 +1592,109 @@ static void yy_load_buffer_state (yyscan_t yyscanner) * @param yyscanner The scanner object. * @return the allocated buffer state. */ - YY_BUFFER_STATE yy_create_buffer (FILE * file, int size , yyscan_t yyscanner) +YY_BUFFER_STATE yy_create_buffer(FILE *file, int size, yyscan_t yyscanner) { - YY_BUFFER_STATE b; - - b = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state ) ,yyscanner ); - if ( ! b ) - YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" ); + YY_BUFFER_STATE b; - b->yy_buf_size = size; + b = (YY_BUFFER_STATE)yyalloc(sizeof(struct yy_buffer_state), yyscanner); + if (!b) + YY_FATAL_ERROR("out of dynamic memory in yy_create_buffer()"); - /* yy_ch_buf has to be 2 characters longer than the size given because - * we need to put in 2 end-of-buffer characters. - */ - b->yy_ch_buf = (char *) yyalloc(b->yy_buf_size + 2 ,yyscanner ); - if ( ! b->yy_ch_buf ) - YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" ); + b->yy_buf_size = size; - b->yy_is_our_buffer = 1; + /* yy_ch_buf has to be 2 characters longer than the size given because + * we need to put in 2 end-of-buffer characters. + */ + b->yy_ch_buf = (char *)yyalloc(b->yy_buf_size + 2, yyscanner); + if (!b->yy_ch_buf) + YY_FATAL_ERROR("out of dynamic memory in yy_create_buffer()"); - yy_init_buffer(b,file ,yyscanner); + b->yy_is_our_buffer = 1; - return b; + yy_init_buffer(b, file, yyscanner); + + return b; } /** Destroy the buffer. * @param b a buffer created with yy_create_buffer() * @param yyscanner The scanner object. */ - void yy_delete_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner) +void yy_delete_buffer(YY_BUFFER_STATE b, yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; - if ( ! b ) - return; + if (!b) + return; - if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */ - YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0; + if (b == YY_CURRENT_BUFFER) /* Not sure if we should pop here. 
*/ + YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE)0; - if ( b->yy_is_our_buffer ) - yyfree((void *) b->yy_ch_buf ,yyscanner ); + if (b->yy_is_our_buffer) + yyfree((void *)b->yy_ch_buf, yyscanner); - yyfree((void *) b ,yyscanner ); + yyfree((void *)b, yyscanner); } #ifndef __cplusplus -extern int isatty (int ); +extern int isatty(int); #endif /* __cplusplus */ - + /* Initializes or reinitializes a buffer. * This function is sometimes called more than once on the same buffer, * such as during a yyrestart() or at EOF. */ - static void yy_init_buffer (YY_BUFFER_STATE b, FILE * file , yyscan_t yyscanner) +static void yy_init_buffer(YY_BUFFER_STATE b, FILE *file, yyscan_t yyscanner) { - int oerrno = errno; - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + int oerrno = errno; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; - yy_flush_buffer(b ,yyscanner); + yy_flush_buffer(b, yyscanner); - b->yy_input_file = file; - b->yy_fill_buffer = 1; + b->yy_input_file = file; + b->yy_fill_buffer = 1; - /* If b is the current buffer, then yy_init_buffer was _probably_ - * called from yyrestart() or through yy_get_next_buffer. - * In that case, we don't want to reset the lineno or column. - */ - if (b != YY_CURRENT_BUFFER){ - b->yy_bs_lineno = 1; - b->yy_bs_column = 0; - } + /* If b is the current buffer, then yy_init_buffer was _probably_ + * called from yyrestart() or through yy_get_next_buffer. + * In that case, we don't want to reset the lineno or column. + */ + if (b != YY_CURRENT_BUFFER) { + b->yy_bs_lineno = 1; + b->yy_bs_column = 0; + } - b->yy_is_interactive = file ? (isatty( fileno(file) ) > 0) : 0; - - errno = oerrno; + b->yy_is_interactive = file ? (isatty(fileno(file)) > 0) : 0; + + errno = oerrno; } /** Discard all buffered characters. On the next scan, YY_INPUT will be called. * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER. * @param yyscanner The scanner object. */ - void yy_flush_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner) +void yy_flush_buffer(YY_BUFFER_STATE b, yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - if ( ! b ) - return; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + if (!b) + return; - b->yy_n_chars = 0; + b->yy_n_chars = 0; - /* We always need two end-of-buffer characters. The first causes - * a transition to the end-of-buffer state. The second causes - * a jam in that state. - */ - b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR; - b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR; + /* We always need two end-of-buffer characters. The first causes + * a transition to the end-of-buffer state. The second causes + * a jam in that state. + */ + b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR; + b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR; - b->yy_buf_pos = &b->yy_ch_buf[0]; + b->yy_buf_pos = &b->yy_ch_buf[0]; - b->yy_at_bol = 1; - b->yy_buffer_status = YY_BUFFER_NEW; + b->yy_at_bol = 1; + b->yy_buffer_status = YY_BUFFER_NEW; - if ( b == YY_CURRENT_BUFFER ) - yy_load_buffer_state(yyscanner ); + if (b == YY_CURRENT_BUFFER) + yy_load_buffer_state(yyscanner); } /** Pushes the new state onto the stack. The new state becomes @@ -1768,134 +1703,128 @@ extern int isatty (int ); * @param new_buffer The new state. * @param yyscanner The scanner object. 
*/ -void yypush_buffer_state (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner) +void yypush_buffer_state(YY_BUFFER_STATE new_buffer, yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - if (new_buffer == NULL) - return; - - yyensure_buffer_stack(yyscanner); - - /* This block is copied from yy_switch_to_buffer. */ - if ( YY_CURRENT_BUFFER ) - { - /* Flush out information for old buffer. */ - *yyg->yy_c_buf_p = yyg->yy_hold_char; - YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = yyg->yy_c_buf_p; - YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars; - } - - /* Only push if top exists. Otherwise, replace top. */ - if (YY_CURRENT_BUFFER) - yyg->yy_buffer_stack_top++; - YY_CURRENT_BUFFER_LVALUE = new_buffer; - - /* copied from yy_switch_to_buffer. */ - yy_load_buffer_state(yyscanner ); - yyg->yy_did_buffer_switch_on_eof = 1; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + if (new_buffer == NULL) + return; + + yyensure_buffer_stack(yyscanner); + + /* This block is copied from yy_switch_to_buffer. */ + if (YY_CURRENT_BUFFER) { + /* Flush out information for old buffer. */ + *yyg->yy_c_buf_p = yyg->yy_hold_char; + YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = yyg->yy_c_buf_p; + YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars; + } + + /* Only push if top exists. Otherwise, replace top. */ + if (YY_CURRENT_BUFFER) + yyg->yy_buffer_stack_top++; + YY_CURRENT_BUFFER_LVALUE = new_buffer; + + /* copied from yy_switch_to_buffer. */ + yy_load_buffer_state(yyscanner); + yyg->yy_did_buffer_switch_on_eof = 1; } /** Removes and deletes the top of the stack, if present. * The next element becomes the new top. * @param yyscanner The scanner object. */ -void yypop_buffer_state (yyscan_t yyscanner) +void yypop_buffer_state(yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - if (!YY_CURRENT_BUFFER) - return; - - yy_delete_buffer(YY_CURRENT_BUFFER ,yyscanner); - YY_CURRENT_BUFFER_LVALUE = NULL; - if (yyg->yy_buffer_stack_top > 0) - --yyg->yy_buffer_stack_top; - - if (YY_CURRENT_BUFFER) { - yy_load_buffer_state(yyscanner ); - yyg->yy_did_buffer_switch_on_eof = 1; - } + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + if (!YY_CURRENT_BUFFER) + return; + + yy_delete_buffer(YY_CURRENT_BUFFER, yyscanner); + YY_CURRENT_BUFFER_LVALUE = NULL; + if (yyg->yy_buffer_stack_top > 0) + --yyg->yy_buffer_stack_top; + + if (YY_CURRENT_BUFFER) { + yy_load_buffer_state(yyscanner); + yyg->yy_did_buffer_switch_on_eof = 1; + } } /* Allocates the stack if it does not exist. * Guarantees space for at least one push. */ -static void yyensure_buffer_stack (yyscan_t yyscanner) +static void yyensure_buffer_stack(yyscan_t yyscanner) { - yy_size_t num_to_alloc; - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + yy_size_t num_to_alloc; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; - if (!yyg->yy_buffer_stack) { + if (!yyg->yy_buffer_stack) { - /* First allocation is just for 2 elements, since we don't know if this - * scanner will even need a stack. We use 2 instead of 1 to avoid an - * immediate realloc on the next call. - */ - num_to_alloc = 1; - yyg->yy_buffer_stack = (struct yy_buffer_state**)yyalloc - (num_to_alloc * sizeof(struct yy_buffer_state*) - , yyscanner); - if ( ! 
yyg->yy_buffer_stack ) - YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" ); - - memset(yyg->yy_buffer_stack, 0, num_to_alloc * sizeof(struct yy_buffer_state*)); - - yyg->yy_buffer_stack_max = num_to_alloc; - yyg->yy_buffer_stack_top = 0; - return; - } - - if (yyg->yy_buffer_stack_top >= (yyg->yy_buffer_stack_max) - 1){ - - /* Increase the buffer to prepare for a possible push. */ - int grow_size = 8 /* arbitrary grow size */; - - num_to_alloc = yyg->yy_buffer_stack_max + grow_size; - yyg->yy_buffer_stack = (struct yy_buffer_state**)yyrealloc - (yyg->yy_buffer_stack, - num_to_alloc * sizeof(struct yy_buffer_state*) - , yyscanner); - if ( ! yyg->yy_buffer_stack ) - YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" ); - - /* zero only the new slots.*/ - memset(yyg->yy_buffer_stack + yyg->yy_buffer_stack_max, 0, grow_size * sizeof(struct yy_buffer_state*)); - yyg->yy_buffer_stack_max = num_to_alloc; - } + /* First allocation is just for 2 elements, since we don't know if this + * scanner will even need a stack. We use 2 instead of 1 to avoid an + * immediate realloc on the next call. + */ + num_to_alloc = 1; + yyg->yy_buffer_stack = + (struct yy_buffer_state **)yyalloc(num_to_alloc * sizeof(struct yy_buffer_state *), yyscanner); + if (!yyg->yy_buffer_stack) + YY_FATAL_ERROR("out of dynamic memory in yyensure_buffer_stack()"); + + memset(yyg->yy_buffer_stack, 0, num_to_alloc * sizeof(struct yy_buffer_state *)); + + yyg->yy_buffer_stack_max = num_to_alloc; + yyg->yy_buffer_stack_top = 0; + return; + } + + if (yyg->yy_buffer_stack_top >= (yyg->yy_buffer_stack_max) - 1) { + + /* Increase the buffer to prepare for a possible push. */ + int grow_size = 8 /* arbitrary grow size */; + + num_to_alloc = yyg->yy_buffer_stack_max + grow_size; + yyg->yy_buffer_stack = (struct yy_buffer_state **)yyrealloc( + yyg->yy_buffer_stack, num_to_alloc * sizeof(struct yy_buffer_state *), yyscanner); + if (!yyg->yy_buffer_stack) + YY_FATAL_ERROR("out of dynamic memory in yyensure_buffer_stack()"); + + /* zero only the new slots.*/ + memset(yyg->yy_buffer_stack + yyg->yy_buffer_stack_max, 0, grow_size * sizeof(struct yy_buffer_state *)); + yyg->yy_buffer_stack_max = num_to_alloc; + } } /** Setup the input buffer state to scan directly from a user-specified character buffer. * @param base the character buffer * @param size the size in bytes of the character buffer * @param yyscanner The scanner object. - * @return the newly allocated buffer state object. + * @return the newly allocated buffer state object. */ -YY_BUFFER_STATE yy_scan_buffer (char * base, yy_size_t size , yyscan_t yyscanner) +YY_BUFFER_STATE yy_scan_buffer(char *base, yy_size_t size, yyscan_t yyscanner) { - YY_BUFFER_STATE b; - - if ( size < 2 || - base[size-2] != YY_END_OF_BUFFER_CHAR || - base[size-1] != YY_END_OF_BUFFER_CHAR ) - /* They forgot to leave room for the EOB's. */ - return 0; - - b = (YY_BUFFER_STATE) yyalloc(sizeof( struct yy_buffer_state ) ,yyscanner ); - if ( ! 
b ) - YY_FATAL_ERROR( "out of dynamic memory in yy_scan_buffer()" ); - - b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */ - b->yy_buf_pos = b->yy_ch_buf = base; - b->yy_is_our_buffer = 0; - b->yy_input_file = 0; - b->yy_n_chars = b->yy_buf_size; - b->yy_is_interactive = 0; - b->yy_at_bol = 1; - b->yy_fill_buffer = 0; - b->yy_buffer_status = YY_BUFFER_NEW; - - yy_switch_to_buffer(b ,yyscanner ); - - return b; + YY_BUFFER_STATE b; + + if (size < 2 || base[size - 2] != YY_END_OF_BUFFER_CHAR || base[size - 1] != YY_END_OF_BUFFER_CHAR) + /* They forgot to leave room for the EOB's. */ + return 0; + + b = (YY_BUFFER_STATE)yyalloc(sizeof(struct yy_buffer_state), yyscanner); + if (!b) + YY_FATAL_ERROR("out of dynamic memory in yy_scan_buffer()"); + + b->yy_buf_size = size - 2; /* "- 2" to take care of EOB's */ + b->yy_buf_pos = b->yy_ch_buf = base; + b->yy_is_our_buffer = 0; + b->yy_input_file = 0; + b->yy_n_chars = b->yy_buf_size; + b->yy_is_interactive = 0; + b->yy_at_bol = 1; + b->yy_fill_buffer = 0; + b->yy_buffer_status = YY_BUFFER_NEW; + + yy_switch_to_buffer(b, yyscanner); + + return b; } /** Setup the input buffer state to scan a string. The next call to yylex() will @@ -1906,10 +1835,10 @@ YY_BUFFER_STATE yy_scan_buffer (char * base, yy_size_t size , yyscan_t yyscann * @note If you want to scan bytes that may contain NUL values, then use * yy_scan_bytes() instead. */ -YY_BUFFER_STATE yy_scan_string (yyconst char * yystr , yyscan_t yyscanner) +YY_BUFFER_STATE yy_scan_string(yyconst char *yystr, yyscan_t yyscanner) { - - return yy_scan_bytes(yystr,strlen(yystr) ,yyscanner); + + return yy_scan_bytes(yystr, strlen(yystr), yyscanner); } /** Setup the input buffer state to scan the given bytes. The next call to yylex() will @@ -1919,174 +1848,172 @@ YY_BUFFER_STATE yy_scan_string (yyconst char * yystr , yyscan_t yyscanner) * @param yyscanner The scanner object. * @return the newly allocated buffer state object. */ -YY_BUFFER_STATE yy_scan_bytes (yyconst char * yybytes, yy_size_t _yybytes_len , yyscan_t yyscanner) +YY_BUFFER_STATE yy_scan_bytes(yyconst char *yybytes, yy_size_t _yybytes_len, yyscan_t yyscanner) { - YY_BUFFER_STATE b; - char *buf; - yy_size_t n, i; - - /* Get memory for full buffer, including space for trailing EOB's. */ - n = _yybytes_len + 2; - buf = (char *) yyalloc(n ,yyscanner ); - if ( ! buf ) - YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" ); - - for ( i = 0; i < _yybytes_len; ++i ) - buf[i] = yybytes[i]; - - buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR; - - b = yy_scan_buffer(buf,n ,yyscanner); - if ( ! b ) - YY_FATAL_ERROR( "bad buffer in yy_scan_bytes()" ); - - /* It's okay to grow etc. this buffer, and we should throw it - * away when we're done. - */ - b->yy_is_our_buffer = 1; - - return b; + YY_BUFFER_STATE b; + char *buf; + yy_size_t n, i; + + /* Get memory for full buffer, including space for trailing EOB's. */ + n = _yybytes_len + 2; + buf = (char *)yyalloc(n, yyscanner); + if (!buf) + YY_FATAL_ERROR("out of dynamic memory in yy_scan_bytes()"); + + for (i = 0; i < _yybytes_len; ++i) + buf[i] = yybytes[i]; + + buf[_yybytes_len] = buf[_yybytes_len + 1] = YY_END_OF_BUFFER_CHAR; + + b = yy_scan_buffer(buf, n, yyscanner); + if (!b) + YY_FATAL_ERROR("bad buffer in yy_scan_bytes()"); + + /* It's okay to grow etc. this buffer, and we should throw it + * away when we're done. 
+ */ + b->yy_is_our_buffer = 1; + + return b; } #ifndef YY_EXIT_FAILURE #define YY_EXIT_FAILURE 2 #endif -static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner) +static void yy_fatal_error(yyconst char *msg, yyscan_t yyscanner) { - (void) fprintf( stderr, "%s\n", msg ); - exit( YY_EXIT_FAILURE ); + (void)fprintf(stderr, "%s\n", msg); + exit(YY_EXIT_FAILURE); } /* Redefine yyless() so it works in section 3 code. */ #undef yyless -#define yyless(n) \ - do \ - { \ - /* Undo effects of setting up yytext. */ \ - int yyless_macro_arg = (n); \ - YY_LESS_LINENO(yyless_macro_arg);\ - yytext[yyleng] = yyg->yy_hold_char; \ - yyg->yy_c_buf_p = yytext + yyless_macro_arg; \ - yyg->yy_hold_char = *yyg->yy_c_buf_p; \ - *yyg->yy_c_buf_p = '\0'; \ - yyleng = yyless_macro_arg; \ - } \ - while ( 0 ) +#define yyless(n) \ + do { \ + /* Undo effects of setting up yytext. */ \ + int yyless_macro_arg = (n); \ + YY_LESS_LINENO(yyless_macro_arg); \ + yytext[yyleng] = yyg->yy_hold_char; \ + yyg->yy_c_buf_p = yytext + yyless_macro_arg; \ + yyg->yy_hold_char = *yyg->yy_c_buf_p; \ + *yyg->yy_c_buf_p = '\0'; \ + yyleng = yyless_macro_arg; \ + } while (0) /* Accessor methods (get/set functions) to struct members. */ /** Get the user-defined data for this scanner. * @param yyscanner The scanner object. */ -YY_EXTRA_TYPE yyget_extra (yyscan_t yyscanner) +YY_EXTRA_TYPE yyget_extra(yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - return yyextra; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + return yyextra; } /** Get the current line number. * @param yyscanner The scanner object. */ -int yyget_lineno (yyscan_t yyscanner) +int yyget_lineno(yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - - if (! YY_CURRENT_BUFFER) - return 0; - - return yylineno; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + + if (!YY_CURRENT_BUFFER) + return 0; + + return yylineno; } /** Get the current column number. * @param yyscanner The scanner object. */ -int yyget_column (yyscan_t yyscanner) +int yyget_column(yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - - if (! YY_CURRENT_BUFFER) - return 0; - - return yycolumn; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + + if (!YY_CURRENT_BUFFER) + return 0; + + return yycolumn; } /** Get the input stream. * @param yyscanner The scanner object. */ -FILE *yyget_in (yyscan_t yyscanner) +FILE *yyget_in(yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - return yyin; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + return yyin; } /** Get the output stream. * @param yyscanner The scanner object. */ -FILE *yyget_out (yyscan_t yyscanner) +FILE *yyget_out(yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - return yyout; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + return yyout; } /** Get the length of the current token. * @param yyscanner The scanner object. */ -yy_size_t yyget_leng (yyscan_t yyscanner) +yy_size_t yyget_leng(yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - return yyleng; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + return yyleng; } /** Get the current token. * @param yyscanner The scanner object. */ -char *yyget_text (yyscan_t yyscanner) +char *yyget_text(yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - return yytext; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + return yytext; } /** Set the user-defined data. 
This data is never touched by the scanner. * @param user_defined The data to be associated with this scanner. * @param yyscanner The scanner object. */ -void yyset_extra (YY_EXTRA_TYPE user_defined , yyscan_t yyscanner) +void yyset_extra(YY_EXTRA_TYPE user_defined, yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - yyextra = user_defined ; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + yyextra = user_defined; } /** Set the current line number. * @param line_number * @param yyscanner The scanner object. */ -void yyset_lineno (int line_number , yyscan_t yyscanner) +void yyset_lineno(int line_number, yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + + /* lineno is only valid if an input buffer exists. */ + if (!YY_CURRENT_BUFFER) + yy_fatal_error("yyset_lineno called with no buffer", yyscanner); - /* lineno is only valid if an input buffer exists. */ - if (! YY_CURRENT_BUFFER ) - yy_fatal_error( "yyset_lineno called with no buffer" , yyscanner); - - yylineno = line_number; + yylineno = line_number; } /** Set the current column. * @param line_number * @param yyscanner The scanner object. */ -void yyset_column (int column_no , yyscan_t yyscanner) +void yyset_column(int column_no, yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; - /* column is only valid if an input buffer exists. */ - if (! YY_CURRENT_BUFFER ) - yy_fatal_error( "yyset_column called with no buffer" , yyscanner); - - yycolumn = column_no; + /* column is only valid if an input buffer exists. */ + if (!YY_CURRENT_BUFFER) + yy_fatal_error("yyset_column called with no buffer", yyscanner); + + yycolumn = column_no; } /** Set the input stream. This does not discard the current @@ -2095,42 +2022,42 @@ void yyset_column (int column_no , yyscan_t yyscanner) * @param yyscanner The scanner object. 
* @see yy_switch_to_buffer */ -void yyset_in (FILE * in_str , yyscan_t yyscanner) +void yyset_in(FILE *in_str, yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - yyin = in_str ; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + yyin = in_str; } -void yyset_out (FILE * out_str , yyscan_t yyscanner) +void yyset_out(FILE *out_str, yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - yyout = out_str ; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + yyout = out_str; } -int yyget_debug (yyscan_t yyscanner) +int yyget_debug(yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - return yy_flex_debug; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + return yy_flex_debug; } -void yyset_debug (int bdebug , yyscan_t yyscanner) +void yyset_debug(int bdebug, yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - yy_flex_debug = bdebug ; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + yy_flex_debug = bdebug; } /* Accessor methods for yylval and yylloc */ -YYSTYPE * yyget_lval (yyscan_t yyscanner) +YYSTYPE *yyget_lval(yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - return yylval; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + return yylval; } -void yyset_lval (YYSTYPE * yylval_param , yyscan_t yyscanner) +void yyset_lval(YYSTYPE *yylval_param, yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - yylval = yylval_param; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + yylval = yylval_param; } /* User-visible API */ @@ -2140,25 +2067,25 @@ void yyset_lval (YYSTYPE * yylval_param , yyscan_t yyscanner) * That's why we explicitly handle the declaration, instead of using our macros. */ -int yylex_init(yyscan_t* ptr_yy_globals) +int yylex_init(yyscan_t *ptr_yy_globals) { - if (ptr_yy_globals == NULL){ - errno = EINVAL; - return 1; - } + if (ptr_yy_globals == NULL) { + errno = EINVAL; + return 1; + } - *ptr_yy_globals = (yyscan_t) yyalloc ( sizeof( struct yyguts_t ), NULL ); + *ptr_yy_globals = (yyscan_t)yyalloc(sizeof(struct yyguts_t), NULL); - if (*ptr_yy_globals == NULL){ - errno = ENOMEM; - return 1; - } + if (*ptr_yy_globals == NULL) { + errno = ENOMEM; + return 1; + } - /* By setting to 0xAA, we expose bugs in yy_init_globals. Leave at 0x00 for releases. */ - memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t)); + /* By setting to 0xAA, we expose bugs in yy_init_globals. Leave at 0x00 for releases. */ + memset(*ptr_yy_globals, 0x00, sizeof(struct yyguts_t)); - return yy_init_globals ( *ptr_yy_globals ); + return yy_init_globals(*ptr_yy_globals); } /* yylex_init_extra has the same functionality as yylex_init, but follows the @@ -2169,95 +2096,95 @@ int yylex_init(yyscan_t* ptr_yy_globals) * the yyextra field. */ -int yylex_init_extra(YY_EXTRA_TYPE yy_user_defined,yyscan_t* ptr_yy_globals ) +int yylex_init_extra(YY_EXTRA_TYPE yy_user_defined, yyscan_t *ptr_yy_globals) { - struct yyguts_t dummy_yyguts; + struct yyguts_t dummy_yyguts; - yyset_extra (yy_user_defined, &dummy_yyguts); + yyset_extra(yy_user_defined, &dummy_yyguts); - if (ptr_yy_globals == NULL){ - errno = EINVAL; - return 1; - } - - *ptr_yy_globals = (yyscan_t) yyalloc ( sizeof( struct yyguts_t ), &dummy_yyguts ); - - if (*ptr_yy_globals == NULL){ - errno = ENOMEM; - return 1; - } - - /* By setting to 0xAA, we expose bugs in - yy_init_globals. Leave at 0x00 for releases. 
*/ - memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t)); - - yyset_extra (yy_user_defined, *ptr_yy_globals); - - return yy_init_globals ( *ptr_yy_globals ); -} + if (ptr_yy_globals == NULL) { + errno = EINVAL; + return 1; + } -static int yy_init_globals (yyscan_t yyscanner) -{ - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - /* Initialization is the same as for the non-reentrant scanner. - * This function is called from yylex_destroy(), so don't allocate here. - */ + *ptr_yy_globals = (yyscan_t)yyalloc(sizeof(struct yyguts_t), &dummy_yyguts); - yyg->yy_buffer_stack = 0; - yyg->yy_buffer_stack_top = 0; - yyg->yy_buffer_stack_max = 0; - yyg->yy_c_buf_p = (char *) 0; - yyg->yy_init = 0; - yyg->yy_start = 0; + if (*ptr_yy_globals == NULL) { + errno = ENOMEM; + return 1; + } + + /* By setting to 0xAA, we expose bugs in + yy_init_globals. Leave at 0x00 for releases. */ + memset(*ptr_yy_globals, 0x00, sizeof(struct yyguts_t)); + + yyset_extra(yy_user_defined, *ptr_yy_globals); + + return yy_init_globals(*ptr_yy_globals); +} - yyg->yy_start_stack_ptr = 0; - yyg->yy_start_stack_depth = 0; - yyg->yy_start_stack = NULL; +static int yy_init_globals(yyscan_t yyscanner) +{ + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + /* Initialization is the same as for the non-reentrant scanner. + * This function is called from yylex_destroy(), so don't allocate here. + */ + + yyg->yy_buffer_stack = 0; + yyg->yy_buffer_stack_top = 0; + yyg->yy_buffer_stack_max = 0; + yyg->yy_c_buf_p = (char *)0; + yyg->yy_init = 0; + yyg->yy_start = 0; + + yyg->yy_start_stack_ptr = 0; + yyg->yy_start_stack_depth = 0; + yyg->yy_start_stack = NULL; /* Defined in main.c */ #ifdef YY_STDINIT - yyin = stdin; - yyout = stdout; + yyin = stdin; + yyout = stdout; #else - yyin = (FILE *) 0; - yyout = (FILE *) 0; + yyin = (FILE *)0; + yyout = (FILE *)0; #endif - /* For future reference: Set errno on error, since we are called by - * yylex_init() - */ - return 0; + /* For future reference: Set errno on error, since we are called by + * yylex_init() + */ + return 0; } /* yylex_destroy is for both reentrant and non-reentrant scanners. */ -int yylex_destroy (yyscan_t yyscanner) +int yylex_destroy(yyscan_t yyscanner) { - struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; - - /* Pop the buffer stack, destroying each element. */ - while(YY_CURRENT_BUFFER){ - yy_delete_buffer(YY_CURRENT_BUFFER ,yyscanner ); - YY_CURRENT_BUFFER_LVALUE = NULL; - yypop_buffer_state(yyscanner); - } - - /* Destroy the stack itself. */ - yyfree(yyg->yy_buffer_stack ,yyscanner); - yyg->yy_buffer_stack = NULL; - - /* Destroy the start condition stack. */ - yyfree(yyg->yy_start_stack ,yyscanner ); - yyg->yy_start_stack = NULL; - - /* Reset the globals. This is important in a non-reentrant scanner so the next time - * yylex() is called, initialization will occur. */ - yy_init_globals( yyscanner); - - /* Destroy the main struct (reentrant only). */ - yyfree ( yyscanner , yyscanner ); - yyscanner = NULL; - return 0; + struct yyguts_t *yyg = (struct yyguts_t *)yyscanner; + + /* Pop the buffer stack, destroying each element. */ + while (YY_CURRENT_BUFFER) { + yy_delete_buffer(YY_CURRENT_BUFFER, yyscanner); + YY_CURRENT_BUFFER_LVALUE = NULL; + yypop_buffer_state(yyscanner); + } + + /* Destroy the stack itself. */ + yyfree(yyg->yy_buffer_stack, yyscanner); + yyg->yy_buffer_stack = NULL; + + /* Destroy the start condition stack. */ + yyfree(yyg->yy_start_stack, yyscanner); + yyg->yy_start_stack = NULL; + + /* Reset the globals. 
This is important in a non-reentrant scanner so the next time + * yylex() is called, initialization will occur. */ + yy_init_globals(yyscanner); + + /* Destroy the main struct (reentrant only). */ + yyfree(yyscanner, yyscanner); + yyscanner = NULL; + return 0; } /* @@ -2265,53 +2192,52 @@ int yylex_destroy (yyscan_t yyscanner) */ #ifndef yytext_ptr -static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner) +static void yy_flex_strncpy(char *s1, yyconst char *s2, int n, yyscan_t yyscanner) { - register int i; - for ( i = 0; i < n; ++i ) - s1[i] = s2[i]; + register int i; + for (i = 0; i < n; ++i) + s1[i] = s2[i]; } #endif #ifdef YY_NEED_STRLEN -static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner) +static int yy_flex_strlen(yyconst char *s, yyscan_t yyscanner) { - register int n; - for ( n = 0; s[n]; ++n ) - ; + register int n; + for (n = 0; s[n]; ++n) + ; - return n; + return n; } #endif -void *yyalloc (yy_size_t size , yyscan_t yyscanner) +void *yyalloc(yy_size_t size, yyscan_t yyscanner) { - return (void *) malloc( size ); + return (void *)malloc(size); } -void *yyrealloc (void * ptr, yy_size_t size , yyscan_t yyscanner) +void *yyrealloc(void *ptr, yy_size_t size, yyscan_t yyscanner) { - /* The cast to (char *) in the following accommodates both - * implementations that use char* generic pointers, and those - * that use void* generic pointers. It works with the latter - * because both ANSI C and C++ allow castless assignment from - * any pointer type to void*, and deal with argument conversions - * as though doing an assignment. - */ - return (void *) realloc( (char *) ptr, size ); + /* The cast to (char *) in the following accommodates both + * implementations that use char* generic pointers, and those + * that use void* generic pointers. It works with the latter + * because both ANSI C and C++ allow castless assignment from + * any pointer type to void*, and deal with argument conversions + * as though doing an assignment. + */ + return (void *)realloc((char *)ptr, size); } -void yyfree (void * ptr , yyscan_t yyscanner) +void yyfree(void *ptr, yyscan_t yyscanner) { - free( (char *) ptr ); /* see yyrealloc() for (char *) cast */ + free((char *)ptr); /* see yyrealloc() for (char *) cast */ } #define YYTABLES_NAME "yytables" #line 88 "lex_sql.l" - - -void scan_string(const char *str, yyscan_t scanner) { - yy_switch_to_buffer(yy_scan_string(str,scanner),scanner); +void scan_string(const char *str, yyscan_t scanner) +{ + yy_switch_to_buffer(yy_scan_string(str, scanner), scanner); } diff --git a/src/observer/sql/parser/lex.yy.h b/src/observer/sql/parser/lex.yy.h index 870c5edce874f4547b4bdebeeed614f11d998d9c..1b934fe45849bd52a4b40775b1da6c3ad10f8d0a 100644 --- a/src/observer/sql/parser/lex.yy.h +++ b/src/observer/sql/parser/lex.yy.h @@ -4,7 +4,7 @@ #line 6 "lex.yy.h" -#define YY_INT_ALIGNED short int +#define YY_INT_ALIGNED short int /* A lexical scanner generated by flex */ @@ -33,10 +33,10 @@ /* C99 systems have . Non-C99 systems may or may not. */ -#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 says to define __STDC_LIMIT_MACROS before including stdint.h, - * if you want the limit (max/min) macros for int types. + * if you want the limit (max/min) macros for int types. 
*/ #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS 1 @@ -54,38 +54,38 @@ typedef uint64_t flex_uint64_t; typedef signed char flex_int8_t; typedef short int flex_int16_t; typedef int flex_int32_t; -typedef unsigned char flex_uint8_t; +typedef unsigned char flex_uint8_t; typedef unsigned short int flex_uint16_t; typedef unsigned int flex_uint32_t; #endif /* ! C99 */ /* Limits of integral types. */ #ifndef INT8_MIN -#define INT8_MIN (-128) +#define INT8_MIN (-128) #endif #ifndef INT16_MIN -#define INT16_MIN (-32767-1) +#define INT16_MIN (-32767 - 1) #endif #ifndef INT32_MIN -#define INT32_MIN (-2147483647-1) +#define INT32_MIN (-2147483647 - 1) #endif #ifndef INT8_MAX -#define INT8_MAX (127) +#define INT8_MAX (127) #endif #ifndef INT16_MAX -#define INT16_MAX (32767) +#define INT16_MAX (32767) #endif #ifndef INT32_MAX -#define INT32_MAX (2147483647) +#define INT32_MAX (2147483647) #endif #ifndef UINT8_MAX -#define UINT8_MAX (255U) +#define UINT8_MAX (255U) #endif #ifndef UINT16_MAX -#define UINT16_MAX (65535U) +#define UINT16_MAX (65535U) #endif #ifndef UINT32_MAX -#define UINT32_MAX (4294967295U) +#define UINT32_MAX (4294967295U) #endif #endif /* ! FLEXINT_H */ @@ -95,15 +95,15 @@ typedef unsigned int flex_uint32_t; /* The "const" storage-class-modifier is valid. */ #define YY_USE_CONST -#else /* ! __cplusplus */ +#else /* ! __cplusplus */ /* C99 requires __STDC__ to be defined as 1. */ -#if defined (__STDC__) +#if defined(__STDC__) #define YY_USE_CONST -#endif /* defined (__STDC__) */ -#endif /* ! __cplusplus */ +#endif /* defined (__STDC__) */ +#endif /* ! __cplusplus */ #ifdef YY_USE_CONST #define yyconst const @@ -114,7 +114,7 @@ typedef unsigned int flex_uint32_t; /* An opaque pointer. */ #ifndef YY_TYPEDEF_YY_SCANNER_T #define YY_TYPEDEF_YY_SCANNER_T -typedef void* yyscan_t; +typedef void *yyscan_t; #endif /* For convenience, these vars (plus the bison vars far below) @@ -145,70 +145,68 @@ typedef size_t yy_size_t; #ifndef YY_STRUCT_YY_BUFFER_STATE #define YY_STRUCT_YY_BUFFER_STATE -struct yy_buffer_state - { - FILE *yy_input_file; - - char *yy_ch_buf; /* input buffer */ - char *yy_buf_pos; /* current position in input buffer */ - - /* Size of input buffer in bytes, not including room for EOB - * characters. - */ - yy_size_t yy_buf_size; - - /* Number of characters read into yy_ch_buf, not including EOB - * characters. - */ - yy_size_t yy_n_chars; - - /* Whether we "own" the buffer - i.e., we know we created it, - * and can realloc() it to grow it, and should free() it to - * delete it. - */ - int yy_is_our_buffer; - - /* Whether this is an "interactive" input source; if so, and - * if we're using stdio for input, then we want to use getc() - * instead of fread(), to make sure we stop fetching input after - * each newline. - */ - int yy_is_interactive; - - /* Whether we're considered to be at the beginning of a line. - * If so, '^' rules will be active on the next match, otherwise - * not. - */ - int yy_at_bol; - - int yy_bs_lineno; /**< The line count. */ - int yy_bs_column; /**< The column count. */ - - /* Whether to try to fill the input buffer when we reach the - * end of it. - */ - int yy_fill_buffer; - - int yy_buffer_status; - - }; +struct yy_buffer_state { + FILE *yy_input_file; + + char *yy_ch_buf; /* input buffer */ + char *yy_buf_pos; /* current position in input buffer */ + + /* Size of input buffer in bytes, not including room for EOB + * characters. + */ + yy_size_t yy_buf_size; + + /* Number of characters read into yy_ch_buf, not including EOB + * characters. 
+ */ + yy_size_t yy_n_chars; + + /* Whether we "own" the buffer - i.e., we know we created it, + * and can realloc() it to grow it, and should free() it to + * delete it. + */ + int yy_is_our_buffer; + + /* Whether this is an "interactive" input source; if so, and + * if we're using stdio for input, then we want to use getc() + * instead of fread(), to make sure we stop fetching input after + * each newline. + */ + int yy_is_interactive; + + /* Whether we're considered to be at the beginning of a line. + * If so, '^' rules will be active on the next match, otherwise + * not. + */ + int yy_at_bol; + + int yy_bs_lineno; /**< The line count. */ + int yy_bs_column; /**< The column count. */ + + /* Whether to try to fill the input buffer when we reach the + * end of it. + */ + int yy_fill_buffer; + + int yy_buffer_status; +}; #endif /* !YY_STRUCT_YY_BUFFER_STATE */ -void yyrestart (FILE *input_file ,yyscan_t yyscanner ); -void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer ,yyscan_t yyscanner ); -YY_BUFFER_STATE yy_create_buffer (FILE *file,int size ,yyscan_t yyscanner ); -void yy_delete_buffer (YY_BUFFER_STATE b ,yyscan_t yyscanner ); -void yy_flush_buffer (YY_BUFFER_STATE b ,yyscan_t yyscanner ); -void yypush_buffer_state (YY_BUFFER_STATE new_buffer ,yyscan_t yyscanner ); -void yypop_buffer_state (yyscan_t yyscanner ); +void yyrestart(FILE *input_file, yyscan_t yyscanner); +void yy_switch_to_buffer(YY_BUFFER_STATE new_buffer, yyscan_t yyscanner); +YY_BUFFER_STATE yy_create_buffer(FILE *file, int size, yyscan_t yyscanner); +void yy_delete_buffer(YY_BUFFER_STATE b, yyscan_t yyscanner); +void yy_flush_buffer(YY_BUFFER_STATE b, yyscan_t yyscanner); +void yypush_buffer_state(YY_BUFFER_STATE new_buffer, yyscan_t yyscanner); +void yypop_buffer_state(yyscan_t yyscanner); -YY_BUFFER_STATE yy_scan_buffer (char *base,yy_size_t size ,yyscan_t yyscanner ); -YY_BUFFER_STATE yy_scan_string (yyconst char *yy_str ,yyscan_t yyscanner ); -YY_BUFFER_STATE yy_scan_bytes (yyconst char *bytes,yy_size_t len ,yyscan_t yyscanner ); +YY_BUFFER_STATE yy_scan_buffer(char *base, yy_size_t size, yyscan_t yyscanner); +YY_BUFFER_STATE yy_scan_string(yyconst char *yy_str, yyscan_t yyscanner); +YY_BUFFER_STATE yy_scan_bytes(yyconst char *bytes, yy_size_t len, yyscan_t yyscanner); -void *yyalloc (yy_size_t ,yyscan_t yyscanner ); -void *yyrealloc (void *,yy_size_t ,yyscan_t yyscanner ); -void yyfree (void * ,yyscan_t yyscanner ); +void *yyalloc(yy_size_t, yyscan_t yyscanner); +void *yyrealloc(void *, yy_size_t, yyscan_t yyscanner); +void yyfree(void *, yyscan_t yyscanner); /* Begin user sect3 */ @@ -235,42 +233,42 @@ void yyfree (void * ,yyscan_t yyscanner ); #define YY_EXTRA_TYPE void * #endif -int yylex_init (yyscan_t* scanner); +int yylex_init(yyscan_t *scanner); -int yylex_init_extra (YY_EXTRA_TYPE user_defined,yyscan_t* scanner); +int yylex_init_extra(YY_EXTRA_TYPE user_defined, yyscan_t *scanner); /* Accessor methods to globals. These are made visible to non-reentrant scanners for convenience. 
*/ -int yylex_destroy (yyscan_t yyscanner ); +int yylex_destroy(yyscan_t yyscanner); -int yyget_debug (yyscan_t yyscanner ); +int yyget_debug(yyscan_t yyscanner); -void yyset_debug (int debug_flag ,yyscan_t yyscanner ); +void yyset_debug(int debug_flag, yyscan_t yyscanner); -YY_EXTRA_TYPE yyget_extra (yyscan_t yyscanner ); +YY_EXTRA_TYPE yyget_extra(yyscan_t yyscanner); -void yyset_extra (YY_EXTRA_TYPE user_defined ,yyscan_t yyscanner ); +void yyset_extra(YY_EXTRA_TYPE user_defined, yyscan_t yyscanner); -FILE *yyget_in (yyscan_t yyscanner ); +FILE *yyget_in(yyscan_t yyscanner); -void yyset_in (FILE * in_str ,yyscan_t yyscanner ); +void yyset_in(FILE *in_str, yyscan_t yyscanner); -FILE *yyget_out (yyscan_t yyscanner ); +FILE *yyget_out(yyscan_t yyscanner); -void yyset_out (FILE * out_str ,yyscan_t yyscanner ); +void yyset_out(FILE *out_str, yyscan_t yyscanner); -yy_size_t yyget_leng (yyscan_t yyscanner ); +yy_size_t yyget_leng(yyscan_t yyscanner); -char *yyget_text (yyscan_t yyscanner ); +char *yyget_text(yyscan_t yyscanner); -int yyget_lineno (yyscan_t yyscanner ); +int yyget_lineno(yyscan_t yyscanner); -void yyset_lineno (int line_number ,yyscan_t yyscanner ); +void yyset_lineno(int line_number, yyscan_t yyscanner); -YYSTYPE * yyget_lval (yyscan_t yyscanner ); +YYSTYPE *yyget_lval(yyscan_t yyscanner); -void yyset_lval (YYSTYPE * yylval_param ,yyscan_t yyscanner ); +void yyset_lval(YYSTYPE *yylval_param, yyscan_t yyscanner); /* Macros after this point can all be overridden by user definitions in * section 1. @@ -278,18 +276,18 @@ void yyset_lval (YYSTYPE * yylval_param ,yyscan_t yyscanner ); #ifndef YY_SKIP_YYWRAP #ifdef __cplusplus -extern "C" int yywrap (yyscan_t yyscanner ); +extern "C" int yywrap(yyscan_t yyscanner); #else -extern int yywrap (yyscan_t yyscanner ); +extern int yywrap(yyscan_t yyscanner); #endif #endif #ifndef yytext_ptr -static void yy_flex_strncpy (char *,yyconst char *,int ,yyscan_t yyscanner); +static void yy_flex_strncpy(char *, yyconst char *, int, yyscan_t yyscanner); #endif #ifdef YY_NEED_STRLEN -static int yy_flex_strlen (yyconst char * ,yyscan_t yyscanner); +static int yy_flex_strlen(yyconst char *, yyscan_t yyscanner); #endif #ifndef YY_NO_INPUT @@ -312,11 +310,9 @@ static int yy_flex_strlen (yyconst char * ,yyscan_t yyscanner); #ifndef YY_DECL #define YY_DECL_IS_OURS 1 -extern int yylex \ - (YYSTYPE * yylval_param ,yyscan_t yyscanner); +extern int yylex(YYSTYPE *yylval_param, yyscan_t yyscanner); -#define YY_DECL int yylex \ - (YYSTYPE * yylval_param , yyscan_t yyscanner) +#define YY_DECL int yylex(YYSTYPE *yylval_param, yyscan_t yyscanner) #endif /* !YY_DECL */ /* yy_get_previous_state - get the state just before the EOB char was reached */ @@ -335,7 +331,6 @@ extern int yylex \ #line 86 "lex_sql.l" - #line 340 "lex.yy.h" #undef yyIN_HEADER #endif /* yyHEADER_H */ diff --git a/src/observer/sql/parser/parse.cpp b/src/observer/sql/parser/parse.cpp index 25d2293e8b1ede111351190ca885ef9a1c98519b..de2cf51dd7a9acaf9d590ec6dea7080736f1864f 100644 --- a/src/observer/sql/parser/parse.cpp +++ b/src/observer/sql/parser/parse.cpp @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Longda on 2021/4/13. 
+// Created by Meiyi // #include @@ -21,8 +21,9 @@ RC parse(char *st, Query *sqln); #ifdef __cplusplus extern "C" { -#endif // __cplusplus -void relation_attr_init(RelAttr *relation_attr, const char *relation_name, const char *attribute_name) { +#endif // __cplusplus +void relation_attr_init(RelAttr *relation_attr, const char *relation_name, const char *attribute_name) +{ if (relation_name != nullptr) { relation_attr->relation_name = strdup(relation_name); } else { @@ -31,36 +32,41 @@ void relation_attr_init(RelAttr *relation_attr, const char *relation_name, const relation_attr->attribute_name = strdup(attribute_name); } -void relation_attr_destroy(RelAttr *relation_attr) { +void relation_attr_destroy(RelAttr *relation_attr) +{ free(relation_attr->relation_name); free(relation_attr->attribute_name); relation_attr->relation_name = nullptr; relation_attr->attribute_name = nullptr; } -void value_init_integer(Value *value, int v) { +void value_init_integer(Value *value, int v) +{ value->type = INTS; value->data = malloc(sizeof(v)); memcpy(value->data, &v, sizeof(v)); } -void value_init_float(Value *value, float v) { +void value_init_float(Value *value, float v) +{ value->type = FLOATS; value->data = malloc(sizeof(v)); memcpy(value->data, &v, sizeof(v)); } -void value_init_string(Value *value, const char *v) { +void value_init_string(Value *value, const char *v) +{ value->type = CHARS; value->data = strdup(v); } -void value_destroy(Value *value) { +void value_destroy(Value *value) +{ value->type = UNDEFINED; free(value->data); value->data = nullptr; } -void condition_init(Condition *condition, CompOp comp, - int left_is_attr, RelAttr *left_attr, Value *left_value, - int right_is_attr, RelAttr *right_attr, Value *right_value) { +void condition_init(Condition *condition, CompOp comp, int left_is_attr, RelAttr *left_attr, Value *left_value, + int right_is_attr, RelAttr *right_attr, Value *right_value) +{ condition->comp = comp; condition->left_is_attr = left_is_attr; if (left_is_attr) { @@ -76,7 +82,8 @@ void condition_init(Condition *condition, CompOp comp, condition->right_value = *right_value; } } -void condition_destroy(Condition *condition) { +void condition_destroy(Condition *condition) +{ if (condition->left_is_attr) { relation_attr_destroy(&condition->left_attr); } else { @@ -89,33 +96,39 @@ void condition_destroy(Condition *condition) { } } -void attr_info_init(AttrInfo *attr_info, const char *name, AttrType type, size_t length) { +void attr_info_init(AttrInfo *attr_info, const char *name, AttrType type, size_t length) +{ attr_info->name = strdup(name); attr_info->type = type; attr_info->length = length; } -void attr_info_destroy(AttrInfo *attr_info) { +void attr_info_destroy(AttrInfo *attr_info) +{ free(attr_info->name); attr_info->name = nullptr; } void selects_init(Selects *selects, ...); -void selects_append_attribute(Selects *selects, RelAttr *rel_attr) { +void selects_append_attribute(Selects *selects, RelAttr *rel_attr) +{ selects->attributes[selects->attr_num++] = *rel_attr; } -void selects_append_relation(Selects *selects, const char *relation_name) { +void selects_append_relation(Selects *selects, const char *relation_name) +{ selects->relations[selects->relation_num++] = strdup(relation_name); } -void selects_append_conditions(Selects *selects, Condition conditions[], size_t condition_num) { - assert(condition_num <= sizeof(selects->conditions)/sizeof(selects->conditions[0])); +void selects_append_conditions(Selects *selects, Condition conditions[], size_t condition_num) +{ + 
assert(condition_num <= sizeof(selects->conditions) / sizeof(selects->conditions[0])); for (size_t i = 0; i < condition_num; i++) { selects->conditions[i] = conditions[i]; } selects->condition_num = condition_num; } -void selects_destroy(Selects *selects) { +void selects_destroy(Selects *selects) +{ for (size_t i = 0; i < selects->attr_num; i++) { relation_attr_destroy(&selects->attributes[i]); } @@ -133,8 +146,9 @@ void selects_destroy(Selects *selects) { selects->condition_num = 0; } -void inserts_init(Inserts *inserts, const char *relation_name, Value values[], size_t value_num) { - assert(value_num <= sizeof(inserts->values)/sizeof(inserts->values[0])); +void inserts_init(Inserts *inserts, const char *relation_name, Value values[], size_t value_num) +{ + assert(value_num <= sizeof(inserts->values) / sizeof(inserts->values[0])); inserts->relation_name = strdup(relation_name); for (size_t i = 0; i < value_num; i++) { @@ -142,7 +156,8 @@ void inserts_init(Inserts *inserts, const char *relation_name, Value values[], s } inserts->value_num = value_num; } -void inserts_destroy(Inserts *inserts) { +void inserts_destroy(Inserts *inserts) +{ free(inserts->relation_name); inserts->relation_name = nullptr; @@ -152,18 +167,21 @@ void inserts_destroy(Inserts *inserts) { inserts->value_num = 0; } -void deletes_init_relation(Deletes *deletes, const char *relation_name) { +void deletes_init_relation(Deletes *deletes, const char *relation_name) +{ deletes->relation_name = strdup(relation_name); } -void deletes_set_conditions(Deletes *deletes, Condition conditions[], size_t condition_num) { - assert(condition_num <= sizeof(deletes->conditions)/sizeof(deletes->conditions[0])); +void deletes_set_conditions(Deletes *deletes, Condition conditions[], size_t condition_num) +{ + assert(condition_num <= sizeof(deletes->conditions) / sizeof(deletes->conditions[0])); for (size_t i = 0; i < condition_num; i++) { deletes->conditions[i] = conditions[i]; } deletes->condition_num = condition_num; } -void deletes_destroy(Deletes *deletes) { +void deletes_destroy(Deletes *deletes) +{ for (size_t i = 0; i < deletes->condition_num; i++) { condition_destroy(&deletes->conditions[i]); } @@ -172,20 +190,22 @@ void deletes_destroy(Deletes *deletes) { deletes->relation_name = nullptr; } -void updates_init(Updates *updates, const char *relation_name, const char *attribute_name, - Value *value, Condition conditions[], size_t condition_num) { +void updates_init(Updates *updates, const char *relation_name, const char *attribute_name, Value *value, + Condition conditions[], size_t condition_num) +{ updates->relation_name = strdup(relation_name); updates->attribute_name = strdup(attribute_name); updates->value = *value; - assert(condition_num <= sizeof(updates->conditions)/sizeof(updates->conditions[0])); + assert(condition_num <= sizeof(updates->conditions) / sizeof(updates->conditions[0])); for (size_t i = 0; i < condition_num; i++) { updates->conditions[i] = conditions[i]; } updates->condition_num = condition_num; } -void updates_destroy(Updates *updates) { +void updates_destroy(Updates *updates) +{ free(updates->relation_name); free(updates->attribute_name); updates->relation_name = nullptr; @@ -199,13 +219,18 @@ void updates_destroy(Updates *updates) { updates->condition_num = 0; } -void create_table_append_attribute(CreateTable *create_table, AttrInfo *attr_info) { +void create_table_append_attribute(CreateTable *create_table, AttrInfo *attr_info) +{ create_table->attributes[create_table->attribute_count++] = *attr_info; } 
-void create_table_init_name(CreateTable *create_table, const char *relation_name) { + +void create_table_init_name(CreateTable *create_table, const char *relation_name) +{ create_table->relation_name = strdup(relation_name); } -void create_table_destroy(CreateTable *create_table) { + +void create_table_destroy(CreateTable *create_table) +{ for (size_t i = 0; i < create_table->attribute_count; i++) { attr_info_destroy(&create_table->attributes[i]); } @@ -214,21 +239,27 @@ void create_table_destroy(CreateTable *create_table) { create_table->relation_name = nullptr; } -void drop_table_init(DropTable *drop_table, const char *relation_name) { +void drop_table_init(DropTable *drop_table, const char *relation_name) +{ drop_table->relation_name = strdup(relation_name); } -void drop_table_destroy(DropTable *drop_table) { + +void drop_table_destroy(DropTable *drop_table) +{ free(drop_table->relation_name); drop_table->relation_name = nullptr; } -void create_index_init(CreateIndex *create_index, const char *index_name, - const char *relation_name, const char *attr_name) { +void create_index_init( + CreateIndex *create_index, const char *index_name, const char *relation_name, const char *attr_name) +{ create_index->index_name = strdup(index_name); create_index->relation_name = strdup(relation_name); create_index->attribute_name = strdup(attr_name); } -void create_index_destroy(CreateIndex *create_index) { + +void create_index_destroy(CreateIndex *create_index) +{ free(create_index->index_name); free(create_index->relation_name); free(create_index->attribute_name); @@ -238,24 +269,30 @@ void create_index_destroy(CreateIndex *create_index) { create_index->attribute_name = nullptr; } -void drop_index_init(DropIndex *drop_index, const char *index_name) { +void drop_index_init(DropIndex *drop_index, const char *index_name) +{ drop_index->index_name = strdup(index_name); } -void drop_index_destroy(DropIndex *drop_index) { + +void drop_index_destroy(DropIndex *drop_index) +{ free((char *)drop_index->index_name); drop_index->index_name = nullptr; } -void desc_table_init(DescTable *desc_table, const char *relation_name) { +void desc_table_init(DescTable *desc_table, const char *relation_name) +{ desc_table->relation_name = strdup(relation_name); } -void desc_table_destroy(DescTable *desc_table) { +void desc_table_destroy(DescTable *desc_table) +{ free((char *)desc_table->relation_name); desc_table->relation_name = nullptr; } -void load_data_init(LoadData *load_data, const char *relation_name, const char *file_name) { +void load_data_init(LoadData *load_data, const char *relation_name, const char *file_name) +{ load_data->relation_name = strdup(relation_name); if (file_name[0] == '\'' || file_name[0] == '\"') { @@ -269,19 +306,22 @@ void load_data_init(LoadData *load_data, const char *relation_name, const char * load_data->file_name = dup_file_name; } -void load_data_destroy(LoadData *load_data) { +void load_data_destroy(LoadData *load_data) +{ free((char *)load_data->relation_name); free((char *)load_data->file_name); load_data->relation_name = nullptr; load_data->file_name = nullptr; } -void query_init(Query *query) { +void query_init(Query *query) +{ query->flag = SCF_ERROR; memset(&query->sstr, 0, sizeof(query->sstr)); } -Query *query_create() { +Query *query_create() +{ Query *query = (Query *)malloc(sizeof(Query)); if (nullptr == query) { LOG_ERROR("Failed to alloc memroy for query. 
size=%ld", sizeof(Query)); @@ -292,79 +332,71 @@ Query *query_create() { return query; } -void query_reset(Query *query) { +void query_reset(Query *query) +{ switch (query->flag) { case SCF_SELECT: { selects_destroy(&query->sstr.selection); - } - break; + } break; case SCF_INSERT: { inserts_destroy(&query->sstr.insertion); - } - break; + } break; case SCF_DELETE: { deletes_destroy(&query->sstr.deletion); - } - break; + } break; case SCF_UPDATE: { updates_destroy(&query->sstr.update); - } - break; + } break; case SCF_CREATE_TABLE: { create_table_destroy(&query->sstr.create_table); - } - break; + } break; case SCF_DROP_TABLE: { drop_table_destroy(&query->sstr.drop_table); - } - break; + } break; case SCF_CREATE_INDEX: { create_index_destroy(&query->sstr.create_index); - } - break; + } break; case SCF_DROP_INDEX: { drop_index_destroy(&query->sstr.drop_index); - } - break; + } break; case SCF_SYNC: { - } - break; + } break; case SCF_SHOW_TABLES: - break; + break; case SCF_DESC_TABLE: { desc_table_destroy(&query->sstr.desc_table); - } - break; + } break; case SCF_LOAD_DATA: { load_data_destroy(&query->sstr.load_data); - } - break; + } break; case SCF_BEGIN: case SCF_COMMIT: case SCF_ROLLBACK: case SCF_HELP: case SCF_EXIT: case SCF_ERROR: - break; + break; } } -void query_destroy(Query *query) { +void query_destroy(Query *query) +{ query_reset(query); free(query); } #ifdef __cplusplus -} // extern "C" -#endif // __cplusplus +} // extern "C" +#endif // __cplusplus //////////////////////////////////////////////////////////////////////////////// -extern "C" int sql_parse(const char *st, Query *sqls); +extern "C" int sql_parse(const char *st, Query *sqls); -RC parse(const char *st, Query *sqln) { +RC parse(const char *st, Query *sqln) +{ sql_parse(st, sqln); if (sqln->flag == SCF_ERROR) diff --git a/src/observer/sql/parser/parse.h b/src/observer/sql/parser/parse.h index 92978f6dd4a5e05ef6989785841e1c15817c1378..dc2017a4deefeff7ae287917f1f72b0ca323872e 100644 --- a/src/observer/sql/parser/parse.h +++ b/src/observer/sql/parser/parse.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Longda on 2021/4/13. +// Created by Meiyi // #ifndef __OBSERVER_SQL_PARSER_PARSE_H__ @@ -20,5 +20,4 @@ See the Mulan PSL v2 for more details. */ RC parse(const char *st, Query *sqln); -#endif //__OBSERVER_SQL_PARSER_PARSE_H__ - +#endif //__OBSERVER_SQL_PARSER_PARSE_H__ diff --git a/src/observer/sql/parser/parse_defs.h b/src/observer/sql/parser/parse_defs.h index 1af03894a4e2af31a86c7075e5ebd6515cc4faae..28df590e2757966e06c95e099de27f50bd875005 100644 --- a/src/observer/sql/parser/parse_defs.h +++ b/src/observer/sql/parser/parse_defs.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by wangyunlai.wyl on 2021/6/7. 
+// Created by Meiyi // #ifndef __OBSERVER_SQL_PARSER_PARSE_DEFS_H__ @@ -62,12 +62,12 @@ typedef struct _Condition { // struct of select typedef struct { - size_t attr_num; // Length of attrs in Select clause - RelAttr attributes[MAX_NUM]; // attrs in Select clause - size_t relation_num; // Length of relations in Fro clause - char * relations[MAX_NUM]; // relations in From clause - size_t condition_num; // Length of conditions in Where clause - Condition conditions[MAX_NUM]; // conditions in Where clause + size_t attr_num; // Length of attrs in Select clause + RelAttr attributes[MAX_NUM]; // attrs in Select clause + size_t relation_num; // Length of relations in Fro clause + char *relations[MAX_NUM]; // relations in From clause + size_t condition_num; // Length of conditions in Where clause + Condition conditions[MAX_NUM]; // conditions in Where clause } Selects; // struct of insert diff --git a/src/observer/sql/parser/parse_stage.cpp b/src/observer/sql/parser/parse_stage.cpp index 57c3a94e8bed32b146e04cecdadb62d5818be72a..ede57bf532535c509a6bf99fc09ba4bc845af3fa 100644 --- a/src/observer/sql/parser/parse_stage.cpp +++ b/src/observer/sql/parser/parse_stage.cpp @@ -30,13 +30,16 @@ See the Mulan PSL v2 for more details. */ using namespace common; //! Constructor -ParseStage::ParseStage(const char *tag) : Stage(tag) {} +ParseStage::ParseStage(const char *tag) : Stage(tag) +{} //! Destructor -ParseStage::~ParseStage() {} +ParseStage::~ParseStage() +{} //! Parse properties, instantiate a stage object -Stage *ParseStage::make_stage(const std::string &tag) { +Stage *ParseStage::make_stage(const std::string &tag) +{ ParseStage *stage = new (std::nothrow) ParseStage(tag.c_str()); if (stage == nullptr) { LOG_ERROR("new ParseStage failed"); @@ -47,7 +50,8 @@ Stage *ParseStage::make_stage(const std::string &tag) { } //! Set properties for this object set in stage specific properties -bool ParseStage::set_properties() { +bool ParseStage::set_properties() +{ // std::string stageNameStr(stageName); // std::map section = theGlobalProperties()->get( // stageNameStr); @@ -60,7 +64,8 @@ bool ParseStage::set_properties() { } //! Initialize stage params and validate outputs -bool ParseStage::initialize() { +bool ParseStage::initialize() +{ LOG_TRACE("Enter"); std::list::iterator stgp = next_stage_list_.begin(); @@ -71,13 +76,15 @@ bool ParseStage::initialize() { } //! 
Cleanup after disconnection -void ParseStage::cleanup() { +void ParseStage::cleanup() +{ LOG_TRACE("Enter"); LOG_TRACE("Exit"); } -void ParseStage::handle_event(StageEvent *event) { +void ParseStage::handle_event(StageEvent *event) +{ LOG_TRACE("Enter\n"); StageEvent *new_event = handle_request(event); @@ -101,7 +108,8 @@ void ParseStage::handle_event(StageEvent *event) { return; } -void ParseStage::callback_event(StageEvent *event, CallbackContext *context) { +void ParseStage::callback_event(StageEvent *event, CallbackContext *context) +{ LOG_TRACE("Enter\n"); SQLStageEvent *sql_event = static_cast(event); sql_event->session_event()->done_immediate(); @@ -109,10 +117,11 @@ void ParseStage::callback_event(StageEvent *event, CallbackContext *context) { return; } -StageEvent *ParseStage::handle_request(StageEvent *event) { +StageEvent *ParseStage::handle_request(StageEvent *event) +{ SQLStageEvent *sql_event = static_cast(event); const std::string &sql = sql_event->get_sql(); - + Query *result = query_create(); if (nullptr == result) { LOG_ERROR("Failed to create query."); diff --git a/src/observer/sql/parser/parse_stage.h b/src/observer/sql/parser/parse_stage.h index 4cbebd6bcc0d9c8a827025d0ab6891f145fae906..e708bb04ede18db8ba8284c2481de91f84340a53 100644 --- a/src/observer/sql/parser/parse_stage.h +++ b/src/observer/sql/parser/parse_stage.h @@ -30,13 +30,13 @@ protected: bool initialize(); void cleanup(); void handle_event(common::StageEvent *event); - void callback_event(common::StageEvent *event, - common::CallbackContext *context); + void callback_event(common::StageEvent *event, common::CallbackContext *context); protected: common::StageEvent *handle_request(common::StageEvent *event); + private: Stage *optimize_stage_ = nullptr; }; -#endif //__OBSERVER_SQL_PARSE_STAGE_H__ +#endif //__OBSERVER_SQL_PARSE_STAGE_H__ diff --git a/src/observer/sql/parser/resolve_stage.cpp b/src/observer/sql/parser/resolve_stage.cpp index ea76f2060c18ed8bbccc487eea56afe1f42f09d2..9dc48c29c9e7954719ff41ec2e6ad1735b87fee4 100644 --- a/src/observer/sql/parser/resolve_stage.cpp +++ b/src/observer/sql/parser/resolve_stage.cpp @@ -27,13 +27,16 @@ See the Mulan PSL v2 for more details. */ using namespace common; //! Constructor -ResolveStage::ResolveStage(const char *tag) : Stage(tag) {} +ResolveStage::ResolveStage(const char *tag) : Stage(tag) +{} //! Destructor -ResolveStage::~ResolveStage() {} +ResolveStage::~ResolveStage() +{} //! Parse properties, instantiate a stage object -Stage *ResolveStage::make_stage(const std::string &tag) { +Stage *ResolveStage::make_stage(const std::string &tag) +{ ResolveStage *stage = new (std::nothrow) ResolveStage(tag.c_str()); if (stage == nullptr) { LOG_ERROR("new ResolveStage failed"); @@ -44,7 +47,8 @@ Stage *ResolveStage::make_stage(const std::string &tag) { } //! Set properties for this object set in stage specific properties -bool ResolveStage::set_properties() { +bool ResolveStage::set_properties() +{ // std::string stageNameStr(stage_name_); // std::map section = g_properties()->get( // stageNameStr); @@ -57,7 +61,8 @@ bool ResolveStage::set_properties() { } //! Initialize stage params and validate outputs -bool ResolveStage::initialize() { +bool ResolveStage::initialize() +{ LOG_TRACE("Enter"); std::list::iterator stgp = next_stage_list_.begin(); @@ -68,13 +73,15 @@ bool ResolveStage::initialize() { } //! 
Cleanup after disconnection -void ResolveStage::cleanup() { +void ResolveStage::cleanup() +{ LOG_TRACE("Enter"); LOG_TRACE("Exit"); } -void ResolveStage::handle_event(StageEvent *event) { +void ResolveStage::handle_event(StageEvent *event) +{ LOG_TRACE("Enter\n"); SQLStageEvent *sql_event = static_cast(event); @@ -86,7 +93,8 @@ void ResolveStage::handle_event(StageEvent *event) { return; } -void ResolveStage::callback_event(StageEvent *event, CallbackContext *context) { +void ResolveStage::callback_event(StageEvent *event, CallbackContext *context) +{ LOG_TRACE("Enter\n"); LOG_TRACE("Exit\n"); diff --git a/src/observer/sql/parser/resolve_stage.h b/src/observer/sql/parser/resolve_stage.h index 088f0a7e99e33a7b744201cad01d4c30a66ecbbc..63c52abd5689eda71391d2556e516c301f3da0cc 100644 --- a/src/observer/sql/parser/resolve_stage.h +++ b/src/observer/sql/parser/resolve_stage.h @@ -30,12 +30,11 @@ protected: bool initialize(); void cleanup(); void handle_event(common::StageEvent *event); - void callback_event(common::StageEvent *event, - common::CallbackContext *context); + void callback_event(common::StageEvent *event, common::CallbackContext *context); protected: private: Stage *query_cache_stage = nullptr; }; -#endif //__OBSERVER_SQL_RESOLVE_STAGE_H__ +#endif //__OBSERVER_SQL_RESOLVE_STAGE_H__ diff --git a/src/observer/sql/parser/yacc_sql.tab.h b/src/observer/sql/parser/yacc_sql.tab.h index 9116658a88560e1ac5454bff523f1a6a81c70dac..eccd1da2e77485970df5afb8b4d628ab2ddc4d81 100644 --- a/src/observer/sql/parser/yacc_sql.tab.h +++ b/src/observer/sql/parser/yacc_sql.tab.h @@ -36,10 +36,10 @@ private implementation details that can be changed or removed. */ #ifndef YY_YY_YACC_SQL_TAB_H_INCLUDED -# define YY_YY_YACC_SQL_TAB_H_INCLUDED +#define YY_YY_YACC_SQL_TAB_H_INCLUDED /* Debug traces. */ #ifndef YYDEBUG -# define YYDEBUG 0 +#define YYDEBUG 0 #endif #if YYDEBUG extern int yydebug; @@ -47,69 +47,67 @@ extern int yydebug; /* Token kinds. 
*/ #ifndef YYTOKENTYPE -# define YYTOKENTYPE - enum yytokentype - { - YYEMPTY = -2, - YYEOF = 0, /* "end of file" */ - YYerror = 256, /* error */ - YYUNDEF = 257, /* "invalid token" */ - SEMICOLON = 258, /* SEMICOLON */ - CREATE = 259, /* CREATE */ - DROP = 260, /* DROP */ - TABLE = 261, /* TABLE */ - TABLES = 262, /* TABLES */ - INDEX = 263, /* INDEX */ - SELECT = 264, /* SELECT */ - DESC = 265, /* DESC */ - SHOW = 266, /* SHOW */ - SYNC = 267, /* SYNC */ - INSERT = 268, /* INSERT */ - DELETE = 269, /* DELETE */ - UPDATE = 270, /* UPDATE */ - LBRACE = 271, /* LBRACE */ - RBRACE = 272, /* RBRACE */ - COMMA = 273, /* COMMA */ - TRX_BEGIN = 274, /* TRX_BEGIN */ - TRX_COMMIT = 275, /* TRX_COMMIT */ - TRX_ROLLBACK = 276, /* TRX_ROLLBACK */ - INT_T = 277, /* INT_T */ - STRING_T = 278, /* STRING_T */ - FLOAT_T = 279, /* FLOAT_T */ - HELP = 280, /* HELP */ - EXIT = 281, /* EXIT */ - DOT = 282, /* DOT */ - INTO = 283, /* INTO */ - VALUES = 284, /* VALUES */ - FROM = 285, /* FROM */ - WHERE = 286, /* WHERE */ - AND = 287, /* AND */ - SET = 288, /* SET */ - ON = 289, /* ON */ - LOAD = 290, /* LOAD */ - DATA = 291, /* DATA */ - INFILE = 292, /* INFILE */ - EQ = 293, /* EQ */ - LT = 294, /* LT */ - GT = 295, /* GT */ - LE = 296, /* LE */ - GE = 297, /* GE */ - NE = 298, /* NE */ - NUMBER = 299, /* NUMBER */ - FLOAT = 300, /* FLOAT */ - ID = 301, /* ID */ - PATH = 302, /* PATH */ - SSS = 303, /* SSS */ - STAR = 304, /* STAR */ - STRING_V = 305 /* STRING_V */ - }; - typedef enum yytokentype yytoken_kind_t; +#define YYTOKENTYPE +enum yytokentype { + YYEMPTY = -2, + YYEOF = 0, /* "end of file" */ + YYerror = 256, /* error */ + YYUNDEF = 257, /* "invalid token" */ + SEMICOLON = 258, /* SEMICOLON */ + CREATE = 259, /* CREATE */ + DROP = 260, /* DROP */ + TABLE = 261, /* TABLE */ + TABLES = 262, /* TABLES */ + INDEX = 263, /* INDEX */ + SELECT = 264, /* SELECT */ + DESC = 265, /* DESC */ + SHOW = 266, /* SHOW */ + SYNC = 267, /* SYNC */ + INSERT = 268, /* INSERT */ + DELETE = 269, /* DELETE */ + UPDATE = 270, /* UPDATE */ + LBRACE = 271, /* LBRACE */ + RBRACE = 272, /* RBRACE */ + COMMA = 273, /* COMMA */ + TRX_BEGIN = 274, /* TRX_BEGIN */ + TRX_COMMIT = 275, /* TRX_COMMIT */ + TRX_ROLLBACK = 276, /* TRX_ROLLBACK */ + INT_T = 277, /* INT_T */ + STRING_T = 278, /* STRING_T */ + FLOAT_T = 279, /* FLOAT_T */ + HELP = 280, /* HELP */ + EXIT = 281, /* EXIT */ + DOT = 282, /* DOT */ + INTO = 283, /* INTO */ + VALUES = 284, /* VALUES */ + FROM = 285, /* FROM */ + WHERE = 286, /* WHERE */ + AND = 287, /* AND */ + SET = 288, /* SET */ + ON = 289, /* ON */ + LOAD = 290, /* LOAD */ + DATA = 291, /* DATA */ + INFILE = 292, /* INFILE */ + EQ = 293, /* EQ */ + LT = 294, /* LT */ + GT = 295, /* GT */ + LE = 296, /* LE */ + GE = 297, /* GE */ + NE = 298, /* NE */ + NUMBER = 299, /* NUMBER */ + FLOAT = 300, /* FLOAT */ + ID = 301, /* ID */ + PATH = 302, /* PATH */ + SSS = 303, /* SSS */ + STAR = 304, /* STAR */ + STRING_V = 305 /* STRING_V */ +}; +typedef enum yytokentype yytoken_kind_t; #endif /* Value type. */ -#if ! defined YYSTYPE && ! 
defined YYSTYPE_IS_DECLARED -union YYSTYPE -{ +#if !defined YYSTYPE && !defined YYSTYPE_IS_DECLARED +union YYSTYPE { #line 106 "yacc_sql.y" struct _Attr *attr; @@ -118,18 +116,15 @@ union YYSTYPE char *string; int number; float floats; - char *position; + char *position; #line 124 "yacc_sql.tab.h" - }; typedef union YYSTYPE YYSTYPE; -# define YYSTYPE_IS_TRIVIAL 1 -# define YYSTYPE_IS_DECLARED 1 +#define YYSTYPE_IS_TRIVIAL 1 +#define YYSTYPE_IS_DECLARED 1 #endif - - -int yyparse (void *scanner); +int yyparse(void *scanner); #endif /* !YY_YY_YACC_SQL_TAB_H_INCLUDED */ diff --git a/src/observer/sql/plan_cache/plan_cache_stage.cpp b/src/observer/sql/plan_cache/plan_cache_stage.cpp index 0c0efc1ffeb8c099095beaad39502dc26807bfc1..c2c1c3d2fdd30c2bd5f8ffb4a63b1b779b6be9f3 100644 --- a/src/observer/sql/plan_cache/plan_cache_stage.cpp +++ b/src/observer/sql/plan_cache/plan_cache_stage.cpp @@ -26,13 +26,16 @@ See the Mulan PSL v2 for more details. */ using namespace common; //! Constructor -PlanCacheStage::PlanCacheStage(const char *tag) : Stage(tag) {} +PlanCacheStage::PlanCacheStage(const char *tag) : Stage(tag) +{} //! Destructor -PlanCacheStage::~PlanCacheStage() {} +PlanCacheStage::~PlanCacheStage() +{} //! Parse properties, instantiate a stage object -Stage *PlanCacheStage::make_stage(const std::string &tag) { +Stage *PlanCacheStage::make_stage(const std::string &tag) +{ PlanCacheStage *stage = new (std::nothrow) PlanCacheStage(tag.c_str()); if (stage == nullptr) { LOG_ERROR("new PlanCacheStage failed"); @@ -43,7 +46,8 @@ Stage *PlanCacheStage::make_stage(const std::string &tag) { } //! Set properties for this object set in stage specific properties -bool PlanCacheStage::set_properties() { +bool PlanCacheStage::set_properties() +{ // std::string stageNameStr(stage_name_); // std::map section = g_properties()->get( // stageNameStr); @@ -56,7 +60,8 @@ bool PlanCacheStage::set_properties() { } //! Initialize stage params and validate outputs -bool PlanCacheStage::initialize() { +bool PlanCacheStage::initialize() +{ LOG_TRACE("Enter"); std::list::iterator stgp = next_stage_list_.begin(); @@ -68,13 +73,15 @@ bool PlanCacheStage::initialize() { } //! 
Cleanup after disconnection -void PlanCacheStage::cleanup() { +void PlanCacheStage::cleanup() +{ LOG_TRACE("Enter"); LOG_TRACE("Exit"); } -void PlanCacheStage::handle_event(StageEvent *event) { +void PlanCacheStage::handle_event(StageEvent *event) +{ LOG_TRACE("Enter\n"); // Add callback to update plan cache @@ -95,8 +102,8 @@ void PlanCacheStage::handle_event(StageEvent *event) { return; } -void PlanCacheStage::callback_event(StageEvent *event, - CallbackContext *context) { +void PlanCacheStage::callback_event(StageEvent *event, CallbackContext *context) +{ LOG_TRACE("Enter\n"); // update execute plan here diff --git a/src/observer/sql/plan_cache/plan_cache_stage.h b/src/observer/sql/plan_cache/plan_cache_stage.h index 7a1bd5dcc481d0b109c6020bd7d2a196b7589147..ebafeef6a21a5fa4d659c313a4f70ea19dfba0cf 100644 --- a/src/observer/sql/plan_cache/plan_cache_stage.h +++ b/src/observer/sql/plan_cache/plan_cache_stage.h @@ -30,8 +30,7 @@ protected: bool initialize(); void cleanup(); void handle_event(common::StageEvent *event); - void callback_event(common::StageEvent *event, - common::CallbackContext *context); + void callback_event(common::StageEvent *event, common::CallbackContext *context); protected: private: @@ -39,4 +38,4 @@ private: Stage *execute_stage = nullptr; }; -#endif //__OBSERVER_SQL_PLAN_CACHE_STAGE_H__ +#endif //__OBSERVER_SQL_PLAN_CACHE_STAGE_H__ diff --git a/src/observer/sql/query_cache/query_cache_stage.cpp b/src/observer/sql/query_cache/query_cache_stage.cpp index 7f7b89c5655e4c76b18d9fb5699ab8da23895fcc..650f2b232ffd5f0ed581dd3efaa43c277eea8b5e 100644 --- a/src/observer/sql/query_cache/query_cache_stage.cpp +++ b/src/observer/sql/query_cache/query_cache_stage.cpp @@ -26,13 +26,16 @@ See the Mulan PSL v2 for more details. */ using namespace common; //! Constructor -QueryCacheStage::QueryCacheStage(const char *tag) : Stage(tag) {} +QueryCacheStage::QueryCacheStage(const char *tag) : Stage(tag) +{} //! Destructor -QueryCacheStage::~QueryCacheStage() {} +QueryCacheStage::~QueryCacheStage() +{} //! Parse properties, instantiate a stage object -Stage *QueryCacheStage::make_stage(const std::string &tag) { +Stage *QueryCacheStage::make_stage(const std::string &tag) +{ QueryCacheStage *stage = new (std::nothrow) QueryCacheStage(tag.c_str()); if (stage == nullptr) { LOG_ERROR("new QueryCacheStage failed"); @@ -43,7 +46,8 @@ Stage *QueryCacheStage::make_stage(const std::string &tag) { } //! Set properties for this object set in stage specific properties -bool QueryCacheStage::set_properties() { +bool QueryCacheStage::set_properties() +{ // std::string stageNameStr(stage_name_); // std::map section = g_properties()->get( // stageNameStr); @@ -56,7 +60,8 @@ bool QueryCacheStage::set_properties() { } //! Initialize stage params and validate outputs -bool QueryCacheStage::initialize() { +bool QueryCacheStage::initialize() +{ LOG_TRACE("Enter"); std::list::iterator stgp = next_stage_list_.begin(); @@ -67,13 +72,15 @@ bool QueryCacheStage::initialize() { } //! 
Cleanup after disconnection -void QueryCacheStage::cleanup() { +void QueryCacheStage::cleanup() +{ LOG_TRACE("Enter"); LOG_TRACE("Exit"); } -void QueryCacheStage::handle_event(StageEvent *event) { +void QueryCacheStage::handle_event(StageEvent *event) +{ LOG_TRACE("Enter\n"); // Add callback to update query cache @@ -94,8 +101,8 @@ void QueryCacheStage::handle_event(StageEvent *event) { return; } -void QueryCacheStage::callback_event(StageEvent *event, - CallbackContext *context) { +void QueryCacheStage::callback_event(StageEvent *event, CallbackContext *context) +{ LOG_TRACE("Enter\n"); // update data to query cache here diff --git a/src/observer/sql/query_cache/query_cache_stage.h b/src/observer/sql/query_cache/query_cache_stage.h index f642f6e03ea072075aba38b73b3846dff7f5812d..27731bb532125c653455cabdb75e19640dafbb7c 100644 --- a/src/observer/sql/query_cache/query_cache_stage.h +++ b/src/observer/sql/query_cache/query_cache_stage.h @@ -30,12 +30,11 @@ protected: bool initialize(); void cleanup(); void handle_event(common::StageEvent *event); - void callback_event(common::StageEvent *event, - common::CallbackContext *context); + void callback_event(common::StageEvent *event, common::CallbackContext *context); protected: private: Stage *plan_cache_stage = nullptr; }; -#endif //__OBSERVER_SQL_QUERY_CACHE_STAGE_H__ +#endif //__OBSERVER_SQL_QUERY_CACHE_STAGE_H__ diff --git a/src/observer/storage/common/bplus_tree.cpp b/src/observer/storage/common/bplus_tree.cpp deleted file mode 100644 index 6f3d93e78b615c3c776ed0ce87a39c75e3873e98..0000000000000000000000000000000000000000 --- a/src/observer/storage/common/bplus_tree.cpp +++ /dev/null @@ -1,2411 +0,0 @@ -/* Copyright (c) 2021 Xie Meiyi(xiemeiyi@hust.edu.cn) and OceanBase and/or its affiliates. All rights reserved. -miniob is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. -You may obtain a copy of Mulan PSL v2 at: - http://license.coscl.org.cn/MulanPSL2 -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. */ - -// -// Created by Xie Meiyi -// -#include "storage/common/bplus_tree.h" -#include "storage/default/disk_buffer_pool.h" -#include "rc.h" -#include "common/log/log.h" -#include "sql/parser/parse_defs.h" - -#define FIRST_INDEX_PAGE 1 - -int float_compare(float f1, float f2) -{ - float result = f1 - f2; - if (-1e-6 < result && result < 1e-6) { - return 0; - } - return result > 0 ? 
1 : -1; -} - -int attribute_comp(const char *first, const char *second, AttrType attr_type, int attr_length) -{ // 简化 - int i1, i2; - float f1, f2; - const char *s1, *s2; - switch (attr_type) { - case INTS: { - i1 = *(int *)first; - i2 = *(int *)second; - return i1 - i2; - } break; - case FLOATS: { - f1 = *(float *)first; - f2 = *(float *)second; - return float_compare(f1, f2); - } break; - case CHARS: { - s1 = first; - s2 = second; - return strncmp(s1, s2, attr_length); - } break; - default: { - LOG_PANIC("Unknown attr type: %d", attr_type); - } - } - return -2; // This means error happens -} -int key_compare(AttrType attr_type, int attr_length, const char *first, const char *second) -{ - int result = attribute_comp(first, second, attr_type, attr_length); - if (0 != result) { - return result; - } - RID *rid1 = (RID *)(first + attr_length); - RID *rid2 = (RID *)(second + attr_length); - return RID::compare(rid1, rid2); -} - -int get_page_index_capacity(int attr_length) -{ - - int capacity = - ((int)BP_PAGE_DATA_SIZE - sizeof(IndexFileHeader) - sizeof(IndexNode)) / (attr_length + 2 * sizeof(RID)); - // Here is some tricks - // 1. reserver one pair of kV for insert operation - // 2. make sure capacity % 2 == 0, otherwise it is likeyly to occur problem when split node - capacity = ((capacity - RECORD_RESERVER_PAIR_NUM) / 2) * 2; - return capacity; -} - -IndexNode *BplusTreeHandler::get_index_node(char *page_data) const -{ - IndexNode *node = (IndexNode *)(page_data + sizeof(IndexFileHeader)); - node->keys = (char *)node + sizeof(IndexNode); - node->rids = (RID *)(node->keys + (file_header_.order + RECORD_RESERVER_PAIR_NUM) * file_header_.key_length); - return node; -} - -RC BplusTreeHandler::sync() -{ - return disk_buffer_pool_->purge_all_pages(file_id_); -} - -RC BplusTreeHandler::create(const char *file_name, AttrType attr_type, int attr_length) -{ - DiskBufferPool *disk_buffer_pool = theGlobalDiskBufferPool(); - RC rc = disk_buffer_pool->create_file(file_name); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to create file. file name=%s, rc=%d:%s", file_name, rc, strrc(rc)); - return rc; - } - LOG_INFO("Successfully create index file:%s", file_name); - - int file_id; - rc = disk_buffer_pool->open_file(file_name, &file_id); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to open file. file name=%s, rc=%d:%s", file_name, rc, strrc(rc)); - return rc; - } - LOG_INFO("Successfully open index file %s.", file_name); - - rc = disk_buffer_pool->allocate_page(file_id, &root_page_handle_); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to allocate page. file name=%s, rc=%d:%s", file_name, rc, strrc(rc)); - disk_buffer_pool->close_file(file_id); - return rc; - } - - char *pdata; - rc = disk_buffer_pool->get_data(&root_page_handle_, &pdata); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to get data. file name=%s, rc=%d:%s", file_name, rc, strrc(rc)); - disk_buffer_pool->close_file(file_id); - return rc; - } - - PageNum page_num; - rc = disk_buffer_pool->get_page_num(&root_page_handle_, &page_num); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to get page num. 
file name=%s, rc=%d:%s", file_name, rc, strrc(rc)); - disk_buffer_pool->close_file(file_id); - return rc; - } - - IndexFileHeader *file_header = (IndexFileHeader *)pdata; - file_header->attr_length = attr_length; - file_header->key_length = attr_length + sizeof(RID); - file_header->attr_type = attr_type; - file_header->order = get_page_index_capacity(attr_length); - file_header->root_page = page_num; - - root_node_ = get_index_node(pdata); - root_node_->init_empty(*file_header); - - disk_buffer_pool->mark_dirty(&root_page_handle_); - - disk_buffer_pool_ = disk_buffer_pool; - file_id_ = file_id; - - memcpy(&file_header_, pdata, sizeof(file_header_)); - header_dirty_ = false; - - mem_pool_item_ = new common::MemPoolItem(file_name); - if (mem_pool_item_->init(file_header->key_length) < 0) { - LOG_WARN("Failed to init memory pool for index %s", file_name); - close(); - return RC::NOMEM; - } - - LOG_INFO("Successfully create index %s", file_name); - return RC::SUCCESS; -} - -RC BplusTreeHandler::open(const char *file_name) -{ - if (file_id_ > 0) { - LOG_WARN("%s has been opened before index.open.", file_name); - return RC::RECORD_OPENNED; - } - - DiskBufferPool *disk_buffer_pool = theGlobalDiskBufferPool(); - int file_id = 0; - RC rc = disk_buffer_pool->open_file(file_name, &file_id); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to open file name=%s, rc=%d:%s", file_name, rc, strrc(rc)); - return rc; - } - - BPPageHandle page_handle; - rc = disk_buffer_pool->get_this_page(file_id, FIRST_INDEX_PAGE, &page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to get first page file name=%s, rc=%d:%s", file_name, rc, strrc(rc)); - disk_buffer_pool_->close_file(file_id); - return rc; - } - - char *pdata; - rc = disk_buffer_pool->get_data(&page_handle, &pdata); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to get first page data. file name=%s, rc=%d:%s", file_name, rc, strrc(rc)); - disk_buffer_pool_->close_file(file_id); - return rc; - } - - memcpy(&file_header_, pdata, sizeof(IndexFileHeader)); - header_dirty_ = false; - disk_buffer_pool_ = disk_buffer_pool; - file_id_ = file_id; - - mem_pool_item_ = new common::MemPoolItem(file_name); - if (mem_pool_item_->init(file_header_.key_length) < 0) { - LOG_WARN("Failed to init memory pool for index %s", file_name); - close(); - return RC::NOMEM; - } - - if (file_header_.root_page == FIRST_INDEX_PAGE) { - root_node_ = get_index_node(pdata); - root_page_handle_ = page_handle; - - LOG_INFO("Successfully open index %s", file_name); - return RC::SUCCESS; - } - - // close old page_handle - disk_buffer_pool->unpin_page(&page_handle); - - LOG_INFO("Begin to load root page of index:%s, root_page:%d.", file_name, file_header_.root_page); - rc = disk_buffer_pool->get_this_page(file_id, file_header_.root_page, &root_page_handle_); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to get first page file name=%s, rc=%d:%s", file_name, rc, strrc(rc)); - disk_buffer_pool_->close_file(file_id); - return rc; - } - - rc = disk_buffer_pool->get_data(&root_page_handle_, &pdata); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to get first page data. 
file name=%s, rc=%d:%s", file_name, rc, strrc(rc)); - disk_buffer_pool_->close_file(file_id); - return rc; - } - root_node_ = get_index_node(pdata); - - LOG_INFO("Successfully open index %s", file_name); - return RC::SUCCESS; -} - -RC BplusTreeHandler::close() -{ - if (file_id_ != -1) { - disk_buffer_pool_->unpin_page(&root_page_handle_); - root_node_ = nullptr; - - disk_buffer_pool_->close_file(file_id_); - file_id_ = -1; - - delete mem_pool_item_; - mem_pool_item_ = nullptr; - } - - disk_buffer_pool_ = nullptr; - return RC::SUCCESS; -} - -RC BplusTreeHandler::print_node(IndexNode *node, PageNum page_num) -{ - LOG_INFO("PageNum:%d, node {%s}\n", page_num, node->to_string(file_header_).c_str()); - - if (node->is_leaf == false) { - for (int i = 0; i <= node->key_num; i++) { - PageNum child_page_num = node->rids[i].page_num; - BPPageHandle page_handle; - RC rc = disk_buffer_pool_->get_this_page(file_id_, child_page_num, &page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to load page file_id:%d, page_num:%d", file_id_, child_page_num); - continue; - } - - char *pdata; - disk_buffer_pool_->get_data(&page_handle, &pdata); - IndexNode *child = get_index_node(pdata); - print_node(child, child_page_num); - disk_buffer_pool_->unpin_page(&page_handle); - } - } - - return RC::SUCCESS; -} - -RC BplusTreeHandler::print_tree() -{ - if (file_id_ < 0) { - LOG_WARN("Index hasn't been created or opened, fail to print"); - return RC::SUCCESS; - } - - int page_count; - RC rc = disk_buffer_pool_->get_page_count(file_id_, &page_count); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to get page count of index %d", file_id_); - return rc; - } - - LOG_INFO("\n\n\n !!!! Begin to print index %s:%d, page_count:%d, file_header:%s\n\n\n", - mem_pool_item_->get_name().c_str(), - file_id_, - page_count, - file_header_.to_string().c_str()); - - print_node(root_node_, file_header_.root_page); - return RC::SUCCESS; -} - -RC BplusTreeHandler::print_leafs() -{ - PageNum page_num; - get_first_leaf_page(&page_num); - - IndexNode *node; - BPPageHandle page_handle; - RC rc ; - while(page_num != -1) { - rc = disk_buffer_pool_->get_this_page(file_id_, page_num, &page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to print leafs, due to failed to load. 
"); - return rc; - } - char *pdata; - disk_buffer_pool_->get_data(&page_handle, &pdata); - node = get_index_node(pdata); - LOG_INFO("Page:%d, Node:%s", page_num, node->to_string(file_header_).c_str()); - page_num = node->next_brother; - disk_buffer_pool_->unpin_page(&page_handle); - } - - return RC::SUCCESS; -} - -bool BplusTreeHandler::validate_node(IndexNode *node) -{ - if (node->key_num > file_header_.order) { - LOG_WARN("NODE %s 's key number is invalid", node->to_string(file_header_).c_str()); - return false; - } - if (node->parent != -1) { - if (node->key_num < file_header_.order / 2) { - LOG_WARN("NODE %s 's key number is invalid", node->to_string(file_header_).c_str()); - return false; - } - } else { - // node is root - if (node->is_leaf == false) { - - if (node->key_num < 1) { - LOG_WARN("NODE %s 's key number is invalid", node->to_string(file_header_).c_str()); - return false; - } - } - } - - if (node->is_leaf && node->prev_brother != -1) { - char *first_key = node->keys; - bool found = false; - - PageNum parent_page = node->parent; - while (parent_page != -1) { - BPPageHandle parent_handle; - RC rc = disk_buffer_pool_->get_this_page(file_id_, parent_page, &parent_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to check parent's keys, file_id:%d", file_id_); - - return false; - } - - char *pdata; - disk_buffer_pool_->get_data(&parent_handle, &pdata); - IndexNode *parent = get_index_node(pdata); - for (int i = 0; i < parent->key_num; i++) { - char *cur_key = parent->keys + i * file_header_.key_length; - int tmp = key_compare(file_header_.attr_type, file_header_.attr_length, first_key, cur_key); - if (tmp == 0) { - found = true; - - break; - } else if (tmp < 0) { - break; - } - } - disk_buffer_pool_->unpin_page(&parent_handle); - if (found == true) { - break; - } - - parent_page = parent->parent; - } - - if (found == false) { - LOG_WARN("Failed to find leaf's first key in internal node. 
leaf:%s, file_id:%d", - node->to_string(file_header_).c_str(), - file_id_); - return false; - } - } - - bool ret = false; - char *last_key = node->keys; - char *cur_key; - for (int i = 0; i < node->key_num; i++) { - int tmp; - cur_key = node->keys + i * file_header_.key_length; - if (i > 0) { - tmp = key_compare(file_header_.attr_type, file_header_.attr_length, cur_key, last_key); - if (tmp < 0) { - LOG_WARN("NODE %s 's key sequence is wrong", node->to_string(file_header_).c_str()); - return false; - } - } - last_key = cur_key; - if (node->is_leaf) { - continue; - } - - PageNum child_page = node->rids[i].page_num; - BPPageHandle child_handle; - RC rc = disk_buffer_pool_->get_this_page(file_id_, child_page, &child_handle); - if (rc != RC::SUCCESS) { - LOG_WARN( - "Failed to validte node's child %d, file_id:%d, node:%s", i, file_id_, node->to_string(file_header_).c_str()); - continue; - } - char *pdata; - disk_buffer_pool_->get_data(&child_handle, &pdata); - IndexNode *child = get_index_node(pdata); - - char *child_last_key = child->keys + (child->key_num - 1) * file_header_.key_length; - tmp = key_compare(file_header_.attr_type, file_header_.attr_length, cur_key, child_last_key); - if (tmp <= 0) { - LOG_WARN("Child's last key is bigger than current key, child:%s, current:%s, file_id:%d", - child->to_string(file_header_).c_str(), - node->to_string(file_header_).c_str(), - file_id_); - disk_buffer_pool_->unpin_page(&child_handle); - return false; - } - - ret = validate_node(child); - if (ret == false) { - disk_buffer_pool_->unpin_page(&child_handle); - return false; - } - - BPPageHandle next_child_handle; - PageNum next_child_page = node->rids[i + 1].page_num; - rc = disk_buffer_pool_->get_this_page(file_id_, next_child_page, &next_child_handle); - if (rc != RC::SUCCESS) { - LOG_WARN( - "Failed to validte node's child %d, file_id:%d, node:%s", i, file_id_, node->to_string(file_header_).c_str()); - disk_buffer_pool_->unpin_page(&child_handle); - continue; - } - disk_buffer_pool_->get_data(&next_child_handle, &pdata); - IndexNode *next_child = get_index_node(pdata); - - char *first_next_child_key = next_child->keys; - tmp = key_compare(file_header_.attr_type, file_header_.attr_length, cur_key, first_next_child_key); - if (next_child->is_leaf) { - if (tmp != 0) { - LOG_WARN("Next child's first key isn't equal current key, next_child:%s, current:%s, file_id:%d", - next_child->to_string(file_header_).c_str(), - node->to_string(file_header_).c_str(), - file_id_); - disk_buffer_pool_->unpin_page(&next_child_handle); - disk_buffer_pool_->unpin_page(&child_handle); - return false; - } - } else { - if (tmp >= 0) { - LOG_WARN("Next child's first key isn't equal current key, next_child:%s, current:%s, file_id:%d", - next_child->to_string(file_header_).c_str(), - node->to_string(file_header_).c_str(), - file_id_); - disk_buffer_pool_->unpin_page(&next_child_handle); - disk_buffer_pool_->unpin_page(&child_handle); - return false; - } - } - - if (i == node->key_num - 1) { - ret = validate_node(next_child); - if (ret == false) { - LOG_WARN("Next child is invalid, next_child:%s, current:%s, file_id:%d", - next_child->to_string(file_header_).c_str(), - node->to_string(file_header_).c_str(), - file_id_); - disk_buffer_pool_->unpin_page(&next_child_handle); - disk_buffer_pool_->unpin_page(&child_handle); - return false; - } - } - if (child->is_leaf) { - if (child->next_brother != next_child_page || next_child->prev_brother != child_page) { - LOG_WARN("The child 's next brother or the next child's previous 
brother isn't correct, child:%s, " - "next_child:%s, file_id:%d", - child->to_string(file_header_).c_str(), - next_child->to_string(file_header_).c_str(), - file_id_); - disk_buffer_pool_->unpin_page(&next_child_handle); - disk_buffer_pool_->unpin_page(&child_handle); - return false; - } - } - disk_buffer_pool_->unpin_page(&next_child_handle); - disk_buffer_pool_->unpin_page(&child_handle); - } - - return true; -} - -bool BplusTreeHandler::validate_leaf_link() -{ - BPPageHandle first_leaf_handle; - IndexNode *first_leaf = root_node_; - PageNum first_page; - RC rc; - - while (first_leaf->is_leaf == false) { - if (first_leaf_handle.open) { - disk_buffer_pool_->unpin_page(&first_leaf_handle); - } - first_page = first_leaf->rids[0].page_num; - rc = disk_buffer_pool_->get_this_page(file_id_, first_page, &first_leaf_handle); - if (rc != RC::SUCCESS) { - return false; - } - - char *pdata; - disk_buffer_pool_->get_data(&first_leaf_handle, &pdata); - first_leaf = get_index_node(pdata); - } - - if (first_leaf_handle.open == false) { - // only root node - if (first_leaf->prev_brother != -1 || first_leaf->next_brother != -1) { - LOG_WARN("root node is the only node, but either root node's previous brother or next brother is wrong, root:%s, " - "file_id:%s", - first_leaf->to_string(file_header_).c_str(), - file_id_); - return false; - } - return true; - } - - if (first_leaf->prev_brother != -1 || first_leaf->next_brother == -1) { - LOG_WARN("First leaf is invalid, node:%s, file_id:%d", first_leaf->to_string(file_header_).c_str(), file_id_); - disk_buffer_pool_->unpin_page(&first_leaf_handle); - return false; - } - - BPPageHandle last_leaf_handle; - IndexNode *last_leaf = root_node_; - PageNum last_page = -1; - - while (last_leaf->is_leaf == false) { - if (last_leaf_handle.open) { - disk_buffer_pool_->unpin_page(&last_leaf_handle); - } - last_page = last_leaf->rids[last_leaf->key_num].page_num; - rc = disk_buffer_pool_->get_this_page(file_id_, last_page, &last_leaf_handle); - if (rc != RC::SUCCESS) { - disk_buffer_pool_->unpin_page(&first_leaf_handle); - return false; - } - - char *pdata; - disk_buffer_pool_->get_data(&last_leaf_handle, &pdata); - last_leaf = get_index_node(pdata); - } - - if (last_page == -1) { - LOG_WARN( - "The last leaf is invalid, last leaf is root:%s, file_id:%d", last_leaf->to_string(file_header_).c_str(), file_id_); - disk_buffer_pool_->unpin_page(&first_leaf_handle); - return false; - } - - if (last_leaf->next_brother != -1 || last_leaf->prev_brother == -1) { - LOG_WARN( - "The last leaf is invalid, last leaf:%s, file_id:%d", last_leaf->to_string(file_header_).c_str(), file_id_); - disk_buffer_pool_->unpin_page(&first_leaf_handle); - disk_buffer_pool_->unpin_page(&last_leaf_handle); - return false; - } - - std::set leaf_pages; - leaf_pages.insert(first_page); - - BPPageHandle current_handle; - IndexNode *cur_node = first_leaf; - PageNum cur_page = first_page; - - BPPageHandle next_handle; - IndexNode *next_node = nullptr; - PageNum next_page = cur_node->next_brother; - - bool found = false; - bool ret = false; - - while (next_page != -1) { - rc = disk_buffer_pool_->get_this_page(file_id_, next_page, &next_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to check leaf link "); - goto cleanup; - } - - char *pdata; - disk_buffer_pool_->get_data(&next_handle, &pdata); - next_node = get_index_node(pdata); - - if (cur_node->next_brother != next_page || next_node->prev_brother != cur_page) { - LOG_WARN("The leaf 's next brother or the next leaf's previous brother isn't correct, 
child:%s, next_child:%s, " - "file_id:%d", - cur_node->to_string(file_header_).c_str(), - next_node->to_string(file_header_).c_str(), - file_id_); - disk_buffer_pool_->unpin_page(&next_handle); - goto cleanup; - } - - if (next_page == last_page) { - found = true; - disk_buffer_pool_->unpin_page(&next_handle); - break; - } - - if (leaf_pages.find(next_page) != leaf_pages.end()) { - LOG_WARN( - "Leaf links occur loop, current node:%s, file_id:%d", cur_node->to_string(file_header_).c_str(), file_id_); - disk_buffer_pool_->unpin_page(&next_handle); - goto cleanup; - } else { - leaf_pages.insert(next_page); - } - - if (current_handle.open) { - disk_buffer_pool_->unpin_page(¤t_handle); - } - current_handle = next_handle; - cur_node = next_node; - cur_page = next_page; - next_page = cur_node->next_brother; - } - - if (found == true) { - ret = true; - } - -cleanup: - if (first_leaf_handle.open) { - disk_buffer_pool_->unpin_page(&first_leaf_handle); - } - - if (last_leaf_handle.open) { - disk_buffer_pool_->unpin_page(&last_leaf_handle); - } - - if (current_handle.open) { - disk_buffer_pool_->unpin_page(¤t_handle); - } - - return ret; -} - -bool BplusTreeHandler::validate_tree() -{ - IndexNode *node = root_node_; - if (validate_node(node) == false || validate_leaf_link() == false) { - LOG_WARN("Current B+ Tree is invalid"); - print_tree(); - return false; - } - return true; -} - -RC BplusTreeHandler::find_leaf(const char *pkey, PageNum *leaf_page) -{ - BPPageHandle page_handle; - IndexNode *node = root_node_; - while (false == node->is_leaf) { - char *pdata; - int i; - for (i = 0; i < node->key_num; i++) { - int tmp = - key_compare(file_header_.attr_type, file_header_.attr_length, pkey, node->keys + i * file_header_.key_length); - if (tmp < 0) - break; - } - - if (page_handle.open == true) { - disk_buffer_pool_->unpin_page(&page_handle); - } - - RC rc = disk_buffer_pool_->get_this_page(file_id_, node->rids[i].page_num, &page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to load page file_id:%d, page_num:%d", file_id_, node->rids[i].page_num); - return rc; - } - disk_buffer_pool_->get_data(&page_handle, &pdata); - - node = get_index_node(pdata); - } - - if (page_handle.open == false) { - *leaf_page = file_header_.root_page; - return RC::SUCCESS; - } - - disk_buffer_pool_->get_page_num(&page_handle, leaf_page); - disk_buffer_pool_->unpin_page(&page_handle); - - return RC::SUCCESS; -} - -RC BplusTreeHandler::insert_entry_into_node(IndexNode *node, const char *pkey, const RID *rid, PageNum left_page) -{ - int insert_pos = 0, tmp; - - for (; insert_pos < node->key_num; insert_pos++) { - tmp = key_compare( - file_header_.attr_type, file_header_.attr_length, pkey, node->keys + insert_pos * file_header_.key_length); - if (tmp == 0) { - LOG_TRACE("Insert into %d occur duplicated key, rid:%s.", file_id_, node->rids[insert_pos].to_string().c_str()); - return RC::RECORD_DUPLICATE_KEY; - } - if (tmp < 0) - break; - } - - char *from = node->keys + insert_pos * file_header_.key_length; - char *to = from + file_header_.key_length; - int len = (node->key_num - insert_pos) * file_header_.key_length; - memmove(to, from, len); - memcpy(node->keys + insert_pos * file_header_.key_length, pkey, file_header_.key_length); - - if (node->is_leaf) { - len = (node->key_num - insert_pos) * sizeof(RID); - memmove(node->rids + insert_pos + 1, node->rids + insert_pos, len); - memcpy(node->rids + insert_pos, rid, sizeof(RID)); - - change_leaf_parent_key_insert(node, insert_pos, left_page); - } else { - - len = 
(node->key_num - insert_pos) * sizeof(RID); - memmove(node->rids + insert_pos + 2, node->rids + insert_pos + 1, len); - memcpy(node->rids + insert_pos + 1, rid, sizeof(RID)); - } - - node->key_num++; //叶子结点增加一条记录 - return RC::SUCCESS; -} - -RC BplusTreeHandler::split_leaf(BPPageHandle &leaf_page_handle) -{ - PageNum leaf_page; - disk_buffer_pool_->get_page_num(&leaf_page_handle, &leaf_page); - - char *pdata; - RC rc = disk_buffer_pool_->get_data(&leaf_page_handle, &pdata); - if (rc != RC::SUCCESS) { - return rc; - } - IndexNode *old_node = get_index_node(pdata); - - char *new_parent_key = (char *)mem_pool_item_->alloc(); - if (new_parent_key == nullptr) { - LOG_WARN("Failed to alloc memory for new key. size=%d", file_header_.key_length); - return RC::NOMEM; - } - - // add a new node - BPPageHandle page_handle2; - rc = disk_buffer_pool_->allocate_page(file_id_, &page_handle2); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to split index page due to failed to allocate page, file_id:%d ", file_id_); - return rc; - } - PageNum new_page; - disk_buffer_pool_->get_page_num(&page_handle2, &new_page); - disk_buffer_pool_->get_data(&page_handle2, &pdata); - IndexNode *new_node = get_index_node(pdata); - new_node->init_empty(file_header_); - new_node->parent = old_node->parent; - new_node->prev_brother = leaf_page; - new_node->next_brother = old_node->next_brother; - old_node->next_brother = new_page; - - // begin to move data from leaf_node to new_node - split_node(old_node, new_node, leaf_page, new_page, new_parent_key); - disk_buffer_pool_->mark_dirty(&leaf_page_handle); - disk_buffer_pool_->mark_dirty(&page_handle2); - - PageNum parent_page = old_node->parent; - rc = insert_into_parent(parent_page, leaf_page_handle, new_parent_key, page_handle2); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to insert into parent of index %d", file_id_); - // restore status before insert into parent - // merge_nodes function will move left node into right node - merge_nodes(old_node, new_node, new_page, new_parent_key); - copy_node(old_node, new_node); - change_insert_leaf_link(old_node, new_node, leaf_page); - - mem_pool_item_->free(new_parent_key); - disk_buffer_pool_->unpin_page(&page_handle2); - disk_buffer_pool_->dispose_page(file_id_, new_page); - return rc; - } - mem_pool_item_->free(new_parent_key); - disk_buffer_pool_->unpin_page(&page_handle2); - return RC::SUCCESS; -} - -RC BplusTreeHandler::insert_intern_node( - BPPageHandle &parent_page_handle, BPPageHandle &left_page_handle, BPPageHandle &right_page_handle, const char *pkey) -{ - PageNum left_page; - disk_buffer_pool_->get_page_num(&left_page_handle, &left_page); - - PageNum right_page; - disk_buffer_pool_->get_page_num(&right_page_handle, &right_page); - - char *pdata; - RC rc = disk_buffer_pool_->get_data(&parent_page_handle, &pdata); - if (rc != RC::SUCCESS) { - return rc; - } - - IndexNode *node = get_index_node(pdata); - - RID rid; - rid.page_num = right_page; - rid.slot_num = BP_INVALID_PAGE_NUM; // change to invalid page num - - insert_entry_into_node(node, pkey, &rid, right_page); - - disk_buffer_pool_->mark_dirty(&parent_page_handle); - - return RC::SUCCESS; -} - -RC BplusTreeHandler::split_intern_node(BPPageHandle &inter_page_handle, const char *pkey) -{ - PageNum inter_page_num; - disk_buffer_pool_->get_page_num(&inter_page_handle, &inter_page_num); - - char *pdata; - RC rc = disk_buffer_pool_->get_data(&inter_page_handle, &pdata); - if (rc != RC::SUCCESS) { - return rc; - } - - IndexNode *inter_node = get_index_node(pdata); - - char 
*new_parent_key = (char *)mem_pool_item_->alloc(); - if (new_parent_key == nullptr) { - LOG_WARN("Failed to alloc memory for new key when split intern node index %d", file_id_); - return RC::NOMEM; - } - - // add a new node - BPPageHandle new_page_handle; - rc = disk_buffer_pool_->allocate_page(file_id_, &new_page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Faild to alloc new page when split inter node of index, file_id:%d", file_id_); - mem_pool_item_->free(new_parent_key); - return rc; - } - disk_buffer_pool_->get_data(&new_page_handle, &pdata); - - PageNum new_page; - disk_buffer_pool_->get_page_num(&new_page_handle, &new_page); - - IndexNode *new_node = get_index_node(pdata); - new_node->init_empty(file_header_); - new_node->is_leaf = false; - new_node->parent = inter_node->parent; - - split_node(inter_node, new_node, inter_page_num, new_page, new_parent_key); - - disk_buffer_pool_->mark_dirty(&inter_page_handle); - disk_buffer_pool_->mark_dirty(&new_page_handle); - - // print(); - PageNum parent_page = inter_node->parent; - rc = insert_into_parent(parent_page, inter_page_handle, new_parent_key, new_page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to insert key to parents, file_id:%d", file_id_); - merge_nodes(inter_node, new_node, new_page, new_parent_key); - copy_node(inter_node, new_node); - change_children_parent(inter_node->rids, inter_node->key_num + 1, inter_page_num); - - mem_pool_item_->free(new_parent_key); - disk_buffer_pool_->unpin_page(&new_page_handle); - disk_buffer_pool_->dispose_page(file_id_, new_page); - - return rc; - } - mem_pool_item_->free(new_parent_key); - disk_buffer_pool_->unpin_page(&new_page_handle); - return rc; -} - -RC BplusTreeHandler::insert_into_parent( - PageNum parent_page, BPPageHandle &left_page_handle, const char *pkey, BPPageHandle &right_page_handle) -{ - if (parent_page == -1) { - return insert_into_new_root(left_page_handle, pkey, right_page_handle); - } - - BPPageHandle page_handle; - RC rc = disk_buffer_pool_->get_this_page(file_id_, parent_page, &page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to get parent page file_id:%d, page:%d", file_id_, parent_page); - return rc; - } - - char *pdata; - disk_buffer_pool_->get_data(&page_handle, &pdata); - IndexNode *node = get_index_node(pdata); - - rc = insert_intern_node(page_handle, left_page_handle, right_page_handle, pkey); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to insert intern node of index :%d", file_id_); - return rc; - } - if (node->key_num > file_header_.order) { - rc = split_intern_node(page_handle, pkey); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to split intern node of index %d", file_id_); - int delete_index; - delete_entry_from_node(node, pkey, delete_index); - } - } - - disk_buffer_pool_->unpin_page(&page_handle); - return rc; -} - -void BplusTreeHandler::swith_root(BPPageHandle &new_root_page_handle, IndexNode *root, PageNum root_page) -{ - //@@@ TODO here should add lock - - disk_buffer_pool_->unpin_page(&root_page_handle_); - root_page_handle_ = new_root_page_handle; - root_node_ = root; - file_header_.root_page = root_page; - header_dirty_ = true; -} - -/** - * Create one new root node - * @param left_page_handle - * @param pkey - * @param right_page_handle - * @return - */ -RC BplusTreeHandler::insert_into_new_root( - BPPageHandle &left_page_handle, const char *pkey, BPPageHandle &right_page_handle) -{ - BPPageHandle new_root_page_handle; - RC rc = disk_buffer_pool_->allocate_page(file_id_, &new_root_page_handle); - if (rc != RC::SUCCESS) { 
- LOG_WARN("Failed to alloc new page for the new root node of index, file_id:%d", file_id_); - return rc; - } - - PageNum root_page; - disk_buffer_pool_->get_page_num(&new_root_page_handle, &root_page); - - // modify the left node - PageNum left_page; - char *pdata; - disk_buffer_pool_->get_page_num(&left_page_handle, &left_page); - disk_buffer_pool_->get_data(&left_page_handle, &pdata); - IndexNode *left = get_index_node(pdata); - left->parent = root_page; - disk_buffer_pool_->mark_dirty(&left_page_handle); - - // modify the right node - PageNum right_page; - disk_buffer_pool_->get_page_num(&right_page_handle, &right_page); - disk_buffer_pool_->get_data(&right_page_handle, &pdata); - IndexNode *right = get_index_node(pdata); - right->parent = root_page; - disk_buffer_pool_->mark_dirty(&right_page_handle); - - // handle the root node - disk_buffer_pool_->get_data(&new_root_page_handle, &pdata); - IndexNode *root = get_index_node(pdata); - root->init_empty(file_header_); - root->is_leaf = false; - root->key_num = 1; - memcpy(root->keys, pkey, file_header_.key_length); - - RID rid; - rid.page_num = left_page; - rid.slot_num = EMPTY_RID_SLOT_NUM; - memcpy(root->rids, &rid, sizeof(RID)); - rid.page_num = right_page; - rid.slot_num = EMPTY_RID_SLOT_NUM; - memcpy(root->rids + root->key_num, &rid, sizeof(RID)); - - disk_buffer_pool_->mark_dirty(&new_root_page_handle); - swith_root(new_root_page_handle, root, root_page); - - return RC::SUCCESS; -} - -RC BplusTreeHandler::insert_entry(const char *pkey, const RID *rid) -{ - - if (file_id_ < 0) { - LOG_WARN("Index isn't ready!"); - return RC::RECORD_CLOSED; - } - - if (pkey == nullptr || rid == nullptr) { - LOG_WARN("Invalid arguments, key is empty or rid is empty"); - return RC::INVALID_ARGUMENT; - } - - char *key = (char *)mem_pool_item_->alloc(); - if (key == nullptr) { - LOG_WARN("Failed to alloc memory for key. 
file_id:%d", file_id_); - return RC::NOMEM; - } - memcpy(key, pkey, file_header_.attr_length); - memcpy(key + file_header_.attr_length, rid, sizeof(*rid)); - - PageNum leaf_page; - RC rc = find_leaf(key, &leaf_page); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to find leaf file_id:%d, %s", file_id_, rid->to_string().c_str()); - mem_pool_item_->free(key); - return rc; - } - - BPPageHandle page_handle; - rc = disk_buffer_pool_->get_this_page(file_id_, leaf_page, &page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to load leaf file_id:%d, page_num:%d", file_id_, leaf_page); - mem_pool_item_->free(key); - return rc; - } - - char *pdata; - disk_buffer_pool_->get_data(&page_handle, &pdata); - - IndexNode *leaf = get_index_node(pdata); - rc = insert_entry_into_node(leaf, key, rid, leaf_page); - if (rc != RC::SUCCESS) { - LOG_TRACE("Failed to insert into leaf of index %d, rid:%s", file_id_, rid->to_string().c_str()); - disk_buffer_pool_->unpin_page(&page_handle); - mem_pool_item_->free(key); - return rc; - } - disk_buffer_pool_->mark_dirty(&page_handle); - - if (leaf->key_num > file_header_.order) { - - rc = split_leaf(page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to insert index of %d, failed to split for rid:%s", file_id_, rid->to_string().c_str()); - int delete_index = 0; - delete_entry_from_node(leaf, key, delete_index); - - disk_buffer_pool_->unpin_page(&page_handle); - mem_pool_item_->free(key); - return rc; - } - } - - disk_buffer_pool_->unpin_page(&page_handle); - mem_pool_item_->free(key); - return RC::SUCCESS; -} - -void BplusTreeHandler::get_entry_from_leaf( - IndexNode *node, const char *pkey, std::list &rids, bool &continue_check) -{ - for (int i = node->key_num - 1; i >= 0; i--) { - int tmp = attribute_comp( - pkey, node->keys + (i * file_header_.key_length), file_header_.attr_type, file_header_.attr_length); - if (tmp < 0) { - if (continue_check == true) { - LOG_WARN("Something is wrong, the sequence is wrong."); - print_tree(); - continue_check = false; - break; - } else { - continue; - } - } else if (tmp == 0) { - rids.push_back(node->rids[i]); - continue_check = true; - } else { - continue_check = false; - break; - } - } -} - -RC BplusTreeHandler::get_entry(const char *pkey, std::list &rids) -{ - if (file_id_ < 0) { - LOG_WARN("Index isn't ready!"); - return RC::RECORD_CLOSED; - } - - char *key = (char *)mem_pool_item_->alloc(); - if (key == nullptr) { - LOG_WARN("Failed to alloc memory for key. 
size=%d", file_header_.key_length); - return RC::NOMEM; - } - memcpy(key, pkey, file_header_.attr_length); - - RC rc; - - BPPageHandle page_handle; - char *pdata; - IndexNode *node = root_node_; - while (false == node->is_leaf) { - - int i; - for (i = 0; i < node->key_num; i++) { - int tmp = attribute_comp( - pkey, node->keys + i * file_header_.key_length, file_header_.attr_type, file_header_.attr_length); - if (tmp < 0) - break; - } - - if (page_handle.open == true) { - disk_buffer_pool_->unpin_page(&page_handle); - } - - rc = disk_buffer_pool_->get_this_page(file_id_, node->rids[i].page_num, &page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to load page file_id:%d, page_num:%d", file_id_, node->rids[i].page_num); - return rc; - } - disk_buffer_pool_->get_data(&page_handle, &pdata); - - node = get_index_node(pdata); - } - - bool continue_check = false; - get_entry_from_leaf(node, key, rids, continue_check); - - while (continue_check == true) { - PageNum prev_brother = node->prev_brother; - if (prev_brother == EMPTY_RID_PAGE_NUM) { - break; - } - if (page_handle.open) { - disk_buffer_pool_->unpin_page(&page_handle); - } - - rc = disk_buffer_pool_->get_this_page(file_id_, prev_brother, &page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Skip load the previous page, file_id:%d", file_id_); - break; - } - disk_buffer_pool_->get_data(&page_handle, &pdata); - node = get_index_node(pdata); - - get_entry_from_leaf(node, key, rids, continue_check); - } - - if (page_handle.open) { - disk_buffer_pool_->unpin_page(&page_handle); - } - mem_pool_item_->free(key); - return RC::SUCCESS; -} - -void BplusTreeHandler::delete_entry_from_node(IndexNode *node, const int delete_index) -{ - char *from = node->keys + (delete_index + 1) * file_header_.key_length; - char *to = from - file_header_.key_length; - int len = (node->key_num - delete_index - 1) * file_header_.key_length; - memmove(to, from, len); - - RID *from_rid = node->rids + (delete_index + 1); - RID *to_rid = from_rid - 1; - len = (node->key_num - delete_index - 1) * sizeof(RID); - if (node->is_leaf == false) { - len += sizeof(RID); - } - memmove(to_rid, from_rid, len); - - node->key_num--; -} - -RC BplusTreeHandler::get_parent_changed_index( - BPPageHandle &parent_handle, IndexNode *&parent, IndexNode *node, PageNum page_num, int &changed_index) -{ - RC rc = disk_buffer_pool_->get_this_page(file_id_, node->parent, &parent_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to delete index, due to failed to get pareent page, file_id:%d, parent_page:%d", - file_id_, - node->parent); - return rc; - } - char *pdata; - disk_buffer_pool_->get_data(&parent_handle, &pdata); - parent = get_index_node(pdata); - - while (changed_index <= parent->key_num) { - if ((parent->rids[changed_index].page_num) == page_num) - break; - changed_index++; - } - - if (changed_index == parent->key_num + 1) { - LOG_WARN("Something is wrong, failed to find the target page %d in parent, node:%s file_id:%d", - page_num, - node->to_string(file_header_).c_str(), - file_id_); - print_tree(); - return RC::RECORD_CLOSED; - } - - return RC::SUCCESS; -} - -RC BplusTreeHandler::change_leaf_parent_key_insert(IndexNode *node, int changed_indx, PageNum page_num) -{ - if (changed_indx != 0) { - return RC::SUCCESS; - } - - if (node->is_leaf == false) { - return RC::SUCCESS; - } - - if (node->parent == -1) { - return RC::SUCCESS; - } - - if (node->key_num == 0) { - return RC::SUCCESS; - } - - if (node->prev_brother == -1) { - return RC::SUCCESS; - } - - int parent_changed_index 
= 0; - BPPageHandle parent_handle; - IndexNode *parent = nullptr; - - RC rc = get_parent_changed_index(parent_handle, parent, node, page_num, parent_changed_index); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to get parent's delete index, file_id:%d, child's page_num:%d", file_id_, page_num); - if (parent_handle.open) { - disk_buffer_pool_->unpin_page(&parent_handle); - return rc; - } - } - if (parent_changed_index > 0) { - memcpy(parent->keys + (parent_changed_index - 1) * file_header_.key_length, node->keys, file_header_.key_length); - } - - disk_buffer_pool_->unpin_page(&parent_handle); - return RC::SUCCESS; -} -RC BplusTreeHandler::change_leaf_parent_key_delete(IndexNode *leaf, int delete_indx, const char *old_first_key) -{ - if (delete_indx != 0) { - return RC::SUCCESS; - } - - if (leaf->is_leaf == false) { - return RC::SUCCESS; - } - - if (leaf->parent == -1) { - return RC::SUCCESS; - } - - if (leaf->prev_brother == -1) { - return RC::SUCCESS; - } - - if (leaf->key_num == 0) { - return RC::SUCCESS; - } - - IndexNode *node = leaf; - bool found = false; - while (node->parent != -1) { - int index = 0; - BPPageHandle parent_handle; - - RC rc = disk_buffer_pool_->get_this_page(file_id_, node->parent, &parent_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to delete index, due to failed to get pareent page, file_id:%d, parent_page:%d", - file_id_, - node->parent); - return rc; - } - char *pdata; - disk_buffer_pool_->get_data(&parent_handle, &pdata); - node = get_index_node(pdata); - - int tmp = 0; - while (index < node->key_num) { - tmp = key_compare(file_header_.attr_type, - file_header_.attr_length, - old_first_key, - node->keys + index * file_header_.key_length); - if (tmp == 0) { - found = true; - memcpy(node->keys + index * file_header_.key_length, leaf->keys, file_header_.key_length); - break; - } else if (tmp > 0) { - index++; - continue; - } else { - break; - } - } - - disk_buffer_pool_->unpin_page(&parent_handle); - - if (found == true) { - return RC::SUCCESS; - } - } - - if (found == false) { - LOG_INFO("The old fist key has been changed, leaf:%s", leaf->to_string(file_header_).c_str()); - print_tree(); - } - return RC::SUCCESS; -} -RC BplusTreeHandler::delete_entry_from_node(IndexNode *node, const char *pkey, int &node_delete_index) -{ - - int delete_index, tmp; - for (delete_index = 0; delete_index < node->key_num; delete_index++) { - tmp = key_compare( - file_header_.attr_type, file_header_.attr_length, pkey, node->keys + delete_index * file_header_.key_length); - if (tmp == 0) { - node_delete_index = delete_index; - break; - } - } - if (delete_index >= node->key_num) { - // LOG_WARN("Failed to delete index of %d", file_id_); - return RC::RECORD_INVALID_KEY; - } - - delete_entry_from_node(node, delete_index); - - // change parent's key - change_leaf_parent_key_delete(node, delete_index, pkey); - return RC::SUCCESS; -} - -RC BplusTreeHandler::change_insert_leaf_link(IndexNode *left, IndexNode *right, PageNum right_page) -{ - if (left->is_leaf == false) { - return RC::SUCCESS; - } - - if (right->next_brother != -1) { - PageNum next_right_page = right->next_brother; - BPPageHandle next_right_handle; - RC rc = disk_buffer_pool_->get_this_page(file_id_, next_right_page, &next_right_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to set link for leaf for node %s, file_id:%d", file_id_, right->to_string(file_header_).c_str()); - return rc; - } - - char *pdata; - disk_buffer_pool_->get_data(&next_right_handle, &pdata); - IndexNode *next_right = get_index_node(pdata); - 
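// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original patch): the surrounding
// change_insert_leaf_link / change_delete_leaf_link code keeps the leaf level
// chained as a doubly-linked list through the prev_brother / next_brother
// page numbers, where -1 means "no sibling". The snippet below illustrates the
// same pointer bookkeeping on plain in-memory objects; Leaf, link_after_split
// and unlink_after_merge are hypothetical names, not part of the project's API.
// ---------------------------------------------------------------------------
#include <map>

struct Leaf {
  int prev = -1;  // page number of the left sibling, -1 if none
  int next = -1;  // page number of the right sibling, -1 if none
};

// Splice right_page in directly after left_page (what split_leaf arranges).
void link_after_split(std::map<int, Leaf> &pages, int left_page, int right_page)
{
  Leaf &left  = pages[left_page];
  Leaf &right = pages[right_page];
  right.prev = left_page;
  right.next = left.next;
  if (left.next != -1) {
    pages[left.next].prev = right_page;  // old right neighbour now points back to the new leaf
  }
  left.next = right_page;
}

// Unlink left_page after its contents were merged into right_page
// (what change_delete_leaf_link performs).
void unlink_after_merge(std::map<int, Leaf> &pages, int left_page, int right_page)
{
  Leaf &left  = pages[left_page];
  Leaf &right = pages[right_page];
  right.prev = left.prev;
  if (left.prev != -1) {
    pages[left.prev].next = right_page;
  }
}
// ---------------------------------------------------------------------------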
next_right->prev_brother = right_page; - disk_buffer_pool_->mark_dirty(&next_right_handle); - disk_buffer_pool_->unpin_page(&next_right_handle); - } - - return RC::SUCCESS; -} - -RC BplusTreeHandler::change_delete_leaf_link(IndexNode *left, IndexNode *right, PageNum right_page) -{ - if (left->is_leaf == false) { - return RC::SUCCESS; - } - - right->prev_brother = left->prev_brother; - if (left->prev_brother != -1) { - PageNum prev_left_page = left->prev_brother; - BPPageHandle prev_left_handle; - RC rc = disk_buffer_pool_->get_this_page(file_id_, prev_left_page, &prev_left_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to set link for leaf for node %s, file_id:%d", file_id_, right->to_string(file_header_).c_str()); - return rc; - } - - char *pdata; - disk_buffer_pool_->get_data(&prev_left_handle, &pdata); - IndexNode *prev_left = get_index_node(pdata); - prev_left->next_brother = right_page; - disk_buffer_pool_->mark_dirty(&prev_left_handle); - disk_buffer_pool_->unpin_page(&prev_left_handle); - } - - return RC::SUCCESS; -} - -/** - * merge left node into right node. - * @param parent_handle - * @param left_handle - * @param right_handle - * @param delete_index - * @return - */ -RC BplusTreeHandler::coalesce_node(BPPageHandle &parent_handle, BPPageHandle &left_handle, BPPageHandle &right_handle, - int delete_index, bool check_change_leaf_key, int node_delete_index, const char *pkey) -{ - PageNum left_page, right_page, parent_page; - IndexNode *left, *right, *parent; - char *pdata, *parent_key; - RC rc; - - disk_buffer_pool_->get_page_num(&left_handle, &left_page); - disk_buffer_pool_->get_data(&left_handle, &pdata); - left = get_index_node(pdata); - - disk_buffer_pool_->get_page_num(&right_handle, &right_page); - disk_buffer_pool_->get_data(&right_handle, &pdata); - right = get_index_node(pdata); - - parent_page = left->parent; - disk_buffer_pool_->get_data(&parent_handle, &pdata); - parent = get_index_node(pdata); - - parent_key = (char *)mem_pool_item_->alloc(); - if (parent_key == nullptr) { - LOG_WARN("Failed to alloc memory for key. 
size=%d", file_header_.key_length); - return RC::NOMEM; - } - - memcpy(parent_key, parent->keys + delete_index * file_header_.key_length, file_header_.key_length); - merge_nodes(left, right, right_page, parent_key); - disk_buffer_pool_->mark_dirty(&left_handle); - disk_buffer_pool_->mark_dirty(&right_handle); - - change_delete_leaf_link(left, right, right_page); - if (check_change_leaf_key) { - change_leaf_parent_key_delete(right, node_delete_index, pkey); - } - - rc = delete_entry_internal(parent_page, parent_key); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to delete internal entry of index ", file_id_); - - // restore status - copy_node(left, right); - right->key_num = 0; - split_node(left, right, left_page, right_page, parent_key); - change_delete_leaf_link(left, right, left_page); - left->next_brother = right_page; - right->prev_brother = left_page; - - mem_pool_item_->free(parent_key); - return rc; - } - - mem_pool_item_->free(parent_key); - return RC::SUCCESS; -} - -void BplusTreeHandler::change_children_parent(RID *rids, int rid_len, PageNum new_parent_page) -{ - for (int i = 0; i < rid_len; i++) { - RID rid = rids[i]; - - PageNum page_num = rid.page_num; - - BPPageHandle child_page_handle; - RC rc = disk_buffer_pool_->get_this_page(file_id_, page_num, &child_page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to load child page %d of index %d when change child's parent.", file_id_, page_num); - continue; - } - - char *pdata; - disk_buffer_pool_->get_data(&child_page_handle, &pdata); - - IndexNode *child_node = get_index_node(pdata); - child_node->parent = new_parent_page; - - disk_buffer_pool_->mark_dirty(&child_page_handle); - disk_buffer_pool_->unpin_page(&child_page_handle); - } -} - -/** - * merge left node into right node; - * - * This function is contrary to split_node - */ -void BplusTreeHandler::merge_nodes(IndexNode *left_node, IndexNode *right_node, PageNum right_page, char *parent_key) -{ - bool is_leaf = left_node->is_leaf; - int old_left_key_num = left_node->key_num; - int old_right_key_num = right_node->key_num; - int new_left_key_num = 0; - int new_right_key_num = old_left_key_num + old_right_key_num; - if (is_leaf == false) { - new_right_key_num++; - } - left_node->key_num = new_left_key_num; - right_node->key_num = new_right_key_num; - - if (is_leaf) { - int delta = new_right_key_num - old_right_key_num; - char *from = right_node->keys; - char *to = right_node->keys + delta * file_header_.key_length; - int len = old_right_key_num * file_header_.key_length; - memmove(to, from, len); - - RID *from_rid = right_node->rids; - RID *to_rid = right_node->rids + delta; - len = old_right_key_num * sizeof(RID); - memmove(to_rid, from_rid, len); - - from = left_node->keys; - to = right_node->keys; - len = old_left_key_num * file_header_.key_length; - memmove(to, from, len); - - from_rid = left_node->rids; - to_rid = right_node->rids; - len = old_left_key_num * sizeof(RID); - memmove(to_rid, from_rid, len); - - } else { - int delta = new_right_key_num - old_right_key_num; - char *from = right_node->keys; - char *to = right_node->keys + delta * file_header_.key_length; - int len = old_right_key_num * file_header_.key_length; - memmove(to, from, len); - - RID *from_rid = right_node->rids; - RID *to_rid = right_node->rids + delta; - len = (old_right_key_num + 1) * sizeof(RID); - memmove(to_rid, from_rid, len); - - memcpy(right_node->keys + (delta - 1) * file_header_.key_length, parent_key, file_header_.key_length); - - from = left_node->keys; - to = right_node->keys; - 
len = old_left_key_num * file_header_.key_length; - memmove(to, from, len); - - from_rid = left_node->rids; - to_rid = right_node->rids; - len = (old_left_key_num + 1) * sizeof(RID); - memmove(to_rid, from_rid, len); - - change_children_parent(to_rid, len / sizeof(RID), right_page); - } -} - -/** - * split left node to two node - * This function is contrary to merge_node - */ -void BplusTreeHandler::split_node( - IndexNode *left_node, IndexNode *right_node, PageNum left_page, PageNum right_page, char *new_parent_key) -{ - bool is_leaf = left_node->is_leaf; - int old_left_key_num = left_node->key_num; - int old_right_key_num = right_node->key_num; // right_node->key_num should be zero - int total_key_num = left_node->key_num + right_node->key_num; - - int mid, new_left_key_num, new_right_key_num; - - /** - * if node is leaf, all key will be distributed both in left and right node - * if node is intern node, all keys except the middle key will be distributed both in the left and the right node - */ - if (is_leaf == true) { - new_left_key_num = total_key_num / 2; - mid = new_left_key_num; - new_right_key_num = total_key_num - mid; - } else { - new_left_key_num = (total_key_num - 1) / 2; - mid = new_left_key_num + 1; - new_right_key_num = (total_key_num - mid); - } - - left_node->key_num = new_left_key_num; - right_node->key_num = new_right_key_num; - - if (is_leaf) { - memcpy(new_parent_key, left_node->keys + new_left_key_num * file_header_.key_length, file_header_.key_length); - } else { - memmove(new_parent_key, left_node->keys + new_left_key_num * file_header_.key_length, file_header_.key_length); - } - - char *from = left_node->keys + mid * file_header_.key_length; - char *to = right_node->keys; - int len = new_right_key_num * file_header_.key_length; - memmove(to, from, len); - - RID *from_rid = left_node->rids + mid; - RID *to_rid = right_node->rids; - len = new_right_key_num * sizeof(RID); - memmove(to_rid, from_rid, len); - - // handle the last rid - if (is_leaf == false) { - - RID *changed_rids = to_rid; - int changed_rids_len = len; - PageNum changed_page = right_page; - - if (old_right_key_num == 0) { - memmove(right_node->rids + new_right_key_num, left_node->rids + old_left_key_num, sizeof(RID)); - changed_rids_len += sizeof(RID); - } - - change_children_parent(changed_rids, changed_rids_len / sizeof(RID), changed_page); - } else { - change_insert_leaf_link(left_node, right_node, right_page); - } - - return; -} - -void BplusTreeHandler::copy_node(IndexNode *to, IndexNode *from) -{ - memcpy(to->keys, from->keys, from->key_num * file_header_.key_length); - memcpy(to->rids, from->rids, (from->key_num + 1) * sizeof(RID)); - memcpy(to, from, sizeof(IndexNode)); -} - -void BplusTreeHandler::redistribute_nodes( - IndexNode *left_node, IndexNode *right_node, PageNum left_page, PageNum right_page, char *parent_key) -{ - bool is_leaf = left_node->is_leaf; - int old_left_key_num = left_node->key_num; - int old_right_key_num = right_node->key_num; - int total_key_num = left_node->key_num + right_node->key_num; - if (is_leaf == false) { - total_key_num++; - } - - // mid represent the parent key's position - int mid, new_left_key_num, new_right_key_num; - - /** - * if node is leaf, all key will be distributed both in left and right node - * if node is intern node, all keys except the middle key will be distributed both in the left and the right node - */ - if (is_leaf == true) { - new_left_key_num = total_key_num / 2; - mid = new_left_key_num; - new_right_key_num = total_key_num - mid; - } else 
{ - new_left_key_num = (total_key_num - 1) / 2; - mid = new_left_key_num + 1; - new_right_key_num = (total_key_num - mid); - } - - left_node->key_num = new_left_key_num; - right_node->key_num = new_right_key_num; - - RID *changed_rids = nullptr; - int changed_rids_len = 0; - PageNum changed_page = 0; - - int delta = old_left_key_num - new_left_key_num; - if (delta == 0) { - return; - } else if (delta > 0) { - // move kv from left to right - delta = new_right_key_num - old_right_key_num; - char *from = right_node->keys; - char *to = right_node->keys + delta * file_header_.key_length; - int len = old_right_key_num * file_header_.key_length; - memmove(to, from, len); - - RID *from_rid = right_node->rids; - RID *to_rid = right_node->rids + delta; - len = old_right_key_num * sizeof(RID); - if (left_node->is_leaf == false) { - len += sizeof(RID); - } - memmove(to_rid, from_rid, len); - - if (is_leaf == false) { - memcpy(left_node->keys + old_left_key_num * file_header_.key_length, parent_key, file_header_.key_length); - } - - delta = old_left_key_num - new_left_key_num; - - from = left_node->keys + mid * file_header_.key_length; - to = right_node->keys; - len = delta * file_header_.key_length; - memmove(to, from, len); - - from_rid = left_node->rids + mid; - to_rid = right_node->rids; - len = delta * sizeof(RID); - memmove(to_rid, from_rid, len); - - changed_rids = to_rid; - changed_rids_len = len; - changed_page = right_page; - - if (is_leaf) { - memcpy(parent_key, right_node->keys, file_header_.key_length); - } else { - memmove(parent_key, left_node->keys + new_left_key_num * file_header_.key_length, file_header_.key_length); - } - - } else { - // move kv from right to left - if (is_leaf == false) { - memcpy(left_node->keys + old_left_key_num * file_header_.key_length, parent_key, file_header_.key_length); - } - int start_pos = old_left_key_num; - int len = (new_left_key_num - old_left_key_num); - if (is_leaf == false) { - start_pos++; - len--; - } - - char *from = right_node->keys; - char *to = left_node->keys + start_pos * file_header_.key_length; - memmove(to, from, len * file_header_.key_length); - - RID *from_rid = right_node->rids; - RID *to_rid = left_node->rids + start_pos; - memmove(to_rid, from_rid, len * sizeof(RID)); - - changed_rids = to_rid; - changed_rids_len = (new_left_key_num - old_left_key_num) * sizeof(RID); - changed_page = left_page; - - if (is_leaf == false) { - memcpy(parent_key, right_node->keys + len * file_header_.key_length, file_header_.key_length); - memcpy(left_node->rids + new_left_key_num, right_node->rids + len, sizeof(RID)); - } else { - memcpy(parent_key, right_node->keys + len * file_header_.key_length, file_header_.key_length); - } - - delta = old_right_key_num - new_right_key_num; - from = right_node->keys + delta * file_header_.key_length; - to = right_node->keys; - len = new_right_key_num * file_header_.key_length; - memmove(to, from, len); - - from_rid = right_node->rids + delta; - to_rid = right_node->rids; - len = new_right_key_num * sizeof(RID); - if (left_node->is_leaf == false) { - len += sizeof(RID); - } - memmove(to_rid, from_rid, len); - } - - // handle the last rid - if (left_node->is_leaf == false) { - change_children_parent(changed_rids, changed_rids_len / sizeof(RID), changed_page); - } - - return; -} - -RC BplusTreeHandler::redistribute_nodes( - BPPageHandle &parent_handle, BPPageHandle &left_handle, BPPageHandle &right_handle) -{ - - PageNum left_page, right_page; - IndexNode *left, *right, *parent; - char *pdata; - - 
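// ---------------------------------------------------------------------------
// Editor's sketch (illustrative only, not part of the original patch): both
// split_node and redistribute_nodes above size the two siblings with the same
// rule. In a leaf every key stays in one of the two nodes and the first key of
// the right node is copied up as the new separator; in an internal node the
// key at index new_left_key_num is moved up to the parent, and keys from index
// mid onward move to the right node. SplitPlan / plan_split are hypothetical
// names that only restate that arithmetic.
// ---------------------------------------------------------------------------
struct SplitPlan {
  int new_left_key_num;   // keys kept in the left node
  int mid;                // index of the first key that moves to the right node
  int new_right_key_num;  // keys ending up in the right node
};

SplitPlan plan_split(int total_key_num, bool is_leaf)
{
  SplitPlan p{};
  if (is_leaf) {
    p.new_left_key_num  = total_key_num / 2;
    p.mid               = p.new_left_key_num;
    p.new_right_key_num = total_key_num - p.mid;
  } else {
    p.new_left_key_num  = (total_key_num - 1) / 2;
    p.mid               = p.new_left_key_num + 1;  // the key at new_left_key_num is promoted
    p.new_right_key_num = total_key_num - p.mid;
  }
  return p;
}
// Example: 7 keys in an internal node -> 3 stay left, the key at index 3 is
// promoted to the parent, and the remaining 3 move to the right node.
// ---------------------------------------------------------------------------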
disk_buffer_pool_->get_page_num(&left_handle, &left_page); - disk_buffer_pool_->get_data(&left_handle, &pdata); - left = get_index_node(pdata); - - disk_buffer_pool_->get_page_num(&right_handle, &right_page); - disk_buffer_pool_->get_data(&right_handle, &pdata); - right = get_index_node(pdata); - - disk_buffer_pool_->get_data(&parent_handle, &pdata); - parent = get_index_node(pdata); - - int parent_change_pos = -1; - for (int k = 0; k < parent->key_num; k++) { - - if (parent->rids[k].page_num == left_page) { - parent_change_pos = k; - break; - } - } - - if (parent_change_pos == -1) { - LOG_WARN("Failed to find the parent pos during redistribute node"); - return RC::RECORD_INVALID_KEY; - } - - char *parent_key = parent->keys + parent_change_pos * file_header_.key_length; - redistribute_nodes(left, right, left_page, right_page, parent_key); - - disk_buffer_pool_->mark_dirty(&left_handle); - disk_buffer_pool_->mark_dirty(&right_handle); - disk_buffer_pool_->mark_dirty(&parent_handle); - - return RC::SUCCESS; -} - -RC BplusTreeHandler::clean_root_after_delete(IndexNode *old_root) -{ - if (old_root->key_num > 0) { - return RC::SUCCESS; - } - - BPPageHandle root_handle; - RC rc = disk_buffer_pool_->get_this_page(file_id_, old_root->rids[0].page_num, &root_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to get new root page %d of index %d", old_root->rids[0].page_num, file_id_); - return rc; - } - - char *pdata; - disk_buffer_pool_->get_data(&root_handle, &pdata); - IndexNode *root = get_index_node(pdata); - root->parent = -1; - disk_buffer_pool_->mark_dirty(&root_handle); - swith_root(root_handle, root, old_root->rids[0].page_num); - - return RC::SUCCESS; -} - -RC BplusTreeHandler::can_merge_with_other(BPPageHandle *page_handle, PageNum page_num, bool *can_merge) -{ - RC rc = disk_buffer_pool_->get_this_page(file_id_, page_num, page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to delete index, due to failed to get page of current delete page, file_id:%d, page:%d", - file_id_, - page_num); - return rc; - } - char *pdata; - disk_buffer_pool_->get_data(page_handle, &pdata); - IndexNode *node = get_index_node(pdata); - *can_merge = node->key_num > (file_header_.order / 2); - - return RC::SUCCESS; -} - -RC BplusTreeHandler::delete_entry_internal(PageNum page_num, const char *pkey) -{ - BPPageHandle page_handle; - RC rc = disk_buffer_pool_->get_this_page(file_id_, page_num, &page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN( - "Failed to delete entry in index node, due to failed to get page!, file_id:%d, page:%d", file_id_, page_num); - return rc; - } - - char *pdata; - rc = disk_buffer_pool_->get_data(&page_handle, &pdata); - if (rc != RC::SUCCESS) { - return rc; - } - IndexNode *node = get_index_node(pdata); - - int node_delete_index = -1; - rc = delete_entry_from_node(node, pkey, node_delete_index); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to delete index %d", file_id_); - return rc; - } - - disk_buffer_pool_->mark_dirty(&page_handle); - - int min_key = file_header_.order / 2; - if (node->key_num >= min_key) { - disk_buffer_pool_->unpin_page(&page_handle); - return RC::SUCCESS; - } - - if (node->parent == -1) { - if (node->key_num == 0 && node->is_leaf == false) { - rc = clean_root_after_delete(node); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to clean root after delete all entry in the root, file_id:%d", file_id_); - insert_entry_into_node(node, pkey, (RID *)(pkey + file_header_.attr_length), page_num); - disk_buffer_pool_->unpin_page(&page_handle); - return rc; - } - 
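// ---------------------------------------------------------------------------
// Editor's sketch (illustrative only, not part of the original patch): the
// rest of delete_entry_internal (below) rebalances a non-root node that
// underflows after a delete (key_num < order / 2). Despite their names,
// can_merge_with_left / can_merge_with_right really test whether a sibling
// holds more than order / 2 keys and can therefore lend one (redistribute);
// otherwise the node is coalesced with that sibling. choose_rebalance is a
// hypothetical helper that only restates this policy; pass 0 for a sibling
// that does not exist.
// ---------------------------------------------------------------------------
enum class RebalanceAction { None, BorrowFromLeft, BorrowFromRight, MergeWithLeft, MergeWithRight };

RebalanceAction choose_rebalance(
    int key_num, int order, bool has_left_sibling, int left_sibling_keys, int right_sibling_keys)
{
  const int min_keys = order / 2;
  if (key_num >= min_keys) {
    return RebalanceAction::None;              // no underflow, nothing to do
  }
  if (has_left_sibling) {
    if (left_sibling_keys > min_keys) {
      return RebalanceAction::BorrowFromLeft;  // redistribute_nodes(parent, left, this)
    }
    if (right_sibling_keys > min_keys) {
      return RebalanceAction::BorrowFromRight; // redistribute_nodes(parent, this, right)
    }
    return RebalanceAction::MergeWithLeft;     // coalesce_node(parent, left, this)
  }
  // leftmost child: only a right sibling is available
  if (right_sibling_keys > min_keys) {
    return RebalanceAction::BorrowFromRight;
  }
  return RebalanceAction::MergeWithRight;      // coalesce_node(parent, this, right)
}
// ---------------------------------------------------------------------------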
disk_buffer_pool_->unpin_page(&page_handle); - disk_buffer_pool_->dispose_page(file_id_, page_num); - - return RC::SUCCESS; - } - - disk_buffer_pool_->unpin_page(&page_handle); - return RC::SUCCESS; - } - - int delete_index = 0; - BPPageHandle parent_handle; - IndexNode *parent = nullptr; - - rc = get_parent_changed_index(parent_handle, parent, node, page_num, delete_index); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to get parent delete index"); - insert_entry_into_node(node, pkey, (RID *)(pkey + file_header_.attr_length), page_num); - disk_buffer_pool_->unpin_page(&page_handle); - return rc; - } - - bool can_merge_with_right = false; - bool force_collapse_with_right = false; - bool can_merge_with_left = false; - // bool force_collapse_with_left = false; - PageNum left_page = 0, right_page = 0; - BPPageHandle right_handle, left_handle; - if (delete_index == 0) { - right_page = parent->rids[delete_index + 1].page_num; - rc = can_merge_with_other(&right_handle, right_page, &can_merge_with_right); - if (rc != RC::SUCCESS) { - goto cleanup; - } - - if (can_merge_with_right == false) { - force_collapse_with_right = true; - } - } else { - left_page = parent->rids[delete_index - 1].page_num; - rc = can_merge_with_other(&left_handle, left_page, &can_merge_with_left); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed delete index, due to failed to get page, file_id:%d, page:%d", file_id_, left_page); - goto cleanup; - } - if (can_merge_with_left == false) { - // begin to merge with right - // force_collapse_with_left = true; - if (delete_index < parent->key_num) { - - right_page = parent->rids[delete_index + 1].page_num; - rc = can_merge_with_other(&right_handle, right_page, &can_merge_with_right); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to delete index, due to failed to get right page of current delete page, file_id:%d, " - "right_page:$d", - file_id_, - right_page); - - goto cleanup; - } - - } // delete_index < parent->key_num - 1 - } // if can_merge_with_left = false - } // delete_index = 0 - - if (can_merge_with_left) { - rc = redistribute_nodes(parent_handle, left_handle, page_handle); - } else if (can_merge_with_right) { - rc = redistribute_nodes(parent_handle, page_handle, right_handle); - change_leaf_parent_key_delete(node, node_delete_index, pkey); - } else if (force_collapse_with_right) { - rc = coalesce_node(parent_handle, page_handle, right_handle, delete_index, true, node_delete_index, pkey); - if (rc == RC::SUCCESS) { - disk_buffer_pool_->unpin_page(&page_handle); - disk_buffer_pool_->dispose_page(file_id_, page_num); - page_handle.open = false; - } - } else { - rc = coalesce_node(parent_handle, left_handle, page_handle, delete_index - 1, false, node_delete_index, pkey); - if (rc == RC::SUCCESS) { - disk_buffer_pool_->unpin_page(&left_handle); - disk_buffer_pool_->dispose_page(file_id_, left_page); - left_handle.open = false; - } - } - -cleanup: - if (rc != RC::SUCCESS) { - insert_entry_into_node(node, pkey, (RID *)(pkey + file_header_.attr_length), page_num); - } - if (right_handle.open) { - disk_buffer_pool_->unpin_page(&right_handle); - } - if (left_handle.open) { - disk_buffer_pool_->unpin_page(&left_handle); - } - disk_buffer_pool_->unpin_page(&parent_handle); - if (page_handle.open) { - disk_buffer_pool_->unpin_page(&page_handle); - } - - return rc; -} - -RC BplusTreeHandler::delete_entry(const char *data, const RID *rid) -{ - if (file_id_ < 0) { - LOG_WARN("Failed to delete index entry, due to index is't ready"); - return RC::RECORD_CLOSED; - } - - char *pkey = (char 
*)mem_pool_item_->alloc(); - if (nullptr == pkey) { - LOG_WARN("Failed to alloc memory for key. size=%d", file_header_.key_length); - return RC::NOMEM; - } - memcpy(pkey, data, file_header_.attr_length); - memcpy(pkey + file_header_.attr_length, rid, sizeof(*rid)); - - PageNum leaf_page; - RC rc = find_leaf(pkey, &leaf_page); - if (rc != RC::SUCCESS) { - mem_pool_item_->free(pkey); - return rc; - } - rc = delete_entry_internal(leaf_page, pkey); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to delete index %d", file_id_); - mem_pool_item_->free(pkey); - return rc; - } - mem_pool_item_->free(pkey); - return RC::SUCCESS; -} - -RC BplusTreeHandler::find_first_index_satisfied(CompOp compop, const char *key, PageNum *page_num, int *rididx) -{ - BPPageHandle page_handle; - IndexNode *node; - PageNum leaf_page, next; - char *pdata, *pkey; - RC rc; - int i, tmp; - RID rid; - if (compop == LESS_THAN || compop == LESS_EQUAL || compop == NOT_EQUAL) { - rc = get_first_leaf_page(page_num); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to get first leaf page, index:%d", file_id_); - return rc; - } - *rididx = 0; - return RC::SUCCESS; - } - rid.page_num = -1; - rid.slot_num = -1; - pkey = (char *)mem_pool_item_->alloc(); - if (pkey == nullptr) { - LOG_WARN("Failed to alloc memory for key. size=%d", file_header_.key_length); - return RC::NOMEM; - } - memcpy(pkey, key, file_header_.attr_length); - memcpy(pkey + file_header_.attr_length, &rid, sizeof(RID)); - - rc = find_leaf(pkey, &leaf_page); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to find leaf page of index %d", file_id_); - mem_pool_item_->free(pkey); - return rc; - } - mem_pool_item_->free(pkey); - - next = leaf_page; - - while (next > 0) { - rc = disk_buffer_pool_->get_this_page(file_id_, next, &page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to scan index due to failed to load page %d of index %d", next, file_id_); - return rc; - } - disk_buffer_pool_->get_data(&page_handle, &pdata); - - node = get_index_node(pdata); - for (i = 0; i < node->key_num; i++) { - tmp = attribute_comp( - node->keys + i * file_header_.key_length, key, file_header_.attr_type, file_header_.attr_length); - if (compop == EQUAL_TO || compop == GREAT_EQUAL) { - if (tmp >= 0) { - disk_buffer_pool_->get_page_num(&page_handle, page_num); - - *rididx = i; - disk_buffer_pool_->unpin_page(&page_handle); - return RC::SUCCESS; - } - } - if (compop == GREAT_THAN) { - if (tmp > 0) { - disk_buffer_pool_->get_page_num(&page_handle, page_num); - *rididx = i; - disk_buffer_pool_->unpin_page(&page_handle); - return RC::SUCCESS; - } - } - } - next = node->next_brother; - } - disk_buffer_pool_->unpin_page(&page_handle); - - return RC::RECORD_EOF; -} - -RC BplusTreeHandler::get_first_leaf_page(PageNum *leaf_page) -{ - RC rc; - BPPageHandle page_handle; - PageNum page_num; - IndexNode *node; - char *pdata; - - node = root_node_; - - while (node->is_leaf == false) { - page_num = node->rids[0].page_num; - if (page_handle.open) { - disk_buffer_pool_->unpin_page(&page_handle); - } - - rc = disk_buffer_pool_->get_this_page(file_id_, page_num, &page_handle); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to load page %d of index %d", page_num, file_id_); - return rc; - } - disk_buffer_pool_->get_data(&page_handle, &pdata); - - node = get_index_node(pdata); - } - if (page_handle.open) { - disk_buffer_pool_->get_page_num(&page_handle, leaf_page); - - disk_buffer_pool_->unpin_page(&page_handle); - } else { - disk_buffer_pool_->get_page_num(&root_page_handle_, leaf_page); - } - - return 
RC::SUCCESS; -} - -BplusTreeScanner::BplusTreeScanner(BplusTreeHandler &index_handler) : index_handler_(index_handler) -{} - -RC BplusTreeScanner::open(CompOp comp_op, const char *value) -{ - RC rc; - if (opened_) { - return RC::RECORD_OPENNED; - } - - comp_op_ = comp_op; - - char *value_copy = (char *)malloc(index_handler_.file_header_.attr_length); - if (value_copy == nullptr) { - LOG_WARN("Failed to alloc memory for value. size=%d", index_handler_.file_header_.attr_length); - return RC::NOMEM; - } - memcpy(value_copy, value, index_handler_.file_header_.attr_length); - value_ = value_copy; // mem_pool_item_->free value_ - rc = index_handler_.find_first_index_satisfied(comp_op, value, &next_page_num_, &index_in_node_); - if (rc != RC::SUCCESS) { - if (rc == RC::RECORD_EOF) { - next_page_num_ = -1; - index_in_node_ = -1; - } else - return rc; - } - num_fixed_pages_ = 1; - next_index_of_page_handle_ = 0; - pinned_page_count_ = 0; - opened_ = true; - return RC::SUCCESS; -} - -RC BplusTreeScanner::close() -{ - if (!opened_) { - return RC::RECORD_SCANCLOSED; - } - free((void *)value_); - value_ = nullptr; - opened_ = false; - return RC::SUCCESS; -} - -RC BplusTreeScanner::next_entry(RID *rid) -{ - RC rc; - if (!opened_) { - return RC::RECORD_CLOSED; - } - rc = get_next_idx_in_memory(rid); //和RM中一样,有可能有错误,一次只查当前页和当前页的下一页,有待确定 - if (rc == RC::RECORD_NO_MORE_IDX_IN_MEM) { - rc = find_idx_pages(); - if (rc != RC::SUCCESS) { - return rc; - } - return get_next_idx_in_memory(rid); - } else { - if (rc != RC::SUCCESS) { - return rc; - } - } - return RC::SUCCESS; -} - -RC BplusTreeScanner::find_idx_pages() -{ - RC rc; - if (!opened_) { - return RC::RECORD_CLOSED; - } - if (pinned_page_count_ > 0) { - for (int i = 0; i < pinned_page_count_; i++) { - rc = index_handler_.disk_buffer_pool_->unpin_page(page_handles_ + i); - if (rc != RC::SUCCESS) { - return rc; - } - } - } - next_index_of_page_handle_ = 0; - pinned_page_count_ = 0; - - for (int i = 0; i < num_fixed_pages_; i++) { - if (next_page_num_ <= 0) - break; - rc = index_handler_.disk_buffer_pool_->get_this_page(index_handler_.file_id_, next_page_num_, page_handles_ + i); - if (rc != RC::SUCCESS) { - return rc; - } - char *pdata; - rc = index_handler_.disk_buffer_pool_->get_data(page_handles_ + i, &pdata); - if (rc != RC::SUCCESS) { - return rc; - } - - IndexNode *node = index_handler_.get_index_node(pdata); - pinned_page_count_++; - next_page_num_ = node->next_brother; - } - if (pinned_page_count_ > 0) - return RC::SUCCESS; - return RC::RECORD_EOF; -} - -RC BplusTreeScanner::get_next_idx_in_memory(RID *rid) -{ - char *pdata; - IndexNode *node; - RC rc; - if (next_index_of_page_handle_ >= pinned_page_count_) { - return RC::RECORD_NO_MORE_IDX_IN_MEM; - } - - if (next_page_num_ == -1 && index_in_node_ == -1) { - return RC::RECORD_EOF; - } - - for (; next_index_of_page_handle_ < pinned_page_count_; next_index_of_page_handle_++) { - rc = index_handler_.disk_buffer_pool_->get_data(page_handles_ + next_index_of_page_handle_, &pdata); - if (rc != RC::SUCCESS) { - LOG_WARN("Failed to get data from disk buffer pool. 
rc=%s", strrc); - return rc; - } - - node = index_handler_.get_index_node(pdata); - for (; index_in_node_ < node->key_num; index_in_node_++) { - if (satisfy_condition(node->keys + index_in_node_ * index_handler_.file_header_.key_length)) { - memcpy(rid, node->rids + index_in_node_, sizeof(RID)); - index_in_node_++; - return RC::SUCCESS; - } - } - - index_in_node_ = 0; - } - return RC::RECORD_NO_MORE_IDX_IN_MEM; -} -bool BplusTreeScanner::satisfy_condition(const char *pkey) -{ - int i1 = 0, i2 = 0; - float f1 = 0, f2 = 0; - const char *s1 = nullptr, *s2 = nullptr; - - if (comp_op_ == NO_OP) { - return true; - } - - AttrType attr_type = index_handler_.file_header_.attr_type; - switch (attr_type) { - case INTS: - i1 = *(int *)pkey; - i2 = *(int *)value_; - break; - case FLOATS: - f1 = *(float *)pkey; - f2 = *(float *)value_; - break; - case CHARS: - s1 = pkey; - s2 = value_; - break; - default: - LOG_PANIC("Unknown attr type: %d", attr_type); - } - - bool flag = false; - - int attr_length = index_handler_.file_header_.attr_length; - switch (comp_op_) { - case EQUAL_TO: - switch (attr_type) { - case INTS: - flag = (i1 == i2); - break; - case FLOATS: - flag = 0 == float_compare(f1, f2); - break; - case CHARS: - flag = (strncmp(s1, s2, attr_length) == 0); - break; - default: - LOG_PANIC("Unknown attr type: %d", attr_type); - } - break; - case LESS_THAN: - switch (attr_type) { - case INTS: - flag = (i1 < i2); - break; - case FLOATS: - flag = (f1 < f2); - break; - case CHARS: - flag = (strncmp(s1, s2, attr_length) < 0); - break; - default: - LOG_PANIC("Unknown attr type: %d", attr_type); - } - break; - case GREAT_THAN: - switch (attr_type) { - case INTS: - flag = (i1 > i2); - break; - case FLOATS: - flag = (f1 > f2); - break; - case CHARS: - flag = (strncmp(s1, s2, attr_length) > 0); - break; - default: - LOG_PANIC("Unknown attr type: %d", attr_type); - } - break; - case LESS_EQUAL: - switch (attr_type) { - case INTS: - flag = (i1 <= i2); - break; - case FLOATS: - flag = (f1 <= f2); - break; - case CHARS: - flag = (strncmp(s1, s2, attr_length) <= 0); - break; - default: - LOG_PANIC("Unknown attr type: %d", attr_type); - } - break; - case GREAT_EQUAL: - switch (attr_type) { - case INTS: - flag = (i1 >= i2); - break; - case FLOATS: - flag = (f1 >= f2); - break; - case CHARS: - flag = (strncmp(s1, s2, attr_length) >= 0); - break; - default: - LOG_PANIC("Unknown attr type: %d", attr_type); - } - break; - case NOT_EQUAL: - switch (attr_type) { - case INTS: - flag = (i1 != i2); - break; - case FLOATS: - flag = 0 != float_compare(f1, f2); - break; - case CHARS: - flag = (strncmp(s1, s2, attr_length) != 0); - break; - default: - LOG_PANIC("Unknown attr type: %d", attr_type); - } - break; - default: - LOG_PANIC("Unknown comp op: %d", comp_op_); - } - return flag; -} diff --git a/src/observer/storage/common/bplus_tree.h b/src/observer/storage/common/bplus_tree.h deleted file mode 100644 index 429e5fbb23e46bfbfc5936c1e6933c17ac96cb9b..0000000000000000000000000000000000000000 --- a/src/observer/storage/common/bplus_tree.h +++ /dev/null @@ -1,308 +0,0 @@ -/* Copyright (c) 2021 Xie Meiyi(xiemeiyi@hust.edu.cn) and OceanBase and/or its affiliates. All rights reserved. -miniob is licensed under Mulan PSL v2. -You can use this software according to the terms and conditions of the Mulan PSL v2. 
-You may obtain a copy of Mulan PSL v2 at: - http://license.coscl.org.cn/MulanPSL2 -THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, -EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, -MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. -See the Mulan PSL v2 for more details. */ - -// -// Created by Longda on 2021/4/13. -// -#ifndef __OBSERVER_STORAGE_COMMON_INDEX_MANAGER_H_ -#define __OBSERVER_STORAGE_COMMON_INDEX_MANAGER_H_ - -#include - -#include "record_manager.h" -#include "storage/default/disk_buffer_pool.h" -#include "sql/parser/parse_defs.h" - -#define EMPTY_RID_PAGE_NUM -1 -#define EMPTY_RID_SLOT_NUM -1 - -struct IndexFileHeader { - IndexFileHeader() - { - memset(this, 0, sizeof(IndexFileHeader)); - } - int attr_length; - int key_length; - AttrType attr_type; - PageNum root_page; - int order; - - const std::string to_string() - { - std::stringstream ss; - - ss << "attr_length:" << attr_length << "," - << "key_length:" << key_length << "," - << "attr_type:" << attr_type << "," - << "root_page:" << root_page << "," - << "order:" << order << ";"; - - return ss.str(); - } -}; - -#define RECORD_RESERVER_PAIR_NUM 2 -struct IndexNode { - bool is_leaf; - int key_num; - PageNum parent; - PageNum prev_brother; // valid when is_leaf = true - PageNum next_brother; // valid when is_leaf = true - /** - * leaf can store order keys and rids at most - * internal node just store order -1 keys and order rids, the last rid is last rght child. - */ - char *keys; - /** - * In the node which isn't leaf, the rids point to child's page, - * rids[i] is keys[i]'s left child, rids[key_num] is the last right child. - * In the node which is leaf, the rids point to record's rid. - */ - RID *rids; - - void init_empty(IndexFileHeader &file_header) - { - is_leaf = true; - key_num = 0; - parent = -1; - prev_brother = -1; - next_brother = -1; - keys = (char *)(this + 1); - rids = (RID *)(keys + (file_header.order + RECORD_RESERVER_PAIR_NUM) * file_header.key_length); - } - - std::string to_string(IndexFileHeader &file_header) - { - std::stringstream ss; - - ss << "is_leaf:" << is_leaf << "," - << "key_num:" << key_num << "," - << "parent:" << parent << "," - << "prev_brother:" << prev_brother << "," - << "next_brother:" << next_brother << ","; - - if (file_header.attr_type == INTS) { // CHARS, INTS, FLOATS - - ss << "start_key:" << *(int *)(keys) << "," - << "end_key:" << *(int *)(keys + (key_num - 1) * file_header.key_length) << ";"; - } else if (file_header.attr_type == FLOATS) { - ss << "start_key:" << *(float *)(keys) << "," - << "end_key:" << *(float *)(keys + (key_num - 1) * file_header.key_length) << ";"; - } else if (file_header.attr_type == CHARS) { - char *temp = (char *)malloc(file_header.attr_length + 1); - memset(temp, 0, file_header.attr_length + 1); - memcpy(temp, keys, file_header.attr_length); - ss << "start_key:" << temp << ","; - memcpy(temp, keys + (key_num - 1) * file_header.key_length, file_header.attr_length); - ss << "end_key:" << temp << ";"; - - free(temp); - } else { - ss << "Unkown key range." 
<< std::endl; - } - - return ss.str(); - } -}; - -class BplusTreeHandler { -public: - /** - * 此函数创建一个名为fileName的索引。 - * attrType描述被索引属性的类型,attrLength描述被索引属性的长度 - */ - RC create(const char *file_name, AttrType attr_type, int attr_length); - - /** - * 打开名为fileName的索引文件。 - * 如果方法调用成功,则indexHandle为指向被打开的索引句柄的指针。 - * 索引句柄用于在索引中插入或删除索引项,也可用于索引的扫描 - */ - RC open(const char *file_name); - - /** - * 关闭句柄indexHandle对应的索引文件 - */ - RC close(); - - /** - * 此函数向IndexHandle对应的索引中插入一个索引项。 - * 参数pData指向要插入的属性值,参数rid标识该索引项对应的元组, - * 即向索引中插入一个值为(*pData,rid)的键值对 - */ - RC insert_entry(const char *pkey, const RID *rid); - - /** - * 从IndexHandle句柄对应的索引中删除一个值为(*pData,rid)的索引项 - * @return RECORD_INVALID_KEY 指定值不存在 - */ - RC delete_entry(const char *pkey, const RID *rid); - - /** - * 获取指定值的record - * @param rid 返回值,记录记录所在的页面号和slot - */ - RC get_entry(const char *pkey, std::list &rids); - - RC sync(); - - const int get_file_id() - { - return file_id_; - } - - /** - * Check whether current B+ tree is invalid or not. - * return true means current tree is valid, return false means current tree is invalid. - * @return - */ - bool validate_tree(); - -public: - RC print_tree(); - RC print_node(IndexNode *node, PageNum page_num); - RC print_leafs(); - - bool validate_node(IndexNode *node); - bool validate_leaf_link(); - -protected: - RC find_leaf(const char *pkey, PageNum *leaf_page); - - RC insert_into_parent( - PageNum parent_page, BPPageHandle &left_page_handle, const char *pkey, BPPageHandle &right_page_handle); - RC insert_intern_node(BPPageHandle &parent_page_handle, BPPageHandle &left_page_handle, - BPPageHandle &right_page_handle, const char *pkey); - RC split_leaf(BPPageHandle &leaf_page_handle); - RC split_intern_node(BPPageHandle &parent_page_handle, const char *pkey); - - RC delete_entry_internal(PageNum page_num, const char *pkey); - RC coalesce_node(BPPageHandle &parent_handle, BPPageHandle &left_handle, BPPageHandle &right_handle, int delete_index, - bool check_change_leaf_key, int node_delete_index, const char *pkey); - - RC insert_into_new_root(BPPageHandle &left_page_handle, const char *pkey, BPPageHandle &right_page_handle); - RC clean_root_after_delete(IndexNode *old_root); - - RC insert_entry_into_node(IndexNode *node, const char *pkey, const RID *rid, PageNum left_page); - RC delete_entry_from_node(IndexNode *node, const char *pkey, int &node_delete_index); - void delete_entry_from_node(IndexNode *node, const int delete_index); - - RC redistribute_nodes(BPPageHandle &parent_handle, BPPageHandle &left_handle, BPPageHandle &right_handle); - void redistribute_nodes( - IndexNode *left_node, IndexNode *right_node, PageNum left_page, PageNum right_page, char *new_key); - void merge_nodes(IndexNode *left_node, IndexNode *right_node, PageNum left_page, char *parent_key); - RC can_merge_with_other(BPPageHandle *page_handle, PageNum page_num, bool *can_merge); - void split_node( - IndexNode *left_node, IndexNode *right_node, PageNum left_page, PageNum right_page, char *new_parent_key); - void copy_node(IndexNode *to, IndexNode *from); - - void get_entry_from_leaf(IndexNode *node, const char *pkey, std::list &rids, bool &continue_check); - RC find_first_index_satisfied(CompOp comp_op, const char *pkey, PageNum *page_num, int *rididx); - RC get_first_leaf_page(PageNum *leaf_page); - - IndexNode *get_index_node(char *page_data) const; - void swith_root(BPPageHandle &new_root_page_handle, IndexNode *root, PageNum root_page); - - void change_children_parent(RID *rid, int rid_len, PageNum new_parent_page); - RC 
get_parent_changed_index( - BPPageHandle &parent_handle, IndexNode *&parent, IndexNode *node, PageNum page_num, int &changed_index); - RC change_leaf_parent_key_insert(IndexNode *node, int changed_indx, PageNum page_num); - RC change_leaf_parent_key_delete(IndexNode *leaf, int delete_indx, const char *old_first_key); - RC change_insert_leaf_link(IndexNode *left, IndexNode *right, PageNum right_page); - RC change_delete_leaf_link(IndexNode *left, IndexNode *right, PageNum right_page); - -protected: - DiskBufferPool *disk_buffer_pool_ = nullptr; - int file_id_ = -1; - bool header_dirty_ = false; - IndexFileHeader file_header_; - - BPPageHandle root_page_handle_; - IndexNode *root_node_ = nullptr; - - common::MemPoolItem *mem_pool_item_ = nullptr; - -private: - friend class BplusTreeScanner; - friend class BplusTreeTester; -}; - -class BplusTreeScanner { -public: - BplusTreeScanner(BplusTreeHandler &index_handler); - - /** - * 用于在indexHandle对应的索引上初始化一个基于条件的扫描。 - * compOp和*value指定比较符和比较值,indexScan为初始化后的索引扫描结构指针 - * 没有带两个边界的范围扫描 - */ - RC open(CompOp comp_op, const char *value); - - /** - * 用于继续索引扫描,获得下一个满足条件的索引项, - * 并返回该索引项对应的记录的ID - */ - RC next_entry(RID *rid); - - /** - * 关闭一个索引扫描,释放相应的资源 - */ - RC close(); - - /** - * 获取由fileName指定的B+树索引内容,返回指向B+树的指针。 - * 此函数提供给测试程序调用,用于检查B+树索引内容的正确性 - */ - // RC getIndexTree(char *fileName, Tree *index); - -private: - RC get_next_idx_in_memory(RID *rid); - RC find_idx_pages(); - bool satisfy_condition(const char *key); - -private: - BplusTreeHandler &index_handler_; - bool opened_ = false; - CompOp comp_op_ = NO_OP; // 用于比较的操作符 - const char *value_ = nullptr; // 与属性行比较的值 - int num_fixed_pages_ = -1; // 固定在缓冲区中的页,与指定的页面固定策略有关 - int pinned_page_count_ = 0; // 实际固定在缓冲区的页面数 - BPPageHandle page_handles_[BP_BUFFER_SIZE]; // 固定在缓冲区页面所对应的页面操作列表 - int next_index_of_page_handle_ = -1; // 当前被扫描页面的操作索引 - int index_in_node_ = -1; // 当前B+ Tree页面上的key index - PageNum next_page_num_ = -1; // 下一个将要被读入的页面号 -}; - -class BplusTreeTester { -public: - BplusTreeTester(BplusTreeHandler &index_handler) : index_handler_(index_handler) - {} - ~BplusTreeTester() = default; - - void set_order(int order) - { - if (order >= 2 && order % 2 == 0) { - index_handler_.file_header_.order = order; - LOG_INFO("Successfully set index %d's order as %d", index_handler_.file_id_, order); - } else { - LOG_INFO("Invalid input order argument %d", order); - } - } - - const int get_oder() - { - return index_handler_.file_header_.order; - } - -protected: - BplusTreeHandler &index_handler_; -}; - -#endif //__OBSERVER_STORAGE_COMMON_INDEX_MANAGER_H_ \ No newline at end of file diff --git a/src/observer/storage/common/condition_filter.cpp b/src/observer/storage/common/condition_filter.cpp index baf8fa989912c5b3230f14b5c13ba985d55c5448..1f19516e8e86720c535ba40ac0aaf7311ee63a0a 100644 --- a/src/observer/storage/common/condition_filter.cpp +++ b/src/observer/storage/common/condition_filter.cpp @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/7. +// Created by Meiyi & Wangyunlai on 2021/5/7. // #include diff --git a/src/observer/storage/common/condition_filter.h b/src/observer/storage/common/condition_filter.h index b72bee3775886d783448a2c8038603a9d19ad173..6f644247216fc0704a55e3552482ea7f8611115f 100644 --- a/src/observer/storage/common/condition_filter.h +++ b/src/observer/storage/common/condition_filter.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. 
*/ // -// Created by Wangyunlai on 2021/5/7. +// Created by Meiyi & Wangyunlai on 2021/5/7. // #ifndef __OBSERVER_STORAGE_COMMON_CONDITION_FILTER_H_ @@ -22,10 +22,10 @@ struct Record; class Table; struct ConDesc { - bool is_attr; // 是否属性,false 表示是值 - int attr_length; // 如果是属性,表示属性值长度 - int attr_offset; // 如果是属性,表示在记录中的偏移量 - void * value; // 如果是值类型,这里记录值的数据 + bool is_attr; // 是否属性,false 表示是值 + int attr_length; // 如果是属性,表示属性值长度 + int attr_offset; // 如果是属性,表示在记录中的偏移量 + void *value; // 如果是值类型,这里记录值的数据 }; class ConditionFilter { @@ -51,23 +51,26 @@ public: virtual bool filter(const Record &rec) const; public: - const ConDesc &left() const { + const ConDesc &left() const + { return left_; } - const ConDesc &right() const { + const ConDesc &right() const + { return right_; } - CompOp comp_op() const { + CompOp comp_op() const + { return comp_op_; } private: - ConDesc left_; - ConDesc right_; + ConDesc left_; + ConDesc right_; AttrType attr_type_ = UNDEFINED; - CompOp comp_op_ = NO_OP; + CompOp comp_op_ = NO_OP; }; class CompositeConditionFilter : public ConditionFilter { @@ -80,19 +83,22 @@ public: virtual bool filter(const Record &rec) const; public: - int filter_num() const { + int filter_num() const + { return filter_num_; } - const ConditionFilter &filter(int index) const { + const ConditionFilter &filter(int index) const + { return *filters_[index]; } private: RC init(const ConditionFilter *filters[], int filter_num, bool own_memory); + private: - const ConditionFilter ** filters_ = nullptr; - int filter_num_ = 0; - bool memory_owner_ = false; // filters_的内存是否由自己来控制 + const ConditionFilter **filters_ = nullptr; + int filter_num_ = 0; + bool memory_owner_ = false; // filters_的内存是否由自己来控制 }; -#endif // __OBSERVER_STORAGE_COMMON_CONDITION_FILTER_H_ \ No newline at end of file +#endif // __OBSERVER_STORAGE_COMMON_CONDITION_FILTER_H_ \ No newline at end of file diff --git a/src/observer/storage/common/db.cpp b/src/observer/storage/common/db.cpp index b8f06c2b1805657428a2732fe332f6b2e79b89d5..4a52d6b1ea9cebd15b3a1ff498e2b507fd9c30d6 100644 --- a/src/observer/storage/common/db.cpp +++ b/src/observer/storage/common/db.cpp @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/12. +// Created by Meiyi & Longda & Wangyunlai on 2021/5/12. // #include "storage/common/db.h" diff --git a/src/observer/storage/common/db.h b/src/observer/storage/common/db.h index 588b3df4cf69565ad7f0e001d6444c5f470d3efa..4095db3df790d974f9ee9d5710eb5ab7e1ab7e2b 100644 --- a/src/observer/storage/common/db.h +++ b/src/observer/storage/common/db.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/12. +// Created by Meiyi & Longda & Wangyunlai on 2021/5/12. 
// #ifndef __OBSERVER_STORAGE_COMMON_DB_H__ @@ -40,13 +40,14 @@ public: void all_tables(std::vector &table_names) const; RC sync(); + private: RC open_all_tables(); private: - std::string name_; - std::string path_; - std::unordered_map opened_tables_; + std::string name_; + std::string path_; + std::unordered_map opened_tables_; }; -#endif // __OBSERVER_STORAGE_COMMON_DB_H__ \ No newline at end of file +#endif // __OBSERVER_STORAGE_COMMON_DB_H__ \ No newline at end of file diff --git a/src/observer/storage/common/field_meta.cpp b/src/observer/storage/common/field_meta.cpp index daaf38ca1bd8102bc5d77618a0b5fde4ab0e8935..038cdd8077f5bb627099e1003ac1d45809b40086 100644 --- a/src/observer/storage/common/field_meta.cpp +++ b/src/observer/storage/common/field_meta.cpp @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/12. +// Created by Meiyi & Wangyunlai on 2021/5/12. // #include diff --git a/src/observer/storage/common/field_meta.h b/src/observer/storage/common/field_meta.h index 8ba6eeacf73cfbc209dea08917a828199e7a6f34..87d62e95f847f330ed0c4911e85f1ae751418c63 100644 --- a/src/observer/storage/common/field_meta.h +++ b/src/observer/storage/common/field_meta.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/12. +// Created by Meiyi & Wangyunlai on 2021/5/12. // #ifndef __OBSERVER_STORAGE_COMMON_FIELD_META_H__ diff --git a/src/observer/storage/common/index_meta.cpp b/src/observer/storage/common/index_meta.cpp index c08095d1ea5aa4991d3c320525415b4638b94f8a..a1860cd4c1be9b7a1a7f0ecb864e3d1285ec3b59 100644 --- a/src/observer/storage/common/index_meta.cpp +++ b/src/observer/storage/common/index_meta.cpp @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by wangyunlai.wyl on 2021/5/18. +// Created by Meiyi & Wangyunlai.wyl on 2021/5/18. // #include "storage/common/index_meta.h" diff --git a/src/observer/storage/common/index_meta.h b/src/observer/storage/common/index_meta.h index 5f437c328fede619d9e8799f247b6f49b3154dc9..06cdff1e37ffb98d8a60455416b2b9be135eacfa 100644 --- a/src/observer/storage/common/index_meta.h +++ b/src/observer/storage/common/index_meta.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/12. +// Created by Meiyi & Wangyunlai on 2021/5/12. // #ifndef __OBSERVER_STORAGE_COMMON_INDEX_META_H__ diff --git a/src/observer/storage/common/meta_util.cpp b/src/observer/storage/common/meta_util.cpp index c10e0df921409eb424c964cad6804e99c48da3c0..fccd8ad9c24b1c6d03eea3e59d6bcd24a45e6516 100644 --- a/src/observer/storage/common/meta_util.cpp +++ b/src/observer/storage/common/meta_util.cpp @@ -8,7 +8,7 @@ EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ -// Created by wangyunlai.wyl on 2021/5/18. +// Created by Meiyi & wangyunlai.wyl on 2021/5/18. 
// #include "common/defs.h" diff --git a/src/observer/storage/common/meta_util.h b/src/observer/storage/common/meta_util.h index d54aae7b2f732e904b893130011dae4f2c9ca622..1c4173da905f4986c53306ff47e21fdb5ecc184c 100644 --- a/src/observer/storage/common/meta_util.h +++ b/src/observer/storage/common/meta_util.h @@ -8,7 +8,7 @@ EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ -// Created by wangyunlai.wyl on 2021/5/18. +// Created by Meiyi & wangyunlai.wyl on 2021/5/18. // #ifndef __OBSERVER_STORAGE_COMMON_META_UTIL_H_ diff --git a/src/observer/storage/common/record_manager.cpp b/src/observer/storage/common/record_manager.cpp index 76a61d679a2b36231842518936967dd2dc9a53df..aaeab9153d6137bcfe5a83c8c0b7a1ccb3c1e826 100644 --- a/src/observer/storage/common/record_manager.cpp +++ b/src/observer/storage/common/record_manager.cpp @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Longda on 2021/4/13. +// Created by Meiyi & Longda on 2021/4/13. // #include "storage/common/record_manager.h" #include "rc.h" diff --git a/src/observer/storage/common/record_manager.h b/src/observer/storage/common/record_manager.h index 02c00bfc42b561a3d9a4f7ab8c53b47d1f0e1aff..1552129b43350a6a19d450222064e968117fc2cb 100644 --- a/src/observer/storage/common/record_manager.h +++ b/src/observer/storage/common/record_manager.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Longda on 2021/4/13. +// Created by Meiyi & Longda on 2021/4/13. // #ifndef __OBSERVER_STORAGE_COMMON_RECORD_MANAGER_H_ #define __OBSERVER_STORAGE_COMMON_RECORD_MANAGER_H_ @@ -17,7 +17,7 @@ See the Mulan PSL v2 for more details. */ #include #include "storage/default/disk_buffer_pool.h" -typedef int SlotNum; +typedef int32_t SlotNum; class ConditionFilter; @@ -48,6 +48,11 @@ struct RID { return page_num == other.page_num && slot_num == other.slot_num; } + bool operator!=(const RID &other) const + { + return !(*this == other); + } + static int compare(const RID *rid1, const RID *rid2) { int page_diff = rid1->page_num - rid2->page_num; @@ -57,6 +62,22 @@ struct RID { return rid1->slot_num - rid2->slot_num; } } + + /** + * 返回一个不可能出现的最小的RID + * 虽然page num 0和slot num 0都是合法的,但是page num 0通常用于存放meta数据,所以对数据部分来说都是 + * 不合法的. 这里在bplus tree中查找时会用到。 + */ + static RID *min() + { + static RID rid{0, 0}; + return &rid; + } + static RID *max() + { + static RID rid{std::numeric_limits::max(), std::numeric_limits::max()}; + return &rid; + } }; class RidDigest { @@ -222,4 +243,4 @@ private: RecordPageHandler record_page_handler_; }; -#endif //__OBSERVER_STORAGE_COMMON_RECORD_MANAGER_H_ \ No newline at end of file +#endif //__OBSERVER_STORAGE_COMMON_RECORD_MANAGER_H_ diff --git a/src/observer/storage/common/table.cpp b/src/observer/storage/common/table.cpp index 325487ff4db7a7f29744e80f743e67ad561b1d85..8b7ac695a21b28ae75237c8b3a9d8c16386ffef1 100644 --- a/src/observer/storage/common/table.cpp +++ b/src/observer/storage/common/table.cpp @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/13. +// Created by Meiyi & Wangyunlai on 2021/5/13. // #include @@ -25,8 +25,8 @@ See the Mulan PSL v2 for more details. 
*/ #include "storage/common/record_manager.h" #include "storage/common/condition_filter.h" #include "storage/common/meta_util.h" -#include "storage/common/index.h" -#include "storage/common/bplus_tree_index.h" +#include "storage/index/index.h" +#include "storage/index/bplus_tree_index.h" #include "storage/trx/trx.h" Table::Table() : data_buffer_pool_(nullptr), file_id_(-1), record_handler_(nullptr) @@ -768,7 +768,43 @@ IndexScanner *Table::find_index_for_scan(const DefaultConditionFilter &filter) return nullptr; } - return index->create_scanner(filter.comp_op(), (const char *)value_cond_desc->value); + const char *left_key = nullptr; + const char *right_key = nullptr; + bool left_inclusive = false; + bool right_inclusive = false; + switch (filter.comp_op()) { + case EQUAL_TO: { + left_key = (const char *)value_cond_desc->value; + right_key = (const char *)value_cond_desc->value; + left_inclusive = true; + right_inclusive = true; + } + break; + case LESS_EQUAL: { + right_key = (const char *)value_cond_desc->value; + right_inclusive = true; + } + break; + case GREAT_EQUAL: { + left_key = (const char *)value_cond_desc->value; + left_inclusive = true; + } + break; + case LESS_THAN: { + right_key = (const char *)value_cond_desc->value; + right_inclusive = false; + } + break; + case GREAT_THAN: { + left_key = (const char *)value_cond_desc->value; + left_inclusive = false; + } + break; + default: { + return nullptr; + } + } + return index->create_scanner(left_key, left_inclusive, right_key, right_inclusive); } IndexScanner *Table::find_index_for_scan(const ConditionFilter *filter) diff --git a/src/observer/storage/common/table.h b/src/observer/storage/common/table.h index 264e78f4199d4eddd4d131b082b2190fe0022e1e..e416be855e4b8f573e2ffa000afdbde3ab426296 100644 --- a/src/observer/storage/common/table.h +++ b/src/observer/storage/common/table.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/12. +// Created by Meiyi & Wangyunlai on 2021/5/12. 
// #ifndef __OBSERVER_STORAGE_COMMON_TABLE_H__ @@ -49,12 +49,14 @@ public: * @param base_dir 表所在的文件夹,表记录数据文件、索引数据文件存放位置 */ RC open(const char *meta_file, const char *base_dir); - + RC insert_record(Trx *trx, int value_num, const Value *values); - RC update_record(Trx *trx, const char *attribute_name, const Value *value, int condition_num, const Condition conditions[], int *updated_count); + RC update_record(Trx *trx, const char *attribute_name, const Value *value, int condition_num, + const Condition conditions[], int *updated_count); RC delete_record(Trx *trx, ConditionFilter *filter, int *deleted_count); - RC scan_record(Trx *trx, ConditionFilter *filter, int limit, void *context, void (*record_reader)(const char *data, void *context)); + RC scan_record(Trx *trx, ConditionFilter *filter, int limit, void *context, + void (*record_reader)(const char *data, void *context)); RC create_index(Trx *trx, const char *index_name, const char *attribute_name); @@ -72,8 +74,10 @@ public: RC rollback_delete(Trx *trx, const RID &rid); private: - RC scan_record(Trx *trx, ConditionFilter *filter, int limit, void *context, RC (*record_reader)(Record *record, void *context)); - RC scan_record_by_index(Trx *trx, IndexScanner *scanner, ConditionFilter *filter, int limit, void *context, RC (*record_reader)(Record *record, void *context)); + RC scan_record( + Trx *trx, ConditionFilter *filter, int limit, void *context, RC (*record_reader)(Record *record, void *context)); + RC scan_record_by_index(Trx *trx, IndexScanner *scanner, ConditionFilter *filter, int limit, void *context, + RC (*record_reader)(Record *record, void *context)); IndexScanner *find_index_for_scan(const ConditionFilter *filter); IndexScanner *find_index_for_scan(const DefaultConditionFilter &filter); @@ -86,20 +90,21 @@ private: RC insert_entry_of_indexes(const char *record, const RID &rid); RC delete_entry_of_indexes(const char *record, const RID &rid, bool error_on_not_exists); + private: RC init_record_handler(const char *base_dir); - RC make_record(int value_num, const Value *values, char * &record_out); + RC make_record(int value_num, const Value *values, char *&record_out); private: Index *find_index(const char *index_name) const; private: - std::string base_dir_; - TableMeta table_meta_; - DiskBufferPool * data_buffer_pool_; /// 数据文件关联的buffer pool - int file_id_; - RecordFileHandler * record_handler_; /// 记录操作 - std::vector indexes_; + std::string base_dir_; + TableMeta table_meta_; + DiskBufferPool *data_buffer_pool_; /// 数据文件关联的buffer pool + int file_id_; + RecordFileHandler *record_handler_; /// 记录操作 + std::vector indexes_; }; -#endif // __OBSERVER_STORAGE_COMMON_TABLE_H__ \ No newline at end of file +#endif // __OBSERVER_STORAGE_COMMON_TABLE_H__ \ No newline at end of file diff --git a/src/observer/storage/common/table_meta.cpp b/src/observer/storage/common/table_meta.cpp index f5934a4610d82f5422eff15e1b5cb3e07d010850..70c88b96ab5ce9b97a73f6120e4ef13869610929 100644 --- a/src/observer/storage/common/table_meta.cpp +++ b/src/observer/storage/common/table_meta.cpp @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/12. +// Created by Meiyi & Wangyunlai on 2021/5/12. 
// #include diff --git a/src/observer/storage/common/table_meta.h b/src/observer/storage/common/table_meta.h index 21c510e2dabb9f6fd65dc2b5c81fefb9cd0d6345..2a2de6df219c124e1733d2b13744d96294a623ed 100644 --- a/src/observer/storage/common/table_meta.h +++ b/src/observer/storage/common/table_meta.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/12. +// Created by Meiyi & Wangyunlai on 2021/5/12. // #ifndef __OBSERVER_STORAGE_COMMON_TABLE_META_H__ diff --git a/src/observer/storage/default/default_handler.cpp b/src/observer/storage/default/default_handler.cpp index 34a64176fa90008eed72c74647849dca4c875029..87f5824bd9ac26f6aab954c23948de5ade863d86 100644 --- a/src/observer/storage/default/default_handler.cpp +++ b/src/observer/storage/default/default_handler.cpp @@ -9,10 +9,9 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Longda on 2021/4/13. +// Created by Meiyi & Longda on 2021/4/13. // - #include "storage/default/default_handler.h" #include @@ -21,23 +20,26 @@ See the Mulan PSL v2 for more details. */ #include "common/log/log.h" #include "common/lang/string.h" #include "storage/common/record_manager.h" -#include "storage/common/bplus_tree.h" +#include "storage/index/bplus_tree.h" #include "storage/common/table.h" #include "storage/common/condition_filter.h" -DefaultHandler &DefaultHandler::get_default() { +DefaultHandler &DefaultHandler::get_default() +{ static DefaultHandler handler; return handler; } -DefaultHandler::DefaultHandler() { -} +DefaultHandler::DefaultHandler() +{} -DefaultHandler::~DefaultHandler() noexcept { +DefaultHandler::~DefaultHandler() noexcept +{ destroy(); } -RC DefaultHandler::init(const char *base_dir) { +RC DefaultHandler::init(const char *base_dir) +{ // 检查目录是否存在,或者创建 std::string tmp(base_dir); tmp += "/db"; @@ -53,16 +55,18 @@ RC DefaultHandler::init(const char *base_dir) { return RC::SUCCESS; } -void DefaultHandler::destroy() { +void DefaultHandler::destroy() +{ sync(); - for (const auto & iter : opened_dbs_) { + for (const auto &iter : opened_dbs_) { delete iter.second; } opened_dbs_.clear(); } -RC DefaultHandler::create_db(const char *dbname) { +RC DefaultHandler::create_db(const char *dbname) +{ if (nullptr == dbname || common::is_blank(dbname)) { LOG_WARN("Invalid db name"); return RC::INVALID_ARGUMENT; @@ -77,16 +81,18 @@ RC DefaultHandler::create_db(const char *dbname) { if (!common::check_directory(dbpath)) { LOG_ERROR("Create db fail: %s", dbpath.c_str()); - return RC::GENERIC_ERROR; // io error + return RC::GENERIC_ERROR; // io error } return RC::SUCCESS; } -RC DefaultHandler::drop_db(const char *dbname) { +RC DefaultHandler::drop_db(const char *dbname) +{ return RC::GENERIC_ERROR; } -RC DefaultHandler::open_db(const char *dbname) { +RC DefaultHandler::open_db(const char *dbname) +{ if (nullptr == dbname || common::is_blank(dbname)) { LOG_WARN("Invalid db name"); return RC::INVALID_ARGUMENT; @@ -111,15 +117,19 @@ RC DefaultHandler::open_db(const char *dbname) { return RC::SUCCESS; } -RC DefaultHandler::close_db(const char *dbname) { +RC DefaultHandler::close_db(const char *dbname) +{ return RC::GENERIC_ERROR; } -RC DefaultHandler::execute(const char *sql) { +RC DefaultHandler::execute(const char *sql) +{ return RC::GENERIC_ERROR; } -RC DefaultHandler::create_table(const char *dbname, const char *relation_name, int attribute_count, const AttrInfo *attributes) { +RC 
DefaultHandler::create_table( + const char *dbname, const char *relation_name, int attribute_count, const AttrInfo *attributes) +{ Db *db = find_db(dbname); if (db == nullptr) { return RC::SCHEMA_DB_NOT_OPENED; @@ -127,11 +137,14 @@ RC DefaultHandler::create_table(const char *dbname, const char *relation_name, i return db->create_table(relation_name, attribute_count, attributes); } -RC DefaultHandler::drop_table(const char *dbname, const char *relation_name) { +RC DefaultHandler::drop_table(const char *dbname, const char *relation_name) +{ return RC::GENERIC_ERROR; } -RC DefaultHandler::create_index(Trx *trx, const char *dbname, const char *relation_name, const char *index_name, const char *attribute_name) { +RC DefaultHandler::create_index( + Trx *trx, const char *dbname, const char *relation_name, const char *index_name, const char *attribute_name) +{ Table *table = find_table(dbname, relation_name); if (nullptr == table) { return RC::SCHEMA_TABLE_NOT_EXIST; @@ -139,12 +152,15 @@ RC DefaultHandler::create_index(Trx *trx, const char *dbname, const char *relati return table->create_index(trx, index_name, attribute_name); } -RC DefaultHandler::drop_index(Trx *trx, const char *dbname, const char *relation_name, const char *index_name) { +RC DefaultHandler::drop_index(Trx *trx, const char *dbname, const char *relation_name, const char *index_name) +{ return RC::GENERIC_ERROR; } -RC DefaultHandler::insert_record(Trx *trx, const char *dbname, const char *relation_name, int value_num, const Value *values) { +RC DefaultHandler::insert_record( + Trx *trx, const char *dbname, const char *relation_name, int value_num, const Value *values) +{ Table *table = find_table(dbname, relation_name); if (nullptr == table) { return RC::SCHEMA_TABLE_NOT_EXIST; @@ -152,8 +168,9 @@ RC DefaultHandler::insert_record(Trx *trx, const char *dbname, const char *relat return table->insert_record(trx, value_num, values); } -RC DefaultHandler::delete_record(Trx *trx, const char *dbname, const char *relation_name, - int condition_num, const Condition *conditions, int *deleted_count) { +RC DefaultHandler::delete_record(Trx *trx, const char *dbname, const char *relation_name, int condition_num, + const Condition *conditions, int *deleted_count) +{ Table *table = find_table(dbname, relation_name); if (nullptr == table) { return RC::SCHEMA_TABLE_NOT_EXIST; @@ -167,8 +184,9 @@ RC DefaultHandler::delete_record(Trx *trx, const char *dbname, const char *relat return table->delete_record(trx, &condition_filter, deleted_count); } -RC DefaultHandler::update_record(Trx *trx, const char *dbname, const char *relation_name, const char *attribute_name, const Value *value, - int condition_num, const Condition *conditions, int *updated_count) { +RC DefaultHandler::update_record(Trx *trx, const char *dbname, const char *relation_name, const char *attribute_name, + const Value *value, int condition_num, const Condition *conditions, int *updated_count) +{ Table *table = find_table(dbname, relation_name); if (nullptr == table) { return RC::SCHEMA_TABLE_NOT_EXIST; @@ -177,15 +195,17 @@ RC DefaultHandler::update_record(Trx *trx, const char *dbname, const char *relat return table->update_record(trx, attribute_name, value, condition_num, conditions, updated_count); } -Db *DefaultHandler::find_db(const char *dbname) const { - std::map::const_iterator iter = opened_dbs_.find(dbname); +Db *DefaultHandler::find_db(const char *dbname) const +{ + std::map::const_iterator iter = opened_dbs_.find(dbname); if (iter == opened_dbs_.end()) { return nullptr; } 
return iter->second; } -Table *DefaultHandler::find_table(const char *dbname, const char *table_name) const { +Table *DefaultHandler::find_table(const char *dbname, const char *table_name) const +{ if (dbname == nullptr || table_name == nullptr) { LOG_WARN("Invalid argument. dbname=%p, table_name=%p", dbname, table_name); return nullptr; @@ -198,9 +218,10 @@ Table *DefaultHandler::find_table(const char *dbname, const char *table_name) co return db->find_table(table_name); } -RC DefaultHandler::sync() { +RC DefaultHandler::sync() +{ RC rc = RC::SUCCESS; - for (const auto & db_pair: opened_dbs_) { + for (const auto &db_pair : opened_dbs_) { Db *db = db_pair.second; rc = db->sync(); if (rc != RC::SUCCESS) { @@ -209,4 +230,4 @@ RC DefaultHandler::sync() { } } return rc; -} \ No newline at end of file +} diff --git a/src/observer/storage/default/default_handler.h b/src/observer/storage/default/default_handler.h index b1c144bddb5d90546914b93525b05f27d164ce1b..cac9ff1ea92f4c191f7b21ae46eda9c8edba0a2c 100644 --- a/src/observer/storage/default/default_handler.h +++ b/src/observer/storage/default/default_handler.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Longda on 2021/5/11. +// Created by Meiyi & Longda on 2021/5/11. // #ifndef __OBSERVER_STORAGE_DEFAULT_ENGINE_H__ #define __OBSERVER_STORAGE_DEFAULT_ENGINE_H__ @@ -102,12 +102,13 @@ public: * @param attrName * @return */ - RC create_index(Trx *trx, const char *dbname, const char *relation_name, const char *index_name, const char *attribute_name); + RC create_index( + Trx *trx, const char *dbname, const char *relation_name, const char *index_name, const char *attribute_name); /** * 该函数用来删除名为indexName的索引。 * 函数首先检查索引是否存在,如果不存在,则返回一个非零的错误码。否则,销毁该索引 - * @param index_name + * @param index_name * @return */ RC drop_index(Trx *trx, const char *dbname, const char *relation_name, const char *index_name); @@ -122,7 +123,7 @@ public: * @param values * @return */ - RC insert_record(Trx * trx, const char *dbname, const char *relation_name, int value_num, const Value *values); + RC insert_record(Trx *trx, const char *dbname, const char *relation_name, int value_num, const Value *values); /** * 该函数用来删除relName表中所有满足指定条件的元组以及该元组对应的索引项。 @@ -133,8 +134,8 @@ public: * @param conditions * @return */ - RC delete_record(Trx *trx, const char *dbname, const char *relation_name, - int condition_num, const Condition *conditions, int *deleted_count); + RC delete_record(Trx *trx, const char *dbname, const char *relation_name, int condition_num, + const Condition *conditions, int *deleted_count); /** * 该函数用于更新relName表中所有满足指定条件的元组, @@ -148,21 +149,22 @@ public: * @param conditions * @return */ - RC update_record(Trx * trx, const char *dbname, const char *relation_name, const char *attribute_name, const Value *value, - int condition_num, const Condition *conditions, int *updated_count); + RC update_record(Trx *trx, const char *dbname, const char *relation_name, const char *attribute_name, + const Value *value, int condition_num, const Condition *conditions, int *updated_count); public: Db *find_db(const char *dbname) const; - Table *find_table(const char * dbname, const char *table_name) const; + Table *find_table(const char *dbname, const char *table_name) const; RC sync(); public: static DefaultHandler &get_default(); + private: std::string base_dir_; std::string db_dir_; - std::map opened_dbs_; -}; // class Handler + std::map opened_dbs_; +}; // class Handler -#endif // 
__OBSERVER_STORAGE_DEFAULT_ENGINE_H__ \ No newline at end of file +#endif // __OBSERVER_STORAGE_DEFAULT_ENGINE_H__ \ No newline at end of file diff --git a/src/observer/storage/default/default_storage_stage.cpp b/src/observer/storage/default/default_storage_stage.cpp index 01368d8c1c23f5481957c82c41f24b0dc58535ed..539ea10a5c12d5c0a91c030ac6557116888bc2f1 100644 --- a/src/observer/storage/default/default_storage_stage.cpp +++ b/src/observer/storage/default/default_storage_stage.cpp @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Longda on 2021/4/13. +// Created by Meiyi & Longda on 2021/4/13. // #include diff --git a/src/observer/storage/default/default_storage_stage.h b/src/observer/storage/default/default_storage_stage.h index 9424e8586a1d3055dc23c84c8b44ac4a8eb546bd..e55e1f189c7c2afe9313a578f10406a6eecf180c 100644 --- a/src/observer/storage/default/default_storage_stage.h +++ b/src/observer/storage/default/default_storage_stage.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Longda on 2021/4/13. +// Created by Meiyi & Longda on 2021/4/13. // #ifndef __OBSERVER_STORAGE_DEFAULT_STORAGE_STAGE_H__ @@ -33,8 +33,7 @@ protected: bool initialize() override; void cleanup() override; void handle_event(common::StageEvent *event) override; - void callback_event(common::StageEvent *event, - common::CallbackContext *context) override; + void callback_event(common::StageEvent *event, common::CallbackContext *context) override; private: std::string load_data(const char *db_name, const char *table_name, const char *file_name); @@ -44,7 +43,7 @@ protected: static const std::string QUERY_METRIC_TAG; private: - DefaultHandler * handler_; + DefaultHandler *handler_; }; -#endif //__OBSERVER_STORAGE_DEFAULT_STORAGE_STAGE_H__ +#endif //__OBSERVER_STORAGE_DEFAULT_STORAGE_STAGE_H__ diff --git a/src/observer/storage/default/disk_buffer_pool.cpp b/src/observer/storage/default/disk_buffer_pool.cpp index b1ae3bf917a0ae651e104e272a947373b6248dad..6bbc66357bc3b941868cfa91164ffe75ed2802e8 100644 --- a/src/observer/storage/default/disk_buffer_pool.cpp +++ b/src/observer/storage/default/disk_buffer_pool.cpp @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Longda on 2021/4/13. +// Created by Meiyi & Longda on 2021/4/13. 
// #include "disk_buffer_pool.h" #include @@ -478,7 +478,7 @@ RC DiskBufferPool::purge_page(int file_id, PageNum page_num) RC DiskBufferPool::purge_page(Frame *buf) { if (buf->pin_count > 0) { - LOG_INFO("Begin to free page %d of %d, but it's pinned, pin_count:%d.", + LOG_INFO("Begin to free page %d of %d(file id), but it's pinned, pin_count:%d.", buf->page.page_num, buf->file_desc, buf->pin_count); @@ -488,12 +488,12 @@ RC DiskBufferPool::purge_page(Frame *buf) if (buf->dirty) { RC rc = flush_page(buf); if (rc != RC::SUCCESS) { - LOG_WARN("Failed to flush page %d of %d during purge page.", buf->page.page_num, buf->file_desc); + LOG_WARN("Failed to flush page %d of %d(file desc) during purge page.", buf->page.page_num, buf->file_desc); return rc; } } - LOG_DEBUG("Successfully purge frame =%p, page %d of %d", buf, buf->page.page_num, buf->file_desc); + LOG_DEBUG("Successfully purge frame =%p, page %d of %d(file desc)", buf, buf->page.page_num, buf->file_desc); bp_manager_.free(buf); return RC::SUCCESS; } @@ -533,7 +533,8 @@ RC DiskBufferPool::purge_all_pages(BPFileHandle *file_handle) for (std::list::iterator it = used.begin(); it != used.end(); ++it) { Frame *frame = *it; if (frame->pin_count > 0) { - LOG_WARN("The page has been pinned, file_id:%d, pagenum:%d", frame->file_desc, frame->page.page_num); + LOG_WARN("The page has been pinned, file_desc:%d, pagenum:%d, pin_count=%d", + frame->file_desc, frame->page.page_num, frame->pin_count); continue; } if (frame->dirty) { @@ -548,6 +549,29 @@ RC DiskBufferPool::purge_all_pages(BPFileHandle *file_handle) return RC::SUCCESS; } +RC DiskBufferPool::check_all_pages_unpinned(int file_id) +{ + RC rc = check_file_id(file_id); + if (rc != RC::SUCCESS) { + LOG_ERROR("Failed to flush pages due to invalid file_id %d", file_id); + return rc; + } + + BPFileHandle *file_handle = open_list_[file_id]; + std::list frames = bp_manager_.find_list(file_handle->file_desc); + for (auto & frame : frames) { + if (frame->page.page_num == 0 && frame->pin_count > 1) { + LOG_WARN("This page has been pinned. file id=%d, page num:%d, pin count=%d", + file_id, frame->page.page_num, frame->pin_count); + } else if (frame->page.page_num != 0 && frame->pin_count > 0) { + LOG_WARN("This page has been pinned. file id=%d, page num:%d, pin count=%d", + file_id, frame->page.page_num, frame->pin_count); + } + } + LOG_INFO("all pages have been checked of file id %d", file_id); + return RC::SUCCESS; +} + RC DiskBufferPool::flush_page(Frame *frame) { // The better way is use mmap the block into memory, diff --git a/src/observer/storage/default/disk_buffer_pool.h b/src/observer/storage/default/disk_buffer_pool.h index 4a68ad2f33ec864253c7a6ba6da9814c15985677..90881919a9379b9ee85cae8c85218a112eca4293 100644 --- a/src/observer/storage/default/disk_buffer_pool.h +++ b/src/observer/storage/default/disk_buffer_pool.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Longda on 2021/4/13. +// Created by Meiyi & Longda on 2021/4/13. 
// #ifndef __OBSERVER_STORAGE_COMMON_PAGE_MANAGER_H_ #define __OBSERVER_STORAGE_COMMON_PAGE_MANAGER_H_ @@ -69,6 +69,16 @@ typedef struct BPPageHandle { BPPageHandle() : open(false), frame(nullptr) {} + PageNum page_num() const { + return frame->page.page_num; + } + void mark_dirty() { + this->frame->dirty = true; + } + + char *data() { + return this->frame->page.data; + } bool open; Frame *frame; } BPPageHandle; @@ -111,10 +121,9 @@ public: if (pool_num > 0) { POOL_NUM = pool_num; LOG_INFO("Successfully set POOL_NUM as %d", pool_num); - }else { + } else { LOG_INFO("Invalid input argument pool_num:%d", pool_num); } - } static const int get_pool_num() @@ -196,6 +205,8 @@ public: RC purge_all_pages(int file_id); + RC check_all_pages_unpinned(int file_id); + protected: RC allocate_page(Frame **buf); diff --git a/src/observer/storage/index/bplus_tree.cpp b/src/observer/storage/index/bplus_tree.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ee6a5f2cfc86f5d6c5215ebbc8eae5b0950dc2d3 --- /dev/null +++ b/src/observer/storage/index/bplus_tree.cpp @@ -0,0 +1,1944 @@ +/* Copyright (c) 2021 Xie Meiyi(xiemeiyi@hust.edu.cn) and OceanBase and/or its affiliates. All rights reserved. +miniob is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. */ + +// +// Created by Xie Meiyi +// Rewritten by Longda & Wangyunlai +// +#include "storage/index/bplus_tree.h" +#include "storage/default/disk_buffer_pool.h" +#include "rc.h" +#include "common/log/log.h" +#include "sql/parser/parse_defs.h" + +#define FIRST_INDEX_PAGE 1 + +int calc_internal_page_capacity(int attr_length) +{ + int item_size = attr_length + sizeof(RID) + sizeof(PageNum); + + int capacity = + ((int)BP_PAGE_DATA_SIZE - InternalIndexNode::HEADER_SIZE) / item_size; + return capacity; +} + +int calc_leaf_page_capacity(int attr_length) +{ + int item_size = attr_length + sizeof(RID) + sizeof(RID); + int capacity = + ((int)BP_PAGE_DATA_SIZE - LeafIndexNode::HEADER_SIZE) / item_size; + return capacity; +} + +///////////////////////////////////////////////////////////////////////////////// +IndexNodeHandler::IndexNodeHandler(const IndexFileHeader &header, BPPageHandle &page_handle) + : header_(header), page_num_(page_handle.page_num()), node_((IndexNode *)page_handle.data()) +{} + +bool IndexNodeHandler::is_leaf() const +{ + return node_->is_leaf; +} +void IndexNodeHandler::init_empty(bool leaf) +{ + node_->is_leaf = leaf; + node_->key_num = 0; + node_->parent = BP_INVALID_PAGE_NUM; +} +PageNum IndexNodeHandler::page_num() const +{ + return page_num_; +} + +int IndexNodeHandler::key_size() const +{ + return header_.key_length; +} + +int IndexNodeHandler::value_size() const +{ + // return header_.value_size; + return sizeof(RID); +} + +int IndexNodeHandler::item_size() const +{ + return key_size() + value_size(); +} + +int IndexNodeHandler::size() const +{ + return node_->key_num; +} + +void IndexNodeHandler::increase_size(int n) +{ + node_->key_num += n; +} + +PageNum IndexNodeHandler::parent_page_num() const +{ + return node_->parent; +} + +void IndexNodeHandler::set_parent_page_num(PageNum page_num) +{ + this->node_->parent = page_num; +} 
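// A minimal standalone sketch of the page-capacity arithmetic used by
// calc_leaf_page_capacity()/calc_internal_page_capacity() above: one leaf item is an
// (attr + RID) key plus a RID value, one internal item is the same key plus a PageNum
// child pointer, and the usable bytes are the page data size minus the node header.
// The concrete sizes below are illustrative assumptions only, not the real
// BP_PAGE_DATA_SIZE / HEADER_SIZE / RID definitions from the headers.
#include <cstdio>

static const int kPageDataSize       = 8192;  // assumed BP_PAGE_DATA_SIZE
static const int kRidSize            = 8;     // assumed sizeof(RID): page_num + slot_num
static const int kPageNumSize        = 4;     // assumed sizeof(PageNum)
static const int kLeafHeaderSize     = 48;    // assumed LeafIndexNode::HEADER_SIZE
static const int kInternalHeaderSize = 40;    // assumed InternalIndexNode::HEADER_SIZE

// Mirrors calc_leaf_page_capacity(): key = attr + RID, value = RID.
static int leaf_capacity(int attr_length)
{
  int item_size = attr_length + kRidSize + kRidSize;
  return (kPageDataSize - kLeafHeaderSize) / item_size;
}

// Mirrors calc_internal_page_capacity(): key = attr + RID, value = PageNum.
static int internal_capacity(int attr_length)
{
  int item_size = attr_length + kRidSize + kPageNumSize;
  return (kPageDataSize - kInternalHeaderSize) / item_size;
}

int main()
{
  // For a 4-byte INT attribute: (8192 - 48) / 20 = 407 leaf items, (8192 - 40) / 16 = 509 children.
  printf("leaf capacity=%d, internal capacity=%d\n", leaf_capacity(4), internal_capacity(4));
  return 0;
}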
+std::string to_string(const IndexNodeHandler &handler) +{ + std::stringstream ss; + + ss << "PageNum:" << handler.page_num() + << ",is_leaf:" << handler.is_leaf() << "," + << "key_num:" << handler.size() << "," + << "parent:" << handler.parent_page_num() << ","; + + return ss.str(); +} + +bool IndexNodeHandler::validate() const +{ + if (parent_page_num() == BP_INVALID_PAGE_NUM) { + // this is a root page + if (size() < 1) { + LOG_WARN("root page has no item"); + return false; + } + + if (!is_leaf() && size() < 2) { + LOG_WARN("root page internal node has less than 2 child. size=%d", size()); + return false; + } + } + return true; +} + +///////////////////////////////////////////////////////////////////////////////// +LeafIndexNodeHandler::LeafIndexNodeHandler(const IndexFileHeader &header, BPPageHandle &page_handle) + : IndexNodeHandler(header, page_handle), leaf_node_((LeafIndexNode *)page_handle.data()) +{} + +void LeafIndexNodeHandler::init_empty() +{ + IndexNodeHandler::init_empty(true); + leaf_node_->prev_brother = BP_INVALID_PAGE_NUM; + leaf_node_->next_brother = BP_INVALID_PAGE_NUM; +} + +void LeafIndexNodeHandler::set_next_page(PageNum page_num) +{ + leaf_node_->next_brother = page_num; +} + +void LeafIndexNodeHandler::set_prev_page(PageNum page_num) +{ + leaf_node_->prev_brother = page_num; +} +PageNum LeafIndexNodeHandler::next_page() const +{ + return leaf_node_->next_brother; +} +PageNum LeafIndexNodeHandler::prev_page() const +{ + return leaf_node_->prev_brother; +} + +char *LeafIndexNodeHandler::key_at(int index) +{ + assert(index >= 0 && index < size()); + return __key_at(index); +} + +char *LeafIndexNodeHandler::value_at(int index) +{ + assert(index >= 0 && index < size()); + return __value_at(index); +} + +int LeafIndexNodeHandler::max_size() const +{ + return header_.leaf_max_size; +} + +int LeafIndexNodeHandler::min_size() const +{ + return header_.leaf_max_size - header_.leaf_max_size / 2; +} + +int LeafIndexNodeHandler::lookup(const KeyComparator &comparator, const char *key, bool *found /* = nullptr */) const +{ + const int size = this->size(); + int i = 0; + for ( ; i < size; i++) { + int result = comparator(key, __key_at(i)); + if (0 == result) { + if (found) { + *found = true; + } + return i; + } + if (result < 0) { + break; + } + } + if (found) { + *found = false; + } + return i; +} + +void LeafIndexNodeHandler::insert(int index, const char *key, const char *value) +{ + if (index < size()) { + memmove(__item_at(index + 1), __item_at(index), (size() - index) * item_size()); + } + memcpy(__item_at(index), key, key_size()); + memcpy(__item_at(index) + key_size(), value, value_size()); + increase_size(1); +} +void LeafIndexNodeHandler::remove(int index) +{ + assert(index >= 0 && index < size()); + if (index < size() - 1) { + memmove(__item_at(index), __item_at(index + 1), (size() - index - 1) * item_size()); + } + increase_size(-1); +} + +int LeafIndexNodeHandler::remove(const char *key, const KeyComparator &comparator) +{ + bool found = false; + int index = lookup(comparator, key, &found); + if (found) { + this->remove(index); + return 1; + } + return 0; +} + +RC LeafIndexNodeHandler::move_half_to(LeafIndexNodeHandler &other, DiskBufferPool *bp, int file_id) +{ + const int size = this->size(); + const int move_index = size / 2; + + memcpy(other.__item_at(0), this->__item_at(move_index), item_size() * (size - move_index)); + other.increase_size(size - move_index); + this->increase_size(- ( size - move_index)); + return RC::SUCCESS; +} +RC 
LeafIndexNodeHandler::move_first_to_end(LeafIndexNodeHandler &other, DiskBufferPool *disk_buffer_pool, int file_id) +{ + other.append(__item_at(0)); + + if (size() >= 1) { + memmove(__item_at(0), __item_at(1), (size() - 1) * item_size() ); + } + increase_size(-1); + return RC::SUCCESS; +} + +RC LeafIndexNodeHandler::move_last_to_front(LeafIndexNodeHandler &other, DiskBufferPool *bp, int file_id) +{ + other.preappend(__item_at(size() - 1)); + + increase_size(-1); + return RC::SUCCESS; +} +/** + * move all items to left page + */ +RC LeafIndexNodeHandler::move_to(LeafIndexNodeHandler &other, DiskBufferPool *bp, int file_id) +{ + memcpy(other.__item_at(other.size()), this->__item_at(0), this->size() * item_size()); + other.increase_size(this->size()); + this->increase_size(- this->size()); + + other.set_next_page(this->next_page()); + + PageNum next_right_page_num = this->next_page(); + if (next_right_page_num != BP_INVALID_PAGE_NUM) { + BPPageHandle next_right_page_handle; + RC rc = bp->get_this_page(file_id, next_right_page_num, &next_right_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch next right page. page number:%d. rc=%d:%s", next_right_page_num, rc, strrc(rc)); + return rc; + } + + LeafIndexNodeHandler next_right_node(header_, next_right_page_handle); + next_right_node.set_prev_page(other.page_num()); + next_right_page_handle.mark_dirty(); + bp->unpin_page(&next_right_page_handle); + } + return RC::SUCCESS; +} + +void LeafIndexNodeHandler::append(const char *item) +{ + memcpy(__item_at(size()), item, item_size()); + increase_size(1); +} + +void LeafIndexNodeHandler::preappend(const char *item) +{ + if (size() > 0) { + memmove(__item_at(1), __item_at(0), size() * item_size()); + } + memcpy(__item_at(0), item, item_size()); + increase_size(1); +} + +char *LeafIndexNodeHandler::__item_at(int index) const +{ + return leaf_node_->array + (index * item_size()); +} +char *LeafIndexNodeHandler::__key_at(int index) const +{ + return __item_at(index); +} +char *LeafIndexNodeHandler::__value_at(int index) const +{ + return __item_at(index) + key_size(); +} + +std::string to_string(const LeafIndexNodeHandler &handler, const KeyPrinter &printer) +{ + std::stringstream ss; + ss << to_string((const IndexNodeHandler &)handler) + << ",prev page:" << handler.prev_page() + << ",next page:" << handler.next_page(); + ss << ",values=[" << printer(handler.__key_at(0)) ; + for (int i = 1; i < handler.size(); i++) { + ss << "," << printer(handler.__key_at(i)); + } + ss << "]"; + return ss.str(); +} + +bool LeafIndexNodeHandler::validate(const KeyComparator &comparator, DiskBufferPool *bp, int file_id) const +{ + bool result = IndexNodeHandler::validate(); + if (false == result) { + return false; + } + + const int node_size = size(); + for (int i = 1; i < node_size; i++) { + if (comparator(__key_at(i - 1), __key_at(i)) >= 0) { + LOG_WARN("page number = %d, invalid key order. id1=%d,id2=%d, this=%s", + page_num(), i-1, i, to_string(*this).c_str()); + return false; + } + } + + PageNum parent_page_num = this->parent_page_num(); + if (parent_page_num == BP_INVALID_PAGE_NUM) { + return true; + } + + BPPageHandle parent_page_handle; + RC rc = bp->get_this_page(file_id, parent_page_num, &parent_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch parent page. 
page num=%d, rc=%d:%s", + parent_page_num, rc, strrc(rc)); + return false; + } + + InternalIndexNodeHandler parent_node(header_, parent_page_handle); + int index_in_parent = parent_node.value_index(this->page_num()); + if (index_in_parent < 0) { + LOG_WARN("invalid leaf node. cannot find index in parent. this page num=%d, parent page num=%d", + this->page_num(), parent_page_num); + bp->unpin_page(&parent_page_handle); + return false; + } + + if (0 != index_in_parent) { + int cmp_result = comparator(__key_at(0), parent_node.key_at(index_in_parent)); + if (cmp_result < 0) { + LOG_WARN("invalid leaf node. first item should be greate than or equal to parent item. " \ + "this page num=%d, parent page num=%d, index in parent=%d", + this->page_num(), parent_node.page_num(), index_in_parent); + bp->unpin_page(&parent_page_handle); + return false; + } + } + + if (index_in_parent < parent_node.size() - 1) { + int cmp_result = comparator(__key_at(size() - 1), parent_node.key_at(index_in_parent + 1)); + if (cmp_result >= 0) { + LOG_WARN("invalid leaf node. last item should be less than the item at the first after item in parent." \ + "this page num=%d, parent page num=%d, parent item to compare=%d", + this->page_num(), parent_node.page_num(), index_in_parent + 1); + bp->unpin_page(&parent_page_handle); + return false; + } + } + bp->unpin_page(&parent_page_handle); + return true; +} + +///////////////////////////////////////////////////////////////////////////////// +InternalIndexNodeHandler::InternalIndexNodeHandler(const IndexFileHeader &header, BPPageHandle &page_handle) + : IndexNodeHandler(header, page_handle), internal_node_((InternalIndexNode *)page_handle.data()) +{} + +std::string to_string(const InternalIndexNodeHandler &node, const KeyPrinter &printer) +{ + std::stringstream ss; + ss << to_string((const IndexNodeHandler &)node); + ss << ",children:[" + << "{key:" << printer(node.__key_at(0)) << "," + << "value:" << *(PageNum *)node.__value_at(0) << "}"; + + for (int i = 1; i < node.size(); i++) { + ss << ",{key:" << printer(node.__key_at(i)) + << ",value:"<< *(PageNum *)node.__value_at(i) << "}"; + } + ss << "]"; + return ss.str(); +} + +void InternalIndexNodeHandler::init_empty() +{ + IndexNodeHandler::init_empty(false); +} +void InternalIndexNodeHandler::create_new_root(PageNum first_page_num, const char *key, PageNum page_num) +{ + memset(__key_at(0), 0, key_size()); + memcpy(__value_at(0), &first_page_num, value_size()); + memcpy(__item_at(1), key, key_size()); + memcpy(__value_at(1), &page_num, value_size()); + increase_size(2); +} + +/** + * insert one entry + * the entry to be inserted will never at the first slot. + * the right child page after split will always have bigger keys. 
+ */ +void InternalIndexNodeHandler::insert(const char *key, PageNum page_num, const KeyComparator &comparator) +{ + int insert_position = -1; + lookup(comparator, key, nullptr, &insert_position); + if (insert_position < size()) { + memmove(__item_at(insert_position + 1), __item_at(insert_position), (size() - insert_position) * item_size()); + } + memcpy(__item_at(insert_position), key, key_size()); + memcpy(__value_at(insert_position), &page_num, value_size()); + increase_size(1); +} + +RC InternalIndexNodeHandler::move_half_to(InternalIndexNodeHandler &other, DiskBufferPool *bp, int file_id) +{ + const int size = this->size(); + const int move_index = size / 2; + RC rc = other.copy_from(this->__item_at(move_index), size - move_index, bp, file_id); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to copy item to new node. rc=%d:%s", rc, strrc(rc)); + return rc; + } + + increase_size(- (size - move_index)); + return rc; +} + +int InternalIndexNodeHandler::max_size() const +{ + return header_.internal_max_size; +} + +int InternalIndexNodeHandler::min_size() const +{ + return header_.internal_max_size - header_.internal_max_size / 2; +} + +/** + * lookup the first item which key <= item + * @return unlike the leafNode, the return value is not the insert position, + * but only the index of child to find. + */ +int InternalIndexNodeHandler::lookup(const KeyComparator &comparator, const char *key, + bool *found /* = nullptr */, int *insert_position /*= nullptr */) const +{ + int i = 1; + const int size = this->size(); + for ( ; i < size; i++) { + int result = comparator(key, __key_at(i)); + if (result == 0) { + if (found) { + *found = true; + } + if (insert_position) { + *insert_position = i; + } + return i; + } + if (result < 0) { + if (found) { + *found = false; + } + if (insert_position) { + *insert_position = i; + } + + return i - 1; + } + } + if (found) { + *found = false; + } + if (insert_position) { + *insert_position = size; + } + return size - 1; +} + +char *InternalIndexNodeHandler::key_at(int index) +{ + assert(index >= 0 && index < size()); + return __key_at(index); +} + +void InternalIndexNodeHandler::set_key_at(int index, const char *key) +{ + assert(index >= 0 && index < size()); + memcpy(__key_at(index), key, key_size()); +} + +PageNum InternalIndexNodeHandler::value_at(int index) +{ + assert(index >= 0 && index < size()); + return *(PageNum *)__value_at(index); +} + +int InternalIndexNodeHandler::value_index(PageNum page_num) +{ + for (int i = 0; i < size(); i++) { + if (page_num == *(PageNum*)__value_at(i)) { + return i; + } + } + return -1; +} + +void InternalIndexNodeHandler::remove(int index) +{ + assert(index >= 0 && index < size()); + if (index < size() - 1) { + memmove(__item_at(index), __item_at(index + 1), (size() - index - 1) * item_size()); + } + increase_size(-1); +} + +RC InternalIndexNodeHandler::move_to(InternalIndexNodeHandler &other, DiskBufferPool *disk_buffer_pool, int file_id) +{ + RC rc = other.copy_from(__item_at(0), size(), disk_buffer_pool, file_id); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to copy items to other node. 
rc=%d:%s", rc, strrc(rc)); + return rc; + } + + increase_size(- this->size()); + return RC::SUCCESS; +} + +RC InternalIndexNodeHandler::move_first_to_end(InternalIndexNodeHandler &other, DiskBufferPool *disk_buffer_pool, int file_id) +{ + RC rc = other.append(__item_at(0), disk_buffer_pool, file_id); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to append item to others."); + return rc; + } + + if (size() >= 1) { + memmove(__item_at(0), __item_at(1), (size() - 1) * item_size() ); + } + increase_size(-1); + return rc; +} + +RC InternalIndexNodeHandler::move_last_to_front(InternalIndexNodeHandler &other, DiskBufferPool *bp, int file_id) +{ + RC rc = other.preappend(__item_at(size() - 1), bp, file_id); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to preappend to others"); + return rc; + } + + increase_size(-1); + return rc; +} +/** + * copy items from other node to self's right + */ +RC InternalIndexNodeHandler::copy_from(const char *items, int num, DiskBufferPool *disk_buffer_pool, int file_id) +{ + memcpy(__item_at(this->size()), items, num * item_size()); + + RC rc = RC::SUCCESS; + PageNum this_page_num = this->page_num(); + BPPageHandle page_handle; + for (int i = 0; i < num; i++) { + const PageNum page_num = *(const PageNum *)((items + i * item_size()) + key_size()); + rc = disk_buffer_pool->get_this_page(file_id, page_num, &page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to set child's page num. child page num:%d, this page num=%d, rc=%d:%s", + page_num, this_page_num, rc, strrc(rc)); + return rc; + } + IndexNodeHandler child_node(header_, page_handle); + child_node.set_parent_page_num(this_page_num); + page_handle.mark_dirty(); + disk_buffer_pool->unpin_page(&page_handle); + } + increase_size(num); + return rc; +} + +RC InternalIndexNodeHandler::append(const char *item, DiskBufferPool *bp, int file_id) +{ + return this->copy_from(item, 1, bp, file_id); +} + +RC InternalIndexNodeHandler::preappend(const char *item, DiskBufferPool *bp, int file_id) +{ + PageNum child_page_num = *(PageNum *)(item + key_size()); + BPPageHandle page_handle; + RC rc = bp->get_this_page(file_id, child_page_num, &page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch child page. rc=%d:%s", rc, strrc(rc)); + return rc; + } + + IndexNodeHandler child_node(header_, page_handle); + child_node.set_parent_page_num(this->page_num()); + + page_handle.mark_dirty(); + bp->unpin_page(&page_handle); + + if (this->size() > 0) { + memmove(__item_at(1), __item_at(0), this->size() * item_size()); + } + + memcpy(__item_at(0), item, item_size()); + increase_size(1); + return RC::SUCCESS; +} + +char *InternalIndexNodeHandler::__item_at(int index) const +{ + return internal_node_->array + (index * item_size()); +} + +char *InternalIndexNodeHandler::__key_at(int index) const +{ + return __item_at(index); +} + +char *InternalIndexNodeHandler::__value_at(int index) const +{ + return __item_at(index) + key_size(); +} + +int InternalIndexNodeHandler::value_size() const +{ + return sizeof(PageNum); +} + +int InternalIndexNodeHandler::item_size() const +{ + return key_size() + this->value_size(); +} + +bool InternalIndexNodeHandler::validate(const KeyComparator &comparator, DiskBufferPool *bp, int file_id) const +{ + bool result = IndexNodeHandler::validate(); + if (false == result) { + return false; + } + + const int node_size = size(); + for (int i = 2; i < node_size; i++) { + if (comparator(__key_at(i - 1), __key_at(i)) >= 0) { + LOG_WARN("page number = %d, invalid key order. 
id1=%d,id2=%d, this=%s", + page_num(), i-1, i, to_string(*this).c_str()); + return false; + } + } + + for (int i = 0; result && i < node_size; i++) { + PageNum page_num = *(PageNum *)__value_at(i); + if (page_num < 0) { + LOG_WARN("this page num=%d, got invalid child page. page num=%d", this->page_num(), page_num); + } else { + BPPageHandle child_page_handle; + RC rc = bp->get_this_page(file_id, page_num, &child_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch child page while validate internal page. page num=%d, rc=%d:%s", + page_num, rc, strrc(rc)); + } else { + IndexNodeHandler child_node(header_, child_page_handle); + if (child_node.parent_page_num() != this->page_num()) { + LOG_WARN("child's parent page num is invalid. child page num=%d, parent page num=%d, this page num=%d", + child_node.page_num(), child_node.parent_page_num(), this->page_num()); + result = false; + } + bp->unpin_page(&child_page_handle); + } + } + } + + if (!result) { + return result; + } + + const PageNum parent_page_num = this->parent_page_num(); + if (parent_page_num == BP_INVALID_PAGE_NUM) { + return result; + } + + BPPageHandle parent_page_handle; + RC rc = bp->get_this_page(file_id, parent_page_num, &parent_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch parent page. page num=%d, rc=%d:%s", parent_page_num, rc, strrc(rc)); + return false; + } + + InternalIndexNodeHandler parent_node(header_, parent_page_handle); + int index_in_parent = parent_node.value_index(this->page_num()); + if (index_in_parent < 0) { + LOG_WARN("invalid internal node. cannot find index in parent. this page num=%d, parent page num=%d", + this->page_num(), parent_page_num); + bp->unpin_page(&parent_page_handle); + return false; + } + + if (0 != index_in_parent) { + int cmp_result = comparator(__key_at(1), parent_node.key_at(index_in_parent)); + if (cmp_result < 0) { + LOG_WARN("invalid internal node. the second item should be greate than or equal to parent item. " \ + "this page num=%d, parent page num=%d, index in parent=%d", + this->page_num(), parent_node.page_num(), index_in_parent); + bp->unpin_page(&parent_page_handle); + return false; + } + } + + if (index_in_parent < parent_node.size() - 1) { + int cmp_result = comparator(__key_at(size() - 1), parent_node.key_at(index_in_parent + 1)); + if (cmp_result >= 0) { + LOG_WARN("invalid internal node. last item should be less than the item at the first after item in parent." \ + "this page num=%d, parent page num=%d, parent item to compare=%d", + this->page_num(), parent_node.page_num(), index_in_parent + 1); + bp->unpin_page(&parent_page_handle); + return false; + } + } + bp->unpin_page(&parent_page_handle); + + return result; +} + +///////////////////////////////////////////////////////////////////////////////// + +RC BplusTreeHandler::sync() +{ + return disk_buffer_pool_->purge_all_pages(file_id_); +} + +RC BplusTreeHandler::create(const char *file_name, AttrType attr_type, int attr_length, + int internal_max_size /* = -1*/, int leaf_max_size /* = -1 */) +{ + DiskBufferPool *disk_buffer_pool = theGlobalDiskBufferPool(); + RC rc = disk_buffer_pool->create_file(file_name); + if (rc != RC::SUCCESS) { + LOG_WARN("Failed to create file. file name=%s, rc=%d:%s", file_name, rc, strrc(rc)); + return rc; + } + LOG_INFO("Successfully create index file:%s", file_name); + + int file_id; + rc = disk_buffer_pool->open_file(file_name, &file_id); + if (rc != RC::SUCCESS) { + LOG_WARN("Failed to open file. 
file name=%s, rc=%d:%s", file_name, rc, strrc(rc)); + return rc; + } + LOG_INFO("Successfully open index file %s.", file_name); + + BPPageHandle header_page_handle; + rc = disk_buffer_pool->allocate_page(file_id, &header_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to allocate header page for bplus tree. rc=%d:%s", rc, strrc(rc)); + disk_buffer_pool->close_file(file_id); + return rc; + } + + if (header_page_handle.page_num() != FIRST_INDEX_PAGE) { + LOG_WARN("header page num should be %d but got %d. is it a new file : %s", + FIRST_INDEX_PAGE, header_page_handle.page_num(), file_name); + disk_buffer_pool->close_file(file_id); + return RC::INTERNAL; + } + + if (internal_max_size < 0) { + internal_max_size = calc_internal_page_capacity(attr_length); + } + if (leaf_max_size < 0) { + leaf_max_size = calc_leaf_page_capacity(attr_length); + } + char *pdata = header_page_handle.data(); + IndexFileHeader *file_header = (IndexFileHeader *)pdata; + file_header->attr_length = attr_length; + file_header->key_length = attr_length + sizeof(RID); + file_header->attr_type = attr_type; + file_header->internal_max_size = internal_max_size; + file_header->leaf_max_size = leaf_max_size; + file_header->root_page = BP_INVALID_PAGE_NUM; + + header_page_handle.mark_dirty(); + disk_buffer_pool->unpin_page(&header_page_handle); + + disk_buffer_pool_ = disk_buffer_pool; + file_id_ = file_id; + + memcpy(&file_header_, pdata, sizeof(file_header_)); + header_dirty_ = false; + + mem_pool_item_ = new common::MemPoolItem(file_name); + if (mem_pool_item_->init(file_header->key_length) < 0) { + LOG_WARN("Failed to init memory pool for index %s", file_name); + close(); + return RC::NOMEM; + } + + key_comparator_.init(file_header->attr_type, file_header->attr_length); + key_printer_.init(file_header->attr_type, file_header->attr_length); + LOG_INFO("Successfully create index %s", file_name); + return RC::SUCCESS; +} + +RC BplusTreeHandler::open(const char *file_name) +{ + if (file_id_ >= 0) { + LOG_WARN("%s has been opened before index.open.", file_name); + return RC::RECORD_OPENNED; + } + + DiskBufferPool *disk_buffer_pool = theGlobalDiskBufferPool(); + int file_id = 0; + RC rc = disk_buffer_pool->open_file(file_name, &file_id); + if (rc != RC::SUCCESS) { + LOG_WARN("Failed to open file name=%s, rc=%d:%s", file_name, rc, strrc(rc)); + return rc; + } + + BPPageHandle page_handle; + rc = disk_buffer_pool->get_this_page(file_id, FIRST_INDEX_PAGE, &page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("Failed to get first page file name=%s, rc=%d:%s", file_name, rc, strrc(rc)); + disk_buffer_pool_->close_file(file_id); + return rc; + } + + char *pdata = page_handle.data(); + memcpy(&file_header_, pdata, sizeof(IndexFileHeader)); + header_dirty_ = false; + disk_buffer_pool_ = disk_buffer_pool; + file_id_ = file_id; + + mem_pool_item_ = new common::MemPoolItem(file_name); + if (mem_pool_item_->init(file_header_.key_length) < 0) { + LOG_WARN("Failed to init memory pool for index %s", file_name); + close(); + return RC::NOMEM; + } + + // close old page_handle + disk_buffer_pool->unpin_page(&page_handle); + + key_comparator_.init(file_header_.attr_type, file_header_.attr_length); + LOG_INFO("Successfully open index %s", file_name); + return RC::SUCCESS; +} + +RC BplusTreeHandler::close() +{ + if (file_id_ != -1) { + + disk_buffer_pool_->close_file(file_id_); + file_id_ = -1; + + delete mem_pool_item_; + mem_pool_item_ = nullptr; + } + + disk_buffer_pool_ = nullptr; + return RC::SUCCESS; +} + +RC 
BplusTreeHandler::print_leaf(BPPageHandle &page_handle) +{ + LeafIndexNodeHandler leaf_node(file_header_, page_handle); + LOG_INFO("leaf node: %s", to_string(leaf_node, key_printer_).c_str()); + disk_buffer_pool_->unpin_page(&page_handle); + return RC::SUCCESS; +} + +RC BplusTreeHandler::print_internal_node_recursive(BPPageHandle &page_handle) +{ + RC rc = RC::SUCCESS; + LOG_INFO("bplus tree. file header: %s", file_header_.to_string().c_str()); + InternalIndexNodeHandler internal_node(file_header_, page_handle); + LOG_INFO("internal node: %s", to_string(internal_node, key_printer_).c_str()); + + int node_size = internal_node.size(); + for (int i = 0; i < node_size; i++) { + PageNum page_num = internal_node.value_at(i); + BPPageHandle child_page_handle; + rc = disk_buffer_pool_->get_this_page(file_id_, page_num, &child_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch child page. page id=%d, rc=%d:%s", page_num, rc, strrc(rc)); + disk_buffer_pool_->unpin_page(&page_handle); + return rc; + } + + IndexNodeHandler node(file_header_, child_page_handle); + if (node.is_leaf()) { + rc = print_leaf(child_page_handle); + } else { + rc = print_internal_node_recursive(child_page_handle); + } + if (rc != RC::SUCCESS) { + LOG_WARN("failed to print node. page id=%d, rc=%d:%s", child_page_handle.page_num(), rc, strrc(rc)); + disk_buffer_pool_->unpin_page(&page_handle); + return rc; + } + } + + disk_buffer_pool_->unpin_page(&page_handle); + return RC::SUCCESS; +} + +RC BplusTreeHandler::print_tree() +{ + if (file_id_ < 0) { + LOG_WARN("Index hasn't been created or opened, fail to print"); + return RC::SUCCESS; + } + if (is_empty()) { + LOG_INFO("tree is empty"); + return RC::SUCCESS; + } + + BPPageHandle page_handle; + PageNum page_num = file_header_.root_page; + RC rc = disk_buffer_pool_->get_this_page(file_id_, page_num, &page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch page. page id=%d, rc=%d:%s", page_num, rc, strrc(rc)); + return rc; + } + + IndexNodeHandler node(file_header_, page_handle); + if (node.is_leaf()) { + rc = print_leaf(page_handle); + } else { + rc = print_internal_node_recursive(page_handle); + } + return rc; +} + +RC BplusTreeHandler::print_leafs() +{ + if (is_empty()) { + LOG_INFO("empty tree"); + return RC::SUCCESS; + } + + BPPageHandle page_handle; + + RC rc = left_most_page(page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to get left most page. rc=%d:%s", rc, strrc(rc)); + return rc; + } + + while (page_handle.page_num() != BP_INVALID_PAGE_NUM) { + LeafIndexNodeHandler leaf_node(file_header_, page_handle); + LOG_INFO("leaf info: %s", to_string(leaf_node, key_printer_).c_str()); + + PageNum next_page_num = leaf_node.next_page(); + disk_buffer_pool_->unpin_page(&page_handle); + + if (next_page_num == BP_INVALID_PAGE_NUM) { + break; + } + + rc = disk_buffer_pool_->get_this_page(file_id_, next_page_num, &page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to get next page. 
page id=%d, rc=%d:%s", next_page_num, rc, strrc(rc)); + return rc; + } + } + return rc; +} + +bool BplusTreeHandler::validate_node_recursive(BPPageHandle &page_handle) +{ + bool result = true; + IndexNodeHandler node(file_header_, page_handle); + if (node.is_leaf()) { + LeafIndexNodeHandler leaf_node(file_header_, page_handle); + result = leaf_node.validate(key_comparator_, disk_buffer_pool_, file_id_); + } else { + InternalIndexNodeHandler internal_node(file_header_, page_handle); + result = internal_node.validate(key_comparator_, disk_buffer_pool_, file_id_); + for (int i = 0; result && i < internal_node.size(); i++) { + PageNum page_num = internal_node.value_at(i); + BPPageHandle child_page_handle; + RC rc = disk_buffer_pool_->get_this_page(file_id_, page_num, &child_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch child page.page id=%d, rc=%d:%s", page_num, rc, strrc(rc)); + result = false; + break; + } + + result = validate_node_recursive(child_page_handle); + } + } + + disk_buffer_pool_->unpin_page(&page_handle); + return result; +} + +bool BplusTreeHandler::validate_leaf_link() +{ + if (is_empty()) { + return true; + } + + BPPageHandle page_handle; + RC rc = left_most_page(page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch left most page. rc=%d:%s", rc, strrc(rc)); + return false; + } + + PageNum prev_page_num = BP_INVALID_PAGE_NUM; + + LeafIndexNodeHandler leaf_node(file_header_, page_handle); + if (leaf_node.prev_page() != prev_page_num) { + LOG_WARN("invalid page. current_page_num=%d, prev page num should be %d but got %d", + page_handle.page_num(), prev_page_num, leaf_node.prev_page()); + return false; + } + PageNum next_page_num = leaf_node.next_page(); + + prev_page_num = page_handle.page_num(); + char *prev_key = (char *)mem_pool_item_->alloc(); + memcpy(prev_key, leaf_node.key_at(leaf_node.size() - 1), file_header_.key_length); + disk_buffer_pool_->unpin_page(&page_handle); + + bool result = true; + while (result && next_page_num != BP_INVALID_PAGE_NUM) { + rc = disk_buffer_pool_->get_this_page(file_id_, next_page_num, &page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch next page. page num=%d, rc=%d:%s", next_page_num, rc, strrc(rc)); + return false; + } + + LeafIndexNodeHandler leaf_node(file_header_, page_handle); + if (leaf_node.prev_page() != prev_page_num) { + LOG_WARN("invalid page. current_page_num=%d, prev page num should be %d but got %d", + page_handle.page_num(), prev_page_num, leaf_node.prev_page()); + result = false; + } + if (key_comparator_(prev_key, leaf_node.key_at(0)) >= 0) { + LOG_WARN("invalid page. current first key is not bigger than last"); + result = false; + } + + next_page_num = leaf_node.next_page(); + memcpy(prev_key, leaf_node.key_at(leaf_node.size() - 1), file_header_.key_length); + prev_page_num = page_handle.page_num(); + disk_buffer_pool_->unpin_page(&page_handle); + } + + free_key(prev_key); + // can do more things + return result; +} + +bool BplusTreeHandler::validate_tree() +{ + if (is_empty()) { + return true; + } + + BPPageHandle page_handle; + RC rc = disk_buffer_pool_->get_this_page(file_id_, file_header_.root_page, &page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch root page. page id=%d, rc=%d:%s", file_header_.root_page, rc, strrc(rc)); + return rc; + } + + if (!validate_node_recursive(page_handle) || !validate_leaf_link()) { + LOG_WARN("Current B+ Tree is invalid"); + print_tree(); + return false; + } + + LOG_INFO("great! 
current tree is valid"); + return true; +} + +bool BplusTreeHandler::is_empty() const +{ + return file_header_.root_page == BP_INVALID_PAGE_NUM; +} + +RC BplusTreeHandler::find_leaf(const char *key, BPPageHandle &page_handle) +{ + return find_leaf_internal( + [&](InternalIndexNodeHandler &internal_node) { + return internal_node.value_at(internal_node.lookup(key_comparator_, key)); + }, + page_handle); +} + +RC BplusTreeHandler::left_most_page(BPPageHandle &page_handle) +{ + return find_leaf_internal( + [&](InternalIndexNodeHandler &internal_node) { + return internal_node.value_at(0); + }, + page_handle + ); +} +RC BplusTreeHandler::right_most_page(BPPageHandle &page_handle) +{ + return find_leaf_internal( + [&](InternalIndexNodeHandler &internal_node) { + return internal_node.value_at(internal_node.size() - 1); + }, + page_handle + ); +} + +RC BplusTreeHandler::find_leaf_internal(const std::function &child_page_getter, + BPPageHandle &page_handle) +{ + if (is_empty()) { + return RC::EMPTY; + } + + RC rc = disk_buffer_pool_->get_this_page(file_id_, file_header_.root_page, &page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch root page. page id=%d, rc=%d:%s", file_header_.root_page, rc, strrc(rc)); + return rc; + } + + IndexNode *node = (IndexNode *)page_handle.data(); + while (false == node->is_leaf) { + InternalIndexNodeHandler internal_node(file_header_, page_handle); + PageNum page_num = child_page_getter(internal_node); + + disk_buffer_pool_->unpin_page(&page_handle); + + rc = disk_buffer_pool_->get_this_page(file_id_, page_num, &page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("Failed to load page file_id:%d, page_num:%d", file_id_, page_num); + return rc; + } + node = (IndexNode *)page_handle.data(); + } + + return RC::SUCCESS; +} + +RC BplusTreeHandler::insert_entry_into_leaf_node(BPPageHandle &page_handle, const char *key, const RID *rid) +{ + LeafIndexNodeHandler leaf_node(file_header_, page_handle); + bool exists = false; + int insert_position = leaf_node.lookup(key_comparator_, key, &exists); + if (exists) { + LOG_TRACE("entry exists"); + return RC::RECORD_DUPLICATE_KEY; + } + + if (leaf_node.size() < leaf_node.max_size()) { + leaf_node.insert(insert_position, key, (const char *)rid); + page_handle.mark_dirty(); + disk_buffer_pool_->unpin_page(&page_handle); + return RC::SUCCESS; + } + + BPPageHandle new_page_handle; + RC rc = split(page_handle, new_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to split leaf node. rc=%d:%s", rc, strrc(rc)); + return rc; + } + + LeafIndexNodeHandler new_index_node(file_header_, new_page_handle); + new_index_node.set_prev_page(page_handle.page_num()); + new_index_node.set_next_page(leaf_node.next_page()); + new_index_node.set_parent_page_num(leaf_node.parent_page_num()); + leaf_node.set_next_page(new_page_handle.page_num()); + + PageNum next_page_num = new_index_node.next_page(); + if (next_page_num != BP_INVALID_PAGE_NUM) { + BPPageHandle next_page_handle; + rc = disk_buffer_pool_->get_this_page(file_id_, next_page_num, &next_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch next page. 
page num=%d, rc=%d:%s", next_page_num, rc, strrc(rc)); + return rc; + } + + LeafIndexNodeHandler next_node(file_header_, next_page_handle); + next_node.set_prev_page(new_page_handle.page_num()); + disk_buffer_pool_->unpin_page(&next_page_handle); + } + + if (insert_position < leaf_node.size()) { + leaf_node.insert(insert_position, key, (const char *)rid); + } else { + new_index_node.insert(insert_position - leaf_node.size(), key, (const char *)rid); + } + + return insert_entry_into_parent(page_handle, new_page_handle, new_index_node.key_at(0)); +} + +RC BplusTreeHandler::insert_entry_into_parent(BPPageHandle &page_handle, BPPageHandle &new_page_handle, const char *key) +{ + RC rc = RC::SUCCESS; + + IndexNodeHandler node_handler(file_header_, page_handle); + IndexNodeHandler new_node_handler(file_header_, new_page_handle); + PageNum parent_page_num = node_handler.parent_page_num(); + + if (parent_page_num == BP_INVALID_PAGE_NUM) { + + // create new root page + BPPageHandle root_page; + rc = disk_buffer_pool_->allocate_page(file_id_, &root_page); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to allocate new root page. rc=%d:%s", rc, strrc(rc)); + return rc; + } + + InternalIndexNodeHandler root_node(file_header_, root_page); + root_node.init_empty(); + root_node.create_new_root(page_handle.page_num(), key, new_page_handle.page_num()); + node_handler.set_parent_page_num(root_page.page_num()); + new_node_handler.set_parent_page_num(root_page.page_num()); + + page_handle.mark_dirty(); + new_page_handle.mark_dirty(); + disk_buffer_pool_->unpin_page(&page_handle); + disk_buffer_pool_->unpin_page(&new_page_handle); + + file_header_.root_page = root_page.page_num(); + update_root_page_num(); // TODO + root_page.mark_dirty(); + disk_buffer_pool_->unpin_page(&root_page); + + return RC::SUCCESS; + + } else { + + BPPageHandle parent_page_handle; + rc = disk_buffer_pool_->get_this_page(file_id_, parent_page_num, &parent_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to insert entry into leaf. rc=%d:%s", rc, strrc(rc)); + // should do more things to recover + return rc; + } + + InternalIndexNodeHandler node(file_header_, parent_page_handle); + + /// current node is not in full mode, insert the entry and return + if (node.size() < node.max_size()) { + node.insert(key, new_page_handle.page_num(), key_comparator_); + new_node_handler.set_parent_page_num(parent_page_num); + + page_handle.mark_dirty(); + new_page_handle.mark_dirty(); + parent_page_handle.mark_dirty(); + disk_buffer_pool_->unpin_page(&page_handle); + disk_buffer_pool_->unpin_page(&new_page_handle); + disk_buffer_pool_->unpin_page(&parent_page_handle); + + } else { + + // we should split the node and insert the entry and then insert new entry to current node's parent + BPPageHandle new_parent_page_handle; + rc = split(parent_page_handle, new_parent_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to split internal node. rc=%d:%s", rc, strrc(rc)); + disk_buffer_pool_->unpin_page(&page_handle); + disk_buffer_pool_->unpin_page(&new_page_handle); + disk_buffer_pool_->unpin_page(&parent_page_handle); + } else { + // insert into left or right ? 
decide by key compare result + InternalIndexNodeHandler new_node(file_header_, new_parent_page_handle); + if (key_comparator_(key, new_node.key_at(0)) > 0) { + new_node.insert(key, new_page_handle.page_num(), key_comparator_); + new_node_handler.set_parent_page_num(new_node.page_num()); + } else { + node.insert(key, new_page_handle.page_num(), key_comparator_); + new_node_handler.set_parent_page_num(node.page_num()); + } + + disk_buffer_pool_->unpin_page(&page_handle); + disk_buffer_pool_->unpin_page(&new_page_handle); + + rc = insert_entry_into_parent(parent_page_handle, new_parent_page_handle, new_node.key_at(0)); + } + } + } + return rc; +} + +/** + * split one full node into two + * @param page_handle[inout] the node to split + * @param new_page_handle[out] the new node after split + * @param intert_position the intert position of new key + */ +template +RC BplusTreeHandler::split(BPPageHandle &page_handle, BPPageHandle &new_page_handle) +{ + IndexNodeHandlerType old_node(file_header_, page_handle); + + char *new_parent_key = (char *)mem_pool_item_->alloc(); + if (new_parent_key == nullptr) { + LOG_WARN("Failed to alloc memory for new key. size=%d", file_header_.key_length); + return RC::NOMEM; + } + + // add a new node + RC rc = disk_buffer_pool_->allocate_page(file_id_, &new_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("Failed to split index page due to failed to allocate page, file_id:%d. rc=%d:%s", file_id_, rc, strrc(rc)); + return rc; + } + + IndexNodeHandlerType new_node(file_header_, new_page_handle); + new_node.init_empty(); + new_node.set_parent_page_num(old_node.parent_page_num()); + + old_node.move_half_to(new_node, disk_buffer_pool_, file_id_); + + page_handle.mark_dirty(); + new_page_handle.mark_dirty(); + return RC::SUCCESS; +} + +RC BplusTreeHandler::update_root_page_num() +{ + BPPageHandle header_page_handle; + RC rc = disk_buffer_pool_->get_this_page(file_id_, FIRST_INDEX_PAGE, &header_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch header page. rc=%d:%s", rc, strrc(rc)); + return rc; + } + + IndexFileHeader *header = (IndexFileHeader *)header_page_handle.data(); + header->root_page = file_header_.root_page; + header_page_handle.mark_dirty(); + disk_buffer_pool_->unpin_page(&header_page_handle); + return rc; +} + + +RC BplusTreeHandler::create_new_tree(const char *key, const RID *rid) +{ + RC rc = RC::SUCCESS; + if (file_header_.root_page != BP_INVALID_PAGE_NUM) { + rc = RC::INTERNAL; + LOG_WARN("cannot create new tree while root page is valid. root page id=%d", file_header_.root_page); + return rc; + } + + BPPageHandle page_handle; + rc = disk_buffer_pool_->allocate_page(file_id_, &page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to allocate root page. rc=%d:%s", rc, strrc(rc)); + return rc; + } + + LeafIndexNodeHandler leaf_node(file_header_, page_handle); + leaf_node.init_empty(); + leaf_node.insert(0, key, (const char *)rid); + file_header_.root_page = page_handle.page_num(); + page_handle.mark_dirty(); + disk_buffer_pool_->unpin_page(&page_handle); + + rc = update_root_page_num(); + // disk_buffer_pool_->check_all_pages_unpinned(file_id_); + return rc; +} + +char *BplusTreeHandler::make_key(const char *user_key, const RID &rid) +{ + char *key = (char *)mem_pool_item_->alloc(); + if (key == nullptr) { + LOG_WARN("Failed to alloc memory for key. 
file_id:%d", file_id_); + return nullptr; + } + memcpy(key, user_key, file_header_.attr_length); + memcpy(key + file_header_.attr_length, &rid, sizeof(rid)); + return key; +} + +void BplusTreeHandler::free_key(char *key) +{ + mem_pool_item_->free(key); +} + +RC BplusTreeHandler::insert_entry(const char *user_key, const RID *rid) +{ + if (file_id_ < 0) { + LOG_WARN("Index isn't ready!"); + return RC::RECORD_CLOSED; + } + + if (user_key == nullptr || rid == nullptr) { + LOG_WARN("Invalid arguments, key is empty or rid is empty"); + return RC::INVALID_ARGUMENT; + } + + char *key = make_key(user_key, *rid); + if (key == nullptr) { + LOG_WARN("Failed to alloc memory for key. file_id:%d", file_id_); + return RC::NOMEM; + } + + if (is_empty()) { + return create_new_tree(key, rid); + } + + BPPageHandle page_handle; + RC rc = find_leaf(key, page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("Failed to find leaf file_id:%d, %s. rc=%d:%s", file_id_, rid->to_string().c_str(), rc, strrc(rc)); + mem_pool_item_->free(key); + return rc; + } + + rc = insert_entry_into_leaf_node(page_handle, key, rid); + if (rc != RC::SUCCESS) { + LOG_TRACE("Failed to insert into leaf of index %d, rid:%s", file_id_, rid->to_string().c_str()); + disk_buffer_pool_->unpin_page(&page_handle); + mem_pool_item_->free(key); + // disk_buffer_pool_->check_all_pages_unpinned(file_id_); + return rc; + } + + mem_pool_item_->free(key); + LOG_TRACE("insert entry success"); + // disk_buffer_pool_->check_all_pages_unpinned(file_id_); + return RC::SUCCESS; +} + +RC BplusTreeHandler::get_entry(const char *user_key, std::list &rids) +{ + if (file_id_ < 0) { + LOG_WARN("Index isn't ready!"); + return RC::RECORD_CLOSED; + } + + LOG_INFO("before get entry"); + disk_buffer_pool_->check_all_pages_unpinned(file_id_); + + BplusTreeScanner scanner(*this); + RC rc = scanner.open(user_key, true/*left_inclusive*/, user_key, true/*right_inclusive*/); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to open scanner. rc=%d:%s", rc, strrc(rc)); + return rc; + } + + RID rid; + while ((rc = scanner.next_entry(&rid)) == RC::SUCCESS) { + rids.push_back(rid); + } + + scanner.close(); + if (rc != RC::RECORD_EOF) { + LOG_WARN("scanner return error. rc=%d:%s", rc, strrc(rc)); + } else { + rc = RC::SUCCESS; + } + LOG_INFO("after get entry"); + disk_buffer_pool_->check_all_pages_unpinned(file_id_); + return rc; +} + +RC BplusTreeHandler::adjust_root(BPPageHandle &root_page_handle) +{ + IndexNodeHandler root_node(file_header_, root_page_handle); + if (root_node.is_leaf() && root_node.size() > 0) { + root_page_handle.mark_dirty(); + disk_buffer_pool_->unpin_page(&root_page_handle); + return RC::SUCCESS; + } + + if (root_node.is_leaf()) { + // this is a leaf and an empty node + file_header_.root_page = BP_INVALID_PAGE_NUM; + } else { + // this is an internal node and has only one child node + InternalIndexNodeHandler internal_node(file_header_, root_page_handle); + + const PageNum child_page_num = internal_node.value_at(0); + BPPageHandle child_page_handle; + RC rc = disk_buffer_pool_->get_this_page(file_id_, child_page_num, &child_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch child page. 
page num=%d, rc=%d:%s", child_page_num, rc, strrc(rc)); + return rc; + } + + IndexNodeHandler child_node(file_header_, child_page_handle); + child_node.set_parent_page_num(BP_INVALID_PAGE_NUM); + disk_buffer_pool_->unpin_page(&child_page_handle); + + file_header_.root_page = child_page_num; + } + + update_root_page_num(); + + PageNum old_root_page_num = root_page_handle.page_num(); + disk_buffer_pool_->unpin_page(&root_page_handle); + disk_buffer_pool_->dispose_page(file_id_, old_root_page_num); + return RC::SUCCESS; +} +template +RC BplusTreeHandler::coalesce_or_redistribute(BPPageHandle &page_handle) +{ + IndexNodeHandlerType index_node(file_header_, page_handle); + if (index_node.size() >= index_node.min_size()) { + disk_buffer_pool_->unpin_page(&page_handle); + return RC::SUCCESS; + } + + const PageNum parent_page_num = index_node.parent_page_num(); + if (BP_INVALID_PAGE_NUM == parent_page_num) { + // this is the root page + if (index_node.size() > 1) { + disk_buffer_pool_->unpin_page(&page_handle); + } else { + // adjust the root node + adjust_root(page_handle); + } + return RC::SUCCESS; + } + + BPPageHandle parent_page_handle; + RC rc = disk_buffer_pool_->get_this_page(file_id_, parent_page_num, &parent_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch parent page. page id=%d, rc=%d:%s", parent_page_num, rc, strrc(rc)); + disk_buffer_pool_->unpin_page(&page_handle); + return rc; + } + + InternalIndexNodeHandler parent_index_node(file_header_, parent_page_handle); + int index = parent_index_node.lookup(key_comparator_, index_node.key_at(index_node.size() - 1)); + if (parent_index_node.value_at(index) != page_handle.page_num()) { + LOG_ERROR("lookup return an invalid value. index=%d, this page num=%d, but got %d", + index, page_handle.page_num(), parent_index_node.value_at(index)); + } + PageNum neighbor_page_num; + if (index == 0) { + neighbor_page_num = parent_index_node.value_at(1); + } else { + neighbor_page_num = parent_index_node.value_at(index - 1); + } + + BPPageHandle neighbor_page_handle; + rc = disk_buffer_pool_->get_this_page(file_id_, neighbor_page_num, &neighbor_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch neighbor page. 
page id=%d, rc=%d:%s", neighbor_page_num, rc, strrc(rc)); + // TODO do more thing to release resource + disk_buffer_pool_->unpin_page(&page_handle); + disk_buffer_pool_->unpin_page(&parent_page_handle); + return rc; + } + + IndexNodeHandlerType neighbor_node(file_header_, neighbor_page_handle); + if (index_node.size() + neighbor_node.size() > index_node.max_size()) { + rc = redistribute(neighbor_page_handle, page_handle, parent_page_handle, index); + } else { + rc = coalesce(neighbor_page_handle, page_handle, parent_page_handle, index); + } + return rc; +} + +template +RC BplusTreeHandler::coalesce(BPPageHandle &neighbor_page_handle, BPPageHandle &page_handle, + BPPageHandle &parent_page_handle, int index) +{ + IndexNodeHandlerType neighbor_node(file_header_, neighbor_page_handle); + IndexNodeHandlerType node(file_header_, page_handle); + + InternalIndexNodeHandler parent_node(file_header_, parent_page_handle); + + BPPageHandle *left_page_handle = nullptr; + BPPageHandle *right_page_handle = nullptr; + if (index == 0) { + // neighbor node is at right + left_page_handle = &page_handle; + right_page_handle = &neighbor_page_handle; + index++; + } else { + left_page_handle = &neighbor_page_handle; + right_page_handle = &page_handle; + // neighbor is at left + } + + IndexNodeHandlerType left_node(file_header_, *left_page_handle); + IndexNodeHandlerType right_node(file_header_, *right_page_handle); + + parent_node.remove(index); + // parent_node.validate(key_comparator_, disk_buffer_pool_, file_id_); + RC rc = right_node.move_to(left_node, disk_buffer_pool_, file_id_); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to move right node to left. rc=%d:%s", rc, strrc(rc)); + return rc; + } + // left_node.validate(key_comparator_); + + if (left_node.is_leaf()) { + LeafIndexNodeHandler left_leaf_node(file_header_, *left_page_handle); + LeafIndexNodeHandler right_leaf_node(file_header_, *right_page_handle); + left_leaf_node.set_next_page(right_leaf_node.next_page()); + + PageNum next_right_page_num = right_leaf_node.next_page(); + if (next_right_page_num != BP_INVALID_PAGE_NUM) { + BPPageHandle next_right_page_handle; + rc = disk_buffer_pool_->get_this_page(file_id_, next_right_page_num, &next_right_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch next right page. page number:%d. rc=%d:%s", next_right_page_num, rc, strrc(rc)); + disk_buffer_pool_->unpin_page(&page_handle); + disk_buffer_pool_->unpin_page(&neighbor_page_handle); + disk_buffer_pool_->unpin_page(&parent_page_handle); + return rc; + } + + LeafIndexNodeHandler next_right_node(file_header_, next_right_page_handle); + next_right_node.set_prev_page(left_node.page_num()); + disk_buffer_pool_->unpin_page(&next_right_page_handle); + } + + } + + PageNum right_page_num = right_page_handle->page_num(); + disk_buffer_pool_->unpin_page(left_page_handle); + disk_buffer_pool_->unpin_page(right_page_handle); + disk_buffer_pool_->dispose_page(file_id_, right_page_num); + return coalesce_or_redistribute(parent_page_handle); +} + +template +RC BplusTreeHandler::redistribute(BPPageHandle &neighbor_page_handle, BPPageHandle &page_handle, + BPPageHandle &parent_page_handle, int index) +{ + InternalIndexNodeHandler parent_node(file_header_, parent_page_handle); + IndexNodeHandlerType neighbor_node(file_header_, neighbor_page_handle); + IndexNodeHandlerType node(file_header_, page_handle); + if (neighbor_node.size() < node.size()) { + LOG_ERROR("got invalid nodes. 
neighbor node size %d, this node size %d", + neighbor_node.size(), node.size()); + } + if (index == 0) { + // the neighbor is at right + neighbor_node.move_first_to_end(node, disk_buffer_pool_, file_id_); + // neighbor_node.validate(key_comparator_, disk_buffer_pool_, file_id_); + // node.validate(key_comparator_, disk_buffer_pool_, file_id_); + parent_node.set_key_at(index + 1, neighbor_node.key_at(0)); + // parent_node.validate(key_comparator_, disk_buffer_pool_, file_id_); + } else { + // the neighbor is at left + neighbor_node.move_last_to_front(node, disk_buffer_pool_, file_id_); + // neighbor_node.validate(key_comparator_, disk_buffer_pool_, file_id_); + // node.validate(key_comparator_, disk_buffer_pool_, file_id_); + parent_node.set_key_at(index, node.key_at(0)); + // parent_node.validate(key_comparator_, disk_buffer_pool_, file_id_); + } + + neighbor_page_handle.mark_dirty(); + page_handle.mark_dirty(); + parent_page_handle.mark_dirty(); + disk_buffer_pool_->unpin_page(&parent_page_handle); + disk_buffer_pool_->unpin_page(&neighbor_page_handle); + disk_buffer_pool_->unpin_page(&page_handle); + return RC::SUCCESS; +} + +RC BplusTreeHandler::delete_entry_internal(BPPageHandle &leaf_page_handle, const char *key) +{ + LeafIndexNodeHandler leaf_index_node(file_header_, leaf_page_handle); + + const int remove_count = leaf_index_node.remove(key, key_comparator_); + if (remove_count == 0) { + LOG_TRACE("no data to remove"); + disk_buffer_pool_->unpin_page(&leaf_page_handle); + return RC::RECORD_RECORD_NOT_EXIST; + } + // leaf_index_node.validate(key_comparator_, disk_buffer_pool_, file_id_); + + leaf_page_handle.mark_dirty(); + + if (leaf_index_node.size() >= leaf_index_node.min_size()) { + disk_buffer_pool_->unpin_page(&leaf_page_handle); + return RC::SUCCESS; + } + + return coalesce_or_redistribute(leaf_page_handle); +} + +RC BplusTreeHandler::delete_entry(const char *user_key, const RID *rid) +{ + if (file_id_ < 0) { + LOG_WARN("Failed to delete index entry, due to index is't ready"); + return RC::RECORD_CLOSED; + } + + char *key = (char *)mem_pool_item_->alloc(); + if (nullptr == key) { + LOG_WARN("Failed to alloc memory for key. size=%d", file_header_.key_length); + return RC::NOMEM; + } + memcpy(key, user_key, file_header_.attr_length); + memcpy(key + file_header_.attr_length, rid, sizeof(*rid)); + + LOG_INFO("before delete"); + disk_buffer_pool_->check_all_pages_unpinned(file_id_); + BPPageHandle leaf_page_handle; + RC rc = find_leaf(key, leaf_page_handle); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to find leaf page. 
rc =%d:%s", rc, strrc(rc)); + mem_pool_item_->free(key); + return rc; + } + rc = delete_entry_internal(leaf_page_handle, key); + if (rc != RC::SUCCESS) { + LOG_WARN("Failed to delete index %d", file_id_); + mem_pool_item_->free(key); + return rc; + } + mem_pool_item_->free(key); + LOG_INFO("after delete"); + disk_buffer_pool_->check_all_pages_unpinned(file_id_); + return RC::SUCCESS; +} + +BplusTreeScanner::BplusTreeScanner(BplusTreeHandler &tree_handler) : tree_handler_(tree_handler) +{} + +BplusTreeScanner::~BplusTreeScanner() +{ + close(); +} + +RC BplusTreeScanner::open(const char *left_user_key, bool left_inclusive, + const char *right_user_key, bool right_inclusive) +{ + RC rc = RC::SUCCESS; + if (inited_) { + LOG_WARN("tree scanner has been inited"); + return RC::INTERNAL; + } + + inited_ = true; + + // 校验输入的键值是否是合法范围 + if (left_user_key && right_user_key) { + const auto &attr_comparator = tree_handler_.key_comparator_.attr_comparator(); + const int result = attr_comparator(left_user_key, right_user_key); + if (result > 0 || // left < right + // left == right but is (left,right)/[left,right) or (left,right] + (result == 0 && (left_inclusive == false || right_inclusive == false))) { + return RC::INVALID_ARGUMENT; + } + } + + if (nullptr == left_user_key) { + rc = tree_handler_.left_most_page(left_page_handle_); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to find left most page. rc=%d:%s", rc, strrc(rc)); + return rc; + } + + iter_index_ = 0; + } else { + char *left_key = nullptr; + if (left_inclusive) { + left_key = tree_handler_.make_key(left_user_key, *RID::min()); + } else { + left_key = tree_handler_.make_key(left_user_key, *RID::max()); + } + rc = tree_handler_.find_leaf(left_key, left_page_handle_); + + if (rc != RC::SUCCESS) { + LOG_WARN("failed to find left page. rc=%d:%s", rc, strrc(rc)); + tree_handler_.free_key(left_key); + return rc; + } + LeafIndexNodeHandler left_node(tree_handler_.file_header_, left_page_handle_); + int left_index = left_node.lookup(tree_handler_.key_comparator_, left_key); + tree_handler_.free_key(left_key); + // lookup 返回的是适合插入的位置,还需要判断一下是否在合适的边界范围内 + if (left_index >= left_node.size()) { // 超出了当前页,就需要向后移动一个位置 + const PageNum next_page_num = left_node.next_page(); + if (next_page_num == BP_INVALID_PAGE_NUM) { // 这里已经是最后一页,说明当前扫描,没有数据 + return RC::SUCCESS; + } + + tree_handler_.disk_buffer_pool_->unpin_page(&left_page_handle_); + rc = tree_handler_.disk_buffer_pool_->get_this_page(tree_handler_.file_id_, next_page_num, &left_page_handle_); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch next page. page num=%d, rc=%d:%s", next_page_num, rc, strrc(rc)); + return rc; + } + + left_index = 0; + } + iter_index_ = left_index; + } + + // 没有指定右边界范围,那么就返回右边界最大值 + if (nullptr == right_user_key) { + rc = tree_handler_.right_most_page(right_page_handle_); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch right most page. rc=%d:%s", rc, strrc(rc)); + return rc; + } + + LeafIndexNodeHandler node(tree_handler_.file_header_, right_page_handle_); + end_index_ = node.size() - 1; + } else { + + char *right_key = nullptr; + if (right_inclusive) { + right_key = tree_handler_.make_key(right_user_key, *RID::max()); + } else { + right_key = tree_handler_.make_key(right_user_key, *RID::min()); + } + + rc = tree_handler_.find_leaf(right_key, right_page_handle_); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to find left page. 
rc=%d:%s", rc, strrc(rc)); + tree_handler_.free_key(right_key); + return rc; + } + + LeafIndexNodeHandler right_node(tree_handler_.file_header_, right_page_handle_); + int right_index = right_node.lookup(tree_handler_.key_comparator_, right_key); + tree_handler_.free_key(right_key); + // lookup 返回的是适合插入的位置,需要根据实际情况做调整 + // 通常情况下需要找到上一个位置 + if (right_index > 0) { + right_index--; + } else { + // 实际上,只有最左边的叶子节点查找时,lookup 才可能返回0 + // 其它的叶子节点都不可能返回0,所以这段逻辑其实是可以简化的 + const PageNum prev_page_num = right_node.prev_page(); + if (prev_page_num == BP_INVALID_PAGE_NUM) { + end_index_ = -1; + return RC::SUCCESS; + } + + tree_handler_.disk_buffer_pool_->unpin_page(&right_page_handle_); + rc = tree_handler_.disk_buffer_pool_->get_this_page(tree_handler_.file_id_, prev_page_num, &right_page_handle_); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch prev page num. page num=%d, rc=%d:%s", prev_page_num, rc, strrc(rc)); + return rc; + } + + LeafIndexNodeHandler tmp_node(tree_handler_.file_header_, right_page_handle_); + right_index = tmp_node.size() - 1; + } + end_index_ = right_index; + } + + // 判断是否左边界比右边界要靠后 + // 两个边界最多会多一页 + // 查找不存在的元素,或者不存在的范围数据时,可能会存在这个问题 + if (left_page_handle_.page_num() == right_page_handle_.page_num() && + iter_index_ > end_index_) { + end_index_ = -1; + } else { + LeafIndexNodeHandler left_node(tree_handler_.file_header_, left_page_handle_); + LeafIndexNodeHandler right_node(tree_handler_.file_header_, right_page_handle_); + if (left_node.prev_page() == right_node.page_num()) { + end_index_ = -1; + } + } + return RC::SUCCESS; +} + +RC BplusTreeScanner::next_entry(RID *rid) +{ + if (-1 == end_index_) { + return RC::RECORD_EOF; + } + + LeafIndexNodeHandler node(tree_handler_.file_header_, left_page_handle_); + memcpy(rid, node.value_at(iter_index_), sizeof(*rid)); + + if (left_page_handle_.page_num() == right_page_handle_.page_num() && + iter_index_ == end_index_) { + end_index_ = -1; + return RC::SUCCESS; + } + + if (iter_index_ < node.size() - 1) { + ++iter_index_; + return RC::SUCCESS; + } + + RC rc = RC::SUCCESS; + if (left_page_handle_.page_num() != right_page_handle_.page_num()) { + PageNum page_num = node.next_page(); + tree_handler_.disk_buffer_pool_->unpin_page(&left_page_handle_); + if (page_num == BP_INVALID_PAGE_NUM) { + LOG_WARN("got invalid next page. page num=%d", page_num); + rc = RC::INTERNAL; + } else { + rc = tree_handler_.disk_buffer_pool_->get_this_page(tree_handler_.file_id_, page_num, &left_page_handle_); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to fetch next page. page num=%d, rc=%d:%s", page_num, rc, strrc(rc)); + return rc; + } + + iter_index_ = 0; + } + } else if (end_index_ != -1) { + LOG_WARN("should have more pages but not. 
left page=%d, right page=%d", + left_page_handle_.page_num(), right_page_handle_.page_num()); + rc = RC::INTERNAL; + } + return rc; +} + +RC BplusTreeScanner::close() +{ + if (left_page_handle_.open) { + tree_handler_.disk_buffer_pool_->unpin_page(&left_page_handle_); + } + if (right_page_handle_.open) { + tree_handler_.disk_buffer_pool_->unpin_page(&right_page_handle_); + } + end_index_ = -1; + inited_ = false; + LOG_INFO("bplus tree scanner closed"); + return RC::SUCCESS; +} diff --git a/src/observer/storage/index/bplus_tree.h b/src/observer/storage/index/bplus_tree.h new file mode 100644 index 0000000000000000000000000000000000000000..3700f40128b9df73178e485ccf03db36de257a9d --- /dev/null +++ b/src/observer/storage/index/bplus_tree.h @@ -0,0 +1,528 @@ +/* Copyright (c) 2021 Xie Meiyi(xiemeiyi@hust.edu.cn) and OceanBase and/or its affiliates. All rights reserved. +miniob is licensed under Mulan PSL v2. +You can use this software according to the terms and conditions of the Mulan PSL v2. +You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 +THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +See the Mulan PSL v2 for more details. */ + +// +// +// Created by Xie Meiyi +// Rewritten by Longda & Wangyunlai +// +// +#ifndef __OBSERVER_STORAGE_COMMON_INDEX_MANAGER_H_ +#define __OBSERVER_STORAGE_COMMON_INDEX_MANAGER_H_ + +#include +#include + +#include "storage/common/record_manager.h" +#include "storage/default/disk_buffer_pool.h" +#include "sql/parser/parse_defs.h" + +#define EMPTY_RID_PAGE_NUM -1 +#define EMPTY_RID_SLOT_NUM -1 + +class AttrComparator +{ +public: + void init(AttrType type, int length) + { + attr_type_ = type; + attr_length_ = length; + } + + int attr_length() const { + return attr_length_; + } + + int operator()(const char *v1, const char *v2) const { + switch (attr_type_) { + case INTS: { + return *(int *)v1 - *(int *)v2; + } + break; + case FLOATS: { + float result = *(float *)v1 - *(float *)v2; + if (-1e-6 < result && result < 1e-6) { + return 0; + } + return result > 0 ? 1 : -1; + } + case CHARS: { + return strncmp(v1, v2, attr_length_); + } + default:{ + LOG_ERROR("unknown attr type. %d", attr_type_); + abort(); + } + } + } +private: + AttrType attr_type_; + int attr_length_; +}; + +class KeyComparator +{ +public: + void init(AttrType type, int length) + { + attr_comparator_.init(type, length); + } + + const AttrComparator &attr_comparator() const { + return attr_comparator_; + } + + int operator() (const char *v1, const char *v2) const { + int result = attr_comparator_(v1, v2); + if (result != 0) { + return result; + } + + const RID *rid1 = (const RID *)(v1 + attr_comparator_.attr_length()); + const RID *rid2 = (const RID *)(v2 + attr_comparator_.attr_length()); + return RID::compare(rid1, rid2); + } + +private: + AttrComparator attr_comparator_; +}; + +class AttrPrinter +{ +public: + void init(AttrType type, int length) + { + attr_type_ = type; + attr_length_ = length; + } + + int attr_length() const { + return attr_length_; + } + + std::string operator()(const char *v) const { + switch (attr_type_) { + case INTS: { + return std::to_string(*(int*)v); + } + break; + case FLOATS: { + return std::to_string(*(float*)v); + } + case CHARS: { + return std::string(v, attr_length_); + } + default:{ + LOG_ERROR("unknown attr type. 
%d", attr_type_); + abort(); + } + } + } +private: + AttrType attr_type_; + int attr_length_; +}; + +class KeyPrinter +{ +public: + void init(AttrType type, int length) + { + attr_printer_.init(type, length); + } + + const AttrPrinter &attr_printer() const { + return attr_printer_; + } + + std::string operator() (const char *v) const { + std::stringstream ss; + ss << "{key:" << attr_printer_(v) << ","; + + const RID *rid = (const RID *)(v + attr_printer_.attr_length()); + ss << "rid:{" << rid->to_string() << "}}"; + return ss.str(); + } + +private: + AttrPrinter attr_printer_; +}; + +/** + * the meta information of bplus tree + * this is the first page of bplus tree. + * only one field can be supported, can you extend it to multi-fields? + */ +struct IndexFileHeader { + IndexFileHeader() + { + memset(this, 0, sizeof(IndexFileHeader)); + root_page = BP_INVALID_PAGE_NUM; + } + PageNum root_page; + int32_t internal_max_size; + int32_t leaf_max_size; + int32_t attr_length; + int32_t key_length; // attr length + sizeof(RID) + AttrType attr_type; + + const std::string to_string() + { + std::stringstream ss; + + ss << "attr_length:" << attr_length << "," + << "key_length:" << key_length << "," + << "attr_type:" << attr_type << "," + << "root_page:" << root_page << "," + << "internal_max_size:" << internal_max_size << "," + << "leaf_max_size:" << leaf_max_size << ";"; + + return ss.str(); + } +}; + +#define RECORD_RESERVER_PAIR_NUM 2 +/** + * the common part of page describtion of bplus tree + * storage format: + * | page type | item number | parent page id | + */ +struct IndexNode { + static constexpr int HEADER_SIZE = 12; + + bool is_leaf; + int key_num; + PageNum parent; +}; + +/** + * leaf page of bplus tree + * storage format: + * | common header | prev page id | next page id | + * | key0, rid0 | key1, rid1 | ... | keyn, ridn | + * + * the key is in format: the key value of record and rid. + * so the key in leaf page must be unique. + * the value is rid. + * can you implenment a cluster index ? + */ +struct LeafIndexNode : public IndexNode { + static constexpr int HEADER_SIZE = IndexNode::HEADER_SIZE + 8; + + PageNum prev_brother; + PageNum next_brother; + /** + * leaf can store order keys and rids at most + */ + char array[0]; +}; + +/** + * internal page of bplus tree + * storage format: + * | common header | + * | key(0),page_id(0) | key(1), page_id(1) | ... | key(n), page_id(n) | + * + * the first key is ignored(key0). + * so it will waste space, can you fix this? + */ +struct InternalIndexNode : public IndexNode { + static constexpr int HEADER_SIZE = IndexNode::HEADER_SIZE; + + /** + * internal node just store order -1 keys and order rids, the last rid is last rght child. 
+ */ + char array[0]; +}; + +class IndexNodeHandler { +public: + IndexNodeHandler(const IndexFileHeader &header, BPPageHandle &page_handle); + + void init_empty(bool leaf); + + bool is_leaf() const; + int key_size() const; + int value_size() const; + int item_size() const; + + void increase_size(int n); + int size() const; + void set_parent_page_num(PageNum page_num); + PageNum parent_page_num() const; + + PageNum page_num() const; + + bool validate() const; + + friend std::string to_string(const IndexNodeHandler &handler); + +protected: + const IndexFileHeader &header_; + PageNum page_num_; + IndexNode *node_; +}; + +class LeafIndexNodeHandler : public IndexNodeHandler { +public: + LeafIndexNodeHandler(const IndexFileHeader &header, BPPageHandle &page_handle); + + void init_empty(); + void set_next_page(PageNum page_num); + void set_prev_page(PageNum page_num); + PageNum next_page() const; + PageNum prev_page() const; + + char *key_at(int index); + char *value_at(int index); + + /** + * 查找指定key的插入位置(注意不是key本身) + * 如果key已经存在,会设置found的值 + * NOTE: 当前lookup的实现效率非常低,你是否可以优化它? + */ + int lookup(const KeyComparator &comparator, const char *key, bool *found = nullptr) const; + + void insert(int index, const char *key, const char *value); + void remove(int index); + int remove(const char *key, const KeyComparator &comparator); + RC move_half_to(LeafIndexNodeHandler &other, DiskBufferPool *bp, int file_id); + RC move_first_to_end(LeafIndexNodeHandler &other, DiskBufferPool *disk_buffer_pool, int file_id); + RC move_last_to_front(LeafIndexNodeHandler &other, DiskBufferPool *bp, int file_id); + /** + * move all items to left page + */ + RC move_to(LeafIndexNodeHandler &other, DiskBufferPool *bp, int file_id); + + int max_size() const; + int min_size() const; + + bool validate(const KeyComparator &comparator, DiskBufferPool *bp, int file_id) const; + + friend std::string to_string(const LeafIndexNodeHandler &handler, const KeyPrinter &printer); +private: + char *__item_at(int index) const; + char *__key_at(int index) const; + char *__value_at(int index) const; + + void append(const char *item); + void preappend(const char *item); + +private: + LeafIndexNode *leaf_node_; +}; + +class InternalIndexNodeHandler : public IndexNodeHandler { +public: + InternalIndexNodeHandler(const IndexFileHeader &header, BPPageHandle &page_handle); + + void init_empty(); + void create_new_root(PageNum first_page_num, const char *key, PageNum page_num); + + void insert(const char *key, PageNum page_num, const KeyComparator &comparator); + RC move_half_to(LeafIndexNodeHandler &other, DiskBufferPool *bp, int file_id); + char *key_at(int index); + PageNum value_at(int index); + + /** + * 返回指定子节点在当前节点中的索引 + */ + int value_index(PageNum page_num); + void set_key_at(int index, const char *key); + void remove(int index); + + /** + * 与Leaf节点不同,lookup返回指定key应该属于哪个子节点,返回这个子节点在当前节点中的索引 + * 如果想要返回插入位置,就提供 `insert_position` 参数 + * NOTE: 查找效率不高,你可以优化它吗? 
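+ * (In short: unlike the leaf lookup, the return value is the index of the child that should contain the key, not an insert position; pass insert_position to also receive the slot a new key would occupy. The current linear scan is O(n) per node and could be replaced by a binary search.)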
+ */ + int lookup(const KeyComparator &comparator, const char *key, + bool *found = nullptr, int *insert_position = nullptr) const; + + int max_size() const; + int min_size() const; + + RC move_to(InternalIndexNodeHandler &other, DiskBufferPool *disk_buffer_pool, int file_id); + RC move_first_to_end(InternalIndexNodeHandler &other, DiskBufferPool *disk_buffer_pool, int file_id); + RC move_last_to_front(InternalIndexNodeHandler &other, DiskBufferPool *bp, int file_id); + RC move_half_to(InternalIndexNodeHandler &other, DiskBufferPool *bp, int file_id); + + bool validate(const KeyComparator &comparator, DiskBufferPool *bp, int file_id) const; + + friend std::string to_string(const InternalIndexNodeHandler &handler, const KeyPrinter &printer); +private: + RC copy_from(const char *items, int num, DiskBufferPool *disk_buffer_pool, int file_id); + RC append(const char *item, DiskBufferPool *bp, int file_id); + RC preappend(const char *item, DiskBufferPool *bp, int file_id); + +private: + char *__item_at(int index) const; + char *__key_at(int index) const; + char *__value_at(int index) const; + + int value_size() const; + int item_size() const; + +private: + InternalIndexNode *internal_node_; +}; + +class BplusTreeHandler { +public: + /** + * 此函数创建一个名为fileName的索引。 + * attrType描述被索引属性的类型,attrLength描述被索引属性的长度 + */ + RC create(const char *file_name, AttrType attr_type, int attr_length, + int internal_max_size = -1, int leaf_max_size = -1); + + /** + * 打开名为fileName的索引文件。 + * 如果方法调用成功,则indexHandle为指向被打开的索引句柄的指针。 + * 索引句柄用于在索引中插入或删除索引项,也可用于索引的扫描 + */ + RC open(const char *file_name); + + /** + * 关闭句柄indexHandle对应的索引文件 + */ + RC close(); + + /** + * 此函数向IndexHandle对应的索引中插入一个索引项。 + * 参数user_key指向要插入的属性值,参数rid标识该索引项对应的元组, + * 即向索引中插入一个值为(user_key,rid)的键值对 + */ + RC insert_entry(const char *user_key, const RID *rid); + + /** + * 从IndexHandle句柄对应的索引中删除一个值为(*pData,rid)的索引项 + * @return RECORD_INVALID_KEY 指定值不存在 + */ + RC delete_entry(const char *user_key, const RID *rid); + + bool is_empty() const; + + /** + * 获取指定值的record + * @param rid 返回值,记录记录所在的页面号和slot + */ + RC get_entry(const char *user_key, std::list &rids); + + RC sync(); + + const int get_file_id() + { + return file_id_; + } + + /** + * Check whether current B+ tree is invalid or not. + * return true means current tree is valid, return false means current tree is invalid. 
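+ * It walks every node (validate_node_recursive) and the whole leaf chain (validate_leaf_link), so it is O(n) over the tree and best suited to tests and debugging.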
+ * @return + */ + bool validate_tree(); + +public: + RC print_tree(); + RC print_leafs(); + +private: + RC print_leaf(BPPageHandle &page_handle); + RC print_internal_node_recursive(BPPageHandle &page_handle); + + bool validate_node(IndexNode *node); + bool validate_leaf_link(); + bool validate_node_recursive(BPPageHandle &page_handle); + +protected: + RC find_leaf(const char *key, BPPageHandle &page_handle); + RC left_most_page(BPPageHandle &page_handle); + RC right_most_page(BPPageHandle &page_handle); + RC find_leaf_internal(const std::function &child_page_getter, + BPPageHandle &page_handle); + + RC insert_into_parent( + PageNum parent_page, BPPageHandle &left_page_handle, const char *pkey, BPPageHandle &right_page_handle); + RC split_leaf(BPPageHandle &leaf_page_handle); + + RC delete_entry_internal(BPPageHandle &leaf_page_handle, const char *key); + + RC insert_into_new_root(BPPageHandle &left_page_handle, const char *pkey, BPPageHandle &right_page_handle); + + template + RC split(BPPageHandle &page_handle, BPPageHandle &new_page_handle); + template + RC coalesce_or_redistribute(BPPageHandle &page_handle); + template + RC coalesce(BPPageHandle &neighbor_page_handle, BPPageHandle &page_handle, + BPPageHandle &parent_page_handle, int index); + template + RC redistribute(BPPageHandle &neighbor_page_handle, BPPageHandle &page_handle, + BPPageHandle &parent_page_handle, int index); + + RC insert_entry_into_parent(BPPageHandle &page_handle, BPPageHandle &new_page_handle, const char *key); + RC insert_entry_into_leaf_node(BPPageHandle &page_handle, const char *pkey, const RID *rid); + RC update_root_page_num(); + RC create_new_tree(const char *key, const RID *rid); + + RC adjust_root(BPPageHandle &root_page_handle); + +private: + char *make_key(const char *user_key, const RID &rid); + void free_key(char *key); +protected: + DiskBufferPool *disk_buffer_pool_ = nullptr; + int file_id_ = -1; + bool header_dirty_ = false; + IndexFileHeader file_header_; + + KeyComparator key_comparator_; + KeyPrinter key_printer_; + + common::MemPoolItem *mem_pool_item_ = nullptr; + +private: + friend class BplusTreeScanner; + friend class BplusTreeTester; +}; + +class BplusTreeScanner { +public: + BplusTreeScanner(BplusTreeHandler &tree_handler); + ~BplusTreeScanner(); + + /** + * 扫描指定范围的数据 + * @param left_key 扫描范围的左边界,如果是null,则没有左边界 + * @param left_inclusive 左边界的值是否包含在内 + * @param right_key 扫描范围的右边界。如果是null,则没有右边界 + * @param right_inclusive 右边界的值是否包含在内 + */ + RC open(const char *left_user_key, bool left_inclusive, + const char *right_user_key, bool right_inclusive); + + RC next_entry(RID *rid); + + RC close(); + +private: + bool inited_ = false; + BplusTreeHandler &tree_handler_; + + /// 使用左右叶子节点和位置来表示扫描的起始位置和终止位置 + /// 起始位置和终止位置都是有效的数据 + BPPageHandle left_page_handle_; + BPPageHandle right_page_handle_; + int iter_index_ = -1; + int end_index_ = -1; // use -1 for end of scan +}; + +#endif //__OBSERVER_STORAGE_COMMON_INDEX_MANAGER_H_ diff --git a/src/observer/storage/common/bplus_tree_index.cpp b/src/observer/storage/index/bplus_tree_index.cpp similarity index 80% rename from src/observer/storage/common/bplus_tree_index.cpp rename to src/observer/storage/index/bplus_tree_index.cpp index 200a01b152560d047a7f940b387fe56849d4371b..36ccf3c43c0bfda5ad8300752a348ee3b8ab94f4 100644 --- a/src/observer/storage/common/bplus_tree_index.cpp +++ b/src/observer/storage/index/bplus_tree_index.cpp @@ -9,10 +9,10 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. 
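Before the diff moves on to bplus_tree_index.cpp: the BplusTreeScanner declared above is normally driven as a plain open / next_entry / close loop. The snippet below is a hypothetical sketch, not part of this patch; the function name and variables are invented, a null bound is assumed to mean "unbounded" as the comments on open() describe, and next_entry() is assumed to report the end of the range with RC::RECORD_EOF.

    // Hypothetical sketch: scan user keys in [low, high), i.e. right bound exclusive.
    void scan_range(BplusTreeHandler &handler, int low, int high)
    {
      BplusTreeScanner scanner(handler);
      RC rc = scanner.open(reinterpret_cast<const char *>(&low), true /*left_inclusive*/,
                           reinterpret_cast<const char *>(&high), false /*right_inclusive*/);
      if (rc != RC::SUCCESS) {
        return;
      }
      RID rid;
      while ((rc = scanner.next_entry(&rid)) == RC::SUCCESS) {
        // consume rid ...
      }
      // once the range is exhausted, rc is expected to be RC::RECORD_EOF rather than an error
      scanner.close();
    }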
*/ // -// Created by wangyunlai.wyl on 2021/5/19. +// Created by Meiyi & wangyunlai.wyl on 2021/5/19. // -#include "storage/common/bplus_tree_index.h" +#include "storage/index/bplus_tree_index.h" #include "common/log/log.h" BplusTreeIndex::~BplusTreeIndex() noexcept @@ -100,17 +100,16 @@ RC BplusTreeIndex::delete_entry(const char *record, const RID *rid) return index_handler_.delete_entry(record + field_meta_.offset(), rid); } -IndexScanner *BplusTreeIndex::create_scanner(CompOp comp_op, const char *value) +IndexScanner *BplusTreeIndex::create_scanner(const char *left_key, bool left_inclusive, + const char *right_key, bool right_inclusive) { - BplusTreeScanner *bplus_tree_scanner = new BplusTreeScanner(index_handler_); - RC rc = bplus_tree_scanner->open(comp_op, value); + BplusTreeIndexScanner *index_scanner = new BplusTreeIndexScanner(index_handler_); + RC rc = index_scanner->open(left_key, left_inclusive, right_key, right_inclusive); if (rc != RC::SUCCESS) { - LOG_WARN("Failed to open index scanner. file_id:%d, rc=%d:%s", index_handler_.get_file_id(), rc, strrc(rc)); - delete bplus_tree_scanner; + LOG_WARN("failed to open index scanner. rc=%d:%s", rc, strrc(rc)); + delete index_scanner; return nullptr; } - - BplusTreeIndexScanner *index_scanner = new BplusTreeIndexScanner(bplus_tree_scanner); return index_scanner; } @@ -120,22 +119,26 @@ RC BplusTreeIndex::sync() } //////////////////////////////////////////////////////////////////////////////// -BplusTreeIndexScanner::BplusTreeIndexScanner(BplusTreeScanner *tree_scanner) : tree_scanner_(tree_scanner) +BplusTreeIndexScanner::BplusTreeIndexScanner(BplusTreeHandler &tree_handler) : tree_scanner_(tree_handler) {} BplusTreeIndexScanner::~BplusTreeIndexScanner() noexcept { - tree_scanner_->close(); - delete tree_scanner_; + tree_scanner_.close(); +} + +RC BplusTreeIndexScanner::open(const char *left_key, bool left_inclusive, const char *right_key, bool right_inclusive) +{ + return tree_scanner_.open(left_key, left_inclusive, right_key, right_inclusive); } RC BplusTreeIndexScanner::next_entry(RID *rid) { - return tree_scanner_->next_entry(rid); + return tree_scanner_.next_entry(rid); } RC BplusTreeIndexScanner::destroy() { delete this; return RC::SUCCESS; -} \ No newline at end of file +} diff --git a/src/observer/storage/common/bplus_tree_index.h b/src/observer/storage/index/bplus_tree_index.h similarity index 71% rename from src/observer/storage/common/bplus_tree_index.h rename to src/observer/storage/index/bplus_tree_index.h index 60296a563dbe2a5aaccad83dc2fa58f6f8435336..1c3ff920f9269c320660621a24ccd934984033dc 100644 --- a/src/observer/storage/common/bplus_tree_index.h +++ b/src/observer/storage/index/bplus_tree_index.h @@ -9,14 +9,14 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by wangyunlai.wyl on 2021/5/19. +// Created by Meiyi & wangyunlai.wyl on 2021/5/19. 
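The create_scanner() change above replaces the old single-condition (CompOp, value) interface with an explicit range given by two bounds and two inclusive flags. For callers that still think in comparison operators, the translation is mechanical. The sketch below is hypothetical and not part of this patch: the function name is invented, the CompOp enumerator names are assumed to be the usual miniob ones, and the inclusive flag is assumed to be ignored when the corresponding bound is null.

    // Hypothetical sketch: translate an old-style (CompOp, value) condition into the new range call.
    // NOT_EQUAL cannot be expressed as a single range and would need a full scan plus filtering.
    IndexScanner *scan_by_comp_op(Index &index, CompOp op, const char *value)
    {
      switch (op) {
        case EQUAL_TO:    return index.create_scanner(value,   true,  value,   true);
        case LESS_THAN:   return index.create_scanner(nullptr, true,  value,   false);
        case LESS_EQUAL:  return index.create_scanner(nullptr, true,  value,   true);
        case GREAT_THAN:  return index.create_scanner(value,   false, nullptr, true);
        case GREAT_EQUAL: return index.create_scanner(value,   true,  nullptr, true);
        default:          return index.create_scanner(nullptr, true,  nullptr, true);  // unrestricted scan
      }
    }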
// #ifndef __OBSERVER_STORAGE_COMMON_BPLUS_TREE_INDEX_H_ #define __OBSERVER_STORAGE_COMMON_BPLUS_TREE_INDEX_H_ -#include "storage/common/index.h" -#include "storage/common/bplus_tree.h" +#include "storage/index/index.h" +#include "storage/index/bplus_tree.h" class BplusTreeIndex : public Index { public: @@ -30,7 +30,11 @@ public: RC insert_entry(const char *record, const RID *rid) override; RC delete_entry(const char *record, const RID *rid) override; - IndexScanner *create_scanner(CompOp comp_op, const char *value) override; + /** + * 扫描指定范围的数据 + */ + IndexScanner *create_scanner(const char *left_key, bool left_inclusive, + const char *right_key, bool right_inclusive) override; RC sync() override; @@ -41,13 +45,15 @@ private: class BplusTreeIndexScanner : public IndexScanner { public: - BplusTreeIndexScanner(BplusTreeScanner *tree_scanner); + BplusTreeIndexScanner(BplusTreeHandler &tree_handle); ~BplusTreeIndexScanner() noexcept override; RC next_entry(RID *rid) override; RC destroy() override; + + RC open(const char *left_key, bool left_inclusive, const char *right_key, bool right_inclusive); private: - BplusTreeScanner * tree_scanner_; + BplusTreeScanner tree_scanner_; }; -#endif //__OBSERVER_STORAGE_COMMON_BPLUS_TREE_INDEX_H_ +#endif //__OBSERVER_STORAGE_COMMON_BPLUS_TREE_INDEX_H_ diff --git a/src/observer/storage/common/index.cpp b/src/observer/storage/index/index.cpp similarity index 88% rename from src/observer/storage/common/index.cpp rename to src/observer/storage/index/index.cpp index eb641593609b26da2788ea9fd16b36fa7d2515e4..105c0378cdc77e299efe474e7e7e5e576382f468 100644 --- a/src/observer/storage/common/index.cpp +++ b/src/observer/storage/index/index.cpp @@ -9,13 +9,14 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by wangyunlai.wyl on 2021/5/19. +// Created by Meiyi & wangyunlai.wyl on 2021/5/19. // -#include "storage/common/index.h" +#include "storage/index/index.h" -RC Index::init(const IndexMeta &index_meta, const FieldMeta &field_meta) { +RC Index::init(const IndexMeta &index_meta, const FieldMeta &field_meta) +{ index_meta_ = index_meta; field_meta_ = field_meta; return RC::SUCCESS; -} \ No newline at end of file +} diff --git a/src/observer/storage/common/index.h b/src/observer/storage/index/index.h similarity index 77% rename from src/observer/storage/common/index.h rename to src/observer/storage/index/index.h index 7de8ec0e3b9fbdb63ba2e9e9a00cb3087b91f566..d66a09327de2559047fa510db59e143d1a7cca4b 100644 --- a/src/observer/storage/common/index.h +++ b/src/observer/storage/index/index.h @@ -9,7 +9,7 @@ MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. */ // -// Created by Wangyunlai on 2021/5/11. +// Created by Meiyi & Wangyunlai on 2021/5/11. 
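One lifecycle detail of the scanner code above is easy to get wrong: BplusTreeIndexScanner::destroy() calls `delete this`, so a scanner obtained from Index::create_scanner() must be released through destroy() and never deleted directly. A hypothetical caller-side sketch (names invented; it assumes next_entry() returns RC::RECORD_EOF once the range is exhausted):

    // Hypothetical sketch: point lookup through the generic Index interface.
    void scan_equal(Index *index, const char *key)
    {
      IndexScanner *scanner = index->create_scanner(key, true /*left_inclusive*/, key, true /*right_inclusive*/);
      if (scanner == nullptr) {
        return;  // create_scanner() returns nullptr when opening the scan fails
      }
      RID rid;
      while (scanner->next_entry(&rid) == RC::SUCCESS) {
        // use rid ...
      }
      scanner->destroy();  // destroy() deletes the scanner itself; do not call `delete scanner`
    }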
// #ifndef __OBSERVER_STORAGE_COMMON_INDEX_H_ @@ -38,14 +38,16 @@ public: Index() = default; virtual ~Index() = default; - const IndexMeta &index_meta() const { + const IndexMeta &index_meta() const + { return index_meta_; } virtual RC insert_entry(const char *record, const RID *rid) = 0; virtual RC delete_entry(const char *record, const RID *rid) = 0; - virtual IndexScanner *create_scanner(CompOp comp_op, const char *value) = 0; + virtual IndexScanner *create_scanner(const char *left_key, bool left_inclusive, + const char *right_key, bool right_inclusive) = 0; virtual RC sync() = 0; @@ -53,8 +55,8 @@ protected: RC init(const IndexMeta &index_meta, const FieldMeta &field_meta); protected: - IndexMeta index_meta_; - FieldMeta field_meta_; /// 当前实现仅考虑一个字段的索引 + IndexMeta index_meta_; + FieldMeta field_meta_; /// 当前实现仅考虑一个字段的索引 }; class IndexScanner { @@ -62,8 +64,12 @@ public: IndexScanner() = default; virtual ~IndexScanner() = default; + /** + * 遍历元素数据 + * 如果没有更多的元素,返回RECORD_EOF + */ virtual RC next_entry(RID *rid) = 0; virtual RC destroy() = 0; }; -#endif // __OBSERVER_STORAGE_COMMON_INDEX_H_ \ No newline at end of file +#endif // __OBSERVER_STORAGE_COMMON_INDEX_H_ diff --git a/src/observer/storage/mem/mem_storage_stage.cpp b/src/observer/storage/mem/mem_storage_stage.cpp index 83f2a27f3c258770bdd8fbd574d1679a73674113..e20000ec08dc9afc49e6a2a7daefe8e2d98c5200 100644 --- a/src/observer/storage/mem/mem_storage_stage.cpp +++ b/src/observer/storage/mem/mem_storage_stage.cpp @@ -27,13 +27,16 @@ See the Mulan PSL v2 for more details. */ using namespace common; //! Constructor -MemStorageStage::MemStorageStage(const char *tag) : Stage(tag) {} +MemStorageStage::MemStorageStage(const char *tag) : Stage(tag) +{} //! Destructor -MemStorageStage::~MemStorageStage() {} +MemStorageStage::~MemStorageStage() +{} //! Parse properties, instantiate a stage object -Stage *MemStorageStage::make_stage(const std::string &tag) { +Stage *MemStorageStage::make_stage(const std::string &tag) +{ MemStorageStage *stage = new (std::nothrow) MemStorageStage(tag.c_str()); if (stage == nullptr) { LOG_ERROR("new MemStorageStage failed"); @@ -44,7 +47,8 @@ Stage *MemStorageStage::make_stage(const std::string &tag) { } //! Set properties for this object set in stage specific properties -bool MemStorageStage::set_properties() { +bool MemStorageStage::set_properties() +{ // std::string stageNameStr(stage_name_); // std::map section = g_properties()->get( // stageNameStr); @@ -57,7 +61,8 @@ bool MemStorageStage::set_properties() { } //! Initialize stage params and validate outputs -bool MemStorageStage::initialize() { +bool MemStorageStage::initialize() +{ LOG_TRACE("Enter"); LOG_TRACE("Exit"); @@ -65,13 +70,15 @@ bool MemStorageStage::initialize() { } //! 
Cleanup after disconnection -void MemStorageStage::cleanup() { +void MemStorageStage::cleanup() +{ LOG_TRACE("Enter"); LOG_TRACE("Exit"); } -void MemStorageStage::handle_event(StageEvent *event) { +void MemStorageStage::handle_event(StageEvent *event) +{ LOG_TRACE("Enter\n"); TimerStat timerStat(*queryMetric); @@ -81,8 +88,8 @@ void MemStorageStage::handle_event(StageEvent *event) { return; } -void MemStorageStage::callback_event(StageEvent *event, - CallbackContext *context) { +void MemStorageStage::callback_event(StageEvent *event, CallbackContext *context) +{ LOG_TRACE("Enter\n"); LOG_TRACE("Exit\n"); diff --git a/src/observer/storage/mem/mem_storage_stage.h b/src/observer/storage/mem/mem_storage_stage.h index 4a542df7d0a4d76b2f0e1eefd08da27c0a80b6b6..468d2ac53a5c8f5711a4461a496b34e8bdd73923 100644 --- a/src/observer/storage/mem/mem_storage_stage.h +++ b/src/observer/storage/mem/mem_storage_stage.h @@ -31,13 +31,13 @@ protected: bool initialize(); void cleanup(); void handle_event(common::StageEvent *event); - void callback_event(common::StageEvent *event, - common::CallbackContext *context); + void callback_event(common::StageEvent *event, common::CallbackContext *context); protected: common::SimpleTimer *queryMetric = nullptr; static const std::string QUERY_METRIC_TAG; + private: }; -#endif //__OBSERVER_STORAGE_MEM_STORAGE_STAGE_H__ +#endif //__OBSERVER_STORAGE_MEM_STORAGE_STAGE_H__ diff --git a/src/observer/storage/trx/trx.cpp b/src/observer/storage/trx/trx.cpp index e1f9fa24b14522c2061fd07cdb852052fd56a117..f61f987b8fd310b5e6dc1791f57d7ec5c24b595e 100644 --- a/src/observer/storage/trx/trx.cpp +++ b/src/observer/storage/trx/trx.cpp @@ -23,39 +23,45 @@ See the Mulan PSL v2 for more details. */ static const uint32_t DELETED_FLAG_BIT_MASK = 0x80000000; static const uint32_t TRX_ID_BIT_MASK = 0x7FFFFFFF; -int32_t Trx::default_trx_id() { +int32_t Trx::default_trx_id() +{ return 0; } -int32_t Trx::next_trx_id() { +int32_t Trx::next_trx_id() +{ static std::atomic trx_id; return ++trx_id; } -const char *Trx::trx_field_name() { +const char *Trx::trx_field_name() +{ return "__trx"; } -AttrType Trx::trx_field_type() { +AttrType Trx::trx_field_type() +{ return INTS; } -int Trx::trx_field_len() { +int Trx::trx_field_len() +{ return sizeof(int32_t); } -Trx::Trx() { -} +Trx::Trx() +{} -Trx::~Trx() { -} +Trx::~Trx() +{} -RC Trx::insert_record(Table *table, Record *record) { +RC Trx::insert_record(Table *table, Record *record) +{ RC rc = RC::SUCCESS; - // 先校验是否以前是否存在过(应该不会存在) + // 先校验是否以前是否存在过(应该不会存在) Operation *old_oper = find_operation(table, record->rid); if (old_oper != nullptr) { - return RC::GENERIC_ERROR; // error code + return RC::GENERIC_ERROR; // error code } start_if_not_started(); @@ -67,7 +73,8 @@ RC Trx::insert_record(Table *table, Record *record) { return rc; } -RC Trx::delete_record(Table *table, Record *record) { +RC Trx::delete_record(Table *table, Record *record) +{ RC rc = RC::SUCCESS; start_if_not_started(); Operation *old_oper = find_operation(table, record->rid); @@ -84,29 +91,32 @@ RC Trx::delete_record(Table *table, Record *record) { return rc; } -void Trx::set_record_trx_id(Table *table, Record &record, int32_t trx_id, bool deleted) const { +void Trx::set_record_trx_id(Table *table, Record &record, int32_t trx_id, bool deleted) const +{ const FieldMeta *trx_field = table->table_meta().trx_field(); - int32_t *ptrx_id = (int32_t*)(record.data + trx_field->offset()); + int32_t *ptrx_id = (int32_t *)(record.data + trx_field->offset()); if (deleted) { trx_id |= 
DELETED_FLAG_BIT_MASK; } *ptrx_id = trx_id; } -void Trx::get_record_trx_id(Table *table, const Record &record, int32_t &trx_id, bool &deleted) { +void Trx::get_record_trx_id(Table *table, const Record &record, int32_t &trx_id, bool &deleted) +{ const FieldMeta *trx_field = table->table_meta().trx_field(); - int32_t trx = *(int32_t*)(record.data + trx_field->offset()); + int32_t trx = *(int32_t *)(record.data + trx_field->offset()); trx_id = trx & TRX_ID_BIT_MASK; deleted = (trx & DELETED_FLAG_BIT_MASK) != 0; } -Operation *Trx::find_operation(Table *table, const RID &rid) { +Operation *Trx::find_operation(Table *table, const RID &rid) +{ std::unordered_map::iterator table_operations_iter = operations_.find(table); if (table_operations_iter == operations_.end()) { return nullptr; } - OperationSet & table_operations = table_operations_iter->second; + OperationSet &table_operations = table_operations_iter->second; Operation tmp(Operation::Type::UNDEFINED, rid); OperationSet::iterator operation_iter = table_operations.find(tmp); if (operation_iter == table_operations.end()) { @@ -115,28 +125,31 @@ Operation *Trx::find_operation(Table *table, const RID &rid) { return const_cast(&(*operation_iter)); } -void Trx::insert_operation(Table *table, Operation::Type type, const RID &rid) { - OperationSet & table_operations = operations_[table]; +void Trx::insert_operation(Table *table, Operation::Type type, const RID &rid) +{ + OperationSet &table_operations = operations_[table]; table_operations.emplace(type, rid); } -void Trx::delete_operation(Table *table, const RID &rid) { +void Trx::delete_operation(Table *table, const RID &rid) +{ std::unordered_map
::iterator table_operations_iter = operations_.find(table); if (table_operations_iter == operations_.end()) { - return ; + return; } Operation tmp(Operation::Type::UNDEFINED, rid); table_operations_iter->second.erase(tmp); } -RC Trx::commit() { +RC Trx::commit() +{ RC rc = RC::SUCCESS; - for (const auto &table_operations: operations_) { + for (const auto &table_operations : operations_) { Table *table = table_operations.first; const OperationSet &operation_set = table_operations.second; - for (const Operation &operation: operation_set) { + for (const Operation &operation : operation_set) { RID rid; rid.page_num = operation.page_num(); @@ -147,24 +160,21 @@ RC Trx::commit() { rc = table->commit_insert(this, rid); if (rc != RC::SUCCESS) { // handle rc - LOG_ERROR("Failed to commit insert operation. rid=%d.%d, rc=%d:%s", - rid.page_num, rid.slot_num, rc, strrc(rc)); + LOG_ERROR( + "Failed to commit insert operation. rid=%d.%d, rc=%d:%s", rid.page_num, rid.slot_num, rc, strrc(rc)); } - } - break; + } break; case Operation::Type::DELETE: { rc = table->commit_delete(this, rid); if (rc != RC::SUCCESS) { // handle rc - LOG_ERROR("Failed to commit delete operation. rid=%d.%d, rc=%d:%s", - rid.page_num, rid.slot_num, rc, strrc(rc)); + LOG_ERROR( + "Failed to commit delete operation. rid=%d.%d, rc=%d:%s", rid.page_num, rid.slot_num, rc, strrc(rc)); } - } - break; + } break; default: { LOG_PANIC("Unknown operation. type=%d", (int)operation.type()); - } - break; + } break; } } } @@ -174,12 +184,13 @@ RC Trx::commit() { return rc; } -RC Trx::rollback() { +RC Trx::rollback() +{ RC rc = RC::SUCCESS; - for (const auto &table_operations: operations_) { + for (const auto &table_operations : operations_) { Table *table = table_operations.first; const OperationSet &operation_set = table_operations.second; - for (const Operation &operation: operation_set) { + for (const Operation &operation : operation_set) { RID rid; rid.page_num = operation.page_num(); @@ -190,24 +201,21 @@ RC Trx::rollback() { rc = table->rollback_insert(this, rid); if (rc != RC::SUCCESS) { // handle rc - LOG_ERROR("Failed to rollback insert operation. rid=%d.%d, rc=%d:%s", - rid.page_num, rid.slot_num, rc, strrc(rc)); + LOG_ERROR( + "Failed to rollback insert operation. rid=%d.%d, rc=%d:%s", rid.page_num, rid.slot_num, rc, strrc(rc)); } - } - break; + } break; case Operation::Type::DELETE: { rc = table->rollback_delete(this, rid); if (rc != RC::SUCCESS) { // handle rc - LOG_ERROR("Failed to rollback delete operation. rid=%d.%d, rc=%d:%s", - rid.page_num, rid.slot_num, rc, strrc(rc)); + LOG_ERROR( + "Failed to rollback delete operation. rid=%d.%d, rc=%d:%s", rid.page_num, rid.slot_num, rc, strrc(rc)); } - } - break; + } break; default: { LOG_PANIC("Unknown operation. 
type=%d", (int)operation.type()); - } - break; + } break; } } } @@ -217,17 +225,20 @@ RC Trx::rollback() { return rc; } -RC Trx::commit_insert(Table *table, Record &record) { +RC Trx::commit_insert(Table *table, Record &record) +{ set_record_trx_id(table, record, 0, false); return RC::SUCCESS; } -RC Trx::rollback_delete(Table *table, Record &record) { +RC Trx::rollback_delete(Table *table, Record &record) +{ set_record_trx_id(table, record, 0, false); return RC::SUCCESS; } -bool Trx::is_visible(Table *table, const Record *record) { +bool Trx::is_visible(Table *table, const Record *record) +{ int32_t record_trx_id; bool record_deleted; get_record_trx_id(table, *record, record_trx_id, record_deleted); @@ -237,14 +248,16 @@ bool Trx::is_visible(Table *table, const Record *record) { return !record_deleted; } - return record_deleted; // 当前记录上面有事务号,说明是未提交数据,那么如果有删除标记的话,就表示是未提交的删除 + return record_deleted; // 当前记录上面有事务号,说明是未提交数据,那么如果有删除标记的话,就表示是未提交的删除 } -void Trx::init_trx_info(Table *table, Record &record) { +void Trx::init_trx_info(Table *table, Record &record) +{ set_record_trx_id(table, record, trx_id_, false); } -void Trx::start_if_not_started() { +void Trx::start_if_not_started() +{ if (trx_id_ == 0) { trx_id_ = next_trx_id(); } diff --git a/src/observer/storage/trx/trx.h b/src/observer/storage/trx/trx.h index 82eb12bdb18f0b5c1428f000fdc2961b5d291110..be34c0df1d035d4cfaf326a4d7f47f66545a7212 100644 --- a/src/observer/storage/trx/trx.h +++ b/src/observer/storage/trx/trx.h @@ -28,7 +28,7 @@ class Table; class Operation { public: - enum class Type: int { + enum class Type : int { INSERT, UPDATE, DELETE, @@ -36,36 +36,40 @@ public: }; public: - Operation(Type type, const RID &rid) : type_(type), page_num_(rid.page_num), slot_num_(rid.slot_num){ - } + Operation(Type type, const RID &rid) : type_(type), page_num_(rid.page_num), slot_num_(rid.slot_num) + {} - Type type() const { + Type type() const + { return type_; } - PageNum page_num() const { + PageNum page_num() const + { return page_num_; } - SlotNum slot_num() const { + SlotNum slot_num() const + { return slot_num_; } private: Type type_; - PageNum page_num_; - SlotNum slot_num_; + PageNum page_num_; + SlotNum slot_num_; }; class OperationHasher { public: - size_t operator() (const Operation &op) const { + size_t operator()(const Operation &op) const + { return (((size_t)op.page_num()) << 32) | (op.slot_num()); } }; class OperationEqualer { public: - bool operator()(const Operation &op1, const Operation &op2) const { - return op1.page_num() == op2.page_num() && - op1.slot_num() == op2.slot_num(); + bool operator()(const Operation &op1, const Operation &op2) const + { + return op1.page_num() == op2.page_num() && op1.slot_num() == op2.slot_num(); } }; @@ -79,7 +83,7 @@ public: static int32_t next_trx_id(); static const char *trx_field_name(); static AttrType trx_field_type(); - static int trx_field_len(); + static int trx_field_len(); public: Trx(); @@ -112,9 +116,10 @@ private: private: void start_if_not_started(); + private: - int32_t trx_id_ = 0; + int32_t trx_id_ = 0; std::unordered_map
operations_; }; -#endif // __OBSERVER_STORAGE_TRX_TRX_H_ +#endif // __OBSERVER_STORAGE_TRX_TRX_H_ diff --git a/test/case/README.md b/test/case/README.md new file mode 100644 index 0000000000000000000000000000000000000000..865992ae9d72aa1bba209cf0f47b05e4e2500d31 --- /dev/null +++ b/test/case/README.md @@ -0,0 +1,4 @@ +# miniob-test +miniob自动化功能测试 +使用方法参考 miniob_test.py + diff --git a/test/case/case-scores.json b/test/case/case-scores.json new file mode 100644 index 0000000000000000000000000000000000000000..eab2da303d281e22b47645a993fbb31223e763ad --- /dev/null +++ b/test/case/case-scores.json @@ -0,0 +1,21 @@ +{ + "basic":{"necessary":true, "score":10}, + "primary-date":{"necessary":true, "score":10}, + "primary-update":{"necessary":true, "score":10}, + "primary-select-meta":{"necessary":true, "score":10}, + "primary-select-tables":{"necessary":true, "score":10}, + "primary-aggregation-func":{"necessary":true, "score":10}, + "primary-drop-table":{"necessary":true, "score":10}, + + "primary-insert":{"necessary":false, "score":10}, + "primary-join-tables":{"necessary":false, "score":20}, + "primary-null":{"necessary":false, "score":10}, + "primary-unique":{"necessary":false, "score":10}, + "primary-simple-sub-query":{"necessary":false, "score":10}, + "primary-multi-index":{"necessary":false, "score":20}, + "primary-text":{"necessary":false, "score":20}, + "primary-expression":{"necessary":false, "score":20}, + "primary-complex-sub-query":{"necessary":false, "score":20}, + "primary-order-by":{"necessary":false, "score":10}, + "primary-group-by":{"necessary":false, "score":20} +} diff --git a/test/case/miniob_test.py b/test/case/miniob_test.py new file mode 100644 index 0000000000000000000000000000000000000000..54e6369b4cc84ac78c9a2502cb997e5089f63f96 --- /dev/null +++ b/test/case/miniob_test.py @@ -0,0 +1,1393 @@ +# -*- coding: UTF-8 -*- + +from genericpath import exists +import os +import json +import http.client +import sys +import logging +import subprocess +import socket +import select +import time +import shutil +#import timeout_decorator +from enum import Enum +#import eventlet +#from timeout_decorator import TimeoutError +try: + from optparse import OptionParser +except: + print("cannot load optparse module") + exit(1) + +""" +为OceanBase 大赛测试平台设计的自动化测试程序 +测试流程: +获取源码 -> +编译源码 -> +获取测试用例文件 -> +启动observer -> +执行测试用例 -> +对比执行结果与预先设置的结果文件 + +- 获取源码的方式:支持通过git获取,也可以指定源码的zip压缩包路径 +- 编译源码:可以指定编译的cmake和make参数。也可以跳过这个步骤。 +- 测试用例文件:测试用例文件都以.test结尾,当前放在test目录下,分为necessary和option(后续可以考虑删除) +- 测试结果文件:预先设置的结果文件,以.result结尾,放在result目录下 +- 启动observer: 启动observer,使用unix socket,这样可以每个observer使用自己的socket文件 +- 执行测试用例:测试用例文件中,每行都是一个命令。命令可以是SQL语句,也可以是预先定义的命令,比如 echo,sort等 +- 评分文件:当前为 case-scores.json 文件,内容为json格式,描述每个case的分值 +- 测试:使用参数直接连接已经启动的observer + +TODO list +- 控制所有用例一共执行的时长 +- 简化部分配置项,已知:增加测试base-dir目录,在base-dir下查找test/result/case-scores.json文件 + +How to use: + 使用git下载代码然后测试 +python3 miniob_test.py \ + --test-case-dir=./test \ + --test-case-scores=case-scores.json \ + --test-result-dir=result \ + --test-result-tmp-dir=./result_tmp \ + --use-unix-socket \ + --git-repo=https://github.com/oceanbase/miniob.git \ + --git-branch=main \ + --code-type=git \ + --target-dir=./miniob \ + --log=stdout \ + --compile-make-args=-j4 +""" + + +class TimeoutException(BaseException): + def __init__(self, value="Timed Out"): + self.value = value + + def __str__(self): + return repr(self.value) + +class Result(Enum): + true = True + false = False + timeout = 0 + +class GlobalConfig: + default_encoding = "UTF-8" + debug = 
False + source_code_build_path_name = "build" + +def __get_source_path(target_dir: str): + return target_dir + '/miniob' + +def __get_project_path(target_dir: str): + return __get_source_path(target_dir) + +def __get_data_path(target_dir: str): + return target_dir + '/data' + +def __get_result_path(target_dir: str): + return target_dir + '/result' + +def __get_build_path(target_dir: str): + return target_dir + '/' + GlobalConfig.source_code_build_path_name + +class ResultWriter: + ''' + 写数据到指定文件,当前用于输出测试结果 + ''' + + def __init__(self, file): + self.__file = file + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + def close(self): + if self.__file is not None: + self.__file.close() + self.__file = None + + def write(self, arg: str): + self.__file.write(bytes(arg.upper(), GlobalConfig.default_encoding)) + + def write_line(self, arg: str): + self.write(str(arg).upper()) + self.write('\n') + +class MiniObServer: + ''' + 用来控制miniob的服务器程序。负责程序的启停和环境的初始化和清理工作 + ''' + + def __init__(self, base_dir: str, data_dir: str, config_file: str, server_port: int, server_socket: str, clean_data_dir: bool): + self.__check_base_dir(base_dir) + self.__check_data_dir(data_dir, clean_data_dir) + + self.__base_dir = base_dir + self.__data_dir = data_dir + + if config_file == None: + config_file = self.__default_config(base_dir) + self.__check_config(config_file) + self.__config = config_file + self.__server_port = server_port + self.__server_socket = server_socket.strip() + + self.__process = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + if self.__process is not None: + self.stop_server() + self.clean() + self.__process = None + + def __observer_path(self, base_dir: str): + ''' + observer程序所在路径 + ''' + return base_dir + "/bin/observer" + + def __default_config(self, base_dir: str): + return base_dir + "/etc/observer.ini" + + def __check_base_dir(self, base_dir: str): + if not(os.path.isdir(base_dir)): + raise(Exception("failed to check base directory. 
" + base_dir + " is not a directory")) + + observer_path = self.__observer_path(base_dir) + if not(os.path.isfile(observer_path)): + raise(Exception("observer not exists: " + observer_path)) + + def __check_data_dir(self, data_dir: str, clean_data_dir: bool): + if os.path.exists(data_dir) and clean_data_dir: + shutil.rmtree(data_dir) + + os.makedirs(data_dir, exist_ok=True) + if not(os.path.isdir(data_dir)): + raise(Exception(data_dir + " is not a directory or failed to create")) + + # results = os.listdir(data_dir) + # if len(results) != 0: + # raise(Exception(data_dir + " is not empty")) + + def __check_config(self, config_file: str): + if not(os.path.isfile(config_file)): + raise(Exception("config file does not exists: " + config_file)) + + def init_server(self): + logging.info("miniob-server inited") + # do nothing now + + def start_server(self): + ''' + 启动服务端程序,并使用探测端口的方式检测程序是否正常启动 + 调试模式如果可以使用调试器启动程序就好了 + ''' + + if self.__process != None: + logging.warn("Server has already been started") + return False + + time_begin = time.time() + logging.debug("use '%s' as observer work path", os.getcwd()) + observer_command = [self.__observer_path(self.__base_dir), '-f', self.__config] + if len(self.__server_socket) > 0: + observer_command.append('-s') + observer_command.append(self.__server_socket) + else: + observer_command.append('-p') + observer_command.append(str(self.__server_port)) + + process = subprocess.Popen(observer_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, cwd=self.__data_dir) + return_code = process.poll() + if return_code != None: + logging.error("Failed to start observer, exit with code %d", return_code) + return False + + logging.info('start subprocess with pid=%d', process.pid) + #os.setpgid(process.pid, GlobalConfig.group_id) + + self.__process = process + time.sleep(0.2) + if not self.__wait_server_started(10): + time_span = time.time() - time_begin + logging.error("Failed to start server in %f seconds", time_span) + return False + + time_span = time.time() - time_begin + logging.info("miniob-server started in %f seconds", time_span) + return True + + def stop_server(self): + if self.__process == None: + logging.warning("Server has not been started") + return True + + self.__process.terminate() + return_code = -1 + try: + return_code = self.__process.wait(10) + if return_code is None: + self.__process.kill() + logging.warning("Failed to stop server: %s", self.__base_dir) + return False + except Exception as ex: + self.__process.kill() + logging.warning("wait server exit timedout: %s", self.__base_dir) + return False + + logging.info("miniob-server exit with code %d. pid=%s", return_code, str(self.__process.pid)) + return True + + def clean(self): + ''' + 清理数据目录(如果没有配置调试模式) + 调试模式可能需要查看服务器程序运行的日志 + ''' + + if GlobalConfig.debug is False: + shutil.rmtree(self.__data_dir) + logging.info("miniob-server cleaned") + + def __check_unix_socket_server(self): + with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s: + errno = s.connect_ex(self.__server_socket) + if errno == 0: + return True + else: + logging.debug("Failed to connect to server. err=%d:%s", errno, os.strerror(errno)) + return False + + def __check_tcp_socket_server(self): + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + errno = s.connect_ex(('127.0.0.1', self.__server_port)) + if errno == 0: + return True + else: + logging.debug("Failed to connect to server. 
err=%d:%s", errno, os.strerror(errno)) + return False + + def __wait_server_started(self, timeout_seconds: int): + deadline = time.time() + timeout_seconds + + while time.time() <= deadline: + result = False + if len(self.__server_socket) > 0: + result = self.__check_unix_socket_server() + else: + result = self.__check_tcp_socket_server() + if result: + return result + time.sleep(0.5) + + return False + +class MiniObClient: + ''' + 测试客户端。使用TCP连接,向服务器发送命令并反馈结果 + ''' + + def __init__(self, server_port: int, server_socket: str, time_limit:int = 10): + if (server_port < 0 or server_port > 65535) and server_socket is None: + raise(Exception("Invalid server port: " + str(server_port))) + + self.__server_port = server_port + self.__server_socket = server_socket.strip() + self.__socket = None + self.__buffer_size = 8192 + + sock = None + if len(self.__server_socket) > 0: + sock = self.__init_unix_socket(self.__server_socket) + else: + sock = self.__init_tcp_socket(self.__server_port) + + self.__socket = sock + if sock != None: + self.__socket.setblocking(False) + #self.__socket.settimeout(time_limit) # do not work + + self.__time_limit = time_limit + self.__poller = select.poll() + self.__poller.register(self.__socket, select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR) + + def __init_tcp_socket(self, server_port:int): + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + errno = s.connect_ex(('127.0.0.1', server_port)) + if errno != 0: + logging.error("Failed to connect to server with port %d. errno=%d:%s", + server_port, errno, os.strerror(errno)) + s = None + return s + + def __init_unix_socket(self, server_socket: str): + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + errno = sock.connect_ex(server_socket) + if errno != 0: + logging.error("Failed to connect to server with address '%s'. errno=%d:%s", + server_socket, errno, os.strerror(errno)) + sock = None + return sock + + def is_valid(self): + return self.__socket is not None + + def __recv_response(self): + result = '' + + while True: + events = self.__poller.poll(self.__time_limit * 1000) + if len(events) == 0: + raise Exception('Poll timeout after %d second(s)' % self.__time_limit) + + (_, event) = events[0] + if event & (select.POLLHUP | select.POLLERR): + msg = "Failed to receive from server. poll return POLLHUP(%s) or POLLERR(%s)" % ( str(event & select.POLLHUP), str(event & select.POLLERR)) + logging.info(msg) + raise Exception(msg) + + data = self.__socket.recv(self.__buffer_size) + if len(data) > 0: + result_tmp = data.decode(encoding= GlobalConfig.default_encoding) + logging.debug("receive from server[size=%d]: '%s'", len(data), result_tmp) + if data[len(data) - 1] == 0: + result += result_tmp[0:-2] + return result.strip() + '\n' + else: + result += result_tmp # TODO 返回数据量比较大的时候,python可能会hang住 + # 可以考虑返回列表 + else: + logging.info("receive from server error. result len=%d", len(data)) + raise Exception("receive return error. 
the connection may be closed") + + + def run_sql(self, sql: str): + try: + data = str.encode(sql, GlobalConfig.default_encoding) + self.__socket.sendall(data) + self.__socket.sendall(b'\0') + logging.debug("send command to server(size=%d) '%s'", len(data) + 1, sql) + result = self.__recv_response() + logging.debug("receive result from server '%s'", result) + return True, result + except Exception as ex: + logging.error("Failed to send message to server: '%s'", str(ex)) + return False, None + + def close(self): + if self.__socket is not None: + self.__socket.close() + self.__socket = None + +class CommandRunner: + __default_client_name = "default" + __command_prefix = "--" + __comment_prefix = "#" + + def __init__(self, result_writer: ResultWriter, server_port: int, unix_socket: str): + self.__result_writer = result_writer + self.__clients = {} + + # create default client + default_client = MiniObClient(server_port, unix_socket) + if not( default_client.is_valid()): + self.__is_valid = False + else: + self.__is_valid = True + self.__clients[self.__default_client_name] = default_client + + self.__current_client = default_client + self.__server_port = server_port + self.__unix_socket = unix_socket + + def is_valid(self): + return self.__is_valid + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + def close(self): + for client in self.__clients.values(): + client.close() + self.__clients.clear() + self.__current_client = None + + def run_connection(self, name: str): + ''' + 切换当前连接 + ''' + + client = self.__clients[name] + if client == None: + logging.error("No such client named %s", name) + return False + + self.__current_client = client + return True + + def run_connect(self, name: str): + ''' + 创建一个连接。每个连接有一个名字,可以使用使用connection name来切换当前的连接 + ''' + name = name.strip() + if len(name) == 0: + logging.error("Found empty client name") + return False + + client = self.__clients[name] + if client != None: + logging.error("Client with name %s already exists", name) + return False + + client = MiniObClient(self.__server_port, self.__unix_socket) + if not(client.is_valid()): + logging.error("Failed to create client with name: %s", name) + return False + + self.__clients[name] = client + return True + + def run_echo(self, arg: str): + ''' + echo 命令。参数可以是#开头的注释,这里不关心 + ''' + + self.__result_writer.write_line(arg) + return True + + def run_sql(self, sql): + self.__result_writer.write_line(sql) + result, data = self.__current_client.run_sql(sql) + if result is False: + return False + self.__result_writer.write(data) + return True + + def run_sort(self, sql): + self.__result_writer.write_line(sql) + result, data = self.__current_client.run_sql(sql) + if result is False: + return False + data_l = data.strip().split('\n') + data_l.sort() + data = '\n'.join(data_l) + '\n' + self.__result_writer.write(data) + return result + + def run_command(self, command_line: str): + ''' + 执行一条命令。命令的参数使用空格分开, 第一个字符串是命令类型 + ''' + command_line = command_line[len(self.__command_prefix) : ] + command_line = command_line.lstrip() + args = command_line.split(' ', 1) + command = args[0] + + command_arg = '' + if len(args) > 1: + command_arg = args[1] + + result = True + if 'echo' == command: + result = self.run_echo(command_arg) + elif 'connect' == command: + result = self.run_connect(command_arg) + elif 'connection' == command: + result = self.run_connection(command_arg) + elif 'sort' == command: + result = self.run_sort(command_arg) + else: + logging.error("No such 
command %s", command) + result = False + + return result + + def run_anything(self, argline: str): + argline = argline.strip() + if len(argline) == 0: + self.__result_writer.write_line('') # 读取到一个空行,也写入一个空行 + return True + + if argline.startswith(self.__comment_prefix): + return True + + if argline.startswith(self.__command_prefix): + return self.run_command(argline) + + return self.run_sql(argline) + +class TestCase: + + def __init__(self, is_necessary: bool, score: int): + self.__name = '' + self.__necessary = is_necessary + self.__score = score + self.__lines = [] + + def init_with_file(self, name, filename): + self.__name = name + with open(filename, mode='r') as f: + self.__lines = f.readlines() + return True + + def init_with_content(self, name, lines): + self.__name = name + self.__lines = lines + return True + + def command_lines(self): + return self.__lines + + def get_name(self): + return self.__name + + def is_necessary(self): + return self.__necessary + + def get_score(self): + return self.__score + + def result_file(self, base_dir): + subdir = '' + #if self.__necessary: + # subdir = self.NECESSARY_DIR + #else: + # subdir = self.OPTION_DIR + return base_dir + "/" + subdir + "/" + self.__name + ".result" + + def tmp_result_file(self, base_dir): + result_file = self.result_file(base_dir) + return result_file + '.tmp' + +class TestCaseLister: + + def __init__(self, suffix = None): + if suffix != None: + self.__suffix = suffix + else: + self.__suffix = ".test" + + def list_by_test_score_file(self, test_scores, test_case_file_dir: str): + ''' + 从test-score文件中加载所有测试用例 + ''' + test_cases = [] + test_score_infos = test_scores.get_all() + for case_name, test_score in test_score_infos.items(): + test_case = TestCase(test_score.is_necessary(), test_score.score()) + test_case_file = test_case_file_dir + '/' + case_name + self.__suffix + test_case.init_with_file(case_name, test_case_file) + test_cases.append(test_case) + + return test_cases + + def list_directory(self, base_dir : str, is_necessary: bool): + test_case_files = [] + + is_dir = os.path.isdir(base_dir) + if False == is_dir: + raise(Exception("Failed to list directory while getting test cases. " + base_dir + " is not a directory")) + + files = os.listdir(base_dir) + for filename in files: + logging.debug("find file %s", filename) + if filename.startswith('.'): + continue + + full_path = base_dir + "/" + filename + is_file = os.path.isfile(full_path) + if False == is_file: + continue + if filename.endswith(self.__suffix): + test_case_files.append(filename) + + test_cases = [] + for test_case_file in test_case_files: + full_path = base_dir + "/" + test_case_file + test_case_name = test_case_file[0 : -len(self.__suffix)] + test_case = TestCase(is_necessary, 0) + test_case.init_with_file(test_case_name, full_path) + test_cases.append(test_case) + logging.debug("got a test case file %s", str(test_case_file)) + + return test_cases + + def list_all(self, base_dir, test_names): + is_dir = os.path.isdir(base_dir) + if False == is_dir: + raise("Failed to list all test cases. 
" + base_dir + " is not a directory") + + test_cases = [] + for test_name in test_names: + full_path = base_dir + "/" + test_name + self.__suffix + if not(os.path.isfile(full_path)): + raise(Exception(full_path + " is not a file")) + + test_case = TestCase(False, 0) + test_case.init_with_file(test_name, full_path) + test_cases.append(test_case) + logging.debug("got a test case %s", test_case) + + return test_cases + +class TestScore: + def __init__(self, is_necessary: bool, score: int): + self.__necessary = is_necessary + self.__score = score + + def is_necessary(self): + return self.__necessary + def score(self): + return self.__score + +class TestScores: + def __init__(self): + self.__scores = {} + self.__is_valid = False + + def is_valid(self): + return self.__is_valid + + def init_file(self, fp): + score_infos = json.load(fp) + self.__init(score_infos) + + def init_content(self, content: str): + score_infos = json.loads(content) + self.__init(score_infos) + + def __init(self, score_info_dict: dict): + scores = {} + for name, score_info in score_info_dict.items(): + scores[name] = TestScore(score_info['necessary'], score_info['score']) + + self.__scores = scores + self.__is_valid = True + + def is_necessary(self, name): + if name in self.__scores.keys(): + return self.__scores[name].is_necessary() + + return None + + def acquire_score(self, name): + if name in self.__scores.keys(): + return self.__scores[name].score() + + return None + + def get_all(self): + return self.__scores + +class EvalResult: + def __init__(self): + self.__message = [] + self.__necessary_score = 0 + self.__option_score = 0 + self.__status = -1 + + def clear_message(self): + self.__message = [] + + def append_message(self, message): + self.__message.append(message) + + def get_message(self): + return "\n".join(self.__message) + + def add_necessary_score(self, score: int): + self.__necessary_score += score + + def add_option_score(self, score: int): + self.__option_score += score + + def clear_option_score(self): + self.__option_score = 0 + + def clear_score(self): + self.__option_score = 0 + self.__necessary_score = 0 + + def get_score(self): + return self.__necessary_score + self.__option_score + + def set_cost(self): + self.__status = 0 + + def set_no_cost(self): + self.__status = -1 + + def get_status(self): + return self.__status + + def is_success(self): + return self.__status == 0 + + def to_json_string(self): + json_dict = {} + json_dict['score'] = self.get_score() + json_dict['message'] = self.get_message() + + json_encoder = json.encoder.JSONEncoder() + json_encoder.item_separator = ',' + json_encoder.key_separator = ':' + return json_encoder.encode(json_dict) + +class TestSuite: + + def __init__(self): + self.__report_only = False # 本次测试为了获取测试结果,不是为了校验结果 + self.__test_case_base_dir = "./test" + self.__test_result_base_dir = "./result" + self.__test_result_tmp_dir = "./result/tmp" # 生成的结果存放的临时目录 + self.__db_server_base_dir = None + self.__db_data_dir = None + self.__db_config = None + self.__server_port = 0 + self.__use_unix_socket = False # 如果指定unix socket,那么就不再使用TCP连接 + self.__need_start_server = True + self.__test_names = None # 如果指定测试哪些Case,就不再遍历所有的cases + self.__miniob_server = None + self.__test_case_scores = TestScores() + + def set_test_names(self, tests): + self.__test_names = tests + + def set_test_case_base_dir(self, test_case_base_dir): + self.__test_case_base_dir = test_case_base_dir + + def set_test_result_base_dir(self, test_result_base_dir): + self.__test_result_base_dir = 
test_result_base_dir + + def set_test_result_tmp_dir(self, test_result_tmp_dir: str): + self.__test_result_tmp_dir = test_result_tmp_dir + os.makedirs(test_result_tmp_dir, exist_ok=True) + if not(os.path.isdir(test_result_tmp_dir)): + raise(Exception("Failed to set test result temp directory. " + test_result_tmp_dir + " is not a directory or failed to create")) + + def set_test_case_scores(self, scores_path: str): + with open(scores_path) as fp: + self.__test_case_scores.init_file(fp) + + def set_db_server_base_dir(self, db_server_base_dir): + self.__db_server_base_dir = db_server_base_dir + + def set_db_data_dir(self, db_data_dir): + self.__db_data_dir = db_data_dir + + def set_db_config(self, db_config): + self.__db_config = db_config + + def set_server_port(self, server_port): + self.__server_port = server_port + + def set_use_unix_socket(self, use_unix_socket: bool): + self.__use_unix_socket = use_unix_socket + + def donot_need_start_server(self): + self.__need_start_server = False + + def set_report_only(self, report_only): + self.__report_only = report_only + + def __compare_files(self, file1, file2): + with open(file1, 'r') as f1, open(file2, 'r') as f2: + lines1 = f1.readlines() + lines2 = f2.readlines() + if len(lines1) != len(lines2): + return False + + line_num = len(lines1) + for i in range(line_num): + if lines1[i].upper() != lines2[i].upper(): + logging.info('file1=%s, file2=%s, line1=%s, line2=%s', file1, file2, lines1[i], lines2[i]) + return False + return True + + def run_case(self, test_case, timeout=20): + # eventlet.monkey_patch() + #@timeout_decorator.timeout(timeout) + #def decorator(): + try: + #with eventlet.Timeout(timeout): + ret = self.__run_case(test_case) + if ret: + return Result.true + else: + return Result.false + except TimeoutException as ex: + return Result.timeout + + # try: + # ret = decorator() + # if ret: + # return Result.true + # return Result.false + # except TimeoutError: + # return Result.timeout + + def __run_case(self, test_case: TestCase): + result_tmp_file_name = test_case.tmp_result_file(self.__test_result_tmp_dir) + + unix_socket = '' + if self.__use_unix_socket: + unix_socket = self.__get_unix_socket_address() + + with open(result_tmp_file_name, mode='wb') as result_file: + result_writer = ResultWriter(result_file) + + with CommandRunner(result_writer, self.__server_port, unix_socket) as command_runner: + if command_runner.is_valid() == False: + return False + + for command_line in test_case.command_lines(): + result = command_runner.run_anything(command_line) + if result is False: + logging.error("Failed to run command %s in case %s", command_line, test_case.get_name()) + return result + + result_file_name = test_case.result_file(self.__test_result_base_dir) + if self.__report_only: + os.rename(result_tmp_file_name, result_file_name) + return True + else: + result = self.__compare_files(result_tmp_file_name, result_file_name) + if not GlobalConfig.debug: + #os.remove(result_tmp_file_name) + pass + return result + + def __get_unix_socket_address(self): + return self.__db_data_dir + '/miniob.sock' + + def __get_all_test_cases(self): + test_case_lister = TestCaseLister() + test_cases = [] + if self.__test_case_scores.is_valid(): + test_cases = test_case_lister.list_by_test_score_file(self.__test_case_scores, self.__test_case_base_dir) + else: + test_cases = test_case_lister.list_directory(self.__test_case_base_dir) + + if self.__test_names is None: # 没有指定测试哪个case + return test_cases + + # 指定了测试case,就从中捞出来 + # 找出指定了要测试某个case,但是没有发现 + 
test_case_result = [] + for case_name in self.__test_names: + found = False + for test_case in test_cases: + if test_case.get_name() == case_name: + test_case_result.append(test_case) + logging.debug("got case: " + case_name) + found = True + if found == False: + logging.error("No such test case with name '%s'" % case_name) + return [] + + return test_case_result + + def run(self, eval_result: EvalResult): + + # 找出所有需要测试Case + test_cases = self.__get_all_test_cases() + + if test_cases is None or len(test_cases) == 0: + logging.info("Cannot find any test cases") + return True + + logging.info("Starting observer server") + + # 测试每个Case + success_count = 0 + failure_count = 0 + timeout_count = 0 + necessary_all_passed = True + for test_case in test_cases: + try: + # 每个case都清理并重启一下服务端,这样可以方式某个case core之后,还能测试其它case + self.__clean_server_if_need() + + result = self.__start_server_if_need(True) + if result is False: + eval_result.append_message('Failed to start server.') + eval_result.set_no_cost() + return False + + logging.info(test_case.get_name() + " starting ...") + result = self.run_case(test_case) + + if result is Result.true: + logging.info("Case passed: %s", test_case.get_name()) + success_count += 1 + if test_case.is_necessary(): + eval_result.add_necessary_score(test_case.get_score()) + else: + eval_result.add_option_score(test_case.get_score()) + eval_result.append_message("%s is success" % test_case.get_name()) + else: + if self.__test_case_scores.is_necessary(test_case.get_name()): + necessary_all_passed = False + + if result is Result.false: + logging.info("Case failed: %s", test_case.get_name()) + failure_count += 1 + eval_result.append_message("%s is error" % test_case.get_name()) + else: + logging.info("Case timeout: %s", test_case.get_name()) + timeout_count += 1 + eval_result.append_message("%s is timeout" % test_case.get_name()) + except Exception as ex: + logging.error("Failed to run case %s", test_case.get_name()) + self.__clean_server_if_need() + raise ex + + logging.info("All done. 
%d passed, %d failed, %d timeout", success_count, failure_count, timeout_count) + logging.debug(eval_result.get_message()) + if necessary_all_passed is False: + eval_result.clear_option_score() + eval_result.set_cost() + self.__clean_server_if_need() + return True + + def __start_server_if_need(self, clean_data_dir: bool): + if self.__miniob_server is not None: + return True + + if self.__need_start_server: + unix_socket = '' + if self.__use_unix_socket: + unix_socket = self.__get_unix_socket_address() + + miniob_server = MiniObServer(self.__db_server_base_dir, self.__db_data_dir, + self.__db_config, self.__server_port, unix_socket, clean_data_dir) + miniob_server.init_server() + result = miniob_server.start_server() + if result is False: + logging.error("Failed to start db server") + miniob_server.stop_server() + miniob_server.clean() + return False + self.__miniob_server = miniob_server + + return True + + def __clean_server_if_need(self): + if self.__miniob_server is not None: + self.__miniob_server.stop_server() + # 不再清理掉中间结果。如果从解压代码开始,那么执行的中间结果不需要再清理,所有的数据都在临时目录 + # self.__miniob_server.clean() + self.__miniob_server = None + +def __init_options(): + options_parser = OptionParser() + # 是否仅仅生成结果,而不对结果做校验。一般在新生成一个case时使用 + options_parser.add_option('', '--report-only', action='store_true', dest='report_only', default=False, + help='just report the result') + # 测试case文件存放的目录 + options_parser.add_option('', '--test-case-dir', action='store', type='string', dest='test_case_base_dir', default='test', + help='the directory that contains the test files') + # 测试case文件存放的目录 + options_parser.add_option('', '--test-case-scores', action='store', type='string', dest='test_case_scores', default='score.json', + help='a json file that records score of the test cases') + # 测试结果文件存放目录 + options_parser.add_option('', '--test-result-dir', action='store', type='string', dest='test_result_base_dir', default='result', + help='the directory that contains the test result files') + # 生成的测试结果文件临时目录 + options_parser.add_option('', '--test-result-tmp-dir', action='store', type='string', dest='test_result_tmp_dir', default='result/tmp', + help='the directory that contains the generated test result files') + + # 测试哪些用例。不指定就会扫描test-case-dir目录下面的所有测试用例。指定的话,就从test-case-dir目录下面按照名字找 + options_parser.add_option('', '--test-cases', action='store', type='string', dest='test_cases', + help='test cases. If none, we will iterate the test case directory. Split with \',\' if more than one') + + # 测试时服务器程序基础路径,下面包含bin/observer执行主程序和etc/observer.ini配置文件 + options_parser.add_option('', '--db-base-dir', action='store', type='string', dest='db_base_dir', + help='the directory of miniob database which db-base-dir/bin contains the binary executor file') + + # 测试时服务器程序的数据文件存放目录 + options_parser.add_option('', '--db-data-dir', action='store', type='string', dest='db_data_dir', default='miniob_data_test', + help='the directory of miniob database\'s data for test') + + # 服务程序配置文件 + options_parser.add_option('', '--db-config', action='store', type='string', dest='db_config', + help='the configuration of db for test. default is base_dir/etc/observer.ini') + # 服务程序端口号,客户端也使用这个端口连接服务器。目前还不具备通过配置文件解析端口配置的能力 + options_parser.add_option('', '--server-port', action='store', type='int', dest='server_port', default=6789, + help='the server port. 
should be the same with the value in the config') + options_parser.add_option('', '--use-unix-socket', action='store_true', dest='use_unix_socket', + help='If true, server-port will be ignored and will use a random address socket.') + + # 可以手动启动服务端程序,然后添加这个选项,就不会再启动服务器程序。一般调试时使用 + options_parser.add_option('', '--server-started', action='store_true', dest='server_started', default=False, + help='Whether the server is already started. If true, we will not start the server') + + # 测试过程中生成的日志存放的文件。使用stdout/stderr输出到控制台 + options_parser.add_option('', '--log', action='store', type='string', dest='log_file', default='miniob-test.log', + help='log file. stdout=standard output and stderr=standard error') + # 是否启动调试模式。调试模式不会清理服务器的数据目录 + options_parser.add_option('-d', '--debug', action='store_true', dest='debug', default=False, + help='enable debug mode') + + # 测试时代码压缩文件的路径 + options_parser.add_option('', '--db-code-dir', action='store', type='string', dest='db_code_dir', + help='the directory of miniob\'s code') + # 测试时代码压缩文件的解压目录 + options_parser.add_option('', '--target-dir', action='store', type='string', dest='target_dir', + help='the working directory of miniob database') + # 解压的目录存在时,是否覆盖 + options_parser.add_option('', '--decompress-overwrite', action='store_true', dest='decompress_overwrite', default=False, + help='whether overwrite the decompress target path if exists') + # 是否需要解压和编译代码 + options_parser.add_option('', '--code-type', action='store', dest='code_type', default='compress', + help='compress/git/none. Compress: decompress the code and compile. git: git clone and compile. none: do nothing') + options_parser.add_option('', '--compile-make-args', action='store', type='string', dest='compile_make_args', default='', + help='compile args used by make') + options_parser.add_option('', '--compile-cmake-args', action='store', type='string', dest='compile_cmake_args', default='', + help='compile args used by cmake') + # 之前已经编译过,是否需要重新编译,还是直接执行make就可以了 + options_parser.add_option('', '--compile-rebuild', action='store_true', default=False, dest='compile_rebuild', + help='whether rebuild if build path exists') + options_parser.add_option('', '--git-repo', action='store', dest='git_repo', + help='the git repo in https') + options_parser.add_option('', '--git-branch', action='store', dest='git_branch', default='', + help='the git repo branch') + options_parser.add_option('', '--git-repo-prefix', action='store', dest='git_repo_prefix', default='https://github.com', + help='the git repo prefix in https') + options_parser.add_option('', '--git-user', action='store', dest='git_user', default='', + help='git user name to download source code') + options_parser.add_option('', '--git-token', action='store', dest='git_token', default='', + help='git token to download source code') + + options, args = options_parser.parse_args(sys.argv[1:]) + return options + +def __init_log(options): + log_level = logging.INFO + if options.debug: + log_level = logging.DEBUG + GlobalConfig.debug = True + + GlobalConfig.debug = True + log_stream = None + if 'stdout' == options.log_file: + log_stream = sys.stdout + elif 'stderr' == options.log_file: + log_stream = sys.stderr + else: + log_file_dir = os.path.dirname(options.log_file) + os.makedirs(log_file_dir, exist_ok=True) + + log_format = "%(asctime)s - %(levelname)-5s %(name)s %(lineno)s - %(message)s" + log_date_format = "%Y-%m-%d %H:%M:%S" + + if log_stream is None: + logging.basicConfig(level=log_level, filename=options.log_file, format=log_format, 
+def __init_log(options):
+    log_level = logging.INFO
+    if options.debug:
+        log_level = logging.DEBUG
+        GlobalConfig.debug = True
+
+    log_stream = None
+    if 'stdout' == options.log_file:
+        log_stream = sys.stdout
+    elif 'stderr' == options.log_file:
+        log_stream = sys.stderr
+    else:
+        log_file_dir = os.path.dirname(options.log_file)
+        os.makedirs(log_file_dir, exist_ok=True)
+
+    log_format = "%(asctime)s - %(levelname)-5s %(name)s %(lineno)s - %(message)s"
+    log_date_format = "%Y-%m-%d %H:%M:%S"
+
+    if log_stream is None:
+        logging.basicConfig(level=log_level, filename=options.log_file, format=log_format, datefmt=log_date_format)
+    else:
+        logging.basicConfig(level=log_level, stream=log_stream, format=log_format, datefmt=log_date_format)
+
+def __init_test_suite(options):
+    test_suite = TestSuite()
+    test_suite.set_test_case_base_dir(os.path.abspath(options.test_case_base_dir))
+    test_suite.set_test_case_scores(os.path.abspath(options.test_case_scores))
+    test_suite.set_test_result_base_dir(os.path.abspath(options.test_result_base_dir))
+    test_suite.set_test_result_tmp_dir(os.path.abspath(options.test_result_tmp_dir))
+
+    if options.db_base_dir is not None:
+        test_suite.set_db_server_base_dir(os.path.abspath(options.db_base_dir))
+    if options.db_data_dir is not None:
+        test_suite.set_db_data_dir(os.path.abspath(options.db_data_dir))
+
+    test_suite.set_server_port(options.server_port)
+    test_suite.set_use_unix_socket(options.use_unix_socket)
+
+    if options.server_started:
+        test_suite.donot_need_start_server()
+
+    if options.db_config is not None:
+        test_suite.set_db_config(os.path.abspath(options.db_config))
+
+    if options.test_cases is not None:
+        test_suite.set_test_names(options.test_cases.split(','))
+
+    if options.report_only:
+        test_suite.set_report_only(True)
+
+    return test_suite
+
+def __init_test_suite_with_source_code(options, eval_result):
+    os.makedirs(options.target_dir, exist_ok=True)
+    target_path = os.path.abspath(options.target_dir)
+    proj_path = __get_project_path(target_path)
+    build_path = __get_build_path(target_path)
+
+    if options.code_type == 'compress':
+        code_path = os.path.abspath(options.db_code_dir)
+        if not unzip(code_path, target_path, options.decompress_overwrite):
+            message = "decompress the code failed"
+            logging.error(message)
+            raise Exception(message)
+        else:
+            logging.info("decompress source code done")
+    elif options.code_type == 'git':
+        result = git_clone(options.git_repo, options.git_branch, options.git_repo_prefix,
+                           options.git_user, options.git_token, proj_path, 10, eval_result)
+        if not result:
+            return None
+
+    if not compile(proj_path, build_path, options.compile_cmake_args, options.compile_make_args, options.compile_rebuild, eval_result):
+        message = "Failed to compile source code"
+        logging.error(message)
+        return None
+
+    logging.info("compile source code done")
+
+    # Override some of the test paths with locations derived from the source tree.
+    logging.info("some config will be overridden if exists")
+    test_suite = __init_test_suite(options)
+    test_suite.set_db_data_dir(__get_data_path(target_path))
+    test_suite.set_db_server_base_dir(__get_build_path(target_path))
+    test_suite.set_db_config(proj_path + '/etc/observer.ini')
+    return test_suite
+
+def __run_shell_command(command_args):
+    '''
+    Run a shell command and return its exit code together with the console output.
+    The console output is returned as a list of strings, one string per line.
+    '''
+
+    logging.info("running command: '%s'", ' '.join(command_args))
+
+    outputs = []
+    command_process = subprocess.Popen(command_args, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)
+    while True:
+        line = command_process.stderr.readline()
+        line_str = line.decode(GlobalConfig.default_encoding)
+        if isinstance(line_str, str):
+            outputs.append(line_str.strip())
+
+        return_code = command_process.poll()
+        if return_code is not None:
+            return return_code, outputs
+
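+# Illustrative use of __run_shell_command (the command below is only an example, not one the
+# script itself runs):
+#   ret, lines = __run_shell_command(['unzip', '-v'])
+# 'ret' is the child's exit code and 'lines' holds whatever it wrote to stderr, one stripped
+# string per line; stdout is discarded.
+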
return code=%d", return_code) + return False + logging.info("pull source code success") + return True + except Exception as ex: + process.kill() + logging.error("Failed to pull source code from repo. exception=%s", str(ex)) + return False + return True + +def git_clone(repo: str, branch: str, repo_prefix: str, + user_name: str, password: str, + to_path: str, timeout:int, eval_result: EvalResult): + ''' + 从指定仓库拉取代码。 + to_path: 拉取的代码放的目录。比如 test-tmp/ob_rookie/miniob + ''' + if os.path.exists(to_path): + # 目标目录已经存在,可以尝试直接执行git pull + result = git_pull(to_path, timeout, eval_result) + if result: # 如果拉取失败,就尝试重新clone + return True + + # 清理原有目录,再重新拉取 + logging.info("Failed to pull source code. clean the directory and clone it. path=%s", to_path) + shutil.rmtree(to_path) + + if not repo.startswith(repo_prefix): + error = 'git repo must be starts with ' + repo_prefix + ', but got ' + repo + logging.error(error) + eval_result.append_message(error) + return False + + if user_name or password: + target_repo = repo.replace(repo_prefix, 'https://' + user_name + ':' + password + '@github.com/') + target_repo_in_log = target_repo.replace(password, '****') + else: + target_repo = repo + target_repo_in_log = target_repo + + logging.info('git clone from %s', target_repo_in_log) + command_args = ['git', 'clone', target_repo, to_path] + if len(branch) != 0: + command_args.append('-b') + command_args.append(branch) + process = subprocess.Popen(command_args) + try: + return_code = process.wait(timeout=timeout) + if return_code != 0: + error = 'Failed to clone repo from ' + target_repo_in_log + ', return code =' + str(return_code) + logging.error(error) + eval_result.append_message(error) + return False + except Exception as ex: + process.kill() + error = 'failed to clone repo from ' + target_repo_in_log + '. exception=' + str(ex) + logging.error(error) + eval_result.append_message(error) + return False + return True + +def unzip(source_dir: str, target_dir: str, overwrite: bool): + if not os.path.exists(source_dir): + logging.error('The source_dir %s doesn\'t exist, please provide a vaild source path.', source_dir) + return False + + if os.path.isdir(target_dir) and len(os.listdir(target_dir)) != 0: + if overwrite: + shutil.rmtree(target_dir) + logging.info("target directory will be cleaned: %s", target_dir) + else: + logging.error('target directory is not empty: %s', target_dir) + return False + + if not os.path.exists(target_dir): + logging.info("decompress target directory does not exists, try to create it") + os.makedirs(target_dir) + + ret, outputs = __run_shell_command(["unzip", "-q", "-d", target_dir, source_dir]) + if ret != 0: + logging.error("Failed to decompress the zip package. source_dir=%s, target_dir=%s", + source_dir, target_dir) + + for output in outputs: + logging.error(output) + return False + + logging.info("decompress the zip package success. 
+    return True
+
+def run_cmake(work_dir: str, build_path: str, cmake_args: str):
+    cmake_command = ["cmake", "-B", build_path, "--log-level=WARNING"]
+    if isinstance(cmake_args, str):
+        args = cmake_args.split(';')
+        for arg in args:
+            arg = arg.strip()
+            if len(arg) > 0:
+                cmake_command.append(arg)
+    cmake_command.append(work_dir)
+
+    ret, outputs = __run_shell_command(cmake_command)
+    if ret != 0:
+        logging.error("Failed to run cmake command")
+        for output in outputs:
+            logging.error(output)
+        return False, outputs
+    return True, []
+
+def compile(work_dir: str, build_dir: str, cmake_args: str, make_args: str, rebuild_all: bool, eval_result: EvalResult):
+    '''
+    work_dir is the directory that contains the source code (the miniob directory)
+    build_dir is the directory that receives the build output
+    '''
+    if not os.path.exists(work_dir):
+        logging.error('The work_dir %s doesn\'t exist, please provide a valid work path.', work_dir)
+        return False
+
+    #now_path = os.getcwd()
+    build_path = build_dir
+    if os.path.exists(build_path) and rebuild_all:
+        logging.info('build directory is not empty but will be cleaned before compile: %s', build_path)
+        shutil.rmtree(build_path)
+
+    os.makedirs(build_path, exist_ok=True)
+
+    logging.info("start compiling ... build path=%s", build_path)
+    ret, outputs = run_cmake(work_dir, build_path, cmake_args)
+    if ret == False:
+        # If cmake fails, wipe the whole build directory and run the cmake command once more.
+        shutil.rmtree(build_path)
+        os.makedirs(build_path, exist_ok=True)
+        ret, outputs = run_cmake(work_dir, build_path, cmake_args)
+        if ret == False:
+            for output in outputs:
+                logging.error(output)
+                eval_result.append_message(output)
+            return False
+
+    make_command = ["make", "--silent", "-C", build_path]
+    if isinstance(make_args, str):
+        args = make_args.split(';')
+        for arg in args:
+            arg = arg.strip()
+            if len(arg) > 0:
+                make_command.append(arg)
+
+    ret, outputs = __run_shell_command(make_command)
+    if ret != 0:
+        logging.error("Compile failed")
+        for output in outputs:
+            logging.error(output.strip())
+            eval_result.append_message(output.strip())
+        return False
+
+    return True
+
+def run(options):
+    '''
+    return result, reason
+    result: True or False
+
+    '''
+    __init_log(options)
+
+    logging.info("miniob test starting ...")
+
+    # Only failures caused by the miniob-test program itself are treated as failures.
+    # Failures such as a broken code archive, a git clone timeout or authentication error, missing
+    # directory permissions, or compile errors still count as success for miniob-test.
+    result = True
+    eval_result = EvalResult()
+
+    try:
+        test_suite = None
+        if options.code_type == 'compress' or options.code_type == 'git':
+            test_suite = __init_test_suite_with_source_code(options, eval_result)
+        else:
+            test_suite = __init_test_suite(options)
+
+        if test_suite != None:
+            result = test_suite.run(eval_result)
+        # result = True
+    except Exception as ex:
+        logging.exception(ex)
+        result = False
+        #eval_result.clear_message()
+        eval_result.append_message(str(ex.args))
+        eval_result.set_no_cost()
+        eval_result.clear_score()
+
+    return result, eval_result.to_json_string()
+
+if __name__ == '__main__':
+    os.setpgrp()
+    options = __init_options()
+
+    result, evaluation = run(options)
+
+    exit_code = 0
+    if result is False:
+        exit_code = 1
+    else:
+        logging.info(evaluation)
+    exit(exit_code)
+
diff --git a/test/case/result/basic.result b/test/case/result/basic.result
new file mode 100644
index 0000000000000000000000000000000000000000..4adb3d44e48fabab70449c292fb1d57b6d850eb6
--- /dev/null
+++ b/test/case/result/basic.result
@@ -0,0 +1,102 @@
+BASIC INSERT
+
+create table t_basic(id int, age int, name char, score float);
+SUCCESS
+insert into t_basic
values(1,1, 'a', 1.0); +SUCCESS +insert into t_basic values(2,2, 'b', 2.0); +SUCCESS +insert into t_basic values(4,4, 'c', 3.0); +SUCCESS +insert into t_basic values(3,3, 'd', 4.0); +SUCCESS +insert into t_basic values(5,5, 'e', 5.5); +SUCCESS +insert into t_basic values(6,6, 'f', 6.6); +SUCCESS +insert into t_basic values(7,7, 'g', 7.7); +SUCCESS + +select * from t_basic; +1 | 1 | A | 1 +2 | 2 | B | 2 +3 | 3 | D | 4 +4 | 4 | C | 3 +5 | 5 | E | 5.5 +6 | 6 | F | 6.6 +7 | 7 | G | 7.7 +ID | AGE | NAME | SCORE + +BASIC DELETE +delete from t_basic where id=3; +SUCCESS +select * from t_basic; +1 | 1 | A | 1 +2 | 2 | B | 2 +4 | 4 | C | 3 +5 | 5 | E | 5.5 +6 | 6 | F | 6.6 +7 | 7 | G | 7.7 +ID | AGE | NAME | SCORE + +BASIC SELECT +select * from t_basic where id=1; +ID | AGE | NAME | SCORE +1 | 1 | A | 1 + +select * from t_basic where id>=5; +5 | 5 | E | 5.5 +6 | 6 | F | 6.6 +7 | 7 | G | 7.7 +ID | AGE | NAME | SCORE + +select * from t_basic where age>1 and age<3; +ID | AGE | NAME | SCORE +2 | 2 | B | 2 + +select * from t_basic where t_basic.id=1 and t_basic.age=1; +ID | AGE | NAME | SCORE +1 | 1 | A | 1 + +select * from t_basic where id=1 and age=1; +ID | AGE | NAME | SCORE +1 | 1 | A | 1 + +select id, age, name, score from t_basic; +1 | 1 | A | 1 +2 | 2 | B | 2 +4 | 4 | C | 3 +5 | 5 | E | 5.5 +6 | 6 | F | 6.6 +7 | 7 | G | 7.7 +ID | AGE | NAME | SCORE + +select t_basic.id, t_basic.age, t_basic.name, t_basic.score from t_basic; +1 | 1 | A | 1 +2 | 2 | B | 2 +4 | 4 | C | 3 +5 | 5 | E | 5.5 +6 | 6 | F | 6.6 +7 | 7 | G | 7.7 +ID | AGE | NAME | SCORE + +select t_basic.id, t_basic.age, name from t_basic; +1 | 1 | A +2 | 2 | B +4 | 4 | C +5 | 5 | E +6 | 6 | F +7 | 7 | G +ID | AGE | NAME + +CREATE INDEX +create index i_id on t_basic (id); +SUCCESS +select * from t_basic; +1 | 1 | A | 1 +2 | 2 | B | 2 +4 | 4 | C | 3 +5 | 5 | E | 5.5 +6 | 6 | F | 6.6 +7 | 7 | G | 7.7 +ID | AGE | NAME | SCORE diff --git a/test/case/result/primary-aggregation-func.result b/test/case/result/primary-aggregation-func.result new file mode 100644 index 0000000000000000000000000000000000000000..c4f152051ec82f0e485e58b87feae666976b8b6e --- /dev/null +++ b/test/case/result/primary-aggregation-func.result @@ -0,0 +1,99 @@ +INITIALIZATION +CREATE TABLE aggregation_func(id int, num int, price float, addr char, birthday date); +SUCCESS + +INSERT INTO aggregation_func VALUES (1, 18, 10.0, 'abc', '2020-01-01'); +SUCCESS +INSERT INTO aggregation_func VALUES (2, 15, 20.0, 'abc', '2010-01-11'); +SUCCESS +INSERT INTO aggregation_func VALUES (3, 12, 30.0, 'def', '2021-01-21'); +SUCCESS +INSERT INTO aggregation_func VALUES (4, 15, 30.0, 'dei', '2021-01-31'); +SUCCESS + +1. COUNT +SELECT count(*) FROM aggregation_func; +COUNT(*) +4 + +SELECT count(num) FROM aggregation_func; +COUNT(NUM) +4 + +2. MIN +SELECT min(num) FROM aggregation_func; +MIN(NUM) +12 + +SELECT min(price) FROM aggregation_func; +MIN(PRICE) +10 + +SELECT min(addr) FROM aggregation_func; +MIN(ADDR) +ABC + +3. MAX +SELECT max(num) FROM aggregation_func; +MAX(NUM) +18 + +SELECT max(price) FROM aggregation_func; +MAX(PRICE) +30 + +SELECT max(addr) FROM aggregation_func; +MAX(ADDR) +DEI + +4. AVG +SELECT avg(num) FROM aggregation_func; +AVG(NUM) +15 + +SELECT avg(price) FROM aggregation_func; +AVG(PRICE) +22.5 + +5. ERROR WITH * +SELECT min(*) FROM aggregation_func; +FAILURE +SELECT max(*) FROM aggregation_func; +FAILURE +SELECT avg(*) FROM aggregation_func; +FAILURE + +6. 
ERROR WITH REDUNDANT COLUMNS +SELECT count(*,num) FROM aggregation_func; +FAILURE +SELECT min(num,price) FROM aggregation_func; +FAILURE +SELECT max(num,price) FROM aggregation_func; +FAILURE +SELECT avg(num,price) FROM aggregation_func; +FAILURE + +7. ERROR WITH EMPTY COLUMNS +SELECT count() FROM aggregation_func; +FAILURE +SELECT min() FROM aggregation_func; +FAILURE +SELECT max() FROM aggregation_func; +FAILURE +SELECT avg() FROM aggregation_func; +FAILURE + +8. ERROR WITH NON-EXISTENT COLUMNS +SELECT count(id2) FROM aggregation_func; +FAILURE +SELECT min(id2) FROM aggregation_func; +FAILURE +SELECT max(id2) FROM aggregation_func; +FAILURE +SELECT avg(id2) FROM aggregation_func; +FAILURE + +9. SELECT MANY AGGREGATION +SELECT min(num),max(num),avg(num) FROM aggregation_func; +MIN(NUM) | MAX(NUM) | AVG(NUM) +12 | 18 | 15 diff --git a/test/case/result/primary-complex-sub-query.result b/test/case/result/primary-complex-sub-query.result new file mode 100644 index 0000000000000000000000000000000000000000..d499849bac5a247ff9680f63086468f1c59854f9 --- /dev/null +++ b/test/case/result/primary-complex-sub-query.result @@ -0,0 +1,113 @@ +INITIALIZATION +CREATE TABLE csq_1(id int, col1 int, feat1 float); +SUCCESS +CREATE TABLE csq_2(id int, col2 int, feat2 float); +SUCCESS +CREATE TABLE csq_3(id int, col3 int, feat3 float); +SUCCESS +CREATE TABLE csq_4(id int, col4 int, feat4 float); +SUCCESS + +INSERT INTO csq_1 VALUES (1, 4, 11.2); +SUCCESS +INSERT INTO csq_1 VALUES (2, 2, 12.0); +SUCCESS +INSERT INTO csq_1 VALUES (3, 3, 13.5); +SUCCESS +INSERT INTO csq_2 VALUES (1, 2, 13.0); +SUCCESS +INSERT INTO csq_2 VALUES (2, 7, 10.5); +SUCCESS +INSERT INTO csq_2 VALUES (5, 3, 12.6); +SUCCESS +INSERT INTO csq_3 VALUES (1, 2, 11.0); +SUCCESS +INSERT INTO csq_3 VALUES (3, 6, 16.5); +SUCCESS +INSERT INTO csq_3 VALUES (5, 5, 14.6); +SUCCESS + +1. 
SELECT +select * from csq_1 where id in (select csq_2.id from csq_2 where csq_2.id in (select csq_3.id from csq_3)); +1 | 4 | 11.2 +ID | COL1 | FEAT1 + +select * from csq_1 where id in (select csq_2.id from csq_2 where csq_2.id not in (select csq_3.id from csq_3)); +2 | 2 | 12 +ID | COL1 | FEAT1 + +select * from csq_1 where col1 not in (select csq_2.col2 from csq_2 where csq_2.id not in (select csq_3.id from csq_3)); +1 | 4 | 11.2 +2 | 2 | 12 +3 | 3 | 13.5 +ID | COL1 | FEAT1 + +select * from csq_1 where col1 not in (select csq_2.col2 from csq_2 where csq_2.id in (select csq_3.id from csq_3)); +1 | 4 | 11.2 +ID | COL1 | FEAT1 + +select * from csq_1 where col1 > (select avg(csq_2.col2) from csq_2 where csq_2.feat2 >= (select min(csq_3.feat3) from csq_3)); +1 | 4 | 11.2 +3 | 3 | 13.5 +ID | COL1 | FEAT1 + +select * from csq_1 where (select avg(csq_2.col2) from csq_2 where csq_2.feat2 > (select min(csq_3.feat3) from csq_3)) = col1; +ID | COL1 | FEAT1 + +select * from csq_1 where (select avg(csq_2.col2) from csq_2) <> (select avg(csq_3.col3) from csq_3); +1 | 4 | 11.2 +2 | 2 | 12 +3 | 3 | 13.5 +ID | COL1 | FEAT1 + +select * from csq_1 where feat1 > (select min(csq_2.feat2) from csq_2) and col1 <= (select min(csq_3.col3) from csq_3); +2 | 2 | 12 +ID | COL1 | FEAT1 + +select * from csq_1 where (select max(csq_2.feat2) from csq_2) > feat1 and col1 > (select min(csq_3.col3) from csq_3); +1 | 4 | 11.2 +ID | COL1 | FEAT1 + +select * from csq_1 where (select max(csq_2.feat2) from csq_2) > feat1 and (select min(csq_3.col3) from csq_3) < col1; +1 | 4 | 11.2 +ID | COL1 | FEAT1 + +select * from csq_1 where feat1 <> (select avg(csq_2.feat2) from csq_2 where csq_2.feat2 > csq_1.feat1); +1 | 4 | 11.2 +2 | 2 | 12 +ID | COL1 | FEAT1 + +select * from csq_1 where col1 not in (select csq_2.col2 from csq_2 where csq_2.id in (select csq_3.id from csq_3 where csq_1.id = csq_3.id)); +1 | 4 | 11.2 +2 | 2 | 12 +3 | 3 | 13.5 +ID | COL1 | FEAT1 + +2. SELECT WITH EMPTY TABLE +select * from csq_1 where id in (select csq_2.id from csq_2 where csq_2.id in (select csq_3.id from csq_3 where 1=0)); +ID | COL1 | FEAT1 +select * from csq_1 where id in (select csq_2.id from csq_2 where csq_2.id in (select csq_3.id from csq_3 where 1=0) and 1=0); +ID | COL1 | FEAT1 +select * from csq_1 where col1 not in (select csq_2.col2 from csq_2 where csq_2.id not in (select csq_3.id from csq_3 where 1=0)); +1 | 4 | 11.2 +ID | COL1 | FEAT1 +select * from csq_1 where col1 not in (select csq_2.col2 from csq_2 where csq_2.id not in (select csq_3.id from csq_3) and 1=0); +1 | 4 | 11.2 +2 | 2 | 12 +3 | 3 | 13.5 +ID | COL1 | FEAT1 +select * from csq_3 where feat3 < (select max(csq_2.feat2) from csq_2 where csq_2.id not in (select csq_3.id from csq_3 where 1=0)); +1 | 2 | 11 +ID | COL3 | FEAT3 +select * from csq_3 where feat3 < (select max(csq_2.feat2) from csq_2 where csq_2.id not in (select csq_3.id from csq_3 ) and 1=0); +ID | COL3 | FEAT3 + +3. 
ERROR +select * from csq_1 where col1 = (select csq_2.col2 from csq_2); +FAILURE +select * from csq_1 where col1 = (select * from csq_2); +FAILURE +select * from csq_1 where col1 in (select * from csq_2); +FAILURE +select * from csq_1 where col1 not in (select * from csq_2); +FAILURE diff --git a/test/case/result/primary-date.result b/test/case/result/primary-date.result new file mode 100644 index 0000000000000000000000000000000000000000..11e7408c3ff8734546339212b9f904a10991d9c6 --- /dev/null +++ b/test/case/result/primary-date.result @@ -0,0 +1,69 @@ +INITIALIZATION +CREATE TABLE date_table(id int, u_date date); +SUCCESS +CREATE INDEX index_id on date_table(u_date); +SUCCESS + +1. INSERT NORMAL DATE DATA +INSERT INTO date_table VALUES (1,'2020-01-21'); +SUCCESS +INSERT INTO date_table VALUES (2,'2020-10-21'); +SUCCESS +INSERT INTO date_table VALUES (3,'2020-1-01'); +SUCCESS +INSERT INTO date_table VALUES (4,'2020-01-1'); +SUCCESS +INSERT INTO date_table VALUES (5,'2019-12-21'); +SUCCESS +INSERT INTO date_table VALUES (6,'2016-2-29'); +SUCCESS +INSERT INTO date_table VALUES (7,'1970-1-1'); +SUCCESS +INSERT INTO date_table VALUES (8,'2000-01-01'); +SUCCESS +INSERT INTO date_table VALUES (9,'2038-1-19'); +SUCCESS + +2. COMPARE DATE DATA +SELECT * FROM date_table WHERE u_date>'2020-1-20'; +1 | 2020-01-21 +2 | 2020-10-21 +9 | 2038-01-19 +ID | U_DATE +SELECT * FROM date_table WHERE u_date<'2019-12-31'; +5 | 2019-12-21 +6 | 2016-02-29 +7 | 1970-01-01 +8 | 2000-01-01 +ID | U_DATE +SELECT * FROM date_table WHERE u_date='2020-1-1'; +3 | 2020-01-01 +4 | 2020-01-01 +ID | U_DATE + +3. DELETE DATA +DELETE FROM date_table WHERE u_date>'2012-2-29'; +SUCCESS +SELECT * FROM date_table; +7 | 1970-01-01 +8 | 2000-01-01 +ID | U_DATE + +4. CHECK INVALID DATE DATA +SELECT * FROM date_table WHERE u_date='2017-2-29'; +FAILURE +SELECT * FROM date_table WHERE u_date='2017-21-29'; +FAILURE +SELECT * FROM date_table WHERE u_date='2017-12-32'; +FAILURE +SELECT * FROM date_table WHERE u_date='2017-11-31'; +FAILURE + +INSERT INTO date_table VALUES (10,'2017-2-29'); +FAILURE +INSERT INTO date_table VALUES (11,'2017-21-29'); +FAILURE +INSERT INTO date_table VALUES (12,'2017-12-32'); +FAILURE +INSERT INTO date_table VALUES (13,'2017-11-31'); +FAILURE diff --git a/test/case/result/primary-drop-table.result b/test/case/result/primary-drop-table.result new file mode 100644 index 0000000000000000000000000000000000000000..39b2331ff0985770510e42304fed97ee7d67d8ae --- /dev/null +++ b/test/case/result/primary-drop-table.result @@ -0,0 +1,69 @@ +1. DROP EMPTY TABLE +CREATE TABLE Drop_table_1(id int, t_name char); +SUCCESS +DROP TABLE Drop_table_1; +SUCCESS + +2. DROP NON-EMPTY TABLE +CREATE TABLE Drop_table_2(id int, t_name char); +SUCCESS +INSERT INTO Drop_table_2 VALUES (1,'OB'); +SUCCESS +DROP TABLE Drop_table_2; +SUCCESS + +3. CHECK THE ACCURACY OF DROPPING TABLE +CREATE TABLE Drop_table_3(id int, t_name char); +SUCCESS +INSERT INTO Drop_table_3 VALUES (1,'OB'); +SUCCESS +SELECT * FROM Drop_table_3; +1 | OB +ID | T_NAME +DROP TABLE Drop_table_3; +SUCCESS +INSERT INTO Drop_table_3 VALUES (1,'OB'); +FAILURE +SELECT * FROM Drop_table_3; +FAILURE +DELETE FROM Drop_table_3 WHERE id = 3; +FAILURE +CREATE TABLE Drop_table_3(id int, t_name char); +SUCCESS +SELECT * FROM Drop_table_3; +ID | T_NAME + +4. DROP NON-EXISTENT TABLE +CREATE TABLE Drop_table_4(id int, t_name char); +SUCCESS +DROP TABLE Drop_table_4; +SUCCESS +DROP TABLE Drop_table_4; +FAILURE +DROP TABLE Drop_table_4_1; +FAILURE + +5. 
CREATE A TABLE WHICH HAS DROPPED +CREATE TABLE Drop_table_5(id int, t_name char); +SUCCESS +DROP TABLE Drop_table_5; +SUCCESS +CREATE TABLE Drop_table_5(id int, t_name char); +SUCCESS +SELECT * FROM Drop_table_5; +ID | T_NAME + +6. DROP A TABLE WITH INDEX +CREATE TABLE Drop_table_6(id int, t_name char); +SUCCESS +CREATE INDEX index_id on Drop_table_6(id); +SUCCESS +INSERT INTO Drop_table_6 VALUES (1,'OB'); +SUCCESS +SELECT * FROM Drop_table_6; +1 | OB +ID | T_NAME +DROP TABLE Drop_table_6; +SUCCESS +SELECT * FROM Drop_table_6; +FAILURE diff --git a/test/case/result/primary-expression.result b/test/case/result/primary-expression.result new file mode 100644 index 0000000000000000000000000000000000000000..4e683c036862ca763893d9951a54d6ac72f11544 --- /dev/null +++ b/test/case/result/primary-expression.result @@ -0,0 +1,77 @@ +INITIALIZATION +create table exp_table(id int, col1 int, col2 int, col3 float, col4 float); +SUCCESS +insert into exp_table VALUES (1, 1, 1, 1.0, 1.5); +SUCCESS +insert into exp_table VALUES (2, 2, -2, 5.5, 1.0); +SUCCESS +insert into exp_table VALUES (3, 3, 4, 5.0, 4.0); +SUCCESS + +1. SELECT +select * from exp_table where 1 = 5/4; +ID | COL1 | COL2 | COL3 | COL4 +select * from exp_table where col1-2 > 0; +3 | 3 | 4 | 5 | 4 +ID | COL1 | COL2 | COL3 | COL4 +select * from exp_table where 2+col2 < 1; +2 | 2 | -2 | 5.5 | 1 +ID | COL1 | COL2 | COL3 | COL4 +select * from exp_table where col1*col2 < 0; +2 | 2 | -2 | 5.5 | 1 +ID | COL1 | COL2 | COL3 | COL4 + +select * from exp_table where 5/4 = 1; +ID | COL1 | COL2 | COL3 | COL4 +select * from exp_table where 0 < col1-2; +3 | 3 | 4 | 5 | 4 +ID | COL1 | COL2 | COL3 | COL4 +select * from exp_table where 1.0 > 2+col2; +2 | 2 | -2 | 5.5 | 1 +ID | COL1 | COL2 | COL3 | COL4 +select * from exp_table where -0 < col1-col2; +2 | 2 | -2 | 5.5 | 1 +ID | COL1 | COL2 | COL3 | COL4 +select * from exp_table where 0 < -2+col1; +3 | 3 | 4 | 5 | 4 +ID | COL1 | COL2 | COL3 | COL4 + +select * from exp_table where 1+1 = 2*1.0; +1 | 1 | 1 | 1 | 1.5 +2 | 2 | -2 | 5.5 | 1 +3 | 3 | 4 | 5 | 4 +ID | COL1 | COL2 | COL3 | COL4 +select * from exp_table where 5/4*8 < 4+col2*col3/2; +3 | 3 | 4 | 5 | 4 +ID | COL1 | COL2 | COL3 | COL4 +select * from exp_table where 5/4*8 < (4+col2)*col3/2; +3 | 3 | 4 | 5 | 4 +ID | COL1 | COL2 | COL3 | COL4 + +select id,-(col2*(-1)+1)+(col4+2)*(col1+col3*2),(4+col2)*col3/2 from exp_table where -(col2*(-1)+1)+(col4+2)*(col1+col3*2) > (4+col2)*col3/2; +1 | 10.5 | 2.5 +2 | 36 | 5.5 +3 | 81 | 20 +ID | -(COL2*(-1)+1)+(COL4+2)*(COL1+COL3*2) | (4+COL2)*COL3/2 +select id,col1,col2,col3,col4,6-(col2*(1+col1))+(col4+2)/(1+col1*4+col3*2) from exp_table where 6-(col2*(1+col1))+(col4+2)/(1+col1*4+col3*2) > 5; +2 | 2 | -2 | 5.5 | 1 | 12.15 +ID | COL1 | COL2 | COL3 | COL4 | 6-(COL2*(1+COL1))+(COL4+2)/(1+COL1*4+COL3*2) +select id,col1,col2,col3,col4,3*col1/(col2+2) from exp_table where 3*col1/(col2+2) > 1; +3 | 3 | 4 | 5 | 4 | 1.5 +ID | COL1 | COL2 | COL3 | COL4 | 3*COL1/(COL2+2) +select id,3*col1/(col2+2) from exp_table where 3*col1/(col2+2)+1/0 > 1; +ID | 3*COL1/(COL2+2) +select * from exp_table where 1/0 = 1/0; +ID | COL1 | COL2 | COL3 | COL4 + +2. 
EXPRESSION ABOUT MANY TABLES +create table exp_table2(id int, col1 int); +SUCCESS +insert into exp_table2 VALUES (1, 1); +SUCCESS +insert into exp_table2 VALUES (2, 3); +SUCCESS +select exp_table.id,3*exp_table2.col1/(exp_table.col2+2) from exp_table,exp_table2 where 3*exp_table2.col1/(exp_table.col2+2)>1; +1 | 3 +3 | 1.5 +exp_table.ID | 3*EXP_TABLE2.COL1/(EXP_TABLE.COL2+2) diff --git a/test/case/result/primary-group-by.result b/test/case/result/primary-group-by.result new file mode 100644 index 0000000000000000000000000000000000000000..cec89e77dd3eb30f78131f26961909c20cc157e3 --- /dev/null +++ b/test/case/result/primary-group-by.result @@ -0,0 +1,81 @@ +1. CREATE TABLE +create table t_group_by (id int, score float, name char); +SUCCESS +create table t_group_by_2 (id int, age int); +SUCCESS + +2. INSERT RECORDS +insert into t_group_by values(3, 1.0, 'a'); +SUCCESS +insert into t_group_by values(1, 2.0, 'b'); +SUCCESS +insert into t_group_by values(4, 3.0, 'c'); +SUCCESS +insert into t_group_by values(3, 2.0, 'c'); +SUCCESS +insert into t_group_by values(3, 4.0, 'c'); +SUCCESS +insert into t_group_by values(3, 3.0, 'd'); +SUCCESS +insert into t_group_by values(3, 2.0, 'f'); +SUCCESS + +insert into t_group_by_2 values(1, 10); +SUCCESS +insert into t_group_by_2 values(2, 20); +SUCCESS +insert into t_group_by_2 values(3, 10); +SUCCESS +insert into t_group_by_2 values(3, 20); +SUCCESS +insert into t_group_by_2 values(3, 40); +SUCCESS +insert into t_group_by_2 values(4, 20); +SUCCESS + +3. PRIMARY GROUP BY +select id, avg(score) from t_group_by group by id; +1 | 2 +3 | 2.4 +4 | 3 +ID | AVG(SCORE) + +select name, min(id), max(score) from t_group_by group by name; +A | 3 | 1 +B | 1 | 2 +C | 3 | 4 +D | 3 | 3 +F | 3 | 2 +NAME | MIN(ID) | MAX(SCORE) + +select id, name, avg(score) from t_group_by group by id, name; +1 | B | 2 +3 | A | 1 +3 | C | 3 +3 | D | 3 +3 | F | 2 +4 | C | 3 +ID | NAME | AVG(SCORE) + +4. WITH WHERE CONDITION +select id, avg(score) from t_group_by where id>2 group by id; +3 | 2.4 +4 | 3 +ID | AVG(SCORE) + +select name, count(id), max(score) from t_group_by where name > 'a' and id>=0 group by name; +B | 1 | 2 +C | 3 | 4 +D | 1 | 3 +F | 1 | 2 +NAME | COUNT(ID) | MAX(SCORE) + +5. MULTI TABLE +select t_group_by.id, t_group_by.name, avg(t_group_by.score), avg(t_group_by_2.age) from t_group_by, t_group_by_2 where t_group_by.id=t_group_by_2.id group by t_group_by.id, t_group_by.name; +1 | B | 2 | 10 +3 | A | 1 | 23.33 +3 | C | 3 | 23.33 +3 | D | 3 | 23.33 +3 | F | 2 | 23.33 +4 | C | 3 | 20 +T_GROUP_BY.ID | T_GROUP_BY.NAME | AVG(T_GROUP_BY.SCORE) | AVG(T_GROUP_BY_2.AGE) diff --git a/test/case/result/primary-insert.result b/test/case/result/primary-insert.result new file mode 100644 index 0000000000000000000000000000000000000000..cb054382d5cd8b6f8780e15c10f446c6f510ba20 --- /dev/null +++ b/test/case/result/primary-insert.result @@ -0,0 +1,22 @@ +INITIALIZATION +CREATE TABLE insert_table(id int, t_name char, col1 int, col2 int); +SUCCESS + +1. INSERT +INSERT INTO insert_table VALUES (1,'N1',1,1); +SUCCESS +INSERT INTO insert_table VALUES (2,'N2',1,1),(3,'N3',2,1); +SUCCESS + +2. ERROR +INSERT INTO insert_table VALUES (4,'N4',1,1),(1,1,1); +FAILURE +INSERT INTO insert_table VALUES (4,'N4',1,1),(1,1,1,1); +FAILURE + +3. 
SELECT +SELECT * FROM insert_table; +1 | N1 | 1 | 1 +2 | N2 | 1 | 1 +3 | N3 | 2 | 1 +ID | T_NAME | COL1 | COL2 diff --git a/test/case/result/primary-join-tables.result b/test/case/result/primary-join-tables.result new file mode 100644 index 0000000000000000000000000000000000000000..6f515f4869bf837c077607e884a6d2f51a980a23 --- /dev/null +++ b/test/case/result/primary-join-tables.result @@ -0,0 +1,1375 @@ +INITIALIZATION +CREATE TABLE join_table_1(id int, name char); +SUCCESS +CREATE TABLE join_table_2(id int, num int); +SUCCESS +CREATE TABLE join_table_3(id int, num2 int); +SUCCESS +create table join_table_empty_1(id int, num_empty_1 int); +SUCCESS +create table join_table_empty_2(id int, num_empty_2 int); +SUCCESS + +INSERT INTO join_table_1 VALUES (1, 'a'); +SUCCESS +INSERT INTO join_table_1 VALUES (2, 'b'); +SUCCESS +INSERT INTO join_table_1 VALUES (3, 'c'); +SUCCESS +INSERT INTO join_table_2 VALUES (1, 2); +SUCCESS +INSERT INTO join_table_2 VALUES (2, 15); +SUCCESS +INSERT INTO join_table_3 VALUES (1, 120); +SUCCESS +INSERT INTO join_table_3 VALUES (3, 800); +SUCCESS + +1. SELECT +Select * from join_table_1 inner join join_table_2 on join_table_1.id=join_table_2.id; +1 | A | 1 | 2 +2 | B | 2 | 15 +JOIN_TABLE_1.ID | JOIN_TABLE_1.NAME | JOIN_TABLE_2.ID | JOIN_TABLE_2.NUM +Select join_table_1.name from join_table_1 inner join join_table_2 on join_table_1.id=join_table_2.id; +A +B +JOIN_TABLE_1.NAME +Select join_table_2.num from join_table_1 inner join join_table_2 on join_table_1.id=join_table_2.id; +15 +2 +JOIN_TABLE_2.NUM +Select * from join_table_1 inner join join_table_2 on join_table_1.id=join_table_2.id inner join join_table_3 on join_table_1.id=join_table_3.id; +1 | A | 1 | 2 | 1 | 120 +JOIN_TABLE_1.ID | JOIN_TABLE_1.NAME | JOIN_TABLE_2.ID | JOIN_TABLE_2.NUM | JOIN_TABLE_3.ID | JOIN_TABLE_3.NUM2 +Select * from join_table_1 inner join join_table_2 on join_table_1.id=join_table_2.id and join_table_2.num>13 where join_table_1.name='b'; +2 | B | 2 | 15 +JOIN_TABLE_1.ID | JOIN_TABLE_1.NAME | JOIN_TABLE_2.ID | JOIN_TABLE_2.NUM +Select * from join_table_1 inner join join_table_2 on join_table_1.id=join_table_2.id and join_table_2.num>13 where join_table_1.name='a'; +JOIN_TABLE_1.ID | JOIN_TABLE_1.NAME | JOIN_TABLE_2.ID | JOIN_TABLE_2.NUM +Select * from join_table_1 inner join join_table_2 on join_table_1.id=join_table_2.id and join_table_2.num>23 where join_table_1.name='b'; +JOIN_TABLE_1.ID | JOIN_TABLE_1.NAME | JOIN_TABLE_2.ID | JOIN_TABLE_2.NUM + +3. 
EMPTY +select * from join_table_1 inner join join_table_empty_1 on join_table_1.id=join_table_empty_1.id; +JOIN_TABLE_1.ID | JOIN_TABLE_1.NAME | JOIN_TABLE_EMPTY_1.ID | JOIN_TABLE_EMPTY_1.NUM_EMPTY_1 +select * from join_table_empty_1 inner join join_table_1 on join_table_empty_1.id=join_table_1.id; +JOIN_TABLE_EMPTY_1.ID | JOIN_TABLE_EMPTY_1.NUM_EMPTY_1 | JOIN_TABLE_1.ID | JOIN_TABLE_1.NAME +select * from join_table_empty_1 inner join join_table_empty_2 on join_table_empty_1.id = join_table_empty_2.id; +JOIN_TABLE_EMPTY_1.ID | JOIN_TABLE_EMPTY_1.NUM_EMPTY_1 | JOIN_TABLE_EMPTY_2.ID | JOIN_TABLE_EMPTY_2.NUM_EMPTY_2 +select * from join_table_1 inner join join_table_2 on join_table_1.id = join_table_2.id inner join join_table_empty_1 on join_table_1.id=join_table_empty_1.id; +JOIN_TABLE_1.ID | JOIN_TABLE_1.NAME | JOIN_TABLE_2.ID | JOIN_TABLE_2.NUM | JOIN_TABLE_EMPTY_1.ID | JOIN_TABLE_EMPTY_1.NUM_EMPTY_1 +select * from join_table_empty_1 inner join join_table_1 on join_table_empty_1.id=join_table_1.id inner join join_table_2 on join_table_1.id=join_table_2.id; +JOIN_TABLE_EMPTY_1.ID | JOIN_TABLE_EMPTY_1.NUM_EMPTY_1 | JOIN_TABLE_1.ID | JOIN_TABLE_1.NAME | JOIN_TABLE_2.ID | JOIN_TABLE_2.NUM + +4. VERY LARGE JOIN +create table join_table_large_1(id int, num1 int); +SUCCESS +create table join_table_large_2(id int, num2 int); +SUCCESS +create table join_table_large_3(id int, num3 int); +SUCCESS +create table join_table_large_4(id int, num4 int); +SUCCESS +create table join_table_large_5(id int, num5 int); +SUCCESS +create table join_table_large_6(id int, num6 int); +SUCCESS + +insert into join_table_large_1 values(1, 1); +SUCCESS +insert into join_table_large_1 values(2, 2); +SUCCESS +insert into join_table_large_1 values(3, 3); +SUCCESS +insert into join_table_large_1 values(4, 4); +SUCCESS +insert into join_table_large_1 values(5, 5); +SUCCESS +insert into join_table_large_1 values(6, 6); +SUCCESS +insert into join_table_large_1 values(7, 7); +SUCCESS +insert into join_table_large_1 values(8, 8); +SUCCESS +insert into join_table_large_1 values(9, 9); +SUCCESS +insert into join_table_large_1 values(10, 10); +SUCCESS +insert into join_table_large_1 values(11, 11); +SUCCESS +insert into join_table_large_1 values(12, 12); +SUCCESS +insert into join_table_large_1 values(13, 13); +SUCCESS +insert into join_table_large_1 values(14, 14); +SUCCESS +insert into join_table_large_1 values(15, 15); +SUCCESS +insert into join_table_large_1 values(16, 16); +SUCCESS +insert into join_table_large_1 values(17, 17); +SUCCESS +insert into join_table_large_1 values(18, 18); +SUCCESS +insert into join_table_large_1 values(19, 19); +SUCCESS +insert into join_table_large_1 values(20, 20); +SUCCESS +insert into join_table_large_1 values(21, 21); +SUCCESS +insert into join_table_large_1 values(22, 22); +SUCCESS +insert into join_table_large_1 values(23, 23); +SUCCESS +insert into join_table_large_1 values(24, 24); +SUCCESS +insert into join_table_large_1 values(25, 25); +SUCCESS +insert into join_table_large_1 values(26, 26); +SUCCESS +insert into join_table_large_1 values(27, 27); +SUCCESS +insert into join_table_large_1 values(28, 28); +SUCCESS +insert into join_table_large_1 values(29, 29); +SUCCESS +insert into join_table_large_1 values(30, 30); +SUCCESS +insert into join_table_large_1 values(31, 31); +SUCCESS +insert into join_table_large_1 values(32, 32); +SUCCESS +insert into join_table_large_1 values(33, 33); +SUCCESS +insert into join_table_large_1 values(34, 34); +SUCCESS +insert into join_table_large_1 
values(35, 35); +SUCCESS +insert into join_table_large_1 values(36, 36); +SUCCESS +insert into join_table_large_1 values(37, 37); +SUCCESS +insert into join_table_large_1 values(38, 38); +SUCCESS +insert into join_table_large_1 values(39, 39); +SUCCESS +insert into join_table_large_1 values(40, 40); +SUCCESS +insert into join_table_large_1 values(41, 41); +SUCCESS +insert into join_table_large_1 values(42, 42); +SUCCESS +insert into join_table_large_1 values(43, 43); +SUCCESS +insert into join_table_large_1 values(44, 44); +SUCCESS +insert into join_table_large_1 values(45, 45); +SUCCESS +insert into join_table_large_1 values(46, 46); +SUCCESS +insert into join_table_large_1 values(47, 47); +SUCCESS +insert into join_table_large_1 values(48, 48); +SUCCESS +insert into join_table_large_1 values(49, 49); +SUCCESS +insert into join_table_large_1 values(50, 50); +SUCCESS +insert into join_table_large_1 values(51, 51); +SUCCESS +insert into join_table_large_1 values(52, 52); +SUCCESS +insert into join_table_large_1 values(53, 53); +SUCCESS +insert into join_table_large_1 values(54, 54); +SUCCESS +insert into join_table_large_1 values(55, 55); +SUCCESS +insert into join_table_large_1 values(56, 56); +SUCCESS +insert into join_table_large_1 values(57, 57); +SUCCESS +insert into join_table_large_1 values(58, 58); +SUCCESS +insert into join_table_large_1 values(59, 59); +SUCCESS +insert into join_table_large_1 values(60, 60); +SUCCESS +insert into join_table_large_1 values(61, 61); +SUCCESS +insert into join_table_large_1 values(62, 62); +SUCCESS +insert into join_table_large_1 values(63, 63); +SUCCESS +insert into join_table_large_1 values(64, 64); +SUCCESS +insert into join_table_large_1 values(65, 65); +SUCCESS +insert into join_table_large_1 values(66, 66); +SUCCESS +insert into join_table_large_1 values(67, 67); +SUCCESS +insert into join_table_large_1 values(68, 68); +SUCCESS +insert into join_table_large_1 values(69, 69); +SUCCESS +insert into join_table_large_1 values(70, 70); +SUCCESS +insert into join_table_large_1 values(71, 71); +SUCCESS +insert into join_table_large_1 values(72, 72); +SUCCESS +insert into join_table_large_1 values(73, 73); +SUCCESS +insert into join_table_large_1 values(74, 74); +SUCCESS +insert into join_table_large_1 values(75, 75); +SUCCESS +insert into join_table_large_1 values(76, 76); +SUCCESS +insert into join_table_large_1 values(77, 77); +SUCCESS +insert into join_table_large_1 values(78, 78); +SUCCESS +insert into join_table_large_1 values(79, 79); +SUCCESS +insert into join_table_large_1 values(80, 80); +SUCCESS +insert into join_table_large_1 values(81, 81); +SUCCESS +insert into join_table_large_1 values(82, 82); +SUCCESS +insert into join_table_large_1 values(83, 83); +SUCCESS +insert into join_table_large_1 values(84, 84); +SUCCESS +insert into join_table_large_1 values(85, 85); +SUCCESS +insert into join_table_large_1 values(86, 86); +SUCCESS +insert into join_table_large_1 values(87, 87); +SUCCESS +insert into join_table_large_1 values(88, 88); +SUCCESS +insert into join_table_large_1 values(89, 89); +SUCCESS +insert into join_table_large_1 values(90, 90); +SUCCESS +insert into join_table_large_1 values(91, 91); +SUCCESS +insert into join_table_large_1 values(92, 92); +SUCCESS +insert into join_table_large_1 values(93, 93); +SUCCESS +insert into join_table_large_1 values(94, 94); +SUCCESS +insert into join_table_large_1 values(95, 95); +SUCCESS +insert into join_table_large_1 values(96, 96); +SUCCESS +insert into join_table_large_1 values(97, 97); 
+SUCCESS +insert into join_table_large_1 values(98, 98); +SUCCESS +insert into join_table_large_1 values(99, 99); +SUCCESS +insert into join_table_large_1 values(100, 100); +SUCCESS + +insert into join_table_large_2 values(1, 1); +SUCCESS +insert into join_table_large_2 values(2, 2); +SUCCESS +insert into join_table_large_2 values(3, 3); +SUCCESS +insert into join_table_large_2 values(4, 4); +SUCCESS +insert into join_table_large_2 values(5, 5); +SUCCESS +insert into join_table_large_2 values(6, 6); +SUCCESS +insert into join_table_large_2 values(7, 7); +SUCCESS +insert into join_table_large_2 values(8, 8); +SUCCESS +insert into join_table_large_2 values(9, 9); +SUCCESS +insert into join_table_large_2 values(10, 10); +SUCCESS +insert into join_table_large_2 values(11, 11); +SUCCESS +insert into join_table_large_2 values(12, 12); +SUCCESS +insert into join_table_large_2 values(13, 13); +SUCCESS +insert into join_table_large_2 values(14, 14); +SUCCESS +insert into join_table_large_2 values(15, 15); +SUCCESS +insert into join_table_large_2 values(16, 16); +SUCCESS +insert into join_table_large_2 values(17, 17); +SUCCESS +insert into join_table_large_2 values(18, 18); +SUCCESS +insert into join_table_large_2 values(19, 19); +SUCCESS +insert into join_table_large_2 values(20, 20); +SUCCESS +insert into join_table_large_2 values(21, 21); +SUCCESS +insert into join_table_large_2 values(22, 22); +SUCCESS +insert into join_table_large_2 values(23, 23); +SUCCESS +insert into join_table_large_2 values(24, 24); +SUCCESS +insert into join_table_large_2 values(25, 25); +SUCCESS +insert into join_table_large_2 values(26, 26); +SUCCESS +insert into join_table_large_2 values(27, 27); +SUCCESS +insert into join_table_large_2 values(28, 28); +SUCCESS +insert into join_table_large_2 values(29, 29); +SUCCESS +insert into join_table_large_2 values(30, 30); +SUCCESS +insert into join_table_large_2 values(31, 31); +SUCCESS +insert into join_table_large_2 values(32, 32); +SUCCESS +insert into join_table_large_2 values(33, 33); +SUCCESS +insert into join_table_large_2 values(34, 34); +SUCCESS +insert into join_table_large_2 values(35, 35); +SUCCESS +insert into join_table_large_2 values(36, 36); +SUCCESS +insert into join_table_large_2 values(37, 37); +SUCCESS +insert into join_table_large_2 values(38, 38); +SUCCESS +insert into join_table_large_2 values(39, 39); +SUCCESS +insert into join_table_large_2 values(40, 40); +SUCCESS +insert into join_table_large_2 values(41, 41); +SUCCESS +insert into join_table_large_2 values(42, 42); +SUCCESS +insert into join_table_large_2 values(43, 43); +SUCCESS +insert into join_table_large_2 values(44, 44); +SUCCESS +insert into join_table_large_2 values(45, 45); +SUCCESS +insert into join_table_large_2 values(46, 46); +SUCCESS +insert into join_table_large_2 values(47, 47); +SUCCESS +insert into join_table_large_2 values(48, 48); +SUCCESS +insert into join_table_large_2 values(49, 49); +SUCCESS +insert into join_table_large_2 values(50, 50); +SUCCESS +insert into join_table_large_2 values(51, 51); +SUCCESS +insert into join_table_large_2 values(52, 52); +SUCCESS +insert into join_table_large_2 values(53, 53); +SUCCESS +insert into join_table_large_2 values(54, 54); +SUCCESS +insert into join_table_large_2 values(55, 55); +SUCCESS +insert into join_table_large_2 values(56, 56); +SUCCESS +insert into join_table_large_2 values(57, 57); +SUCCESS +insert into join_table_large_2 values(58, 58); +SUCCESS +insert into join_table_large_2 values(59, 59); +SUCCESS +insert into 
join_table_large_2 values(60, 60); +SUCCESS +insert into join_table_large_2 values(61, 61); +SUCCESS +insert into join_table_large_2 values(62, 62); +SUCCESS +insert into join_table_large_2 values(63, 63); +SUCCESS +insert into join_table_large_2 values(64, 64); +SUCCESS +insert into join_table_large_2 values(65, 65); +SUCCESS +insert into join_table_large_2 values(66, 66); +SUCCESS +insert into join_table_large_2 values(67, 67); +SUCCESS +insert into join_table_large_2 values(68, 68); +SUCCESS +insert into join_table_large_2 values(69, 69); +SUCCESS +insert into join_table_large_2 values(70, 70); +SUCCESS +insert into join_table_large_2 values(71, 71); +SUCCESS +insert into join_table_large_2 values(72, 72); +SUCCESS +insert into join_table_large_2 values(73, 73); +SUCCESS +insert into join_table_large_2 values(74, 74); +SUCCESS +insert into join_table_large_2 values(75, 75); +SUCCESS +insert into join_table_large_2 values(76, 76); +SUCCESS +insert into join_table_large_2 values(77, 77); +SUCCESS +insert into join_table_large_2 values(78, 78); +SUCCESS +insert into join_table_large_2 values(79, 79); +SUCCESS +insert into join_table_large_2 values(80, 80); +SUCCESS +insert into join_table_large_2 values(81, 81); +SUCCESS +insert into join_table_large_2 values(82, 82); +SUCCESS +insert into join_table_large_2 values(83, 83); +SUCCESS +insert into join_table_large_2 values(84, 84); +SUCCESS +insert into join_table_large_2 values(85, 85); +SUCCESS +insert into join_table_large_2 values(86, 86); +SUCCESS +insert into join_table_large_2 values(87, 87); +SUCCESS +insert into join_table_large_2 values(88, 88); +SUCCESS +insert into join_table_large_2 values(89, 89); +SUCCESS +insert into join_table_large_2 values(90, 90); +SUCCESS +insert into join_table_large_2 values(91, 91); +SUCCESS +insert into join_table_large_2 values(92, 92); +SUCCESS +insert into join_table_large_2 values(93, 93); +SUCCESS +insert into join_table_large_2 values(94, 94); +SUCCESS +insert into join_table_large_2 values(95, 95); +SUCCESS +insert into join_table_large_2 values(96, 96); +SUCCESS +insert into join_table_large_2 values(97, 97); +SUCCESS +insert into join_table_large_2 values(98, 98); +SUCCESS +insert into join_table_large_2 values(99, 99); +SUCCESS +insert into join_table_large_2 values(100, 100); +SUCCESS + +insert into join_table_large_3 values(1, 1); +SUCCESS +insert into join_table_large_3 values(2, 2); +SUCCESS +insert into join_table_large_3 values(3, 3); +SUCCESS +insert into join_table_large_3 values(4, 4); +SUCCESS +insert into join_table_large_3 values(5, 5); +SUCCESS +insert into join_table_large_3 values(6, 6); +SUCCESS +insert into join_table_large_3 values(7, 7); +SUCCESS +insert into join_table_large_3 values(8, 8); +SUCCESS +insert into join_table_large_3 values(9, 9); +SUCCESS +insert into join_table_large_3 values(10, 10); +SUCCESS +insert into join_table_large_3 values(11, 11); +SUCCESS +insert into join_table_large_3 values(12, 12); +SUCCESS +insert into join_table_large_3 values(13, 13); +SUCCESS +insert into join_table_large_3 values(14, 14); +SUCCESS +insert into join_table_large_3 values(15, 15); +SUCCESS +insert into join_table_large_3 values(16, 16); +SUCCESS +insert into join_table_large_3 values(17, 17); +SUCCESS +insert into join_table_large_3 values(18, 18); +SUCCESS +insert into join_table_large_3 values(19, 19); +SUCCESS +insert into join_table_large_3 values(20, 20); +SUCCESS +insert into join_table_large_3 values(21, 21); +SUCCESS +insert into join_table_large_3 values(22, 22); 
+SUCCESS +insert into join_table_large_3 values(23, 23); +SUCCESS +insert into join_table_large_3 values(24, 24); +SUCCESS +insert into join_table_large_3 values(25, 25); +SUCCESS +insert into join_table_large_3 values(26, 26); +SUCCESS +insert into join_table_large_3 values(27, 27); +SUCCESS +insert into join_table_large_3 values(28, 28); +SUCCESS +insert into join_table_large_3 values(29, 29); +SUCCESS +insert into join_table_large_3 values(30, 30); +SUCCESS +insert into join_table_large_3 values(31, 31); +SUCCESS +insert into join_table_large_3 values(32, 32); +SUCCESS +insert into join_table_large_3 values(33, 33); +SUCCESS +insert into join_table_large_3 values(34, 34); +SUCCESS +insert into join_table_large_3 values(35, 35); +SUCCESS +insert into join_table_large_3 values(36, 36); +SUCCESS +insert into join_table_large_3 values(37, 37); +SUCCESS +insert into join_table_large_3 values(38, 38); +SUCCESS +insert into join_table_large_3 values(39, 39); +SUCCESS +insert into join_table_large_3 values(40, 40); +SUCCESS +insert into join_table_large_3 values(41, 41); +SUCCESS +insert into join_table_large_3 values(42, 42); +SUCCESS +insert into join_table_large_3 values(43, 43); +SUCCESS +insert into join_table_large_3 values(44, 44); +SUCCESS +insert into join_table_large_3 values(45, 45); +SUCCESS +insert into join_table_large_3 values(46, 46); +SUCCESS +insert into join_table_large_3 values(47, 47); +SUCCESS +insert into join_table_large_3 values(48, 48); +SUCCESS +insert into join_table_large_3 values(49, 49); +SUCCESS +insert into join_table_large_3 values(50, 50); +SUCCESS +insert into join_table_large_3 values(51, 51); +SUCCESS +insert into join_table_large_3 values(52, 52); +SUCCESS +insert into join_table_large_3 values(53, 53); +SUCCESS +insert into join_table_large_3 values(54, 54); +SUCCESS +insert into join_table_large_3 values(55, 55); +SUCCESS +insert into join_table_large_3 values(56, 56); +SUCCESS +insert into join_table_large_3 values(57, 57); +SUCCESS +insert into join_table_large_3 values(58, 58); +SUCCESS +insert into join_table_large_3 values(59, 59); +SUCCESS +insert into join_table_large_3 values(60, 60); +SUCCESS +insert into join_table_large_3 values(61, 61); +SUCCESS +insert into join_table_large_3 values(62, 62); +SUCCESS +insert into join_table_large_3 values(63, 63); +SUCCESS +insert into join_table_large_3 values(64, 64); +SUCCESS +insert into join_table_large_3 values(65, 65); +SUCCESS +insert into join_table_large_3 values(66, 66); +SUCCESS +insert into join_table_large_3 values(67, 67); +SUCCESS +insert into join_table_large_3 values(68, 68); +SUCCESS +insert into join_table_large_3 values(69, 69); +SUCCESS +insert into join_table_large_3 values(70, 70); +SUCCESS +insert into join_table_large_3 values(71, 71); +SUCCESS +insert into join_table_large_3 values(72, 72); +SUCCESS +insert into join_table_large_3 values(73, 73); +SUCCESS +insert into join_table_large_3 values(74, 74); +SUCCESS +insert into join_table_large_3 values(75, 75); +SUCCESS +insert into join_table_large_3 values(76, 76); +SUCCESS +insert into join_table_large_3 values(77, 77); +SUCCESS +insert into join_table_large_3 values(78, 78); +SUCCESS +insert into join_table_large_3 values(79, 79); +SUCCESS +insert into join_table_large_3 values(80, 80); +SUCCESS +insert into join_table_large_3 values(81, 81); +SUCCESS +insert into join_table_large_3 values(82, 82); +SUCCESS +insert into join_table_large_3 values(83, 83); +SUCCESS +insert into join_table_large_3 values(84, 84); +SUCCESS +insert 
into join_table_large_3 values(85, 85); +SUCCESS +insert into join_table_large_3 values(86, 86); +SUCCESS +insert into join_table_large_3 values(87, 87); +SUCCESS +insert into join_table_large_3 values(88, 88); +SUCCESS +insert into join_table_large_3 values(89, 89); +SUCCESS +insert into join_table_large_3 values(90, 90); +SUCCESS +insert into join_table_large_3 values(91, 91); +SUCCESS +insert into join_table_large_3 values(92, 92); +SUCCESS +insert into join_table_large_3 values(93, 93); +SUCCESS +insert into join_table_large_3 values(94, 94); +SUCCESS +insert into join_table_large_3 values(95, 95); +SUCCESS +insert into join_table_large_3 values(96, 96); +SUCCESS +insert into join_table_large_3 values(97, 97); +SUCCESS +insert into join_table_large_3 values(98, 98); +SUCCESS +insert into join_table_large_3 values(99, 99); +SUCCESS +insert into join_table_large_3 values(100, 100); +SUCCESS + +insert into join_table_large_4 values(1, 1); +SUCCESS +insert into join_table_large_4 values(2, 2); +SUCCESS +insert into join_table_large_4 values(3, 3); +SUCCESS +insert into join_table_large_4 values(4, 4); +SUCCESS +insert into join_table_large_4 values(5, 5); +SUCCESS +insert into join_table_large_4 values(6, 6); +SUCCESS +insert into join_table_large_4 values(7, 7); +SUCCESS +insert into join_table_large_4 values(8, 8); +SUCCESS +insert into join_table_large_4 values(9, 9); +SUCCESS +insert into join_table_large_4 values(10, 10); +SUCCESS +insert into join_table_large_4 values(11, 11); +SUCCESS +insert into join_table_large_4 values(12, 12); +SUCCESS +insert into join_table_large_4 values(13, 13); +SUCCESS +insert into join_table_large_4 values(14, 14); +SUCCESS +insert into join_table_large_4 values(15, 15); +SUCCESS +insert into join_table_large_4 values(16, 16); +SUCCESS +insert into join_table_large_4 values(17, 17); +SUCCESS +insert into join_table_large_4 values(18, 18); +SUCCESS +insert into join_table_large_4 values(19, 19); +SUCCESS +insert into join_table_large_4 values(20, 20); +SUCCESS +insert into join_table_large_4 values(21, 21); +SUCCESS +insert into join_table_large_4 values(22, 22); +SUCCESS +insert into join_table_large_4 values(23, 23); +SUCCESS +insert into join_table_large_4 values(24, 24); +SUCCESS +insert into join_table_large_4 values(25, 25); +SUCCESS +insert into join_table_large_4 values(26, 26); +SUCCESS +insert into join_table_large_4 values(27, 27); +SUCCESS +insert into join_table_large_4 values(28, 28); +SUCCESS +insert into join_table_large_4 values(29, 29); +SUCCESS +insert into join_table_large_4 values(30, 30); +SUCCESS +insert into join_table_large_4 values(31, 31); +SUCCESS +insert into join_table_large_4 values(32, 32); +SUCCESS +insert into join_table_large_4 values(33, 33); +SUCCESS +insert into join_table_large_4 values(34, 34); +SUCCESS +insert into join_table_large_4 values(35, 35); +SUCCESS +insert into join_table_large_4 values(36, 36); +SUCCESS +insert into join_table_large_4 values(37, 37); +SUCCESS +insert into join_table_large_4 values(38, 38); +SUCCESS +insert into join_table_large_4 values(39, 39); +SUCCESS +insert into join_table_large_4 values(40, 40); +SUCCESS +insert into join_table_large_4 values(41, 41); +SUCCESS +insert into join_table_large_4 values(42, 42); +SUCCESS +insert into join_table_large_4 values(43, 43); +SUCCESS +insert into join_table_large_4 values(44, 44); +SUCCESS +insert into join_table_large_4 values(45, 45); +SUCCESS +insert into join_table_large_4 values(46, 46); +SUCCESS +insert into join_table_large_4 values(47, 
47); +SUCCESS +insert into join_table_large_4 values(48, 48); +SUCCESS +insert into join_table_large_4 values(49, 49); +SUCCESS +insert into join_table_large_4 values(50, 50); +SUCCESS +insert into join_table_large_4 values(51, 51); +SUCCESS +insert into join_table_large_4 values(52, 52); +SUCCESS +insert into join_table_large_4 values(53, 53); +SUCCESS +insert into join_table_large_4 values(54, 54); +SUCCESS +insert into join_table_large_4 values(55, 55); +SUCCESS +insert into join_table_large_4 values(56, 56); +SUCCESS +insert into join_table_large_4 values(57, 57); +SUCCESS +insert into join_table_large_4 values(58, 58); +SUCCESS +insert into join_table_large_4 values(59, 59); +SUCCESS +insert into join_table_large_4 values(60, 60); +SUCCESS +insert into join_table_large_4 values(61, 61); +SUCCESS +insert into join_table_large_4 values(62, 62); +SUCCESS +insert into join_table_large_4 values(63, 63); +SUCCESS +insert into join_table_large_4 values(64, 64); +SUCCESS +insert into join_table_large_4 values(65, 65); +SUCCESS +insert into join_table_large_4 values(66, 66); +SUCCESS +insert into join_table_large_4 values(67, 67); +SUCCESS +insert into join_table_large_4 values(68, 68); +SUCCESS +insert into join_table_large_4 values(69, 69); +SUCCESS +insert into join_table_large_4 values(70, 70); +SUCCESS +insert into join_table_large_4 values(71, 71); +SUCCESS +insert into join_table_large_4 values(72, 72); +SUCCESS +insert into join_table_large_4 values(73, 73); +SUCCESS +insert into join_table_large_4 values(74, 74); +SUCCESS +insert into join_table_large_4 values(75, 75); +SUCCESS +insert into join_table_large_4 values(76, 76); +SUCCESS +insert into join_table_large_4 values(77, 77); +SUCCESS +insert into join_table_large_4 values(78, 78); +SUCCESS +insert into join_table_large_4 values(79, 79); +SUCCESS +insert into join_table_large_4 values(80, 80); +SUCCESS +insert into join_table_large_4 values(81, 81); +SUCCESS +insert into join_table_large_4 values(82, 82); +SUCCESS +insert into join_table_large_4 values(83, 83); +SUCCESS +insert into join_table_large_4 values(84, 84); +SUCCESS +insert into join_table_large_4 values(85, 85); +SUCCESS +insert into join_table_large_4 values(86, 86); +SUCCESS +insert into join_table_large_4 values(87, 87); +SUCCESS +insert into join_table_large_4 values(88, 88); +SUCCESS +insert into join_table_large_4 values(89, 89); +SUCCESS +insert into join_table_large_4 values(90, 90); +SUCCESS +insert into join_table_large_4 values(91, 91); +SUCCESS +insert into join_table_large_4 values(92, 92); +SUCCESS +insert into join_table_large_4 values(93, 93); +SUCCESS +insert into join_table_large_4 values(94, 94); +SUCCESS +insert into join_table_large_4 values(95, 95); +SUCCESS +insert into join_table_large_4 values(96, 96); +SUCCESS +insert into join_table_large_4 values(97, 97); +SUCCESS +insert into join_table_large_4 values(98, 98); +SUCCESS +insert into join_table_large_4 values(99, 99); +SUCCESS +insert into join_table_large_4 values(100, 100); +SUCCESS + +insert into join_table_large_5 values(1, 1); +SUCCESS +insert into join_table_large_5 values(2, 2); +SUCCESS +insert into join_table_large_5 values(3, 3); +SUCCESS +insert into join_table_large_5 values(4, 4); +SUCCESS +insert into join_table_large_5 values(5, 5); +SUCCESS +insert into join_table_large_5 values(6, 6); +SUCCESS +insert into join_table_large_5 values(7, 7); +SUCCESS +insert into join_table_large_5 values(8, 8); +SUCCESS +insert into join_table_large_5 values(9, 9); +SUCCESS +insert into 
join_table_large_5 values(10, 10); +SUCCESS +insert into join_table_large_5 values(11, 11); +SUCCESS +insert into join_table_large_5 values(12, 12); +SUCCESS +insert into join_table_large_5 values(13, 13); +SUCCESS +insert into join_table_large_5 values(14, 14); +SUCCESS +insert into join_table_large_5 values(15, 15); +SUCCESS +insert into join_table_large_5 values(16, 16); +SUCCESS +insert into join_table_large_5 values(17, 17); +SUCCESS +insert into join_table_large_5 values(18, 18); +SUCCESS +insert into join_table_large_5 values(19, 19); +SUCCESS +insert into join_table_large_5 values(20, 20); +SUCCESS +insert into join_table_large_5 values(21, 21); +SUCCESS +insert into join_table_large_5 values(22, 22); +SUCCESS +insert into join_table_large_5 values(23, 23); +SUCCESS +insert into join_table_large_5 values(24, 24); +SUCCESS +insert into join_table_large_5 values(25, 25); +SUCCESS +insert into join_table_large_5 values(26, 26); +SUCCESS +insert into join_table_large_5 values(27, 27); +SUCCESS +insert into join_table_large_5 values(28, 28); +SUCCESS +insert into join_table_large_5 values(29, 29); +SUCCESS +insert into join_table_large_5 values(30, 30); +SUCCESS +insert into join_table_large_5 values(31, 31); +SUCCESS +insert into join_table_large_5 values(32, 32); +SUCCESS +insert into join_table_large_5 values(33, 33); +SUCCESS +insert into join_table_large_5 values(34, 34); +SUCCESS +insert into join_table_large_5 values(35, 35); +SUCCESS +insert into join_table_large_5 values(36, 36); +SUCCESS +insert into join_table_large_5 values(37, 37); +SUCCESS +insert into join_table_large_5 values(38, 38); +SUCCESS +insert into join_table_large_5 values(39, 39); +SUCCESS +insert into join_table_large_5 values(40, 40); +SUCCESS +insert into join_table_large_5 values(41, 41); +SUCCESS +insert into join_table_large_5 values(42, 42); +SUCCESS +insert into join_table_large_5 values(43, 43); +SUCCESS +insert into join_table_large_5 values(44, 44); +SUCCESS +insert into join_table_large_5 values(45, 45); +SUCCESS +insert into join_table_large_5 values(46, 46); +SUCCESS +insert into join_table_large_5 values(47, 47); +SUCCESS +insert into join_table_large_5 values(48, 48); +SUCCESS +insert into join_table_large_5 values(49, 49); +SUCCESS +insert into join_table_large_5 values(50, 50); +SUCCESS +insert into join_table_large_5 values(51, 51); +SUCCESS +insert into join_table_large_5 values(52, 52); +SUCCESS +insert into join_table_large_5 values(53, 53); +SUCCESS +insert into join_table_large_5 values(54, 54); +SUCCESS +insert into join_table_large_5 values(55, 55); +SUCCESS +insert into join_table_large_5 values(56, 56); +SUCCESS +insert into join_table_large_5 values(57, 57); +SUCCESS +insert into join_table_large_5 values(58, 58); +SUCCESS +insert into join_table_large_5 values(59, 59); +SUCCESS +insert into join_table_large_5 values(60, 60); +SUCCESS +insert into join_table_large_5 values(61, 61); +SUCCESS +insert into join_table_large_5 values(62, 62); +SUCCESS +insert into join_table_large_5 values(63, 63); +SUCCESS +insert into join_table_large_5 values(64, 64); +SUCCESS +insert into join_table_large_5 values(65, 65); +SUCCESS +insert into join_table_large_5 values(66, 66); +SUCCESS +insert into join_table_large_5 values(67, 67); +SUCCESS +insert into join_table_large_5 values(68, 68); +SUCCESS +insert into join_table_large_5 values(69, 69); +SUCCESS +insert into join_table_large_5 values(70, 70); +SUCCESS +insert into join_table_large_5 values(71, 71); +SUCCESS +insert into join_table_large_5 
values(72, 72); +SUCCESS +insert into join_table_large_5 values(73, 73); +SUCCESS +insert into join_table_large_5 values(74, 74); +SUCCESS +insert into join_table_large_5 values(75, 75); +SUCCESS +insert into join_table_large_5 values(76, 76); +SUCCESS +insert into join_table_large_5 values(77, 77); +SUCCESS +insert into join_table_large_5 values(78, 78); +SUCCESS +insert into join_table_large_5 values(79, 79); +SUCCESS +insert into join_table_large_5 values(80, 80); +SUCCESS +insert into join_table_large_5 values(81, 81); +SUCCESS +insert into join_table_large_5 values(82, 82); +SUCCESS +insert into join_table_large_5 values(83, 83); +SUCCESS +insert into join_table_large_5 values(84, 84); +SUCCESS +insert into join_table_large_5 values(85, 85); +SUCCESS +insert into join_table_large_5 values(86, 86); +SUCCESS +insert into join_table_large_5 values(87, 87); +SUCCESS +insert into join_table_large_5 values(88, 88); +SUCCESS +insert into join_table_large_5 values(89, 89); +SUCCESS +insert into join_table_large_5 values(90, 90); +SUCCESS +insert into join_table_large_5 values(91, 91); +SUCCESS +insert into join_table_large_5 values(92, 92); +SUCCESS +insert into join_table_large_5 values(93, 93); +SUCCESS +insert into join_table_large_5 values(94, 94); +SUCCESS +insert into join_table_large_5 values(95, 95); +SUCCESS +insert into join_table_large_5 values(96, 96); +SUCCESS +insert into join_table_large_5 values(97, 97); +SUCCESS +insert into join_table_large_5 values(98, 98); +SUCCESS +insert into join_table_large_5 values(99, 99); +SUCCESS +insert into join_table_large_5 values(100, 100); +SUCCESS + +insert into join_table_large_6 values(1, 1); +SUCCESS +insert into join_table_large_6 values(2, 2); +SUCCESS +insert into join_table_large_6 values(3, 3); +SUCCESS +insert into join_table_large_6 values(4, 4); +SUCCESS +insert into join_table_large_6 values(5, 5); +SUCCESS +insert into join_table_large_6 values(6, 6); +SUCCESS +insert into join_table_large_6 values(7, 7); +SUCCESS +insert into join_table_large_6 values(8, 8); +SUCCESS +insert into join_table_large_6 values(9, 9); +SUCCESS +insert into join_table_large_6 values(10, 10); +SUCCESS +insert into join_table_large_6 values(11, 11); +SUCCESS +insert into join_table_large_6 values(12, 12); +SUCCESS +insert into join_table_large_6 values(13, 13); +SUCCESS +insert into join_table_large_6 values(14, 14); +SUCCESS +insert into join_table_large_6 values(15, 15); +SUCCESS +insert into join_table_large_6 values(16, 16); +SUCCESS +insert into join_table_large_6 values(17, 17); +SUCCESS +insert into join_table_large_6 values(18, 18); +SUCCESS +insert into join_table_large_6 values(19, 19); +SUCCESS +insert into join_table_large_6 values(20, 20); +SUCCESS +insert into join_table_large_6 values(21, 21); +SUCCESS +insert into join_table_large_6 values(22, 22); +SUCCESS +insert into join_table_large_6 values(23, 23); +SUCCESS +insert into join_table_large_6 values(24, 24); +SUCCESS +insert into join_table_large_6 values(25, 25); +SUCCESS +insert into join_table_large_6 values(26, 26); +SUCCESS +insert into join_table_large_6 values(27, 27); +SUCCESS +insert into join_table_large_6 values(28, 28); +SUCCESS +insert into join_table_large_6 values(29, 29); +SUCCESS +insert into join_table_large_6 values(30, 30); +SUCCESS +insert into join_table_large_6 values(31, 31); +SUCCESS +insert into join_table_large_6 values(32, 32); +SUCCESS +insert into join_table_large_6 values(33, 33); +SUCCESS +insert into join_table_large_6 values(34, 34); +SUCCESS +insert 
into join_table_large_6 values(35, 35); +SUCCESS +insert into join_table_large_6 values(36, 36); +SUCCESS +insert into join_table_large_6 values(37, 37); +SUCCESS +insert into join_table_large_6 values(38, 38); +SUCCESS +insert into join_table_large_6 values(39, 39); +SUCCESS +insert into join_table_large_6 values(40, 40); +SUCCESS +insert into join_table_large_6 values(41, 41); +SUCCESS +insert into join_table_large_6 values(42, 42); +SUCCESS +insert into join_table_large_6 values(43, 43); +SUCCESS +insert into join_table_large_6 values(44, 44); +SUCCESS +insert into join_table_large_6 values(45, 45); +SUCCESS +insert into join_table_large_6 values(46, 46); +SUCCESS +insert into join_table_large_6 values(47, 47); +SUCCESS +insert into join_table_large_6 values(48, 48); +SUCCESS +insert into join_table_large_6 values(49, 49); +SUCCESS +insert into join_table_large_6 values(50, 50); +SUCCESS +insert into join_table_large_6 values(51, 51); +SUCCESS +insert into join_table_large_6 values(52, 52); +SUCCESS +insert into join_table_large_6 values(53, 53); +SUCCESS +insert into join_table_large_6 values(54, 54); +SUCCESS +insert into join_table_large_6 values(55, 55); +SUCCESS +insert into join_table_large_6 values(56, 56); +SUCCESS +insert into join_table_large_6 values(57, 57); +SUCCESS +insert into join_table_large_6 values(58, 58); +SUCCESS +insert into join_table_large_6 values(59, 59); +SUCCESS +insert into join_table_large_6 values(60, 60); +SUCCESS +insert into join_table_large_6 values(61, 61); +SUCCESS +insert into join_table_large_6 values(62, 62); +SUCCESS +insert into join_table_large_6 values(63, 63); +SUCCESS +insert into join_table_large_6 values(64, 64); +SUCCESS +insert into join_table_large_6 values(65, 65); +SUCCESS +insert into join_table_large_6 values(66, 66); +SUCCESS +insert into join_table_large_6 values(67, 67); +SUCCESS +insert into join_table_large_6 values(68, 68); +SUCCESS +insert into join_table_large_6 values(69, 69); +SUCCESS +insert into join_table_large_6 values(70, 70); +SUCCESS +insert into join_table_large_6 values(71, 71); +SUCCESS +insert into join_table_large_6 values(72, 72); +SUCCESS +insert into join_table_large_6 values(73, 73); +SUCCESS +insert into join_table_large_6 values(74, 74); +SUCCESS +insert into join_table_large_6 values(75, 75); +SUCCESS +insert into join_table_large_6 values(76, 76); +SUCCESS +insert into join_table_large_6 values(77, 77); +SUCCESS +insert into join_table_large_6 values(78, 78); +SUCCESS +insert into join_table_large_6 values(79, 79); +SUCCESS +insert into join_table_large_6 values(80, 80); +SUCCESS +insert into join_table_large_6 values(81, 81); +SUCCESS +insert into join_table_large_6 values(82, 82); +SUCCESS +insert into join_table_large_6 values(83, 83); +SUCCESS +insert into join_table_large_6 values(84, 84); +SUCCESS +insert into join_table_large_6 values(85, 85); +SUCCESS +insert into join_table_large_6 values(86, 86); +SUCCESS +insert into join_table_large_6 values(87, 87); +SUCCESS +insert into join_table_large_6 values(88, 88); +SUCCESS +insert into join_table_large_6 values(89, 89); +SUCCESS +insert into join_table_large_6 values(90, 90); +SUCCESS +insert into join_table_large_6 values(91, 91); +SUCCESS +insert into join_table_large_6 values(92, 92); +SUCCESS +insert into join_table_large_6 values(93, 93); +SUCCESS +insert into join_table_large_6 values(94, 94); +SUCCESS +insert into join_table_large_6 values(95, 95); +SUCCESS +insert into join_table_large_6 values(96, 96); +SUCCESS +insert into 
join_table_large_6 values(97, 97); +SUCCESS +insert into join_table_large_6 values(98, 98); +SUCCESS +insert into join_table_large_6 values(99, 99); +SUCCESS +insert into join_table_large_6 values(100, 100); +SUCCESS + +select * from join_table_large_1 inner join join_table_large_2 on join_table_large_1.id=join_table_large_2.id inner join join_table_large_3 on join_table_large_1.id=join_table_large_3.id inner join join_table_large_4 on join_table_large_3.id=join_table_large_4.id inner join join_table_large_5 on 1=1 inner join join_table_large_6 on join_table_large_5.id=join_table_large_6.id where join_table_large_3.num3 <10 and join_table_large_5.num5>90; +1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 100 | 100 | 100 | 100 +1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 91 | 91 | 91 | 91 +1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 92 | 92 | 92 | 92 +1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 93 | 93 | 93 | 93 +1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 94 | 94 | 94 | 94 +1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 95 | 95 | 95 | 95 +1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 96 | 96 | 96 | 96 +1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 97 | 97 | 97 | 97 +1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 98 | 98 | 98 | 98 +1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 99 | 99 | 99 | 99 +2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 100 | 100 | 100 | 100 +2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 91 | 91 | 91 | 91 +2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 92 | 92 | 92 | 92 +2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 93 | 93 | 93 | 93 +2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 94 | 94 | 94 | 94 +2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 95 | 95 | 95 | 95 +2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 96 | 96 | 96 | 96 +2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 97 | 97 | 97 | 97 +2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 98 | 98 | 98 | 98 +2 | 2 | 2 | 2 | 2 | 2 | 2 | 2 | 99 | 99 | 99 | 99 +3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 100 | 100 | 100 | 100 +3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 91 | 91 | 91 | 91 +3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 92 | 92 | 92 | 92 +3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 93 | 93 | 93 | 93 +3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 94 | 94 | 94 | 94 +3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 95 | 95 | 95 | 95 +3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 96 | 96 | 96 | 96 +3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 97 | 97 | 97 | 97 +3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 98 | 98 | 98 | 98 +3 | 3 | 3 | 3 | 3 | 3 | 3 | 3 | 99 | 99 | 99 | 99 +4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 100 | 100 | 100 | 100 +4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 91 | 91 | 91 | 91 +4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 92 | 92 | 92 | 92 +4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 93 | 93 | 93 | 93 +4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 94 | 94 | 94 | 94 +4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 95 | 95 | 95 | 95 +4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 96 | 96 | 96 | 96 +4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 97 | 97 | 97 | 97 +4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 98 | 98 | 98 | 98 +4 | 4 | 4 | 4 | 4 | 4 | 4 | 4 | 99 | 99 | 99 | 99 +5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 100 | 100 | 100 | 100 +5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 91 | 91 | 91 | 91 +5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 92 | 92 | 92 | 92 +5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 93 | 93 | 93 | 93 +5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 94 | 94 | 94 | 94 +5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 95 | 95 | 95 | 95 +5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 96 | 96 | 96 | 96 +5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 97 | 97 | 97 | 97 +5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 98 | 98 | 98 | 98 +5 | 5 | 5 | 5 | 5 | 5 | 5 | 5 | 99 | 99 | 99 | 99 +6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 100 | 100 | 100 | 100 +6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 91 | 91 | 91 | 91 +6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 92 | 92 | 92 | 92 +6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 93 | 93 | 93 | 93 +6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 94 | 94 | 94 | 94 +6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 95 | 95 | 95 | 95 +6 | 6 | 6 
| 6 | 6 | 6 | 6 | 6 | 96 | 96 | 96 | 96 +6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 97 | 97 | 97 | 97 +6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 98 | 98 | 98 | 98 +6 | 6 | 6 | 6 | 6 | 6 | 6 | 6 | 99 | 99 | 99 | 99 +7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 100 | 100 | 100 | 100 +7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 91 | 91 | 91 | 91 +7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 92 | 92 | 92 | 92 +7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 93 | 93 | 93 | 93 +7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 94 | 94 | 94 | 94 +7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 95 | 95 | 95 | 95 +7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 96 | 96 | 96 | 96 +7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 97 | 97 | 97 | 97 +7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 98 | 98 | 98 | 98 +7 | 7 | 7 | 7 | 7 | 7 | 7 | 7 | 99 | 99 | 99 | 99 +8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 100 | 100 | 100 | 100 +8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 91 | 91 | 91 | 91 +8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 92 | 92 | 92 | 92 +8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 93 | 93 | 93 | 93 +8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 94 | 94 | 94 | 94 +8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 95 | 95 | 95 | 95 +8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 96 | 96 | 96 | 96 +8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 97 | 97 | 97 | 97 +8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 98 | 98 | 98 | 98 +8 | 8 | 8 | 8 | 8 | 8 | 8 | 8 | 99 | 99 | 99 | 99 +9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 100 | 100 | 100 | 100 +9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 91 | 91 | 91 | 91 +9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 92 | 92 | 92 | 92 +9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 93 | 93 | 93 | 93 +9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 94 | 94 | 94 | 94 +9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 95 | 95 | 95 | 95 +9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 96 | 96 | 96 | 96 +9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 97 | 97 | 97 | 97 +9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 98 | 98 | 98 | 98 +9 | 9 | 9 | 9 | 9 | 9 | 9 | 9 | 99 | 99 | 99 | 99 +JOIN_TABLE_LARGE_1.ID | JOIN_TABLE_LARGE_1.NUM1 | JOIN_TABLE_LARGE_2.ID | JOIN_TABLE_LARGE_2.NUM2 | JOIN_TABLE_LARGE_3.ID | JOIN_TABLE_LARGE_3.NUM3 | JOIN_TABLE_LARGE_4.ID | JOIN_TABLE_LARGE_4.NUM4 | JOIN_TABLE_LARGE_5.ID | JOIN_TABLE_LARGE_5.NUM5 | JOIN_TABLE_LARGE_6.ID | JOIN_TABLE_LARGE_6.NUM6 diff --git a/test/case/result/primary-multi-index.result b/test/case/result/primary-multi-index.result new file mode 100644 index 0000000000000000000000000000000000000000..7939d7a97a80743fe5e089e87a08779cc4777f09 --- /dev/null +++ b/test/case/result/primary-multi-index.result @@ -0,0 +1,169 @@ +1. MULTI INDEX OF EMPTY TABLE +CREATE TABLE multi_index(id int, col1 int, col2 float, col3 char, col4 date, col5 int, col6 int); +SUCCESS +CREATE INDEX i_1_12 ON multi_index(col1,col2); +SUCCESS +CREATE INDEX i_1_345 ON multi_index(col3, col4, col5); +SUCCESS +CREATE INDEX i_1_56 ON multi_index(col5, col6); +SUCCESS +CREATE INDEX i_1_456 ON multi_index(col4, col5, col6); +SUCCESS +SELECT * FROM multi_index; +ID | COL1 | COL2 | COL3 | COL4 | COL5 | COL6 + +2. 
MULTI INDEX OF NON-EMPTY TABLE +CREATE TABLE multi_index2(id int, col1 int, col2 float, col3 char, col4 date, col5 int, col6 int); +SUCCESS +INSERT INTO multi_index2 VALUES (1, 1, 11.2, 'a', '2021-01-02', 1, 1); +SUCCESS +INSERT INTO multi_index2 VALUES (2, 1, 16.2, 'x', '2021-01-02', 1, 61); +SUCCESS +INSERT INTO multi_index2 VALUES (3, 1, 11.6, 'h', '2023-01-02', 10, 17); +SUCCESS + +CREATE INDEX i_2_12 ON multi_index2(col1,col2); +SUCCESS +CREATE INDEX i_2_345 ON multi_index2(col3, col4, col5); +SUCCESS +CREATE INDEX i_2_56 ON multi_index2(col5, col6); +SUCCESS +CREATE INDEX i_2_456 ON multi_index2(col4, col5, col6); +SUCCESS +SELECT * FROM multi_index2; +1 | 1 | 11.2 | A | 2021-01-02 | 1 | 1 +2 | 1 | 16.2 | X | 2021-01-02 | 1 | 61 +3 | 1 | 11.6 | H | 2023-01-02 | 10 | 17 +ID | COL1 | COL2 | COL3 | COL4 | COL5 | COL6 + +3. INFLUENCE OF INSERTING +CREATE TABLE multi_index3(id int, col1 int, col2 float, col3 char, col4 date, col5 int, col6 int); +SUCCESS +CREATE INDEX i_3_i1 ON multi_index3(id,col1); +SUCCESS + +INSERT INTO multi_index3 VALUES (1, 1, 11.2, 'a', '2021-01-02', 1, 1); +SUCCESS +INSERT INTO multi_index3 VALUES (1, 1, 11.2, 'a', '2021-01-02', 1, 1); +SUCCESS +SELECT * FROM multi_index3; +1 | 1 | 11.2 | A | 2021-01-02 | 1 | 1 +1 | 1 | 11.2 | A | 2021-01-02 | 1 | 1 +ID | COL1 | COL2 | COL3 | COL4 | COL5 | COL6 +CREATE INDEX i_3_14 ON multi_index3(col1,col4); +SUCCESS +INSERT INTO multi_index3 VALUES (2, 1, 16.2, 'x', '2021-01-02', 1, 61); +SUCCESS +INSERT INTO multi_index3 VALUES (3, 1, 11.6, 'h', '2023-01-02', 10, 17); +SUCCESS +INSERT INTO multi_index3 VALUES (4, 2, 12.2, 'e', '2022-01-04', 13, 10); +SUCCESS +INSERT INTO multi_index3 VALUES (5, 3, 14.2, 'd', '2020-04-02', 12, 2); +SUCCESS +SELECT * FROM multi_index3; +1 | 1 | 11.2 | A | 2021-01-02 | 1 | 1 +1 | 1 | 11.2 | A | 2021-01-02 | 1 | 1 +2 | 1 | 16.2 | X | 2021-01-02 | 1 | 61 +3 | 1 | 11.6 | H | 2023-01-02 | 10 | 17 +4 | 2 | 12.2 | E | 2022-01-04 | 13 | 10 +5 | 3 | 14.2 | D | 2020-04-02 | 12 | 2 +ID | COL1 | COL2 | COL3 | COL4 | COL5 | COL6 + +4. QUERY WITH INDEXS +SELECT * FROM multi_index3 WHERE id = 1; +1 | 1 | 11.2 | A | 2021-01-02 | 1 | 1 +1 | 1 | 11.2 | A | 2021-01-02 | 1 | 1 +ID | COL1 | COL2 | COL3 | COL4 | COL5 | COL6 +SELECT * FROM multi_index3 WHERE col1 > 1 and col4 = '2021-01-02'; +ID | COL1 | COL2 | COL3 | COL4 | COL5 | COL6 +SELECT * FROM multi_index3 WHERE col1 <> 1 and col4 >= '2021-01-02'; +4 | 2 | 12.2 | E | 2022-01-04 | 13 | 10 +ID | COL1 | COL2 | COL3 | COL4 | COL5 | COL6 +SELECT * FROM multi_index3 WHERE col2 < 15.0 and col4 <> '2021-01-02'; +3 | 1 | 11.6 | H | 2023-01-02 | 10 | 17 +4 | 2 | 12.2 | E | 2022-01-04 | 13 | 10 +5 | 3 | 14.2 | D | 2020-04-02 | 12 | 2 +ID | COL1 | COL2 | COL3 | COL4 | COL5 | COL6 + +5. 
INFLUENCE OF DELETING +DELETE FROM multi_index3 WHERE id = 1; +SUCCESS +DELETE FROM multi_index3 WHERE id = 61; +SUCCESS +SELECT * FROM multi_index3; +2 | 1 | 16.2 | X | 2021-01-02 | 1 | 61 +3 | 1 | 11.6 | H | 2023-01-02 | 10 | 17 +4 | 2 | 12.2 | E | 2022-01-04 | 13 | 10 +5 | 3 | 14.2 | D | 2020-04-02 | 12 | 2 +ID | COL1 | COL2 | COL3 | COL4 | COL5 | COL6 + +DELETE FROM multi_index3 WHERE col3 = 'x'; +SUCCESS +SELECT * FROM multi_index3; +3 | 1 | 11.6 | H | 2023-01-02 | 10 | 17 +4 | 2 | 12.2 | E | 2022-01-04 | 13 | 10 +5 | 3 | 14.2 | D | 2020-04-02 | 12 | 2 +ID | COL1 | COL2 | COL3 | COL4 | COL5 | COL6 + +DELETE FROM multi_index3 WHERE id = 4 and col1 = 1; +SUCCESS +DELETE FROM multi_index3 WHERE id = 90 and col1 = 13; +SUCCESS +DELETE FROM multi_index3 WHERE id = 90 and col1 = 1; +SUCCESS +DELETE FROM multi_index3 WHERE id = 4 and col1 = 13; +SUCCESS +DELETE FROM multi_index3 WHERE id = 3 and col1 = 1; +SUCCESS +DELETE FROM multi_index3 WHERE id = 3 and col1 = 1; +SUCCESS +SELECT * FROM multi_index3; +4 | 2 | 12.2 | E | 2022-01-04 | 13 | 10 +5 | 3 | 14.2 | D | 2020-04-02 | 12 | 2 +ID | COL1 | COL2 | COL3 | COL4 | COL5 | COL6 + +INSERT INTO multi_index3 VALUES (1, 1, 11.2, 'a', '2021-01-02', 1, 1); +SUCCESS +INSERT INTO multi_index3 VALUES (2, 1, 11.2, 'x', '2021-01-02', 1, 61); +SUCCESS +INSERT INTO multi_index3 VALUES (3, 1, 11.2, 'h', '2023-01-02', 10, 17); +SUCCESS +SELECT * FROM multi_index3; +1 | 1 | 11.2 | A | 2021-01-02 | 1 | 1 +2 | 1 | 11.2 | X | 2021-01-02 | 1 | 61 +3 | 1 | 11.2 | H | 2023-01-02 | 10 | 17 +4 | 2 | 12.2 | E | 2022-01-04 | 13 | 10 +5 | 3 | 14.2 | D | 2020-04-02 | 12 | 2 +ID | COL1 | COL2 | COL3 | COL4 | COL5 | COL6 + +6. INFLUENCE OF UPDATING +UPDATE multi_index3 SET col6=49 where id=2; +SUCCESS +UPDATE multi_index3 SET col4='1999-02-01' where id=2; +SUCCESS +UPDATE multi_index3 SET col1=2 where id=2; +SUCCESS +UPDATE multi_index3 SET col1=5 where col6=49; +SUCCESS +SELECT * FROM multi_index3; +1 | 1 | 11.2 | A | 2021-01-02 | 1 | 1 +2 | 5 | 11.2 | X | 1999-02-01 | 1 | 49 +3 | 1 | 11.2 | H | 2023-01-02 | 10 | 17 +4 | 2 | 12.2 | E | 2022-01-04 | 13 | 10 +5 | 3 | 14.2 | D | 2020-04-02 | 12 | 2 +ID | COL1 | COL2 | COL3 | COL4 | COL5 | COL6 + +7. INFLUENCE OF DROPPING TABLE +DROP table multi_index; +SUCCESS + +8. ERROR +CREATE TABLE multi_index4(id int, col1 int, col2 float, col3 char, col4 date, col5 int, col6 int); +SUCCESS + +CREATE INDEX i_4_i7 ON multi_index4(id,col7); +FAILURE +CREATE INDEX i_4_78 ON multi_index4(col7,col8); +FAILURE +CREATE INDEX i_4_i78 ON multi_index4(id,col7,col8); +FAILURE diff --git a/test/case/result/primary-null.result b/test/case/result/primary-null.result new file mode 100644 index 0000000000000000000000000000000000000000..b04d9522760640bb415460272e435c1129c02409 --- /dev/null +++ b/test/case/result/primary-null.result @@ -0,0 +1,184 @@ +INITIALIZATION +CREATE TABLE null_table(id int, num int nullable, price float not null, birthday date nullable); +SUCCESS +CREATE TABLE null_table2(id int, num int nullable, price float not null, birthday date nullable); +SUCCESS +CREATE INDEX index_num on null_table(num); +SUCCESS + +1. 
INSERT +INSERT INTO null_table VALUES (1, 18, 10.0, '2020-01-01'); +SUCCESS +INSERT INTO null_table VALUES (2, null, 20.0, '2010-01-11'); +SUCCESS +INSERT INTO null_table VALUES (3, 12, 30.0, null); +SUCCESS +INSERT INTO null_table VALUES (4, 15, 30.0, '2021-01-31'); +SUCCESS +INSERT INTO null_table2 VALUES (1, 18, 30.0, '2021-01-31'); +SUCCESS +INSERT INTO null_table2 VALUES (2, null, 40.0, null); +SUCCESS + +INSERT INTO null_table VALUES (5, 15, null, '2021-01-31'); +FAILURE +INSERT INTO null_table VALUES (null, 15, 30.0, '2021-01-31'); +FAILURE + +2. SELECT +SELECT * FROM null_table; +1 | 18 | 10 | 2020-01-01 +2 | NULL | 20 | 2010-01-11 +3 | 12 | 30 | NULL +4 | 15 | 30 | 2021-01-31 +ID | NUM | PRICE | BIRTHDAY + +3. SELECT WITH CONSTANT +SELECT * FROM null_table where 1 is null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table where 1 is not null; +1 | 18 | 10 | 2020-01-01 +2 | NULL | 20 | 2010-01-11 +3 | 12 | 30 | NULL +4 | 15 | 30 | 2021-01-31 +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table where null=1; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table where 1=null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table where 1<>null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table where 1<null; +ID | NUM | PRICE | BIRTHDAY + +SELECT * FROM null_table where null is null; +1 | 18 | 10 | 2020-01-01 +2 | NULL | 20 | 2010-01-11 +3 | 12 | 30 | NULL +4 | 15 | 30 | 2021-01-31 +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table where null is not null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table WHERE null=null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table WHERE null<>null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table WHERE null>null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table WHERE null<null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table WHERE 'a'>null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table WHERE 'a'<null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table WHERE '2021-01-31' <null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table where birthday > null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table where birthday < null; +ID | NUM | PRICE | BIRTHDAY + +SELECT * FROM null_table where num is not null; +1 | 18 | 10 | 2020-01-01 +3 | 12 | 30 | NULL +4 | 15 | 30 | 2021-01-31 +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table where num is null; +2 | NULL | 20 | 2010-01-11 +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table where num = null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table where null = num; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table where num <> null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table where num > null; +ID | NUM | PRICE | BIRTHDAY +SELECT * FROM null_table where num < null; +ID | NUM | PRICE | BIRTHDAY + +SELECT null_table.num,null_table2.num,null_table.birthday FROM null_table,null_table2 where null_table.num=null_table2.num; +18 | 18 | 2020-01-01 +NULL_TABLE.NUM | NULL_TABLE2.NUM | NULL_TABLE.BIRTHDAY + +5. AGGREGATION +SELECT count(*) FROM null_table; +COUNT(*) +4 +SELECT count(price) FROM null_table; +COUNT(PRICE) +4 +SELECT count(birthday) FROM null_table; +COUNT(BIRTHDAY) +3 +SELECT avg(num) FROM null_table; +AVG(NUM) +15 + +6. 
AGGREGATION WITH NULL COLUMNS +CREATE TABLE null_table3(id int, num int nullable); +SUCCESS +INSERT INTO null_table3 VALUES (1, null); +SUCCESS +INSERT INTO null_table3 VALUES (2, null); +SUCCESS +SELECT count(num) FROM null_table3; +COUNT(NUM) +0 +SELECT min(num) FROM null_table3; +MIN(NUM) +NULL +SELECT max(num) FROM null_table3; +MAX(NUM) +NULL +SELECT avg(num) FROM null_table3; +AVG(NUM) +NULL diff --git a/test/case/result/primary-order-by.result b/test/case/result/primary-order-by.result new file mode 100644 index 0000000000000000000000000000000000000000..81bada5533ec37b4869ecaf7a82f5c3a52f2a08c --- /dev/null +++ b/test/case/result/primary-order-by.result @@ -0,0 +1,181 @@ +1. CREATE TABLE +create table t_order_by(id int, score float, name char); +SUCCESS +create table t_order_by_2(id int, age int); +SUCCESS + +2. INSERT RECORDS +insert into t_order_by values(3, 1.0, 'a'); +SUCCESS +insert into t_order_by values(1, 2.0, 'b'); +SUCCESS +insert into t_order_by values(4, 3.0, 'c'); +SUCCESS +insert into t_order_by values(3, 2.0, 'c'); +SUCCESS +insert into t_order_by values(3, 4.0, 'c'); +SUCCESS +insert into t_order_by values(3, 3.0, 'd'); +SUCCESS +insert into t_order_by values(3, 2.0, 'f'); +SUCCESS + +insert into t_order_by_2 values(1, 10); +SUCCESS +insert into t_order_by_2 values(2, 20); +SUCCESS +insert into t_order_by_2 values(3, 10); +SUCCESS +insert into t_order_by_2 values(3, 20); +SUCCESS +insert into t_order_by_2 values(3, 40); +SUCCESS +insert into t_order_by_2 values(4, 20); +SUCCESS + +3. PRIMARY ORDER BY +select * from t_order_by order by id; +1 | 2 | B +3 | 1 | A +3 | 2 | C +3 | 2 | F +3 | 3 | D +3 | 4 | C +4 | 3 | C +ID | SCORE | NAME + +select * from t_order_by order by id asc; +1 | 2 | B +3 | 1 | A +3 | 2 | C +3 | 2 | F +3 | 3 | D +3 | 4 | C +4 | 3 | C +ID | SCORE | NAME + +select * from t_order_by order by id desc; +1 | 2 | B +3 | 1 | A +3 | 2 | C +3 | 2 | F +3 | 3 | D +3 | 4 | C +4 | 3 | C +ID | SCORE | NAME + +select * from t_order_by order by score desc; +1 | 2 | B +3 | 1 | A +3 | 2 | C +3 | 2 | F +3 | 3 | D +3 | 4 | C +4 | 3 | C +ID | SCORE | NAME + +select * from t_order_by order by name desc; +1 | 2 | B +3 | 1 | A +3 | 2 | C +3 | 2 | F +3 | 3 | D +3 | 4 | C +4 | 3 | C +ID | SCORE | NAME + +4. ORDER BY MORE THAN ONE FIELDS +select * from t_order_by order by id, score, name; +ID | SCORE | NAME +1 | 2 | B +3 | 1 | A +3 | 2 | C +3 | 2 | F +3 | 3 | D +3 | 4 | C +4 | 3 | C + +select * from t_order_by order by id desc, score asc, name desc; +ID | SCORE | NAME +4 | 3 | C +3 | 1 | A +3 | 2 | F +3 | 2 | C +3 | 3 | D +3 | 4 | C +1 | 2 | B + +5. ORDER BY ASSOCIATE WITH WHERE CONDITION +select * from t_order_by where id=3 and name>='a' order by score desc, name; +ID | SCORE | NAME +3 | 4 | C +3 | 3 | D +3 | 2 | C +3 | 2 | F +3 | 1 | A + +6. 
MULTI-TABLE ORDER BY +select * from t_order_by,t_order_by_2 order by t_order_by.id,t_order_by.score,t_order_by.name,t_order_by_2.id,t_order_by_2.age; +T_ORDER_BY.ID | T_ORDER_BY.SCORE | T_ORDER_BY.NAME | T_ORDER_BY_2.ID | T_ORDER_BY_2.AGE +1 | 2 | B | 1 | 10 +1 | 2 | B | 2 | 20 +1 | 2 | B | 3 | 10 +1 | 2 | B | 3 | 20 +1 | 2 | B | 3 | 40 +1 | 2 | B | 4 | 20 +3 | 1 | A | 1 | 10 +3 | 1 | A | 2 | 20 +3 | 1 | A | 3 | 10 +3 | 1 | A | 3 | 20 +3 | 1 | A | 3 | 40 +3 | 1 | A | 4 | 20 +3 | 2 | C | 1 | 10 +3 | 2 | C | 2 | 20 +3 | 2 | C | 3 | 10 +3 | 2 | C | 3 | 20 +3 | 2 | C | 3 | 40 +3 | 2 | C | 4 | 20 +3 | 2 | F | 1 | 10 +3 | 2 | F | 2 | 20 +3 | 2 | F | 3 | 10 +3 | 2 | F | 3 | 20 +3 | 2 | F | 3 | 40 +3 | 2 | F | 4 | 20 +3 | 3 | D | 1 | 10 +3 | 3 | D | 2 | 20 +3 | 3 | D | 3 | 10 +3 | 3 | D | 3 | 20 +3 | 3 | D | 3 | 40 +3 | 3 | D | 4 | 20 +3 | 4 | C | 1 | 10 +3 | 4 | C | 2 | 20 +3 | 4 | C | 3 | 10 +3 | 4 | C | 3 | 20 +3 | 4 | C | 3 | 40 +3 | 4 | C | 4 | 20 +4 | 3 | C | 1 | 10 +4 | 3 | C | 2 | 20 +4 | 3 | C | 3 | 10 +4 | 3 | C | 3 | 20 +4 | 3 | C | 3 | 40 +4 | 3 | C | 4 | 20 + +select * from t_order_by, t_order_by_2 where t_order_by.id=t_order_by_2.id order by t_order_by.score desc, t_order_by_2.age asc, t_order_by.id asc, t_order_by.name; +T_ORDER_BY.ID | T_ORDER_BY.SCORE | T_ORDER_BY.NAME | T_ORDER_BY_2.ID | T_ORDER_BY_2.AGE +3 | 4 | C | 3 | 10 +3 | 4 | C | 3 | 20 +3 | 4 | C | 3 | 40 +3 | 3 | D | 3 | 10 +3 | 3 | D | 3 | 20 +4 | 3 | C | 4 | 20 +3 | 3 | D | 3 | 40 +1 | 2 | B | 1 | 10 +3 | 2 | C | 3 | 10 +3 | 2 | F | 3 | 10 +3 | 2 | C | 3 | 20 +3 | 2 | F | 3 | 20 +3 | 2 | C | 3 | 40 +3 | 2 | F | 3 | 40 +3 | 1 | A | 3 | 10 +3 | 1 | A | 3 | 20 +3 | 1 | A | 3 | 40 diff --git a/test/case/result/primary-select-meta.result b/test/case/result/primary-select-meta.result new file mode 100644 index 0000000000000000000000000000000000000000..ea7216767f5849eb540a9f02abf0aeffb8a3216a --- /dev/null +++ b/test/case/result/primary-select-meta.result @@ -0,0 +1,13 @@ +INITIALIZATION +CREATE TABLE Select_meta(id int, age int); +SUCCESS + +1. SELECT FROM A NON-EXISTENT TABLE +select * from no_table; +FAILURE + +2. 
SELECT FROM A NON-EXISTENT COLUMN +select home from Select_meta; +FAILURE +select * from Select_meta where home='001'; +FAILURE diff --git a/test/case/result/primary-select-tables.result b/test/case/result/primary-select-tables.result new file mode 100644 index 0000000000000000000000000000000000000000..774f70ca17c02242b4ddc5a964f108dc3ac44929 --- /dev/null +++ b/test/case/result/primary-select-tables.result @@ -0,0 +1,107 @@ +INITIALIZATION +CREATE TABLE Select_tables_1(id int, age int, u_name char); +SUCCESS +CREATE TABLE Select_tables_2(id int, age int, u_name char); +SUCCESS +CREATE TABLE Select_tables_3(id int, res int, u_name char); +SUCCESS +CREATE TABLE Select_tables_4(id int, age int, u_name char); +SUCCESS +CREATE TABLE Select_tables_5(id int, res int, u_name char); +SUCCESS + +INSERT INTO Select_tables_1 VALUES (1,18,'a'); +SUCCESS +INSERT INTO Select_tables_1 VALUES (2,15,'b'); +SUCCESS +INSERT INTO Select_tables_2 VALUES (1,20,'a'); +SUCCESS +INSERT INTO Select_tables_2 VALUES (2,21,'c'); +SUCCESS +INSERT INTO Select_tables_3 VALUES (1,35,'a'); +SUCCESS +INSERT INTO Select_tables_3 VALUES (2,37,'a'); +SUCCESS + +INSERT DATA INTO SELECT_TABLES_4 AND SELECT_TABLES_5 +INSERT INTO Select_tables_4 VALUES (1, 2, 'a'); +SUCCESS +INSERT INTO Select_tables_4 VALUES (1, 3, 'b'); +SUCCESS +INSERT INTO Select_tables_4 VALUES (2, 2, 'c'); +SUCCESS +INSERT INTO Select_tables_4 VALUES (2, 4, 'd'); +SUCCESS +INSERT INTO Select_tables_5 VALUES (1, 10, 'g'); +SUCCESS +INSERT INTO Select_tables_5 VALUES (1, 11, 'f'); +SUCCESS +INSERT INTO Select_tables_5 VALUES (2, 12, 'c'); +SUCCESS + +1. MULTI-TABLE QUERY +SELECT * FROM Select_tables_1,Select_tables_2,Select_tables_3; +1 | 18 | A | 1 | 20 | A | 1 | 35 | A +1 | 18 | A | 1 | 20 | A | 2 | 37 | A +1 | 18 | A | 2 | 21 | C | 1 | 35 | A +1 | 18 | A | 2 | 21 | C | 2 | 37 | A +2 | 15 | B | 1 | 20 | A | 1 | 35 | A +2 | 15 | B | 1 | 20 | A | 2 | 37 | A +2 | 15 | B | 2 | 21 | C | 1 | 35 | A +2 | 15 | B | 2 | 21 | C | 2 | 37 | A +SELECT_TABLES_1.ID | SELECT_TABLES_1.AGE | SELECT_TABLES_1.U_NAME | SELECT_TABLES_2.ID | SELECT_TABLES_2.AGE | SELECT_TABLES_2.U_NAME | SELECT_TABLES_3.ID | SELECT_TABLES_3.RES | SELECT_TABLES_3.U_NAME +SELECT Select_tables_1.id,Select_tables_2.u_name,Select_tables_3.res FROM Select_tables_1,Select_tables_2,Select_tables_3; +1 | A | 35 +1 | A | 37 +1 | C | 35 +1 | C | 37 +2 | A | 35 +2 | A | 37 +2 | C | 35 +2 | C | 37 +SELECT_TABLES_1.ID | SELECT_TABLES_2.U_NAME | SELECT_TABLES_3.RES +Select Select_tables_1.res FROM Select_tables_1,Select_tables_2,Select_tables_3; +FAILURE + +2. 
CONDITIONAL QUERY +SELECT * FROM Select_tables_1,Select_tables_2,Select_tables_3 WHERE Select_tables_1.u_name=Select_tables_2.u_name AND Select_tables_2.u_name=Select_tables_3.u_name; +1 | 18 | A | 1 | 20 | A | 1 | 35 | A +1 | 18 | A | 1 | 20 | A | 2 | 37 | A +SELECT_TABLES_1.ID | SELECT_TABLES_1.AGE | SELECT_TABLES_1.U_NAME | SELECT_TABLES_2.ID | SELECT_TABLES_2.AGE | SELECT_TABLES_2.U_NAME | SELECT_TABLES_3.ID | SELECT_TABLES_3.RES | SELECT_TABLES_3.U_NAME +SELECT * FROM Select_tables_1,Select_tables_2,Select_tables_3 WHERE Select_tables_1.id=Select_tables_2.id AND Select_tables_3.res=35; +1 | 18 | A | 1 | 20 | A | 1 | 35 | A +2 | 15 | B | 2 | 21 | C | 1 | 35 | A +SELECT_TABLES_1.ID | SELECT_TABLES_1.AGE | SELECT_TABLES_1.U_NAME | SELECT_TABLES_2.ID | SELECT_TABLES_2.AGE | SELECT_TABLES_2.U_NAME | SELECT_TABLES_3.ID | SELECT_TABLES_3.RES | SELECT_TABLES_3.U_NAME +SELECT * FROM Select_tables_1,Select_tables_2,Select_tables_3 WHERE Select_tables_1.age<18 AND Select_tables_2.u_name='c' AND Select_tables_3.res=35 AND Select_tables_1.id=Select_tables_2.id AND Select_tables_2.id=Select_tables_3.id; +SELECT_TABLES_1.ID | SELECT_TABLES_1.AGE | SELECT_TABLES_1.U_NAME | SELECT_TABLES_2.ID | SELECT_TABLES_2.AGE | SELECT_TABLES_2.U_NAME | SELECT_TABLES_3.ID | SELECT_TABLES_3.RES | SELECT_TABLES_3.U_NAME +SELECT Select_tables_2.age FROM Select_tables_1,Select_tables_2 WHERE Select_tables_1.age<18 AND Select_tables_2.u_name='c' AND Select_tables_1.id=Select_tables_2.id; +21 +SELECT_TABLES_2.AGE + +3. DUPLICATE KEY QUERY +SELECT * from Select_tables_4, Select_tables_5 where Select_tables_4.id=Select_tables_5.id; +1 | 2 | A | 1 | 10 | G +1 | 2 | A | 1 | 11 | F +1 | 3 | B | 1 | 10 | G +1 | 3 | B | 1 | 11 | F +2 | 2 | C | 2 | 12 | C +2 | 4 | D | 2 | 12 | C +SELECT_TABLES_4.ID | SELECT_TABLES_4.AGE | SELECT_TABLES_4.U_NAME | SELECT_TABLES_5.ID | SELECT_TABLES_5.RES | SELECT_TABLES_5.U_NAME +select * from Select_tables_4, Select_tables_5 where Select_tables_4.id >= Select_tables_5.id; +1 | 2 | A | 1 | 10 | G +1 | 2 | A | 1 | 11 | F +1 | 3 | B | 1 | 10 | G +1 | 3 | B | 1 | 11 | F +2 | 2 | C | 1 | 10 | G +2 | 2 | C | 1 | 11 | F +2 | 2 | C | 2 | 12 | C +2 | 4 | D | 1 | 10 | G +2 | 4 | D | 1 | 11 | F +2 | 4 | D | 2 | 12 | C +SELECT_TABLES_4.ID | SELECT_TABLES_4.AGE | SELECT_TABLES_4.U_NAME | SELECT_TABLES_5.ID | SELECT_TABLES_5.RES | SELECT_TABLES_5.U_NAME + +4. JOIN EMPTY TABLE +CREATE TABLE Select_tables_6(id int, res int); +SUCCESS +SELECT Select_tables_1.id,Select_tables_6.id from Select_tables_1, Select_tables_6 where Select_tables_1.id=Select_tables_6.id; +SELECT_TABLES_1.ID | SELECT_TABLES_6.ID diff --git a/test/case/result/primary-simple-sub-query.result b/test/case/result/primary-simple-sub-query.result new file mode 100644 index 0000000000000000000000000000000000000000..0ffc1cfc47bdfbc144b01ab7ea7b6ed940fdc44e --- /dev/null +++ b/test/case/result/primary-simple-sub-query.result @@ -0,0 +1,109 @@ +INITIALIZATION +CREATE TABLE ssq_1(id int, col1 int, feat1 float); +SUCCESS +CREATE TABLE ssq_2(id int, col2 int, feat2 float); +SUCCESS +CREATE TABLE ssq_3(id int, col3 int, feat3 float); +SUCCESS + +INSERT INTO ssq_1 VALUES (1, 4, 11.2); +SUCCESS +INSERT INTO ssq_1 VALUES (2, 2, 12.0); +SUCCESS +INSERT INTO ssq_1 VALUES (3, 3, 13.5); +SUCCESS +INSERT INTO ssq_2 VALUES (1, 2, 13.0); +SUCCESS +INSERT INTO ssq_2 VALUES (2, 7, 10.5); +SUCCESS +INSERT INTO ssq_2 VALUES (5, 3, 12.6); +SUCCESS + +1. 
SELECT +select * from ssq_1 where id in (select ssq_2.id from ssq_2); +1 | 4 | 11.2 +2 | 2 | 12 +ID | COL1 | FEAT1 +select * from ssq_1 where col1 not in (select ssq_2.col2 from ssq_2); +1 | 4 | 11.2 +ID | COL1 | FEAT1 + +select * from ssq_1 where col1 = (select avg(ssq_2.col2) from ssq_2); +1 | 4 | 11.2 +ID | COL1 | FEAT1 +select * from ssq_1 where (select avg(ssq_2.col2) from ssq_2) = col1; +1 | 4 | 11.2 +ID | COL1 | FEAT1 + +select * from ssq_1 where feat1 >= (select min(ssq_2.feat2) from ssq_2); +1 | 4 | 11.2 +2 | 2 | 12 +3 | 3 | 13.5 +ID | COL1 | FEAT1 +select * from ssq_1 where (select min(ssq_2.feat2) from ssq_2) <= feat1; +1 | 4 | 11.2 +2 | 2 | 12 +3 | 3 | 13.5 +ID | COL1 | FEAT1 + +select * from ssq_1 where feat1 <= (select max(ssq_2.feat2) from ssq_2); +1 | 4 | 11.2 +2 | 2 | 12 +ID | COL1 | FEAT1 +select * from ssq_1 where (select max(ssq_2.feat2) from ssq_2) >= feat1; +1 | 4 | 11.2 +2 | 2 | 12 +ID | COL1 | FEAT1 + +select * from ssq_1 where feat1 > (select min(ssq_2.feat2) from ssq_2); +1 | 4 | 11.2 +2 | 2 | 12 +3 | 3 | 13.5 +ID | COL1 | FEAT1 +select * from ssq_1 where (select min(ssq_2.feat2) from ssq_2) < feat1; +1 | 4 | 11.2 +2 | 2 | 12 +3 | 3 | 13.5 +ID | COL1 | FEAT1 + +select * from ssq_1 where feat1 < (select max(ssq_2.feat2) from ssq_2); +1 | 4 | 11.2 +2 | 2 | 12 +ID | COL1 | FEAT1 +select * from ssq_1 where (select max(ssq_2.feat2) from ssq_2) > feat1; +1 | 4 | 11.2 +2 | 2 | 12 +ID | COL1 | FEAT1 + +select * from ssq_1 where feat1 <> (select avg(ssq_2.feat2) from ssq_2); +1 | 4 | 11.2 +2 | 2 | 12 +3 | 3 | 13.5 +ID | COL1 | FEAT1 + +2. SELECT WITH EMPTY TABLE +select * from ssq_1 where feat1 < (select max(ssq_2.feat2) from ssq_2 where 1=0); +ID | COL1 | FEAT1 +select * from ssq_1 where id in (select ssq_2.id from ssq_2 where 1=0); +ID | COL1 | FEAT1 +select * from ssq_1 where id not in (select ssq_2.id from ssq_2 where 1=0); +1 | 4 | 11.2 +2 | 2 | 12 +3 | 3 | 13.5 +ID | COL1 | FEAT1 +select * from ssq_3 where feat3 < (select max(ssq_2.feat2) from ssq_2); +ID | COL3 | FEAT3 +select * from ssq_3 where id in (select ssq_2.id from ssq_2); +ID | COL3 | FEAT3 +select * from ssq_3 where id not in (select ssq_2.id from ssq_2); +ID | COL3 | FEAT3 + +3. ERROR +select * from ssq_1 where col1 = (select ssq_2.col2 from ssq_2); +FAILURE +select * from ssq_1 where col1 = (select * from ssq_2); +FAILURE +select * from ssq_1 where col1 in (select * from ssq_2); +FAILURE +select * from ssq_1 where col1 not in (select * from ssq_2); +FAILURE diff --git a/test/case/result/primary-text.result b/test/case/result/primary-text.result new file mode 100644 index 0000000000000000000000000000000000000000..5b31e52918ccda20a3b87cfbd1902c8baec3212f --- /dev/null +++ b/test/case/result/primary-text.result @@ -0,0 +1,50 @@ +INITIALIZATION +create table text_table(id int, info text); +SUCCESS + +1. INSERT +insert into text_table values (1,'this is a very very long string'); +SUCCESS +insert into text_table values (2,'this is a very very long string2'); +SUCCESS +insert into text_table values (3,'this is a very very long string3'); +SUCCESS +select * from text_table; +1 | THIS IS A VERY VERY LONG STRING +2 | THIS IS A VERY VERY LONG STRING2 +3 | THIS IS A VERY VERY LONG STRING3 +ID | INFO + +2. CONDITION +delete from text_table where id=1; +SUCCESS +select * from text_table; +2 | THIS IS A VERY VERY LONG STRING2 +3 | THIS IS A VERY VERY LONG STRING3 +ID | INFO + +3. 
UPDATE +UPDATE text_table set info='a tmp data' where id = 2; +SUCCESS +select * from text_table; +2 | A TMP DATA +3 | THIS IS A VERY VERY LONG STRING3 +ID | INFO + +4. BOUNDARY TEST WITH LENGTH 4096 +insert into text_table values (4,'this is a very very long string pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad 
pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad1'); +SUCCESS +select * from text_table; +2 | A TMP DATA +3 | THIS IS A VERY VERY LONG STRING3 +4 | THIS IS A VERY VERY LONG STRING PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD 
PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD1 +ID | INFO + +insert into text_table values (5,'this is a very very long string pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad 
pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad1 pad pad pad pad'); +SUCCESS +select * from text_table; +2 | A TMP DATA +3 | THIS IS A VERY VERY LONG STRING3 +4 | THIS IS A VERY VERY LONG STRING PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD 
PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD1 +5 | THIS IS A VERY VERY LONG STRING PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD 
PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD PAD1 +ID | INFO diff --git a/test/case/result/primary-unique.result b/test/case/result/primary-unique.result new file mode 100644 index 0000000000000000000000000000000000000000..54a36709af07be45a4d64e4f37e51f0542a7fa5c --- /dev/null +++ b/test/case/result/primary-unique.result @@ -0,0 +1,24 @@ +INITIALIZATION +CREATE TABLE unique_table(id int, 
col1 int, col2 int); +SUCCESS +INSERT INTO unique_table VALUES (1,1,1); +SUCCESS + +1. UNIQUE TEST +CREATE UNIQUE INDEX index_id on unique_table(id); +SUCCESS +INSERT INTO unique_table VALUES (2,1,1); +SUCCESS +CREATE UNIQUE INDEX index_id on unique_table(id); +FAILURE +INSERT INTO unique_table VALUES (3,2,1); +SUCCESS +INSERT INTO unique_table VALUES (1,2,1); +FAILURE + +2. SELECT +SELECT * FROM unique_table; +1 | 1 | 1 +2 | 1 | 1 +3 | 2 | 1 +ID | COL1 | COL2 diff --git a/test/case/result/primary-update.result b/test/case/result/primary-update.result new file mode 100644 index 0000000000000000000000000000000000000000..86b2069cc3178bc2588dde946ae0f2999ef4a5f9 --- /dev/null +++ b/test/case/result/primary-update.result @@ -0,0 +1,81 @@ +INITIALIZATION +CREATE TABLE Update_table_1(id int, t_name char, col1 int, col2 int); +SUCCESS +CREATE INDEX index_id on Update_table_1(id); +SUCCESS +INSERT INTO Update_table_1 VALUES (1,'N1',1,1); +SUCCESS +INSERT INTO Update_table_1 VALUES (2,'N2',1,1); +SUCCESS +INSERT INTO Update_table_1 VALUES (3,'N3',2,1); +SUCCESS + +1. UPDATE A ROW +UPDATE Update_table_1 SET t_name='N01' WHERE id=1; +SUCCESS +SELECT * FROM Update_table_1; +1 | N01 | 1 | 1 +2 | N2 | 1 | 1 +3 | N3 | 2 | 1 +ID | T_NAME | COL1 | COL2 + +2. UPDATE ROWS +UPDATE Update_table_1 SET col2=0 WHERE col1=1; +SUCCESS +SELECT * FROM Update_table_1; +1 | N01 | 1 | 0 +2 | N2 | 1 | 0 +3 | N3 | 2 | 1 +ID | T_NAME | COL1 | COL2 + +3. UPDATE A INDEX COLUMN +UPDATE Update_table_1 SET id=4 WHERE t_name='N3'; +SUCCESS +SELECT * FROM Update_table_1; +1 | N01 | 1 | 0 +2 | N2 | 1 | 0 +4 | N3 | 2 | 1 +ID | T_NAME | COL1 | COL2 + +4. UPDATE WITHOUT CONDITIONS +UPDATE Update_table_1 SET col1=0; +SUCCESS +SELECT * FROM Update_table_1; +1 | N01 | 0 | 0 +2 | N2 | 0 | 0 +4 | N3 | 0 | 1 +ID | T_NAME | COL1 | COL2 + +5. UPDATE WITH CONDITIONS +UPDATE Update_table_1 SET t_name='N02' WHERE col1=0 AND col2=0; +SUCCESS +SELECT * FROM Update_table_1; +1 | N02 | 0 | 0 +2 | N02 | 0 | 0 +4 | N3 | 0 | 1 +ID | T_NAME | COL1 | COL2 + +6. UPDATE NON-EXISTENT TABLE +UPDATE Update_table_2 SET t_name='N01' WHERE id=1; +FAILURE + +7. UPDATE NON-EXISTENT COLUMN +UPDATE Update_table_1 SET t_name_false='N01' WHERE id=1; +FAILURE + +8. UPDATE WITH INVALID CONDITION +UPDATE Update_table_1 SET t_name='N01' WHERE id_false=1; +FAILURE + +9. UPDATE IN VAIN +UPDATE Update_table_1 SET t_name='N01' WHERE id=100; +SUCCESS +SELECT * FROM Update_table_1; +1 | N02 | 0 | 0 +2 | N02 | 0 | 0 +4 | N3 | 0 | 1 +ID | T_NAME | COL1 | COL2 + +10. 
UPDATE WITH INVALID VALUE +UPDATE Update_table_1 SET col1='N01' WHERE id=1; +FAILURE diff --git a/test/case/test/.primary-update.test.swp b/test/case/test/.primary-update.test.swp new file mode 100644 index 0000000000000000000000000000000000000000..6389a1fe50e046fe117309b6cbabee8f76919a25 Binary files /dev/null and b/test/case/test/.primary-update.test.swp differ diff --git a/test/case/test/basic.test b/test/case/test/basic.test new file mode 100644 index 0000000000000000000000000000000000000000..157d9e399e58e32e5fb2aa1e3077dad807b20678 --- /dev/null +++ b/test/case/test/basic.test @@ -0,0 +1,37 @@ +-- echo basic insert + +create table t_basic(id int, age int, name char, score float); +insert into t_basic values(1,1, 'a', 1.0); +insert into t_basic values(2,2, 'b', 2.0); +insert into t_basic values(4,4, 'c', 3.0); +insert into t_basic values(3,3, 'd', 4.0); +insert into t_basic values(5,5, 'e', 5.5); +insert into t_basic values(6,6, 'f', 6.6); +insert into t_basic values(7,7, 'g', 7.7); + +--sort select * from t_basic; + +-- echo basic delete +delete from t_basic where id=3; +-- sort select * from t_basic; + +-- echo basic select +select * from t_basic where id=1; + +-- sort select * from t_basic where id>=5; + +select * from t_basic where age>1 and age<3; + +select * from t_basic where t_basic.id=1 and t_basic.age=1; + +select * from t_basic where id=1 and age=1; + +-- sort select id, age, name, score from t_basic; + +-- sort select t_basic.id, t_basic.age, t_basic.name, t_basic.score from t_basic; + +-- sort select t_basic.id, t_basic.age, name from t_basic; + +-- echo create index +create index i_id on t_basic (id); +-- sort select * from t_basic; diff --git a/test/case/test/primary-aggregation-func.test b/test/case/test/primary-aggregation-func.test new file mode 100644 index 0000000000000000000000000000000000000000..478c3136e8e723eadb2f86b898efa3f5f7ee70c5 --- /dev/null +++ b/test/case/test/primary-aggregation-func.test @@ -0,0 +1,57 @@ +-- echo initialization +CREATE TABLE aggregation_func(id int, num int, price float, addr char, birthday date); + +INSERT INTO aggregation_func VALUES (1, 18, 10.0, 'abc', '2020-01-01'); +INSERT INTO aggregation_func VALUES (2, 15, 20.0, 'abc', '2010-01-11'); +INSERT INTO aggregation_func VALUES (3, 12, 30.0, 'def', '2021-01-21'); +INSERT INTO aggregation_func VALUES (4, 15, 30.0, 'dei', '2021-01-31'); + +-- echo 1. count +SELECT count(*) FROM aggregation_func; + +SELECT count(num) FROM aggregation_func; + +-- echo 2. min +SELECT min(num) FROM aggregation_func; + +SELECT min(price) FROM aggregation_func; + +SELECT min(addr) FROM aggregation_func; + +-- echo 3. max +SELECT max(num) FROM aggregation_func; + +SELECT max(price) FROM aggregation_func; + +SELECT max(addr) FROM aggregation_func; + +-- echo 4. avg +SELECT avg(num) FROM aggregation_func; + +SELECT avg(price) FROM aggregation_func; + +-- echo 5. error with * +SELECT min(*) FROM aggregation_func; +SELECT max(*) FROM aggregation_func; +SELECT avg(*) FROM aggregation_func; + +-- echo 6. error with redundant columns +SELECT count(*,num) FROM aggregation_func; +SELECT min(num,price) FROM aggregation_func; +SELECT max(num,price) FROM aggregation_func; +SELECT avg(num,price) FROM aggregation_func; + +-- echo 7. error with empty columns +SELECT count() FROM aggregation_func; +SELECT min() FROM aggregation_func; +SELECT max() FROM aggregation_func; +SELECT avg() FROM aggregation_func; + +-- echo 8. 
error with non-existent columns +SELECT count(id2) FROM aggregation_func; +SELECT min(id2) FROM aggregation_func; +SELECT max(id2) FROM aggregation_func; +SELECT avg(id2) FROM aggregation_func; + +-- echo 9. select many aggregation +SELECT min(num),max(num),avg(num) FROM aggregation_func; diff --git a/test/case/test/primary-complex-sub-query.test b/test/case/test/primary-complex-sub-query.test new file mode 100644 index 0000000000000000000000000000000000000000..e5ef032b8eee651f2680254cee6b48d08fd8f62f --- /dev/null +++ b/test/case/test/primary-complex-sub-query.test @@ -0,0 +1,54 @@ +-- echo initialization +CREATE TABLE csq_1(id int, col1 int, feat1 float); +CREATE TABLE csq_2(id int, col2 int, feat2 float); +CREATE TABLE csq_3(id int, col3 int, feat3 float); +CREATE TABLE csq_4(id int, col4 int, feat4 float); + +INSERT INTO csq_1 VALUES (1, 4, 11.2); +INSERT INTO csq_1 VALUES (2, 2, 12.0); +INSERT INTO csq_1 VALUES (3, 3, 13.5); +INSERT INTO csq_2 VALUES (1, 2, 13.0); +INSERT INTO csq_2 VALUES (2, 7, 10.5); +INSERT INTO csq_2 VALUES (5, 3, 12.6); +INSERT INTO csq_3 VALUES (1, 2, 11.0); +INSERT INTO csq_3 VALUES (3, 6, 16.5); +INSERT INTO csq_3 VALUES (5, 5, 14.6); + +-- echo 1. Select +-- sort select * from csq_1 where id in (select csq_2.id from csq_2 where csq_2.id in (select csq_3.id from csq_3)); + +-- sort select * from csq_1 where id in (select csq_2.id from csq_2 where csq_2.id not in (select csq_3.id from csq_3)); + +-- sort select * from csq_1 where col1 not in (select csq_2.col2 from csq_2 where csq_2.id not in (select csq_3.id from csq_3)); + +-- sort select * from csq_1 where col1 not in (select csq_2.col2 from csq_2 where csq_2.id in (select csq_3.id from csq_3)); + +-- sort select * from csq_1 where col1 > (select avg(csq_2.col2) from csq_2 where csq_2.feat2 >= (select min(csq_3.feat3) from csq_3)); + +-- sort select * from csq_1 where (select avg(csq_2.col2) from csq_2 where csq_2.feat2 > (select min(csq_3.feat3) from csq_3)) = col1; + +-- sort select * from csq_1 where (select avg(csq_2.col2) from csq_2) <> (select avg(csq_3.col3) from csq_3); + +-- sort select * from csq_1 where feat1 > (select min(csq_2.feat2) from csq_2) and col1 <= (select min(csq_3.col3) from csq_3); + +-- sort select * from csq_1 where (select max(csq_2.feat2) from csq_2) > feat1 and col1 > (select min(csq_3.col3) from csq_3); + +-- sort select * from csq_1 where (select max(csq_2.feat2) from csq_2) > feat1 and (select min(csq_3.col3) from csq_3) < col1; + +-- sort select * from csq_1 where feat1 <> (select avg(csq_2.feat2) from csq_2 where csq_2.feat2 > csq_1.feat1); + +-- sort select * from csq_1 where col1 not in (select csq_2.col2 from csq_2 where csq_2.id in (select csq_3.id from csq_3 where csq_1.id = csq_3.id)); + +-- echo 2. 
Select with empty table +-- sort select * from csq_1 where id in (select csq_2.id from csq_2 where csq_2.id in (select csq_3.id from csq_3 where 1=0)); +-- sort select * from csq_1 where id in (select csq_2.id from csq_2 where csq_2.id in (select csq_3.id from csq_3 where 1=0) and 1=0); +-- sort select * from csq_1 where col1 not in (select csq_2.col2 from csq_2 where csq_2.id not in (select csq_3.id from csq_3 where 1=0)); +-- sort select * from csq_1 where col1 not in (select csq_2.col2 from csq_2 where csq_2.id not in (select csq_3.id from csq_3) and 1=0); +-- sort select * from csq_3 where feat3 < (select max(csq_2.feat2) from csq_2 where csq_2.id not in (select csq_3.id from csq_3 where 1=0)); +-- sort select * from csq_3 where feat3 < (select max(csq_2.feat2) from csq_2 where csq_2.id not in (select csq_3.id from csq_3 ) and 1=0); + +--echo 3. error +select * from csq_1 where col1 = (select csq_2.col2 from csq_2); +select * from csq_1 where col1 = (select * from csq_2); +select * from csq_1 where col1 in (select * from csq_2); +select * from csq_1 where col1 not in (select * from csq_2); diff --git a/test/case/test/primary-date.test b/test/case/test/primary-date.test new file mode 100644 index 0000000000000000000000000000000000000000..5a6c1a0c17c45ae71ea947c292c83fe58dd87fb3 --- /dev/null +++ b/test/case/test/primary-date.test @@ -0,0 +1,34 @@ +-- echo initialization +CREATE TABLE date_table(id int, u_date date); +CREATE INDEX index_id on date_table(u_date); + +-- echo 1. insert normal date data +INSERT INTO date_table VALUES (1,'2020-01-21'); +INSERT INTO date_table VALUES (2,'2020-10-21'); +INSERT INTO date_table VALUES (3,'2020-1-01'); +INSERT INTO date_table VALUES (4,'2020-01-1'); +INSERT INTO date_table VALUES (5,'2019-12-21'); +INSERT INTO date_table VALUES (6,'2016-2-29'); +INSERT INTO date_table VALUES (7,'1970-1-1'); +INSERT INTO date_table VALUES (8,'2000-01-01'); +INSERT INTO date_table VALUES (9,'2038-1-19'); + +-- echo 2. compare date data +-- sort SELECT * FROM date_table WHERE u_date>'2020-1-20'; +-- sort SELECT * FROM date_table WHERE u_date<'2019-12-31'; +-- sort SELECT * FROM date_table WHERE u_date='2020-1-1'; + +-- echo 3. delete data +DELETE FROM date_table WHERE u_date>'2012-2-29'; +-- sort SELECT * FROM date_table; + +-- echo 4. check invalid date data +SELECT * FROM date_table WHERE u_date='2017-2-29'; +SELECT * FROM date_table WHERE u_date='2017-21-29'; +SELECT * FROM date_table WHERE u_date='2017-12-32'; +SELECT * FROM date_table WHERE u_date='2017-11-31'; + +INSERT INTO date_table VALUES (10,'2017-2-29'); +INSERT INTO date_table VALUES (11,'2017-21-29'); +INSERT INTO date_table VALUES (12,'2017-12-32'); +INSERT INTO date_table VALUES (13,'2017-11-31'); diff --git a/test/case/test/primary-drop-table.test b/test/case/test/primary-drop-table.test new file mode 100644 index 0000000000000000000000000000000000000000..8d15a47a96b163f7d0f7d4a86d9c38ac8291ad79 --- /dev/null +++ b/test/case/test/primary-drop-table.test @@ -0,0 +1,39 @@ +-- echo 1. Drop empty table +CREATE TABLE Drop_table_1(id int, t_name char); +DROP TABLE Drop_table_1; + +-- echo 2. Drop non-empty table +CREATE TABLE Drop_table_2(id int, t_name char); +INSERT INTO Drop_table_2 VALUES (1,'OB'); +DROP TABLE Drop_table_2; + +-- echo 3. 
Check the accuracy of dropping table +CREATE TABLE Drop_table_3(id int, t_name char); +INSERT INTO Drop_table_3 VALUES (1,'OB'); +-- sort SELECT * FROM Drop_table_3; +DROP TABLE Drop_table_3; +INSERT INTO Drop_table_3 VALUES (1,'OB'); +SELECT * FROM Drop_table_3; +DELETE FROM Drop_table_3 WHERE id = 3; +CREATE TABLE Drop_table_3(id int, t_name char); +-- sort SELECT * FROM Drop_table_3; + +-- echo 4. Drop non-existent table +CREATE TABLE Drop_table_4(id int, t_name char); +DROP TABLE Drop_table_4; +DROP TABLE Drop_table_4; +DROP TABLE Drop_table_4_1; + +-- echo 5. Create a table which has dropped +CREATE TABLE Drop_table_5(id int, t_name char); +DROP TABLE Drop_table_5; +CREATE TABLE Drop_table_5(id int, t_name char); +SELECT * FROM Drop_table_5; + +-- echo 6. Drop a table with index +CREATE TABLE Drop_table_6(id int, t_name char); +CREATE INDEX index_id on Drop_table_6(id); +INSERT INTO Drop_table_6 VALUES (1,'OB'); +-- sort SELECT * FROM Drop_table_6; +DROP TABLE Drop_table_6; +SELECT * FROM Drop_table_6; diff --git a/test/case/test/primary-expression.test b/test/case/test/primary-expression.test new file mode 100644 index 0000000000000000000000000000000000000000..d19bab4aa5d3018caf59cccef45bbd0310796740 --- /dev/null +++ b/test/case/test/primary-expression.test @@ -0,0 +1,33 @@ +-- echo initialization +create table exp_table(id int, col1 int, col2 int, col3 float, col4 float); +insert into exp_table VALUES (1, 1, 1, 1.0, 1.5); +insert into exp_table VALUES (2, 2, -2, 5.5, 1.0); +insert into exp_table VALUES (3, 3, 4, 5.0, 4.0); + +-- echo 1. select +-- sort select * from exp_table where 1 = 5/4; +-- sort select * from exp_table where col1-2 > 0; +-- sort select * from exp_table where 2+col2 < 1; +-- sort select * from exp_table where col1*col2 < 0; + +-- sort select * from exp_table where 5/4 = 1; +-- sort select * from exp_table where 0 < col1-2; +-- sort select * from exp_table where 1.0 > 2+col2; +-- sort select * from exp_table where -0 < col1-col2; +-- sort select * from exp_table where 0 < -2+col1; + +-- sort select * from exp_table where 1+1 = 2*1.0; +-- sort select * from exp_table where 5/4*8 < 4+col2*col3/2; +-- sort select * from exp_table where 5/4*8 < (4+col2)*col3/2; + +-- sort select id,-(col2*(-1)+1)+(col4+2)*(col1+col3*2),(4+col2)*col3/2 from exp_table where -(col2*(-1)+1)+(col4+2)*(col1+col3*2) > (4+col2)*col3/2; +-- sort select id,col1,col2,col3,col4,6-(col2*(1+col1))+(col4+2)/(1+col1*4+col3*2) from exp_table where 6-(col2*(1+col1))+(col4+2)/(1+col1*4+col3*2) > 5; +-- sort select id,col1,col2,col3,col4,3*col1/(col2+2) from exp_table where 3*col1/(col2+2) > 1; +-- sort select id,3*col1/(col2+2) from exp_table where 3*col1/(col2+2)+1/0 > 1; +-- sort select * from exp_table where 1/0 = 1/0; + +-- echo 2. expression about many tables +create table exp_table2(id int, col1 int); +insert into exp_table2 VALUES (1, 1); +insert into exp_table2 VALUES (2, 3); +-- sort select exp_table.id,3*exp_table2.col1/(exp_table.col2+2) from exp_table,exp_table2 where 3*exp_table2.col1/(exp_table.col2+2)>1; \ No newline at end of file diff --git a/test/case/test/primary-group-by.test b/test/case/test/primary-group-by.test new file mode 100644 index 0000000000000000000000000000000000000000..01836179209e5a194b2bef6c36d1fd68cdc30899 --- /dev/null +++ b/test/case/test/primary-group-by.test @@ -0,0 +1,34 @@ +-- echo 1. create table +create table t_group_by (id int, score float, name char); +create table t_group_by_2 (id int, age int); + +-- echo 2. 
insert records +insert into t_group_by values(3, 1.0, 'a'); +insert into t_group_by values(1, 2.0, 'b'); +insert into t_group_by values(4, 3.0, 'c'); +insert into t_group_by values(3, 2.0, 'c'); +insert into t_group_by values(3, 4.0, 'c'); +insert into t_group_by values(3, 3.0, 'd'); +insert into t_group_by values(3, 2.0, 'f'); + +insert into t_group_by_2 values(1, 10); +insert into t_group_by_2 values(2, 20); +insert into t_group_by_2 values(3, 10); +insert into t_group_by_2 values(3, 20); +insert into t_group_by_2 values(3, 40); +insert into t_group_by_2 values(4, 20); + +-- echo 3. primary group by +-- sort select id, avg(score) from t_group_by group by id; + +-- sort select name, min(id), max(score) from t_group_by group by name; + +-- sort select id, name, avg(score) from t_group_by group by id, name; + +-- echo 4. with where condition +-- sort select id, avg(score) from t_group_by where id>2 group by id; + +-- sort select name, count(id), max(score) from t_group_by where name > 'a' and id>=0 group by name; + +-- echo 5. multi table +-- sort select t_group_by.id, t_group_by.name, avg(t_group_by.score), avg(t_group_by_2.age) from t_group_by, t_group_by_2 where t_group_by.id=t_group_by_2.id group by t_group_by.id, t_group_by.name; \ No newline at end of file diff --git a/test/case/test/primary-insert.test b/test/case/test/primary-insert.test new file mode 100644 index 0000000000000000000000000000000000000000..429afc87bef0396f0417d80ca1fee0e7b43053fa --- /dev/null +++ b/test/case/test/primary-insert.test @@ -0,0 +1,13 @@ +-- echo initialization +CREATE TABLE insert_table(id int, t_name char, col1 int, col2 int); + +-- echo 1. insert +INSERT INTO insert_table VALUES (1,'N1',1,1); +INSERT INTO insert_table VALUES (2,'N2',1,1),(3,'N3',2,1); + +-- echo 2. error +INSERT INTO insert_table VALUES (4,'N4',1,1),(1,1,1); +INSERT INTO insert_table VALUES (4,'N4',1,1),(1,1,1,1); + +-- echo 3. select +-- sort SELECT * FROM insert_table; \ No newline at end of file diff --git a/test/case/test/primary-join-tables.test b/test/case/test/primary-join-tables.test new file mode 100644 index 0000000000000000000000000000000000000000..07b83d5c17c727a00a92c1fdd5157249601197fa --- /dev/null +++ b/test/case/test/primary-join-tables.test @@ -0,0 +1,646 @@ +-- echo initialization +CREATE TABLE join_table_1(id int, name char); +CREATE TABLE join_table_2(id int, num int); +CREATE TABLE join_table_3(id int, num2 int); +create table join_table_empty_1(id int, num_empty_1 int); +create table join_table_empty_2(id int, num_empty_2 int); + +INSERT INTO join_table_1 VALUES (1, 'a'); +INSERT INTO join_table_1 VALUES (2, 'b'); +INSERT INTO join_table_1 VALUES (3, 'c'); +INSERT INTO join_table_2 VALUES (1, 2); +INSERT INTO join_table_2 VALUES (2, 15); +INSERT INTO join_table_3 VALUES (1, 120); +INSERT INTO join_table_3 VALUES (3, 800); + +-- echo 1. 
Select +-- sort Select * from join_table_1 inner join join_table_2 on join_table_1.id=join_table_2.id; +-- sort Select join_table_1.name from join_table_1 inner join join_table_2 on join_table_1.id=join_table_2.id; +-- sort Select join_table_2.num from join_table_1 inner join join_table_2 on join_table_1.id=join_table_2.id; +-- sort Select * from join_table_1 inner join join_table_2 on join_table_1.id=join_table_2.id inner join join_table_3 on join_table_1.id=join_table_3.id; +-- sort Select * from join_table_1 inner join join_table_2 on join_table_1.id=join_table_2.id and join_table_2.num>13 where join_table_1.name='b'; +-- sort Select * from join_table_1 inner join join_table_2 on join_table_1.id=join_table_2.id and join_table_2.num>13 where join_table_1.name='a'; +-- sort Select * from join_table_1 inner join join_table_2 on join_table_1.id=join_table_2.id and join_table_2.num>23 where join_table_1.name='b'; + +-- echo 3. empty +select * from join_table_1 inner join join_table_empty_1 on join_table_1.id=join_table_empty_1.id; +select * from join_table_empty_1 inner join join_table_1 on join_table_empty_1.id=join_table_1.id; +select * from join_table_empty_1 inner join join_table_empty_2 on join_table_empty_1.id = join_table_empty_2.id; +select * from join_table_1 inner join join_table_2 on join_table_1.id = join_table_2.id inner join join_table_empty_1 on join_table_1.id=join_table_empty_1.id; +select * from join_table_empty_1 inner join join_table_1 on join_table_empty_1.id=join_table_1.id inner join join_table_2 on join_table_1.id=join_table_2.id; + +-- echo 4. very large join +create table join_table_large_1(id int, num1 int); +create table join_table_large_2(id int, num2 int); +create table join_table_large_3(id int, num3 int); +create table join_table_large_4(id int, num4 int); +create table join_table_large_5(id int, num5 int); +create table join_table_large_6(id int, num6 int); + +insert into join_table_large_1 values(1, 1); +insert into join_table_large_1 values(2, 2); +insert into join_table_large_1 values(3, 3); +insert into join_table_large_1 values(4, 4); +insert into join_table_large_1 values(5, 5); +insert into join_table_large_1 values(6, 6); +insert into join_table_large_1 values(7, 7); +insert into join_table_large_1 values(8, 8); +insert into join_table_large_1 values(9, 9); +insert into join_table_large_1 values(10, 10); +insert into join_table_large_1 values(11, 11); +insert into join_table_large_1 values(12, 12); +insert into join_table_large_1 values(13, 13); +insert into join_table_large_1 values(14, 14); +insert into join_table_large_1 values(15, 15); +insert into join_table_large_1 values(16, 16); +insert into join_table_large_1 values(17, 17); +insert into join_table_large_1 values(18, 18); +insert into join_table_large_1 values(19, 19); +insert into join_table_large_1 values(20, 20); +insert into join_table_large_1 values(21, 21); +insert into join_table_large_1 values(22, 22); +insert into join_table_large_1 values(23, 23); +insert into join_table_large_1 values(24, 24); +insert into join_table_large_1 values(25, 25); +insert into join_table_large_1 values(26, 26); +insert into join_table_large_1 values(27, 27); +insert into join_table_large_1 values(28, 28); +insert into join_table_large_1 values(29, 29); +insert into join_table_large_1 values(30, 30); +insert into join_table_large_1 values(31, 31); +insert into join_table_large_1 values(32, 32); +insert into join_table_large_1 values(33, 33); +insert into join_table_large_1 values(34, 34); +insert into 
join_table_large_1 values(35, 35); +insert into join_table_large_1 values(36, 36); +insert into join_table_large_1 values(37, 37); +insert into join_table_large_1 values(38, 38); +insert into join_table_large_1 values(39, 39); +insert into join_table_large_1 values(40, 40); +insert into join_table_large_1 values(41, 41); +insert into join_table_large_1 values(42, 42); +insert into join_table_large_1 values(43, 43); +insert into join_table_large_1 values(44, 44); +insert into join_table_large_1 values(45, 45); +insert into join_table_large_1 values(46, 46); +insert into join_table_large_1 values(47, 47); +insert into join_table_large_1 values(48, 48); +insert into join_table_large_1 values(49, 49); +insert into join_table_large_1 values(50, 50); +insert into join_table_large_1 values(51, 51); +insert into join_table_large_1 values(52, 52); +insert into join_table_large_1 values(53, 53); +insert into join_table_large_1 values(54, 54); +insert into join_table_large_1 values(55, 55); +insert into join_table_large_1 values(56, 56); +insert into join_table_large_1 values(57, 57); +insert into join_table_large_1 values(58, 58); +insert into join_table_large_1 values(59, 59); +insert into join_table_large_1 values(60, 60); +insert into join_table_large_1 values(61, 61); +insert into join_table_large_1 values(62, 62); +insert into join_table_large_1 values(63, 63); +insert into join_table_large_1 values(64, 64); +insert into join_table_large_1 values(65, 65); +insert into join_table_large_1 values(66, 66); +insert into join_table_large_1 values(67, 67); +insert into join_table_large_1 values(68, 68); +insert into join_table_large_1 values(69, 69); +insert into join_table_large_1 values(70, 70); +insert into join_table_large_1 values(71, 71); +insert into join_table_large_1 values(72, 72); +insert into join_table_large_1 values(73, 73); +insert into join_table_large_1 values(74, 74); +insert into join_table_large_1 values(75, 75); +insert into join_table_large_1 values(76, 76); +insert into join_table_large_1 values(77, 77); +insert into join_table_large_1 values(78, 78); +insert into join_table_large_1 values(79, 79); +insert into join_table_large_1 values(80, 80); +insert into join_table_large_1 values(81, 81); +insert into join_table_large_1 values(82, 82); +insert into join_table_large_1 values(83, 83); +insert into join_table_large_1 values(84, 84); +insert into join_table_large_1 values(85, 85); +insert into join_table_large_1 values(86, 86); +insert into join_table_large_1 values(87, 87); +insert into join_table_large_1 values(88, 88); +insert into join_table_large_1 values(89, 89); +insert into join_table_large_1 values(90, 90); +insert into join_table_large_1 values(91, 91); +insert into join_table_large_1 values(92, 92); +insert into join_table_large_1 values(93, 93); +insert into join_table_large_1 values(94, 94); +insert into join_table_large_1 values(95, 95); +insert into join_table_large_1 values(96, 96); +insert into join_table_large_1 values(97, 97); +insert into join_table_large_1 values(98, 98); +insert into join_table_large_1 values(99, 99); +insert into join_table_large_1 values(100, 100); + +insert into join_table_large_2 values(1, 1); +insert into join_table_large_2 values(2, 2); +insert into join_table_large_2 values(3, 3); +insert into join_table_large_2 values(4, 4); +insert into join_table_large_2 values(5, 5); +insert into join_table_large_2 values(6, 6); +insert into join_table_large_2 values(7, 7); +insert into join_table_large_2 values(8, 8); +insert into 
join_table_large_2 values(9, 9); +insert into join_table_large_2 values(10, 10); +insert into join_table_large_2 values(11, 11); +insert into join_table_large_2 values(12, 12); +insert into join_table_large_2 values(13, 13); +insert into join_table_large_2 values(14, 14); +insert into join_table_large_2 values(15, 15); +insert into join_table_large_2 values(16, 16); +insert into join_table_large_2 values(17, 17); +insert into join_table_large_2 values(18, 18); +insert into join_table_large_2 values(19, 19); +insert into join_table_large_2 values(20, 20); +insert into join_table_large_2 values(21, 21); +insert into join_table_large_2 values(22, 22); +insert into join_table_large_2 values(23, 23); +insert into join_table_large_2 values(24, 24); +insert into join_table_large_2 values(25, 25); +insert into join_table_large_2 values(26, 26); +insert into join_table_large_2 values(27, 27); +insert into join_table_large_2 values(28, 28); +insert into join_table_large_2 values(29, 29); +insert into join_table_large_2 values(30, 30); +insert into join_table_large_2 values(31, 31); +insert into join_table_large_2 values(32, 32); +insert into join_table_large_2 values(33, 33); +insert into join_table_large_2 values(34, 34); +insert into join_table_large_2 values(35, 35); +insert into join_table_large_2 values(36, 36); +insert into join_table_large_2 values(37, 37); +insert into join_table_large_2 values(38, 38); +insert into join_table_large_2 values(39, 39); +insert into join_table_large_2 values(40, 40); +insert into join_table_large_2 values(41, 41); +insert into join_table_large_2 values(42, 42); +insert into join_table_large_2 values(43, 43); +insert into join_table_large_2 values(44, 44); +insert into join_table_large_2 values(45, 45); +insert into join_table_large_2 values(46, 46); +insert into join_table_large_2 values(47, 47); +insert into join_table_large_2 values(48, 48); +insert into join_table_large_2 values(49, 49); +insert into join_table_large_2 values(50, 50); +insert into join_table_large_2 values(51, 51); +insert into join_table_large_2 values(52, 52); +insert into join_table_large_2 values(53, 53); +insert into join_table_large_2 values(54, 54); +insert into join_table_large_2 values(55, 55); +insert into join_table_large_2 values(56, 56); +insert into join_table_large_2 values(57, 57); +insert into join_table_large_2 values(58, 58); +insert into join_table_large_2 values(59, 59); +insert into join_table_large_2 values(60, 60); +insert into join_table_large_2 values(61, 61); +insert into join_table_large_2 values(62, 62); +insert into join_table_large_2 values(63, 63); +insert into join_table_large_2 values(64, 64); +insert into join_table_large_2 values(65, 65); +insert into join_table_large_2 values(66, 66); +insert into join_table_large_2 values(67, 67); +insert into join_table_large_2 values(68, 68); +insert into join_table_large_2 values(69, 69); +insert into join_table_large_2 values(70, 70); +insert into join_table_large_2 values(71, 71); +insert into join_table_large_2 values(72, 72); +insert into join_table_large_2 values(73, 73); +insert into join_table_large_2 values(74, 74); +insert into join_table_large_2 values(75, 75); +insert into join_table_large_2 values(76, 76); +insert into join_table_large_2 values(77, 77); +insert into join_table_large_2 values(78, 78); +insert into join_table_large_2 values(79, 79); +insert into join_table_large_2 values(80, 80); +insert into join_table_large_2 values(81, 81); +insert into join_table_large_2 values(82, 82); +insert into 
join_table_large_2 values(83, 83); +insert into join_table_large_2 values(84, 84); +insert into join_table_large_2 values(85, 85); +insert into join_table_large_2 values(86, 86); +insert into join_table_large_2 values(87, 87); +insert into join_table_large_2 values(88, 88); +insert into join_table_large_2 values(89, 89); +insert into join_table_large_2 values(90, 90); +insert into join_table_large_2 values(91, 91); +insert into join_table_large_2 values(92, 92); +insert into join_table_large_2 values(93, 93); +insert into join_table_large_2 values(94, 94); +insert into join_table_large_2 values(95, 95); +insert into join_table_large_2 values(96, 96); +insert into join_table_large_2 values(97, 97); +insert into join_table_large_2 values(98, 98); +insert into join_table_large_2 values(99, 99); +insert into join_table_large_2 values(100, 100); + +insert into join_table_large_3 values(1, 1); +insert into join_table_large_3 values(2, 2); +insert into join_table_large_3 values(3, 3); +insert into join_table_large_3 values(4, 4); +insert into join_table_large_3 values(5, 5); +insert into join_table_large_3 values(6, 6); +insert into join_table_large_3 values(7, 7); +insert into join_table_large_3 values(8, 8); +insert into join_table_large_3 values(9, 9); +insert into join_table_large_3 values(10, 10); +insert into join_table_large_3 values(11, 11); +insert into join_table_large_3 values(12, 12); +insert into join_table_large_3 values(13, 13); +insert into join_table_large_3 values(14, 14); +insert into join_table_large_3 values(15, 15); +insert into join_table_large_3 values(16, 16); +insert into join_table_large_3 values(17, 17); +insert into join_table_large_3 values(18, 18); +insert into join_table_large_3 values(19, 19); +insert into join_table_large_3 values(20, 20); +insert into join_table_large_3 values(21, 21); +insert into join_table_large_3 values(22, 22); +insert into join_table_large_3 values(23, 23); +insert into join_table_large_3 values(24, 24); +insert into join_table_large_3 values(25, 25); +insert into join_table_large_3 values(26, 26); +insert into join_table_large_3 values(27, 27); +insert into join_table_large_3 values(28, 28); +insert into join_table_large_3 values(29, 29); +insert into join_table_large_3 values(30, 30); +insert into join_table_large_3 values(31, 31); +insert into join_table_large_3 values(32, 32); +insert into join_table_large_3 values(33, 33); +insert into join_table_large_3 values(34, 34); +insert into join_table_large_3 values(35, 35); +insert into join_table_large_3 values(36, 36); +insert into join_table_large_3 values(37, 37); +insert into join_table_large_3 values(38, 38); +insert into join_table_large_3 values(39, 39); +insert into join_table_large_3 values(40, 40); +insert into join_table_large_3 values(41, 41); +insert into join_table_large_3 values(42, 42); +insert into join_table_large_3 values(43, 43); +insert into join_table_large_3 values(44, 44); +insert into join_table_large_3 values(45, 45); +insert into join_table_large_3 values(46, 46); +insert into join_table_large_3 values(47, 47); +insert into join_table_large_3 values(48, 48); +insert into join_table_large_3 values(49, 49); +insert into join_table_large_3 values(50, 50); +insert into join_table_large_3 values(51, 51); +insert into join_table_large_3 values(52, 52); +insert into join_table_large_3 values(53, 53); +insert into join_table_large_3 values(54, 54); +insert into join_table_large_3 values(55, 55); +insert into join_table_large_3 values(56, 56); +insert into 
join_table_large_3 values(57, 57); +insert into join_table_large_3 values(58, 58); +insert into join_table_large_3 values(59, 59); +insert into join_table_large_3 values(60, 60); +insert into join_table_large_3 values(61, 61); +insert into join_table_large_3 values(62, 62); +insert into join_table_large_3 values(63, 63); +insert into join_table_large_3 values(64, 64); +insert into join_table_large_3 values(65, 65); +insert into join_table_large_3 values(66, 66); +insert into join_table_large_3 values(67, 67); +insert into join_table_large_3 values(68, 68); +insert into join_table_large_3 values(69, 69); +insert into join_table_large_3 values(70, 70); +insert into join_table_large_3 values(71, 71); +insert into join_table_large_3 values(72, 72); +insert into join_table_large_3 values(73, 73); +insert into join_table_large_3 values(74, 74); +insert into join_table_large_3 values(75, 75); +insert into join_table_large_3 values(76, 76); +insert into join_table_large_3 values(77, 77); +insert into join_table_large_3 values(78, 78); +insert into join_table_large_3 values(79, 79); +insert into join_table_large_3 values(80, 80); +insert into join_table_large_3 values(81, 81); +insert into join_table_large_3 values(82, 82); +insert into join_table_large_3 values(83, 83); +insert into join_table_large_3 values(84, 84); +insert into join_table_large_3 values(85, 85); +insert into join_table_large_3 values(86, 86); +insert into join_table_large_3 values(87, 87); +insert into join_table_large_3 values(88, 88); +insert into join_table_large_3 values(89, 89); +insert into join_table_large_3 values(90, 90); +insert into join_table_large_3 values(91, 91); +insert into join_table_large_3 values(92, 92); +insert into join_table_large_3 values(93, 93); +insert into join_table_large_3 values(94, 94); +insert into join_table_large_3 values(95, 95); +insert into join_table_large_3 values(96, 96); +insert into join_table_large_3 values(97, 97); +insert into join_table_large_3 values(98, 98); +insert into join_table_large_3 values(99, 99); +insert into join_table_large_3 values(100, 100); + +insert into join_table_large_4 values(1, 1); +insert into join_table_large_4 values(2, 2); +insert into join_table_large_4 values(3, 3); +insert into join_table_large_4 values(4, 4); +insert into join_table_large_4 values(5, 5); +insert into join_table_large_4 values(6, 6); +insert into join_table_large_4 values(7, 7); +insert into join_table_large_4 values(8, 8); +insert into join_table_large_4 values(9, 9); +insert into join_table_large_4 values(10, 10); +insert into join_table_large_4 values(11, 11); +insert into join_table_large_4 values(12, 12); +insert into join_table_large_4 values(13, 13); +insert into join_table_large_4 values(14, 14); +insert into join_table_large_4 values(15, 15); +insert into join_table_large_4 values(16, 16); +insert into join_table_large_4 values(17, 17); +insert into join_table_large_4 values(18, 18); +insert into join_table_large_4 values(19, 19); +insert into join_table_large_4 values(20, 20); +insert into join_table_large_4 values(21, 21); +insert into join_table_large_4 values(22, 22); +insert into join_table_large_4 values(23, 23); +insert into join_table_large_4 values(24, 24); +insert into join_table_large_4 values(25, 25); +insert into join_table_large_4 values(26, 26); +insert into join_table_large_4 values(27, 27); +insert into join_table_large_4 values(28, 28); +insert into join_table_large_4 values(29, 29); +insert into join_table_large_4 values(30, 30); +insert into 
join_table_large_4 values(31, 31); +insert into join_table_large_4 values(32, 32); +insert into join_table_large_4 values(33, 33); +insert into join_table_large_4 values(34, 34); +insert into join_table_large_4 values(35, 35); +insert into join_table_large_4 values(36, 36); +insert into join_table_large_4 values(37, 37); +insert into join_table_large_4 values(38, 38); +insert into join_table_large_4 values(39, 39); +insert into join_table_large_4 values(40, 40); +insert into join_table_large_4 values(41, 41); +insert into join_table_large_4 values(42, 42); +insert into join_table_large_4 values(43, 43); +insert into join_table_large_4 values(44, 44); +insert into join_table_large_4 values(45, 45); +insert into join_table_large_4 values(46, 46); +insert into join_table_large_4 values(47, 47); +insert into join_table_large_4 values(48, 48); +insert into join_table_large_4 values(49, 49); +insert into join_table_large_4 values(50, 50); +insert into join_table_large_4 values(51, 51); +insert into join_table_large_4 values(52, 52); +insert into join_table_large_4 values(53, 53); +insert into join_table_large_4 values(54, 54); +insert into join_table_large_4 values(55, 55); +insert into join_table_large_4 values(56, 56); +insert into join_table_large_4 values(57, 57); +insert into join_table_large_4 values(58, 58); +insert into join_table_large_4 values(59, 59); +insert into join_table_large_4 values(60, 60); +insert into join_table_large_4 values(61, 61); +insert into join_table_large_4 values(62, 62); +insert into join_table_large_4 values(63, 63); +insert into join_table_large_4 values(64, 64); +insert into join_table_large_4 values(65, 65); +insert into join_table_large_4 values(66, 66); +insert into join_table_large_4 values(67, 67); +insert into join_table_large_4 values(68, 68); +insert into join_table_large_4 values(69, 69); +insert into join_table_large_4 values(70, 70); +insert into join_table_large_4 values(71, 71); +insert into join_table_large_4 values(72, 72); +insert into join_table_large_4 values(73, 73); +insert into join_table_large_4 values(74, 74); +insert into join_table_large_4 values(75, 75); +insert into join_table_large_4 values(76, 76); +insert into join_table_large_4 values(77, 77); +insert into join_table_large_4 values(78, 78); +insert into join_table_large_4 values(79, 79); +insert into join_table_large_4 values(80, 80); +insert into join_table_large_4 values(81, 81); +insert into join_table_large_4 values(82, 82); +insert into join_table_large_4 values(83, 83); +insert into join_table_large_4 values(84, 84); +insert into join_table_large_4 values(85, 85); +insert into join_table_large_4 values(86, 86); +insert into join_table_large_4 values(87, 87); +insert into join_table_large_4 values(88, 88); +insert into join_table_large_4 values(89, 89); +insert into join_table_large_4 values(90, 90); +insert into join_table_large_4 values(91, 91); +insert into join_table_large_4 values(92, 92); +insert into join_table_large_4 values(93, 93); +insert into join_table_large_4 values(94, 94); +insert into join_table_large_4 values(95, 95); +insert into join_table_large_4 values(96, 96); +insert into join_table_large_4 values(97, 97); +insert into join_table_large_4 values(98, 98); +insert into join_table_large_4 values(99, 99); +insert into join_table_large_4 values(100, 100); + +insert into join_table_large_5 values(1, 1); +insert into join_table_large_5 values(2, 2); +insert into join_table_large_5 values(3, 3); +insert into join_table_large_5 values(4, 4); +insert into 
join_table_large_5 values(5, 5); +insert into join_table_large_5 values(6, 6); +insert into join_table_large_5 values(7, 7); +insert into join_table_large_5 values(8, 8); +insert into join_table_large_5 values(9, 9); +insert into join_table_large_5 values(10, 10); +insert into join_table_large_5 values(11, 11); +insert into join_table_large_5 values(12, 12); +insert into join_table_large_5 values(13, 13); +insert into join_table_large_5 values(14, 14); +insert into join_table_large_5 values(15, 15); +insert into join_table_large_5 values(16, 16); +insert into join_table_large_5 values(17, 17); +insert into join_table_large_5 values(18, 18); +insert into join_table_large_5 values(19, 19); +insert into join_table_large_5 values(20, 20); +insert into join_table_large_5 values(21, 21); +insert into join_table_large_5 values(22, 22); +insert into join_table_large_5 values(23, 23); +insert into join_table_large_5 values(24, 24); +insert into join_table_large_5 values(25, 25); +insert into join_table_large_5 values(26, 26); +insert into join_table_large_5 values(27, 27); +insert into join_table_large_5 values(28, 28); +insert into join_table_large_5 values(29, 29); +insert into join_table_large_5 values(30, 30); +insert into join_table_large_5 values(31, 31); +insert into join_table_large_5 values(32, 32); +insert into join_table_large_5 values(33, 33); +insert into join_table_large_5 values(34, 34); +insert into join_table_large_5 values(35, 35); +insert into join_table_large_5 values(36, 36); +insert into join_table_large_5 values(37, 37); +insert into join_table_large_5 values(38, 38); +insert into join_table_large_5 values(39, 39); +insert into join_table_large_5 values(40, 40); +insert into join_table_large_5 values(41, 41); +insert into join_table_large_5 values(42, 42); +insert into join_table_large_5 values(43, 43); +insert into join_table_large_5 values(44, 44); +insert into join_table_large_5 values(45, 45); +insert into join_table_large_5 values(46, 46); +insert into join_table_large_5 values(47, 47); +insert into join_table_large_5 values(48, 48); +insert into join_table_large_5 values(49, 49); +insert into join_table_large_5 values(50, 50); +insert into join_table_large_5 values(51, 51); +insert into join_table_large_5 values(52, 52); +insert into join_table_large_5 values(53, 53); +insert into join_table_large_5 values(54, 54); +insert into join_table_large_5 values(55, 55); +insert into join_table_large_5 values(56, 56); +insert into join_table_large_5 values(57, 57); +insert into join_table_large_5 values(58, 58); +insert into join_table_large_5 values(59, 59); +insert into join_table_large_5 values(60, 60); +insert into join_table_large_5 values(61, 61); +insert into join_table_large_5 values(62, 62); +insert into join_table_large_5 values(63, 63); +insert into join_table_large_5 values(64, 64); +insert into join_table_large_5 values(65, 65); +insert into join_table_large_5 values(66, 66); +insert into join_table_large_5 values(67, 67); +insert into join_table_large_5 values(68, 68); +insert into join_table_large_5 values(69, 69); +insert into join_table_large_5 values(70, 70); +insert into join_table_large_5 values(71, 71); +insert into join_table_large_5 values(72, 72); +insert into join_table_large_5 values(73, 73); +insert into join_table_large_5 values(74, 74); +insert into join_table_large_5 values(75, 75); +insert into join_table_large_5 values(76, 76); +insert into join_table_large_5 values(77, 77); +insert into join_table_large_5 values(78, 78); +insert into 
join_table_large_5 values(79, 79); +insert into join_table_large_5 values(80, 80); +insert into join_table_large_5 values(81, 81); +insert into join_table_large_5 values(82, 82); +insert into join_table_large_5 values(83, 83); +insert into join_table_large_5 values(84, 84); +insert into join_table_large_5 values(85, 85); +insert into join_table_large_5 values(86, 86); +insert into join_table_large_5 values(87, 87); +insert into join_table_large_5 values(88, 88); +insert into join_table_large_5 values(89, 89); +insert into join_table_large_5 values(90, 90); +insert into join_table_large_5 values(91, 91); +insert into join_table_large_5 values(92, 92); +insert into join_table_large_5 values(93, 93); +insert into join_table_large_5 values(94, 94); +insert into join_table_large_5 values(95, 95); +insert into join_table_large_5 values(96, 96); +insert into join_table_large_5 values(97, 97); +insert into join_table_large_5 values(98, 98); +insert into join_table_large_5 values(99, 99); +insert into join_table_large_5 values(100, 100); + +insert into join_table_large_6 values(1, 1); +insert into join_table_large_6 values(2, 2); +insert into join_table_large_6 values(3, 3); +insert into join_table_large_6 values(4, 4); +insert into join_table_large_6 values(5, 5); +insert into join_table_large_6 values(6, 6); +insert into join_table_large_6 values(7, 7); +insert into join_table_large_6 values(8, 8); +insert into join_table_large_6 values(9, 9); +insert into join_table_large_6 values(10, 10); +insert into join_table_large_6 values(11, 11); +insert into join_table_large_6 values(12, 12); +insert into join_table_large_6 values(13, 13); +insert into join_table_large_6 values(14, 14); +insert into join_table_large_6 values(15, 15); +insert into join_table_large_6 values(16, 16); +insert into join_table_large_6 values(17, 17); +insert into join_table_large_6 values(18, 18); +insert into join_table_large_6 values(19, 19); +insert into join_table_large_6 values(20, 20); +insert into join_table_large_6 values(21, 21); +insert into join_table_large_6 values(22, 22); +insert into join_table_large_6 values(23, 23); +insert into join_table_large_6 values(24, 24); +insert into join_table_large_6 values(25, 25); +insert into join_table_large_6 values(26, 26); +insert into join_table_large_6 values(27, 27); +insert into join_table_large_6 values(28, 28); +insert into join_table_large_6 values(29, 29); +insert into join_table_large_6 values(30, 30); +insert into join_table_large_6 values(31, 31); +insert into join_table_large_6 values(32, 32); +insert into join_table_large_6 values(33, 33); +insert into join_table_large_6 values(34, 34); +insert into join_table_large_6 values(35, 35); +insert into join_table_large_6 values(36, 36); +insert into join_table_large_6 values(37, 37); +insert into join_table_large_6 values(38, 38); +insert into join_table_large_6 values(39, 39); +insert into join_table_large_6 values(40, 40); +insert into join_table_large_6 values(41, 41); +insert into join_table_large_6 values(42, 42); +insert into join_table_large_6 values(43, 43); +insert into join_table_large_6 values(44, 44); +insert into join_table_large_6 values(45, 45); +insert into join_table_large_6 values(46, 46); +insert into join_table_large_6 values(47, 47); +insert into join_table_large_6 values(48, 48); +insert into join_table_large_6 values(49, 49); +insert into join_table_large_6 values(50, 50); +insert into join_table_large_6 values(51, 51); +insert into join_table_large_6 values(52, 52); +insert into 
join_table_large_6 values(53, 53); +insert into join_table_large_6 values(54, 54); +insert into join_table_large_6 values(55, 55); +insert into join_table_large_6 values(56, 56); +insert into join_table_large_6 values(57, 57); +insert into join_table_large_6 values(58, 58); +insert into join_table_large_6 values(59, 59); +insert into join_table_large_6 values(60, 60); +insert into join_table_large_6 values(61, 61); +insert into join_table_large_6 values(62, 62); +insert into join_table_large_6 values(63, 63); +insert into join_table_large_6 values(64, 64); +insert into join_table_large_6 values(65, 65); +insert into join_table_large_6 values(66, 66); +insert into join_table_large_6 values(67, 67); +insert into join_table_large_6 values(68, 68); +insert into join_table_large_6 values(69, 69); +insert into join_table_large_6 values(70, 70); +insert into join_table_large_6 values(71, 71); +insert into join_table_large_6 values(72, 72); +insert into join_table_large_6 values(73, 73); +insert into join_table_large_6 values(74, 74); +insert into join_table_large_6 values(75, 75); +insert into join_table_large_6 values(76, 76); +insert into join_table_large_6 values(77, 77); +insert into join_table_large_6 values(78, 78); +insert into join_table_large_6 values(79, 79); +insert into join_table_large_6 values(80, 80); +insert into join_table_large_6 values(81, 81); +insert into join_table_large_6 values(82, 82); +insert into join_table_large_6 values(83, 83); +insert into join_table_large_6 values(84, 84); +insert into join_table_large_6 values(85, 85); +insert into join_table_large_6 values(86, 86); +insert into join_table_large_6 values(87, 87); +insert into join_table_large_6 values(88, 88); +insert into join_table_large_6 values(89, 89); +insert into join_table_large_6 values(90, 90); +insert into join_table_large_6 values(91, 91); +insert into join_table_large_6 values(92, 92); +insert into join_table_large_6 values(93, 93); +insert into join_table_large_6 values(94, 94); +insert into join_table_large_6 values(95, 95); +insert into join_table_large_6 values(96, 96); +insert into join_table_large_6 values(97, 97); +insert into join_table_large_6 values(98, 98); +insert into join_table_large_6 values(99, 99); +insert into join_table_large_6 values(100, 100); + +-- sort select * from join_table_large_1 inner join join_table_large_2 on join_table_large_1.id=join_table_large_2.id inner join join_table_large_3 on join_table_large_1.id=join_table_large_3.id inner join join_table_large_4 on join_table_large_3.id=join_table_large_4.id inner join join_table_large_5 on 1=1 inner join join_table_large_6 on join_table_large_5.id=join_table_large_6.id where join_table_large_3.num3 <10 and join_table_large_5.num5>90; diff --git a/test/case/test/primary-multi-index.test b/test/case/test/primary-multi-index.test new file mode 100644 index 0000000000000000000000000000000000000000..19949725d45a24dae14a0469b4aa5e665d1f3d92 --- /dev/null +++ b/test/case/test/primary-multi-index.test @@ -0,0 +1,77 @@ +-- echo 1. multi index of empty table +CREATE TABLE multi_index(id int, col1 int, col2 float, col3 char, col4 date, col5 int, col6 int); +CREATE INDEX i_1_12 ON multi_index(col1,col2); +CREATE INDEX i_1_345 ON multi_index(col3, col4, col5); +CREATE INDEX i_1_56 ON multi_index(col5, col6); +CREATE INDEX i_1_456 ON multi_index(col4, col5, col6); +-- sort SELECT * FROM multi_index; + +-- echo 2. 
multi index of non-empty table +CREATE TABLE multi_index2(id int, col1 int, col2 float, col3 char, col4 date, col5 int, col6 int); +INSERT INTO multi_index2 VALUES (1, 1, 11.2, 'a', '2021-01-02', 1, 1); +INSERT INTO multi_index2 VALUES (2, 1, 16.2, 'x', '2021-01-02', 1, 61); +INSERT INTO multi_index2 VALUES (3, 1, 11.6, 'h', '2023-01-02', 10, 17); + +CREATE INDEX i_2_12 ON multi_index2(col1,col2); +CREATE INDEX i_2_345 ON multi_index2(col3, col4, col5); +CREATE INDEX i_2_56 ON multi_index2(col5, col6); +CREATE INDEX i_2_456 ON multi_index2(col4, col5, col6); +-- sort SELECT * FROM multi_index2; + +-- echo 3. influence of inserting +CREATE TABLE multi_index3(id int, col1 int, col2 float, col3 char, col4 date, col5 int, col6 int); +CREATE INDEX i_3_i1 ON multi_index3(id,col1); + +INSERT INTO multi_index3 VALUES (1, 1, 11.2, 'a', '2021-01-02', 1, 1); +INSERT INTO multi_index3 VALUES (1, 1, 11.2, 'a', '2021-01-02', 1, 1); +-- sort SELECT * FROM multi_index3; +CREATE INDEX i_3_14 ON multi_index3(col1,col4); +INSERT INTO multi_index3 VALUES (2, 1, 16.2, 'x', '2021-01-02', 1, 61); +INSERT INTO multi_index3 VALUES (3, 1, 11.6, 'h', '2023-01-02', 10, 17); +INSERT INTO multi_index3 VALUES (4, 2, 12.2, 'e', '2022-01-04', 13, 10); +INSERT INTO multi_index3 VALUES (5, 3, 14.2, 'd', '2020-04-02', 12, 2); +-- sort SELECT * FROM multi_index3; + +-- echo 4. query with indexs +-- sort SELECT * FROM multi_index3 WHERE id = 1; +-- sort SELECT * FROM multi_index3 WHERE col1 > 1 and col4 = '2021-01-02'; +-- sort SELECT * FROM multi_index3 WHERE col1 <> 1 and col4 >= '2021-01-02'; +-- sort SELECT * FROM multi_index3 WHERE col2 < 15.0 and col4 <> '2021-01-02'; + +-- echo 5. influence of deleting +DELETE FROM multi_index3 WHERE id = 1; +DELETE FROM multi_index3 WHERE id = 61; +-- sort SELECT * FROM multi_index3; + +DELETE FROM multi_index3 WHERE col3 = 'x'; +-- sort SELECT * FROM multi_index3; + +DELETE FROM multi_index3 WHERE id = 4 and col1 = 1; +DELETE FROM multi_index3 WHERE id = 90 and col1 = 13; +DELETE FROM multi_index3 WHERE id = 90 and col1 = 1; +DELETE FROM multi_index3 WHERE id = 4 and col1 = 13; +DELETE FROM multi_index3 WHERE id = 3 and col1 = 1; +DELETE FROM multi_index3 WHERE id = 3 and col1 = 1; +-- sort SELECT * FROM multi_index3; + +INSERT INTO multi_index3 VALUES (1, 1, 11.2, 'a', '2021-01-02', 1, 1); +INSERT INTO multi_index3 VALUES (2, 1, 11.2, 'x', '2021-01-02', 1, 61); +INSERT INTO multi_index3 VALUES (3, 1, 11.2, 'h', '2023-01-02', 10, 17); +-- sort SELECT * FROM multi_index3; + +-- echo 6. influence of updating +UPDATE multi_index3 SET col6=49 where id=2; +UPDATE multi_index3 SET col4='1999-02-01' where id=2; +UPDATE multi_index3 SET col1=2 where id=2; +UPDATE multi_index3 SET col1=5 where col6=49; +-- sort SELECT * FROM multi_index3; + +-- echo 7. influence of dropping table +DROP table multi_index; + +-- echo 8. 
error +CREATE TABLE multi_index4(id int, col1 int, col2 float, col3 char, col4 date, col5 int, col6 int); + +CREATE INDEX i_4_i7 ON multi_index4(id,col7); +CREATE INDEX i_4_78 ON multi_index4(col7,col8); +CREATE INDEX i_4_i78 ON multi_index4(id,col7,col8); diff --git a/test/case/test/primary-null.test b/test/case/test/primary-null.test new file mode 100644 index 0000000000000000000000000000000000000000..e86fead71fd7cc66a4a9ff9daed7ffccd673400a --- /dev/null +++ b/test/case/test/primary-null.test @@ -0,0 +1,83 @@ +-- echo initialization +CREATE TABLE null_table(id int, num int nullable, price float not null, birthday date nullable); +CREATE TABLE null_table2(id int, num int nullable, price float not null, birthday date nullable); +CREATE INDEX index_num on null_table(num); + +-- echo 1. insert +INSERT INTO null_table VALUES (1, 18, 10.0, '2020-01-01'); +INSERT INTO null_table VALUES (2, null, 20.0, '2010-01-11'); +INSERT INTO null_table VALUES (3, 12, 30.0, null); +INSERT INTO null_table VALUES (4, 15, 30.0, '2021-01-31'); +INSERT INTO null_table2 VALUES (1, 18, 30.0, '2021-01-31'); +INSERT INTO null_table2 VALUES (2, null, 40.0, null); + +INSERT INTO null_table VALUES (5, 15, null, '2021-01-31'); +INSERT INTO null_table VALUES (null, 15, 30.0, '2021-01-31'); + +-- echo 2. SELECT +-- sort SELECT * FROM null_table; + +-- echo 3. SELECT WITH CONSTANT +-- sort SELECT * FROM null_table where 1 is null; +-- sort SELECT * FROM null_table where 1 is not null; +-- sort SELECT * FROM null_table where null=1; +-- sort SELECT * FROM null_table where 1=null; +-- sort SELECT * FROM null_table where 1<>null; +-- sort SELECT * FROM null_table where 1<null; + +-- sort SELECT * FROM null_table where null is null; +-- sort SELECT * FROM null_table where null is not null; +-- sort SELECT * FROM null_table WHERE null=null; +-- sort SELECT * FROM null_table WHERE null<>null; +-- sort SELECT * FROM null_table WHERE null>null; +-- sort SELECT * FROM null_table WHERE null<null; +-- sort SELECT * FROM null_table WHERE 'a'>null; +-- sort SELECT * FROM null_table WHERE 'a'<null; +-- sort SELECT * FROM null_table WHERE '2021-01-31' < null; +-- sort SELECT * FROM null_table where birthday > null; +-- sort SELECT * FROM null_table where birthday < null; + +-- sort SELECT * FROM null_table where num is not null; +-- sort SELECT * FROM null_table where num is null; +-- sort SELECT * FROM null_table where num = null; +-- sort SELECT * FROM null_table where null = num; +-- sort SELECT * FROM null_table where num <> null; +-- sort SELECT * FROM null_table where num > null; +-- sort SELECT * FROM null_table where num < null; + +-- sort SELECT null_table.num,null_table2.num,null_table.birthday FROM null_table,null_table2 where null_table.num=null_table2.num; + +-- echo 5. aggregation +SELECT count(*) FROM null_table; +SELECT count(price) FROM null_table; +SELECT count(birthday) FROM null_table; +SELECT avg(num) FROM null_table; + +-- echo 6. aggregation with null columns +CREATE TABLE null_table3(id int, num int nullable); +INSERT INTO null_table3 VALUES (1, null); +INSERT INTO null_table3 VALUES (2, null); +SELECT count(num) FROM null_table3; +SELECT min(num) FROM null_table3; +SELECT max(num) FROM null_table3; +SELECT avg(num) FROM null_table3; diff --git a/test/case/test/primary-order-by.test b/test/case/test/primary-order-by.test new file mode 100644 index 0000000000000000000000000000000000000000..974468e8f226e49f9b1f1f0a7bb9d21168aefcdd --- /dev/null +++ b/test/case/test/primary-order-by.test @@ -0,0 +1,43 @@ +-- echo 1. 
create table +create table t_order_by(id int, score float, name char); +create table t_order_by_2(id int, age int); + +-- echo 2. insert records +insert into t_order_by values(3, 1.0, 'a'); +insert into t_order_by values(1, 2.0, 'b'); +insert into t_order_by values(4, 3.0, 'c'); +insert into t_order_by values(3, 2.0, 'c'); +insert into t_order_by values(3, 4.0, 'c'); +insert into t_order_by values(3, 3.0, 'd'); +insert into t_order_by values(3, 2.0, 'f'); + +insert into t_order_by_2 values(1, 10); +insert into t_order_by_2 values(2, 20); +insert into t_order_by_2 values(3, 10); +insert into t_order_by_2 values(3, 20); +insert into t_order_by_2 values(3, 40); +insert into t_order_by_2 values(4, 20); + +-- echo 3. primary order by +-- sort select * from t_order_by order by id; + +-- sort select * from t_order_by order by id asc; + +-- sort select * from t_order_by order by id desc; + +-- sort select * from t_order_by order by score desc; + +-- sort select * from t_order_by order by name desc; + +-- echo 4. order by more than one fields +select * from t_order_by order by id, score, name; + +select * from t_order_by order by id desc, score asc, name desc; + +-- echo 5. order by associate with where condition +select * from t_order_by where id=3 and name>='a' order by score desc, name; + +-- echo 6. multi-table order by +select * from t_order_by,t_order_by_2 order by t_order_by.id,t_order_by.score,t_order_by.name,t_order_by_2.id,t_order_by_2.age; + +select * from t_order_by, t_order_by_2 where t_order_by.id=t_order_by_2.id order by t_order_by.score desc, t_order_by_2.age asc, t_order_by.id asc, t_order_by.name; diff --git a/test/case/test/primary-select-meta.test b/test/case/test/primary-select-meta.test new file mode 100644 index 0000000000000000000000000000000000000000..6243ebc4ac2fe37d02bfad6df21b6ece341029f7 --- /dev/null +++ b/test/case/test/primary-select-meta.test @@ -0,0 +1,9 @@ +-- echo initialization +CREATE TABLE Select_meta(id int, age int); + +-- echo 1. select from a non-existent table +select * from no_table; + +-- echo 2. select from a non-existent column +select home from Select_meta; +select * from Select_meta where home='001'; diff --git a/test/case/test/primary-select-tables.test b/test/case/test/primary-select-tables.test new file mode 100644 index 0000000000000000000000000000000000000000..98408c1be4ad46f284cc8b76d7d401f45c28f6e5 --- /dev/null +++ b/test/case/test/primary-select-tables.test @@ -0,0 +1,41 @@ +-- echo initialization +CREATE TABLE Select_tables_1(id int, age int, u_name char); +CREATE TABLE Select_tables_2(id int, age int, u_name char); +CREATE TABLE Select_tables_3(id int, res int, u_name char); +CREATE TABLE Select_tables_4(id int, age int, u_name char); +CREATE TABLE Select_tables_5(id int, res int, u_name char); + +INSERT INTO Select_tables_1 VALUES (1,18,'a'); +INSERT INTO Select_tables_1 VALUES (2,15,'b'); +INSERT INTO Select_tables_2 VALUES (1,20,'a'); +INSERT INTO Select_tables_2 VALUES (2,21,'c'); +INSERT INTO Select_tables_3 VALUES (1,35,'a'); +INSERT INTO Select_tables_3 VALUES (2,37,'a'); + +-- echo insert data into select_tables_4 and select_tables_5 +INSERT INTO Select_tables_4 VALUES (1, 2, 'a'); +INSERT INTO Select_tables_4 VALUES (1, 3, 'b'); +INSERT INTO Select_tables_4 VALUES (2, 2, 'c'); +INSERT INTO Select_tables_4 VALUES (2, 4, 'd'); +INSERT INTO Select_tables_5 VALUES (1, 10, 'g'); +INSERT INTO Select_tables_5 VALUES (1, 11, 'f'); +INSERT INTO Select_tables_5 VALUES (2, 12, 'c'); + +-- echo 1. 
multi-table query +-- sort SELECT * FROM Select_tables_1,Select_tables_2,Select_tables_3; +-- sort SELECT Select_tables_1.id,Select_tables_2.u_name,Select_tables_3.res FROM Select_tables_1,Select_tables_2,Select_tables_3; +Select Select_tables_1.res FROM Select_tables_1,Select_tables_2,Select_tables_3; + +-- echo 2. conditional query +-- sort SELECT * FROM Select_tables_1,Select_tables_2,Select_tables_3 WHERE Select_tables_1.u_name=Select_tables_2.u_name AND Select_tables_2.u_name=Select_tables_3.u_name; +-- sort SELECT * FROM Select_tables_1,Select_tables_2,Select_tables_3 WHERE Select_tables_1.id=Select_tables_2.id AND Select_tables_3.res=35; +-- sort SELECT * FROM Select_tables_1,Select_tables_2,Select_tables_3 WHERE Select_tables_1.age<18 AND Select_tables_2.u_name='c' AND Select_tables_3.res=35 AND Select_tables_1.id=Select_tables_2.id AND Select_tables_2.id=Select_tables_3.id; +-- sort SELECT Select_tables_2.age FROM Select_tables_1,Select_tables_2 WHERE Select_tables_1.age<18 AND Select_tables_2.u_name='c' AND Select_tables_1.id=Select_tables_2.id; + +-- echo 3. duplicate key query +-- sort SELECT * from Select_tables_4, Select_tables_5 where Select_tables_4.id=Select_tables_5.id; +-- sort select * from Select_tables_4, Select_tables_5 where Select_tables_4.id >= Select_tables_5.id; + +-- echo 4. join empty table +CREATE TABLE Select_tables_6(id int, res int); +-- sort SELECT Select_tables_1.id,Select_tables_6.id from Select_tables_1, Select_tables_6 where Select_tables_1.id=Select_tables_6.id; diff --git a/test/case/test/primary-simple-sub-query.test b/test/case/test/primary-simple-sub-query.test new file mode 100644 index 0000000000000000000000000000000000000000..7ae8478b7e24ec033834eb8eabd8f5923d2f5d4d --- /dev/null +++ b/test/case/test/primary-simple-sub-query.test @@ -0,0 +1,46 @@ +-- echo initialization +CREATE TABLE ssq_1(id int, col1 int, feat1 float); +CREATE TABLE ssq_2(id int, col2 int, feat2 float); +CREATE TABLE ssq_3(id int, col3 int, feat3 float); + +INSERT INTO ssq_1 VALUES (1, 4, 11.2); +INSERT INTO ssq_1 VALUES (2, 2, 12.0); +INSERT INTO ssq_1 VALUES (3, 3, 13.5); +INSERT INTO ssq_2 VALUES (1, 2, 13.0); +INSERT INTO ssq_2 VALUES (2, 7, 10.5); +INSERT INTO ssq_2 VALUES (5, 3, 12.6); + +-- echo 1. Select +-- sort select * from ssq_1 where id in (select ssq_2.id from ssq_2); +-- sort select * from ssq_1 where col1 not in (select ssq_2.col2 from ssq_2); + +-- sort select * from ssq_1 where col1 = (select avg(ssq_2.col2) from ssq_2); +-- sort select * from ssq_1 where (select avg(ssq_2.col2) from ssq_2) = col1; + +-- sort select * from ssq_1 where feat1 >= (select min(ssq_2.feat2) from ssq_2); +-- sort select * from ssq_1 where (select min(ssq_2.feat2) from ssq_2) <= feat1; + +-- sort select * from ssq_1 where feat1 <= (select max(ssq_2.feat2) from ssq_2); +-- sort select * from ssq_1 where (select max(ssq_2.feat2) from ssq_2) >= feat1; + +-- sort select * from ssq_1 where feat1 > (select min(ssq_2.feat2) from ssq_2); +-- sort select * from ssq_1 where (select min(ssq_2.feat2) from ssq_2) < feat1; + +-- sort select * from ssq_1 where feat1 < (select max(ssq_2.feat2) from ssq_2); +-- sort select * from ssq_1 where (select max(ssq_2.feat2) from ssq_2) > feat1; + +-- sort select * from ssq_1 where feat1 <> (select avg(ssq_2.feat2) from ssq_2); + +-- echo 2. 
Select with empty table +-- sort select * from ssq_1 where feat1 < (select max(ssq_2.feat2) from ssq_2 where 1=0); +-- sort select * from ssq_1 where id in (select ssq_2.id from ssq_2 where 1=0); +-- sort select * from ssq_1 where id not in (select ssq_2.id from ssq_2 where 1=0); +-- sort select * from ssq_3 where feat3 < (select max(ssq_2.feat2) from ssq_2); +-- sort select * from ssq_3 where id in (select ssq_2.id from ssq_2); +-- sort select * from ssq_3 where id not in (select ssq_2.id from ssq_2); + +--echo 3. error +select * from ssq_1 where col1 = (select ssq_2.col2 from ssq_2); +select * from ssq_1 where col1 = (select * from ssq_2); +select * from ssq_1 where col1 in (select * from ssq_2); +select * from ssq_1 where col1 not in (select * from ssq_2); \ No newline at end of file diff --git a/test/case/test/primary-text.test b/test/case/test/primary-text.test new file mode 100644 index 0000000000000000000000000000000000000000..b9359e28465768b9cd5a647449f1d9a8e47ce074 --- /dev/null +++ b/test/case/test/primary-text.test @@ -0,0 +1,23 @@ +-- echo initialization +create table text_table(id int, info text); + +-- echo 1. insert +insert into text_table values (1,'this is a very very long string'); +insert into text_table values (2,'this is a very very long string2'); +insert into text_table values (3,'this is a very very long string3'); +-- sort select * from text_table; + +-- echo 2. condition +delete from text_table where id=1; +-- sort select * from text_table; + +-- echo 3. update +UPDATE text_table set info='a tmp data' where id = 2; +-- sort select * from text_table; + +-- echo 4. boundary test with length 4096 +insert into text_table values (4,'this is a very very long string pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad 
pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad1'); +-- sort select * from text_table; + +insert into text_table values (5,'this is a very very long string pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad 
pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad pad1 pad pad pad pad'); +-- sort select * from text_table; diff --git a/test/case/test/primary-unique.test b/test/case/test/primary-unique.test new file mode 100644 index 0000000000000000000000000000000000000000..7ced3ef457e1e98a573bb343047caff01c1d6a87 --- /dev/null +++ b/test/case/test/primary-unique.test @@ -0,0 +1,13 @@ +-- echo initialization +CREATE TABLE unique_table(id int, col1 int, col2 int); +INSERT INTO unique_table VALUES (1,1,1); + +-- echo 1. 
unique test +CREATE UNIQUE INDEX index_id on unique_table(id); +INSERT INTO unique_table VALUES (2,1,1); +CREATE UNIQUE INDEX index_id on unique_table(id); +INSERT INTO unique_table VALUES (3,2,1); +INSERT INTO unique_table VALUES (1,2,1); + +-- echo 2. select +-- sort SELECT * FROM unique_table; \ No newline at end of file diff --git a/test/case/test/primary-update.test b/test/case/test/primary-update.test new file mode 100644 index 0000000000000000000000000000000000000000..23405322a91f640d54fadff3501d52b43096c473 --- /dev/null +++ b/test/case/test/primary-update.test @@ -0,0 +1,42 @@ +-- echo initialization +CREATE TABLE Update_table_1(id int, t_name char, col1 int, col2 int); +CREATE INDEX index_id on Update_table_1(id); +INSERT INTO Update_table_1 VALUES (1,'N1',1,1); +INSERT INTO Update_table_1 VALUES (2,'N2',1,1); +INSERT INTO Update_table_1 VALUES (3,'N3',2,1); + +-- echo 1. update a row +UPDATE Update_table_1 SET t_name='N01' WHERE id=1; +-- sort SELECT * FROM Update_table_1; + +-- echo 2. update rows +UPDATE Update_table_1 SET col2=0 WHERE col1=1; +-- sort SELECT * FROM Update_table_1; + +-- echo 3. update a index column +UPDATE Update_table_1 SET id=4 WHERE t_name='N3'; +-- sort SELECT * FROM Update_table_1; + +-- echo 4. update without conditions +UPDATE Update_table_1 SET col1=0; +-- sort SELECT * FROM Update_table_1; + +-- echo 5. update with conditions +UPDATE Update_table_1 SET t_name='N02' WHERE col1=0 AND col2=0; +-- sort SELECT * FROM Update_table_1; + +-- echo 6. update non-existent table +UPDATE Update_table_2 SET t_name='N01' WHERE id=1; + +-- echo 7. update non-existent column +UPDATE Update_table_1 SET t_name_false='N01' WHERE id=1; + +-- echo 8. update with invalid condition +UPDATE Update_table_1 SET t_name='N01' WHERE id_false=1; + +-- echo 9. update in vain +UPDATE Update_table_1 SET t_name='N01' WHERE id=100; +-- sort SELECT * FROM Update_table_1; + +-- echo 10. update with invalid value +UPDATE Update_table_1 SET col1='N01' WHERE id=1; \ No newline at end of file diff --git a/test/CMakeLists.txt b/test/perf/CMakeLists.txt similarity index 89% rename from test/CMakeLists.txt rename to test/perf/CMakeLists.txt index b69532c561df43b4207ee0c671b863a43fff47b5..f7c3db53e5b9c35ba5914206d3acfa3aba8b4877 100644 --- a/test/CMakeLists.txt +++ b/test/perf/CMakeLists.txt @@ -9,10 +9,10 @@ MESSAGE("${CMAKE_COMMON_FLAGS}") #INCLUDE_DIRECTORIES([AFTER|BEFORE] [SYSTEM] dir1 dir2 ...) -INCLUDE_DIRECTORIES(. ${PROJECT_SOURCE_DIR}/../deps /usr/local/include SYSTEM) +INCLUDE_DIRECTORIES(. ${PROJECT_SOURCE_DIR}/../../deps /usr/local/include SYSTEM) # 父cmake 设置的include_directories 和link_directories并不传导到子cmake里面 #INCLUDE_DIRECTORIES(BEFORE ${CMAKE_INSTALL_PREFIX}/include) -LINK_DIRECTORIES(/usr/local/lib ${PROJECT_BINARY_DIR}/../lib) +LINK_DIRECTORIES(/usr/local/lib ${PROJECT_BINARY_DIR}/../../lib) IF (DEFINED ENV{LD_LIBRARY_PATH}) diff --git a/test/client_performance_test.cpp b/test/perf/client_performance_test.cpp similarity index 100% rename from test/client_performance_test.cpp rename to test/perf/client_performance_test.cpp diff --git a/unitest/bitmap_test.cpp b/unitest/bitmap_test.cpp index 98ae52e7c571489a7fae01ff45ab1234ec0450f4..206c9ab7a96c89ad035c2514e205c3ed1ceebc02 100644 --- a/unitest/bitmap_test.cpp +++ b/unitest/bitmap_test.cpp @@ -20,7 +20,8 @@ See the Mulan PSL v2 for more details. 
*/ using namespace common; -TEST(test_bitmap, test_bitmap) { +TEST(test_bitmap, test_bitmap) +{ char buf1[1]; memset(buf1, 0, sizeof(buf1)); Bitmap bitmap(buf1, 8); @@ -66,7 +67,8 @@ TEST(test_bitmap, test_bitmap) { ASSERT_EQ(16, bitmap3.next_setted_bit(8)); } -int main(int argc, char **argv) { +int main(int argc, char **argv) +{ // 分析gtest程序的命令行参数 testing::InitGoogleTest(&argc, argv); diff --git a/unitest/bplus_tree_test.cpp b/unitest/bplus_tree_test.cpp index e41f2c9694cd1cc81675de76e12328ad2cf17438..0af06ff58fa054dcf9bf0d23537fab519493a427 100644 --- a/unitest/bplus_tree_test.cpp +++ b/unitest/bplus_tree_test.cpp @@ -15,7 +15,7 @@ See the Mulan PSL v2 for more details. */ #include #include -#include "storage/common/bplus_tree.h" +#include "storage/index/bplus_tree.h" #include "storage/default/disk_buffer_pool.h" #include "rc.h" #include "common/log/log.h" @@ -40,6 +40,7 @@ int k = 0; void test_insert() { + RC rc = RC::SUCCESS; for (int i = 0; i < insert_num; i++) { rid.page_num = i / page_size; @@ -51,10 +52,11 @@ void test_insert() LOG_INFO("Begin to insert the page's num %s", rid.to_string().c_str()); } } else { - LOG_INFO("Insert %d", i); + LOG_INFO("Insert %d. rid=%s", i, rid.to_string().c_str()); } - RC rc = handler->insert_entry((const char *)&i, &rid); + rc = handler->insert_entry((const char *)&i, &rid); ASSERT_EQ(RC::SUCCESS, rc); + handler->print_tree(); ASSERT_EQ(true, handler->validate_tree()); } } @@ -71,10 +73,11 @@ void test_insert() LOG_INFO("Begin to insert the page's num %s", rid.to_string().c_str()); } } else { - LOG_INFO("Insert %d", i); + LOG_INFO("Insert %d. rid=%s", i, rid.to_string().c_str()); } - RC rc = handler->insert_entry((const char *)&i, &rid); + rc = handler->insert_entry((const char *)&i, &rid); ASSERT_EQ(RC::SUCCESS, rc); + handler->print_tree(); ASSERT_EQ(true, handler->validate_tree()); } } @@ -91,9 +94,9 @@ void test_insert() LOG_INFO("Begin to insert the page's num %s", rid.to_string().c_str()); } } else { - LOG_INFO("Insert %d", i); + LOG_INFO("Insert %d. rid=%s", i, rid.to_string().c_str()); } - RC rc = handler->insert_entry((const char *)&i, &rid); + rc = handler->insert_entry((const char *)&i, &rid); ASSERT_EQ(RC::SUCCESS, rc); ASSERT_EQ(true, handler->validate_tree()); } @@ -114,11 +117,14 @@ void test_insert() LOG_INFO("Begin to check duplicated insert the page's num %s", rid.to_string().c_str()); } } else { - LOG_INFO("Check duplicate insert %d", i); + LOG_INFO("check duplicate Insert %d. rid=%s. 
i%TIMES=%d", i, rid.to_string().c_str(), i%TIMES); } - RC rc = handler->insert_entry((const char *)&i, &rid); + rc = handler->insert_entry((const char *)&i, &rid); int t = i % TIMES; if (t == 0 || t == 1 || t == 2) { + if (rc != RC::RECORD_DUPLICATE_KEY) { + LOG_WARN("insert duplicate key success"); + } ASSERT_EQ(RC::RECORD_DUPLICATE_KEY, rc); } else { ASSERT_EQ(RC::SUCCESS, rc); @@ -154,6 +160,7 @@ void test_get() void test_delete() { + RC rc = RC::SUCCESS; std::list rids; for (int i = 0; i < insert_num / 2; i++) { @@ -164,16 +171,19 @@ void test_delete() if (t == 0 || t == 1) { if (insert_num > page_size) { if (k++ % 100 == 0) { - LOG_INFO("Begin to delete entry of index, rid: %s", rid.to_string().c_str()); + LOG_INFO("Begin to delete entry of index, i=%d rid: %s", i, rid.to_string().c_str()); } } else { - LOG_INFO("Begin to delete entry of index, rid: %s", rid.to_string().c_str()); + LOG_INFO("Begin to delete entry of index, i=%d, rid: %s", i, rid.to_string().c_str()); } - RC rc = handler->delete_entry((const char *)&i, &rid); + rc = handler->delete_entry((const char *)&i, &rid); + if (rc != RC::SUCCESS) { + LOG_WARN("failed to delete entry. i=%d, rid=%s", i, rid.to_string().c_str()); + } + ASSERT_EQ(RC::SUCCESS, rc); ASSERT_EQ(true, handler->validate_tree()); - ASSERT_EQ(RC::SUCCESS, rc); } } @@ -192,7 +202,7 @@ void test_delete() } else { LOG_INFO("Begin to delete entry of index, rid: %s", rid.to_string().c_str()); } - RC rc = handler->delete_entry((const char *)&i, &rid); + rc = handler->delete_entry((const char *)&i, &rid); ASSERT_EQ(true, handler->validate_tree()); ASSERT_EQ(RC::SUCCESS, rc); @@ -205,20 +215,26 @@ void test_delete() rid.slot_num = i % page_size; if (insert_num > page_size) { if (k++ % 100 == 0) { - LOG_INFO("Begin to get entry of index, rid: %s", rid.to_string().c_str()); + LOG_INFO("Begin to get entry of index, i=%d,rid: %s", i, rid.to_string().c_str()); } } else { - LOG_INFO("Begin to get entry of index, rid: %s", rid.to_string().c_str()); + LOG_INFO("Begin to get entry of index, i=%d, rid: %s", i, rid.to_string().c_str()); } rids.clear(); - RC rc = handler->get_entry((const char *)&i, rids); + rc = handler->get_entry((const char *)&i, rids); ASSERT_EQ(RC::SUCCESS, rc); int t = i % TIMES; if (t == 0 || t == 1) { ASSERT_EQ(0, rids.size()); } else { + if (rids.size() != 1) { + LOG_WARN("invalid. i=%d, rid=%s, check rid=%s", i, rid.to_string().c_str(), check_rid.to_string().c_str()); + } ASSERT_EQ(1, rids.size()); check_rid = rids.front(); + if (rid != check_rid) { + LOG_WARN("invalid. 
i=%d, rid=%s, check rid=%s", i, rid.to_string().c_str(), check_rid.to_string().c_str()); + } ASSERT_EQ(rid.page_num, check_rid.page_num); ASSERT_EQ(rid.slot_num, check_rid.slot_num); ASSERT_EQ(true, handler->validate_tree()); @@ -239,7 +255,7 @@ void test_delete() } else { LOG_INFO("Begin to delete entry of index, rid: %s", rid.to_string().c_str()); } - RC rc = handler->delete_entry((const char *)&i, &rid); + rc = handler->delete_entry((const char *)&i, &rid); ASSERT_EQ(true, handler->validate_tree()); ASSERT_EQ(RC::SUCCESS, rc); @@ -261,7 +277,7 @@ void test_delete() } else { LOG_INFO("Begin to delete entry of index, rid: %s", rid.to_string().c_str()); } - RC rc = handler->delete_entry((const char *)&i, &rid); + rc = handler->delete_entry((const char *)&i, &rid); ASSERT_EQ(true, handler->validate_tree()); ASSERT_EQ(RC::SUCCESS, rc); @@ -280,7 +296,7 @@ void test_delete() } else { LOG_INFO("Begin to insert entry of index, rid: %s", rid.to_string().c_str()); } - RC rc = handler->insert_entry((const char *)&i, &rid); + rc = handler->insert_entry((const char *)&i, &rid); int t = i % TIMES; if (t == 0 || t == 1 || t == 2) { ASSERT_EQ(RC::SUCCESS, rc); @@ -292,6 +308,385 @@ void test_delete() handler->print_tree(); } +TEST(test_bplus_tree, test_leaf_index_node_handle) +{ + LoggerFactory::init_default("test.log"); + + IndexFileHeader index_file_header; + index_file_header.root_page = BP_INVALID_PAGE_NUM; + index_file_header.internal_max_size = 5; + index_file_header.leaf_max_size = 5; + index_file_header.attr_length = 4; + index_file_header.key_length = 4 + sizeof(RID); + index_file_header.attr_type = INTS; + + Frame frame; + frame.dirty = false; + frame.pin_count = 0; + frame.acc_time = 0; + frame.file_desc = 0; + frame.page.page_num = 100; + + BPPageHandle page_handle; + page_handle.open = true; + page_handle.frame = &frame; + + KeyComparator key_comparator; + key_comparator.init(INTS, 4); + + LeafIndexNodeHandler leaf_node(index_file_header, page_handle); + leaf_node.init_empty(); + ASSERT_EQ(0, leaf_node.size()); + + bool found; + int index; + char key_mem[4 + sizeof(RID)]; + int &key = *(int *)key_mem; + RID &rid = *(RID *)(key_mem + 4); + rid.page_num = 0; + rid.slot_num = 0; + for (int i = 0; i < 5; i++) { + key = i * 2 + 1; + index = leaf_node.lookup(key_comparator, key_mem, &found); + ASSERT_EQ(false, found); + leaf_node.insert(index, (const char *)&key, (const char *)&rid); + } + + ASSERT_EQ(5, leaf_node.size()); + + for (int i = 0; i < 5; i++) { + key = i * 2; + index = leaf_node.lookup(key_comparator, key_mem, &found); + ASSERT_EQ(false, found); + ASSERT_EQ(i, index); + } + + key = 12; + index = leaf_node.lookup(key_comparator, key_mem, &found); + ASSERT_EQ(false, found); + ASSERT_EQ(5, index); + + for (int i = 0; i < 5; i++) { + key = i * 2 + 1; + index = leaf_node.lookup(key_comparator, key_mem, &found); + if (!found || i != index) { + printf("found=%d, index=%d, key=%d", found, index, key); + } + ASSERT_EQ(true, found); + ASSERT_EQ(i, index); + } +} +TEST(test_bplus_tree, test_internal_index_node_handle) +{ + LoggerFactory::init_default("test.log"); + + IndexFileHeader index_file_header; + index_file_header.root_page = BP_INVALID_PAGE_NUM; + index_file_header.internal_max_size = 5; + index_file_header.leaf_max_size = 5; + index_file_header.attr_length = 4; + index_file_header.key_length = 4 + sizeof(RID); + index_file_header.attr_type = INTS; + + Frame frame; + frame.dirty = false; + frame.pin_count = 0; + frame.acc_time = 0; + frame.file_desc = 0; + frame.page.page_num = 100; + + 
BPPageHandle page_handle; + page_handle.open = true; + page_handle.frame = &frame; + + KeyComparator key_comparator; + key_comparator.init(INTS, 4); + + InternalIndexNodeHandler internal_node(index_file_header, page_handle); + internal_node.init_empty(); + ASSERT_EQ(0, internal_node.size()); + + bool found; + int index; + int insert_position; + char key_mem[4 + sizeof(RID)]; + int &key = *(int *)key_mem; + RID &rid = *(RID *)(key_mem + 4); + rid.page_num = 0; + rid.slot_num = 0; + + key = 3; + internal_node.create_new_root(1, key_mem, key); + for (int i = 2; i < 5; i++) { + key = i * 2 + 1; + internal_node.insert((const char *)&key, (PageNum)key, key_comparator); + } + + ASSERT_EQ(5, internal_node.size()); + + for (int i = 1; i < 5; i++) { + key = i * 2 + 1; + int real_key = *(int*)internal_node.key_at(i); + ASSERT_EQ(key, real_key); + } + + key = 0; + index = internal_node.lookup(key_comparator, key_mem, &found, &insert_position); + ASSERT_EQ(false, found); + ASSERT_EQ(0, index); + ASSERT_EQ(1, insert_position); + + key = 2; + index = internal_node.lookup(key_comparator, key_mem, &found, &insert_position); + ASSERT_EQ(false, found); + ASSERT_EQ(0, index); + ASSERT_EQ(1, insert_position); + + key = 4; + index = internal_node.lookup(key_comparator, key_mem, &found, &insert_position); + ASSERT_EQ(false, found); + ASSERT_EQ(1, index); + ASSERT_EQ(2, insert_position); + + key = 8; + index = internal_node.lookup(key_comparator, key_mem, &found, &insert_position); + ASSERT_EQ(false, found); + ASSERT_EQ(3, index); + ASSERT_EQ(4, insert_position); + + key = 10; + index = internal_node.lookup(key_comparator, key_mem, &found, &insert_position); + ASSERT_EQ(false, found); + ASSERT_EQ(4, index); + ASSERT_EQ(5, insert_position); + + key = 12; + index = internal_node.lookup(key_comparator, key_mem, &found); + ASSERT_EQ(false, found); + ASSERT_EQ(4, index); + ASSERT_EQ(5, insert_position); + + for (int i = 1; i < 5; i++) { + key = i * 2 + 1; + index = internal_node.lookup(key_comparator, key_mem, &found); + if (!found || i != index) { + printf("found=%d, index=%d, key=%d", found, index, key); + } + ASSERT_EQ(true, found); + ASSERT_EQ(i, index); + } +} +TEST(test_bplus_tree, test_scanner) +{ + LoggerFactory::init_default("test.log"); + + DiskBufferPool::set_pool_num(POOL_NUM); + + const char *index_name = "scanner.btree"; + ::remove(index_name); + handler = new BplusTreeHandler(); + handler->create(index_name, INTS, sizeof(int), ORDER, ORDER); + + int count = 0; + RC rc = RC::SUCCESS; + RID rid; + for (int i = 0; i < 100; i++) { + int key = i * 2 + 1; + rid.page_num = 0; + rid.slot_num = key; + rc = handler->insert_entry((const char *)&key, &rid); + ASSERT_EQ(RC::SUCCESS, rc); + } + + handler->print_tree(); + + BplusTreeScanner scanner(*handler); + + int begin = -100; + int end = -20; + rc = scanner.open((const char *)&begin, false, (const char *)&end, false); + ASSERT_EQ(RC::SUCCESS, rc); + + rc = scanner.next_entry(&rid); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); + + begin = -100; + end = 1; + rc = scanner.open((const char *)&begin, false, (const char *)&end, false); + ASSERT_EQ(RC::SUCCESS, rc); + rc = scanner.next_entry(&rid); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); + + begin = -100; + end = 1; + rc = scanner.open((const char *)&begin, false, (const char *)&end, true/*inclusive*/); + ASSERT_EQ(RC::SUCCESS, rc); + rc = scanner.next_entry(&rid); + ASSERT_EQ(RC::SUCCESS, rc); + rc = scanner.next_entry(&rid); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); + + begin = 1; + 
end = 3; + rc = scanner.open((const char *)&begin, false, (const char *)&end, false/*inclusive*/); + ASSERT_EQ(RC::SUCCESS, rc); + rc = scanner.next_entry(&rid); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); + + begin = 1; + end = 3; + rc = scanner.open((const char *)&begin, true, (const char *)&end, true/*inclusive*/); + ASSERT_EQ(RC::SUCCESS, rc); + while ((rc = scanner.next_entry(&rid)) == RC::SUCCESS) { + count++; + } + ASSERT_EQ(2, count); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); + + begin = 0; + end = 3; + rc = scanner.open((const char *)&begin, true, (const char *)&end, true/*inclusive*/); + ASSERT_EQ(RC::SUCCESS, rc); + count = 0; + while ((rc = scanner.next_entry(&rid)) == RC::SUCCESS) { + count++; + } + ASSERT_EQ(2, count); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); + + begin = 11; + end = 21; + rc = scanner.open((const char *)&begin, true, (const char *)&end, true/*inclusive*/); + ASSERT_EQ(RC::SUCCESS, rc); + count = 0; + while ((rc = scanner.next_entry(&rid)) == RC::SUCCESS) { + count++; + } + ASSERT_EQ((end - begin) / 2 + 1, count); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); + + begin = 11; + end = 91; + rc = scanner.open((const char *)&begin, true, (const char *)&end, true/*inclusive*/); + ASSERT_EQ(RC::SUCCESS, rc); + count = 0; + while ((rc = scanner.next_entry(&rid)) == RC::SUCCESS) { + count++; + } + ASSERT_EQ((end - begin) / 2 + 1, count); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); + + begin = 191; + end = 199; + rc = scanner.open((const char *)&begin, true, (const char *)&end, true/*inclusive*/); + ASSERT_EQ(RC::SUCCESS, rc); + count = 0; + while ((rc = scanner.next_entry(&rid)) == RC::SUCCESS) { + count++; + } + ASSERT_EQ((end - begin) / 2 + 1, count); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); + + begin = 191; + end = 201; + rc = scanner.open((const char *)&begin, true, (const char *)&end, true/*inclusive*/); + ASSERT_EQ(RC::SUCCESS, rc); + count = 0; + while ((rc = scanner.next_entry(&rid)) == RC::SUCCESS) { + count++; + } + ASSERT_EQ(5, count); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); + + begin = 200; + end = 301; + rc = scanner.open((const char *)&begin, true, (const char *)&end, true/*inclusive*/); + ASSERT_EQ(RC::SUCCESS, rc); + rc = scanner.next_entry(&rid); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); + + begin = 300; + end = 201; + rc = scanner.open((const char *)&begin, true, (const char *)&end, true/*inclusive*/); + ASSERT_EQ(RC::INVALID_ARGUMENT, rc); + + scanner.close(); + + begin = 300; + end = 201; + rc = scanner.open(nullptr, true, (const char *)&end, true/*inclusive*/); + ASSERT_EQ(RC::SUCCESS, rc); + count = 0; + while ((rc = scanner.next_entry(&rid)) == RC::SUCCESS) { + count++; + } + ASSERT_EQ(100, count); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); + + begin = 300; + end = 10; + rc = scanner.open(nullptr, true, (const char *)&end, true/*inclusive*/); + ASSERT_EQ(RC::SUCCESS, rc); + count = 0; + while ((rc = scanner.next_entry(&rid)) == RC::SUCCESS) { + count++; + } + ASSERT_EQ(5, count); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); + + begin = 190; + end = 10; + rc = scanner.open((const char *)&begin, true, nullptr, true/*inclusive*/); + ASSERT_EQ(RC::SUCCESS, rc); + count = 0; + while ((rc = scanner.next_entry(&rid)) == RC::SUCCESS) { + count++; + } + ASSERT_EQ(5, count); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); + + begin = 190; + end = 10; + rc = scanner.open(nullptr, true, nullptr, true/*inclusive*/); + ASSERT_EQ(RC::SUCCESS, 
rc); + count = 0; + while ((rc = scanner.next_entry(&rid)) == RC::SUCCESS) { + count++; + } + ASSERT_EQ(100, count); + ASSERT_EQ(RC::RECORD_EOF, rc); + + scanner.close(); +} + TEST(test_bplus_tree, test_bplus_tree_insert) { @@ -301,10 +696,7 @@ TEST(test_bplus_tree, test_bplus_tree_insert) ::remove(index_name); handler = new BplusTreeHandler(); - handler->create(index_name, INTS, sizeof(int)); - - BplusTreeTester bplus_tree_tester(*handler); - bplus_tree_tester.set_order(ORDER); + handler->create(index_name, INTS, sizeof(int), ORDER, ORDER); test_insert(); @@ -329,4 +721,4 @@ int main(int argc, char **argv) int rc = RUN_ALL_TESTS(); return rc; -} \ No newline at end of file +} diff --git a/unitest/md5_test.cpp b/unitest/md5_test.cpp index 172080c5346b60ca9405ad5498a118f042201496..92ea2f5a1d4d8671ffc31fa530d7a42b7f2cd33b 100644 --- a/unitest/md5_test.cpp +++ b/unitest/md5_test.cpp @@ -19,15 +19,18 @@ See the Mulan PSL v2 for more details. */ using namespace common; -Md5Test::Md5Test() { +Md5Test::Md5Test() +{ // Auto-generated constructor stub } -Md5Test::~Md5Test() { +Md5Test::~Md5Test() +{ // Auto-generated destructor stub } -void Md5Test::string() { +void Md5Test::string() +{ char buf[512] = "/home/fastdfs/longda"; unsigned char digest[16] = {0}; MD5String(buf, digest); @@ -36,7 +39,8 @@ void Md5Test::string() { } } -int main(int argc, char **argv) { +int main(int argc, char **argv) +{ Md5Test test; test.string(); diff --git a/unitest/path_test.cpp b/unitest/path_test.cpp index a6fe0d220e5b6d2bbf64984e085960e873f21ebe..d2d6b3d16f67145296bd69dba49eb282d40e5224 100644 --- a/unitest/path_test.cpp +++ b/unitest/path_test.cpp @@ -12,4 +12,7 @@ See the Mulan PSL v2 for more details. */ // Created by Longda on 2021 // -int main(int argc, char **argv) { return 0; } \ No newline at end of file +int main(int argc, char **argv) +{ + return 0; +} \ No newline at end of file diff --git a/unitest/pidfile_test.cpp b/unitest/pidfile_test.cpp index 7a22ac3bd825f9bba99b90ababcf7cd7ffae0ca6..9750dcd1d54016f311b8c12c508bf6883e42a3c0 100644 --- a/unitest/pidfile_test.cpp +++ b/unitest/pidfile_test.cpp @@ -21,7 +21,8 @@ See the Mulan PSL v2 for more details. */ using namespace common; -int main() { +int main() +{ long long pid = (long long)getpid(); const char *programName = "test"; diff --git a/unitest/rc_test.cpp b/unitest/rc_test.cpp index 5bdf2de5b86d694745b01e58b4529ae1537293aa..e992d94b17ae31f9029e6ff0ffcc0f44876ae016 100644 --- a/unitest/rc_test.cpp +++ b/unitest/rc_test.cpp @@ -14,7 +14,8 @@ See the Mulan PSL v2 for more details. */ #include "rc.h" #include -int main(int argc, char **argv) { +int main(int argc, char **argv) +{ - std::cout << rc2SimpleStr(status) << std::endl ; + std::cout << rc2SimpleStr(status) << std::endl; } \ No newline at end of file
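For readers skimming the patch, here is a minimal usage sketch (not part of the diff) of the range-scan pattern that the new test_scanner case exercises. It uses only the BplusTreeHandler/BplusTreeScanner calls that appear in the test above; the count_range helper, the include paths, and the error handling are illustrative assumptions, not a documented API.

#include "storage/index/bplus_tree.h"  // include path as updated by this patch
#include "rc.h"

// Hypothetical helper for illustration: count entries whose int key lies in [low, high].
// Mirrors the open / next_entry / close sequence used in test_scanner.
int count_range(BplusTreeHandler &handler, int low, int high)
{
  BplusTreeScanner scanner(handler);

  // open(begin_key, begin_inclusive, end_key, end_inclusive); nullptr means unbounded on that side
  RC rc = scanner.open((const char *)&low, true /*inclusive*/, (const char *)&high, true /*inclusive*/);
  if (rc != RC::SUCCESS) {
    return -1;  // e.g. RC::INVALID_ARGUMENT when low > high, as asserted in the test
  }

  int count = 0;
  RID rid;
  while ((rc = scanner.next_entry(&rid)) == RC::SUCCESS) {
    count++;  // rid identifies the record that carries the current key
  }
  // RC::RECORD_EOF is the normal end-of-range result
  scanner.close();
  return count;
}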