Commit 7184778c — taosdata/TDengine
Authored on Feb 21, 2022 by Hongze Cheng
Parent: 7248dc68

    more page cache

Showing 3 changed files with 698 additions and 732 deletions (+698 -732):
  source/libs/tdb/src/sqlite/pcache.c         +300 -274
  source/libs/tdb/src/sqlite/pcache1.c        +391 -458
  source/libs/tdb/src/sqliteinc/sqliteInt.h     +7 -0
source/libs/tdb/src/sqlite/pcache.c
...
...
@@ -48,8 +48,8 @@ struct PCache {
  int szExtra;                        /* Size of extra space for each page */
  u8 bPurgeable;                      /* True if pages are on backing store */
  u8 eCreate;                         /* eCreate value for for xFetch() */
  int (*xStress)(void*,PgHdr*);       /* Call to try make a page clean */
  void *pStress;                      /* Argument to xStress */
  sqlite3_pcache *pCache;             /* Pluggable cache module */
};
...
...
@@ -63,39 +63,36 @@
 ** is displayed for many operations, resulting in a lot of output.
 */
 #if defined(SQLITE_DEBUG) && 0
  int sqlite3PcacheTrace = 2;       /* 0: off  1: simple  2: cache dumps */
  int sqlite3PcacheMxDump = 9999;   /* Max cache entries for pcacheDump() */
 # define pcacheTrace(X) if(sqlite3PcacheTrace){sqlite3DebugPrintf X;}
  void pcacheDump(PCache *pCache){
    int N;
    int i, j;
    sqlite3_pcache_page *pLower;
    PgHdr *pPg;
    unsigned char *a;

    if( sqlite3PcacheTrace<2 ) return;
    if( pCache->pCache==0 ) return;
    N = sqlite3PcachePagecount(pCache);
    if( N>sqlite3PcacheMxDump ) N = sqlite3PcacheMxDump;
    for(i=1; i<=N; i++){
      pLower = pcache2.xFetch(pCache->pCache, i, 0);
      if( pLower==0 ) continue;
      pPg = (PgHdr*)pLower->pExtra;
      printf("%3d: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags);
      a = (unsigned char*)pLower->pBuf;
      for(j=0; j<12; j++) printf("%02x", a[j]);
      printf("\n");
      if( pPg->pPage==0 ){
        pcache2.xUnpin(pCache->pCache, pLower, 0);
      }
    }
  }
 #else
 # define pcacheTrace(X)
 # define pcacheDump(X)
 #endif
/*
...
...
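The pcacheTrace()/pcacheDump() hunk above gates all tracing on a compile-time guard plus a global level. A minimal, standalone sketch of the same pattern follows (it is not part of the commit; the names traceLevel and TRACE are illustrative only):

#include <stdio.h>

static int traceLevel = 2;                 /* 0: off  1: simple  2: dumps */

#define TRACE(X) do { if (traceLevel) printf X; } while (0)

int main(void) {
  TRACE(("%s %d\n", "FETCH", 7));          /* double parens pass a full printf arg list */
  traceLevel = 0;
  TRACE(("never printed\n"));              /* gated off at runtime */
  return 0;
}

The double-parenthesis call site is what lets a single macro parameter carry a whole variadic argument list, which is exactly how pcacheTrace(("...", ...)) is invoked throughout this file.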
@@ -108,20 +105,20 @@ void pcacheDump(PCache *pCache) {
 ** assert( sqlite3PcachePageSanity(pPg) );
 */
 #ifdef SQLITE_DEBUG
 int sqlite3PcachePageSanity(PgHdr *pPg){
   PCache *pCache;
   assert( pPg!=0 );
   assert( pPg->pgno>0 || pPg->pPager==0 );   /* Page number is 1 or more */
   pCache = pPg->pCache;
   assert( pCache!=0 );                       /* Every page has an associated PCache */
   if( pPg->flags & PGHDR_CLEAN ){
     assert( (pPg->flags & PGHDR_DIRTY)==0 ); /* Cannot be both CLEAN and DIRTY */
     assert( pCache->pDirty!=pPg );           /* CLEAN pages not on dirty list */
     assert( pCache->pDirtyTail!=pPg );
   }
   /* WRITEABLE pages must also be DIRTY */
   if( pPg->flags & PGHDR_WRITEABLE ){
     assert( pPg->flags & PGHDR_DIRTY );      /* WRITEABLE implies DIRTY */
   }
   /* NEED_SYNC can be set independently of WRITEABLE.  This can happen,
   ** for example, when using the sqlite3PagerDontWrite() optimization:
...
...
@@ -144,6 +141,7 @@ int sqlite3PcachePageSanity(PgHdr *pPg) {
 }
 #endif /* SQLITE_DEBUG */

 /********************************** Linked List Management ********************/

 /* Allowed values for second argument to pcacheManageDirtyList() */
...
...
@@ -157,51 +155,53 @@ int sqlite3PcachePageSanity(PgHdr *pPg) {
 ** remove pPage from the dirty list.  The 0x02 means add pPage back to
 ** the dirty list.  Doing both moves pPage to the front of the dirty list.
 */
 static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove){
   PCache *p = pPage->pCache;

   pcacheTrace(("%p.DIRTYLIST.%s %d\n", p,
                addRemove==1 ? "REMOVE" : addRemove==2 ? "ADD" : "FRONT",
                pPage->pgno));
   if( addRemove & PCACHE_DIRTYLIST_REMOVE ){
     assert( pPage->pDirtyNext || pPage==p->pDirtyTail );
     assert( pPage->pDirtyPrev || pPage==p->pDirty );

     /* Update the PCache1.pSynced variable if necessary. */
     if( p->pSynced==pPage ){
       p->pSynced = pPage->pDirtyPrev;
     }

     if( pPage->pDirtyNext ){
       pPage->pDirtyNext->pDirtyPrev = pPage->pDirtyPrev;
     }else{
       assert( pPage==p->pDirtyTail );
       p->pDirtyTail = pPage->pDirtyPrev;
     }
     if( pPage->pDirtyPrev ){
       pPage->pDirtyPrev->pDirtyNext = pPage->pDirtyNext;
     }else{
       /* If there are now no dirty pages in the cache, set eCreate to 2.
       ** This is an optimization that allows sqlite3PcacheFetch() to skip
       ** searching for a dirty page to eject from the cache when it might
       ** otherwise have to.  */
       assert( pPage==p->pDirty );
       p->pDirty = pPage->pDirtyNext;
       assert( p->bPurgeable || p->eCreate==2 );
       if( p->pDirty==0 ){               /*OPTIMIZATION-IF-TRUE*/
         assert( p->bPurgeable==0 || p->eCreate==1 );
         p->eCreate = 2;
       }
     }
   }
   if( addRemove & PCACHE_DIRTYLIST_ADD ){
     pPage->pDirtyPrev = 0;
     pPage->pDirtyNext = p->pDirty;
     if( pPage->pDirtyNext ){
       assert( pPage->pDirtyNext->pDirtyPrev==0 );
       pPage->pDirtyNext->pDirtyPrev = pPage;
     }else{
       p->pDirtyTail = pPage;
       if( p->bPurgeable ){
         assert( p->eCreate==2 );
         p->eCreate = 1;
       }
     }
...
...
@@ -212,8 +212,9 @@ static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove) {
 ** optimization, as if pSynced points to a page with the NEED_SYNC
 ** flag set sqlite3PcacheFetchStress() searches through all newer
 ** entries of the dirty-list for a page with NEED_SYNC clear anyway. */
     if( !p->pSynced
      && 0==(pPage->flags&PGHDR_NEED_SYNC)   /*OPTIMIZATION-IF-FALSE*/
     ){
       p->pSynced = pPage;
     }
   }
...
...
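pcacheManageDirtyList() above is doubly linked list surgery on the dirty list with head (pDirty) and tail (pDirtyTail) pointers. A compact, self-contained sketch of the same add-to-front/remove operations, with made-up field names standing in for the PgHdr/PCache members:

#include <assert.h>
#include <stdio.h>

typedef struct Pg Pg;
struct Pg { int pgno; Pg *pDirtyNext, *pDirtyPrev; };       /* simplified page node */
typedef struct { Pg *pDirty, *pDirtyTail; } Cache;

static void dirtyAdd(Cache *c, Pg *p) {      /* push onto the front of the dirty list */
  p->pDirtyPrev = 0;
  p->pDirtyNext = c->pDirty;
  if (p->pDirtyNext) p->pDirtyNext->pDirtyPrev = p; else c->pDirtyTail = p;
  c->pDirty = p;
}

static void dirtyRemove(Cache *c, Pg *p) {   /* unlink from anywhere in the list */
  if (p->pDirtyNext) p->pDirtyNext->pDirtyPrev = p->pDirtyPrev;
  else { assert(p == c->pDirtyTail); c->pDirtyTail = p->pDirtyPrev; }
  if (p->pDirtyPrev) p->pDirtyPrev->pDirtyNext = p->pDirtyNext;
  else { assert(p == c->pDirty); c->pDirty = p->pDirtyNext; }
}

int main(void) {
  Cache c = {0, 0};
  Pg a = {1, 0, 0}, b = {2, 0, 0};
  dirtyAdd(&c, &a); dirtyAdd(&c, &b);        /* list: 2 -> 1 */
  dirtyRemove(&c, &a);                       /* list: 2 */
  printf("head=%d tail=%d\n", c.pDirty->pgno, c.pDirtyTail->pgno);
  return 0;
}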
@@ -224,8 +225,8 @@ static void pcacheManageDirtyList(PgHdr *pPage, u8 addRemove) {
 ** Wrapper around the pluggable caches xUnpin method.  If the cache is
 ** being used for an in-memory database, this function is a no-op.
 */
 static void pcacheUnpin(PgHdr *p){
   if( p->pCache->bPurgeable ){
     pcacheTrace(("%p.UNPIN %d\n", p->pCache, p->pgno));
     pcache2.xUnpin(p->pCache->pCache, p->pPage, 0);
     pcacheDump(p->pCache);
...
...
@@ -236,19 +237,19 @@ static void pcacheUnpin(PgHdr *p) {
 ** Compute the number of pages of cache requested.   p->szCache is the
 ** cache size requested by the "PRAGMA cache_size" statement.
 */
 static int numberOfCachePages(PCache *p){
   if( p->szCache>=0 ){
     /* IMPLEMENTATION-OF: R-42059-47211 If the argument N is positive then the
     ** suggested cache size is set to N. */
     return p->szCache;
   }else{
     i64 n;
     /* IMPLEMANTATION-OF: R-59858-46238 If the argument N is negative, then the
     ** number of cache pages is adjusted to be a number of pages that would
     ** use approximately abs(N*1024) bytes of memory based on the current
     ** page size. */
     n = ((-1024*(i64)p->szCache)/(p->szPage+p->szExtra));
     if( n>1000000000 ) n = 1000000000;
     return (int)n;
   }
 }
...
...
@@ -258,9 +259,11 @@ static int numberOfCachePages(PCache *p) {
 ** Initialize and shutdown the page cache subsystem. Neither of these
 ** functions are threadsafe.
 */
 int sqlite3PcacheInitialize(void){
   return pcache2.xInit(pcache2.pArg);
 }
 void sqlite3PcacheShutdown(void){
   if( pcache2.xShutdown ){
     /* IMPLEMENTATION-OF: R-26000-56589 The xShutdown() method may be NULL. */
     pcache2.xShutdown(pcache2.pArg);
   }
...
...
@@ -269,7 +272,7 @@ void sqlite3PcacheShutdown(void) {
 /*
 ** Return the size in bytes of a PCache object.
 */
 int sqlite3PcacheSize(void){ return sizeof(PCache); }

 /*
 ** Create a new PCache object.  Storage space to hold the object
...
...
@@ -283,24 +286,25 @@ int sqlite3PcacheSize(void) { return sizeof(PCache); }
 ** to this module, the extra space really ends up being the MemPage
 ** structure in the pager.
 */
 int sqlite3PcacheOpen(
   int szPage,                   /* Size of every page */
   int szExtra,                  /* Extra space associated with each page */
   int bPurgeable,               /* True if pages are on backing store */
   int (*xStress)(void*,PgHdr*), /* Call to try to make pages clean */
   void *pStress,                /* Argument to xStress */
   PCache *p                     /* Preallocated space for the PCache */
 ){
   memset(p, 0, sizeof(PCache));
   p->szPage = 1;
   p->szExtra = szExtra;
   assert( szExtra>=8 );  /* First 8 bytes will be zeroed */
   p->bPurgeable = bPurgeable;
   p->eCreate = 2;
   p->xStress = xStress;
   p->pStress = pStress;
   p->szCache = 100;
   p->szSpill = 1;

   pcacheTrace(("%p.OPEN szPage %d bPurgeable %d\n", p, szPage, bPurgeable));
   return sqlite3PcacheSetPageSize(p, szPage);
 }
...
...
@@ -308,21 +312,24 @@ int sqlite3PcacheOpen(int szPage, /* Size of every page */
 ** Change the page size for PCache object. The caller must ensure that there
 ** are no outstanding page references when this function is called.
 */
 int sqlite3PcacheSetPageSize(PCache *pCache, int szPage){
   assert( pCache->nRefSum==0 && pCache->pDirty==0 );
   if( pCache->szPage ){
     sqlite3_pcache *pNew;
     pNew = pcache2.xCreate(szPage, pCache->szExtra + ROUND8(sizeof(PgHdr)),
                            pCache->bPurgeable);
     if( pNew==0 ) return SQLITE_NOMEM_BKPT;
     pcache2.xCachesize(pNew, numberOfCachePages(pCache));
     if( pCache->pCache ){
       pcache2.xDestroy(pCache->pCache);
     }
     pCache->pCache = pNew;
     pCache->szPage = szPage;
     pcacheTrace(("%p.PAGESIZE %d\n", pCache, szPage));
   }
-  return SQLITE_OK;
+  return 0;
 }
/*
...
...
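The negative-N branch of numberOfCachePages() above turns "PRAGMA cache_size = -N" into a page count. A worked, standalone example of the same arithmetic (the sizes below are invented for illustration):

#include <stdio.h>
typedef long long i64;

/* Same computation as numberOfCachePages(): a negative request means
** "use roughly |N|*1024 bytes", so divide by the per-page memory cost. */
static int pagesFor(int szCache, int szPage, int szExtra) {
  if (szCache >= 0) return szCache;              /* positive N is taken as-is */
  i64 n = (-1024 * (i64)szCache) / (szPage + szExtra);
  if (n > 1000000000) n = 1000000000;            /* clamp, as the real code does */
  return (int)n;
}

int main(void) {
  /* cache_size = -2000 with 4096-byte pages and 128 bytes of extra space:
  ** 2,048,000 / 4224 = 484 pages. */
  printf("%d\n", pagesFor(-2000, 4096, 128));
  printf("%d\n", pagesFor(100, 4096, 128));
  return 0;
}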
@@ -349,17 +356,18 @@ int sqlite3PcacheSetPageSize(PCache *pCache, int szPage) {
 ** the stack on entry and pop them back off on exit, which saves a
 ** lot of pushing and popping.
 */
 sqlite3_pcache_page *sqlite3PcacheFetch(
   PCache *pCache,       /* Obtain the page from this cache */
   Pgno pgno,            /* Page number to obtain */
   int createFlag        /* If true, create page if it does not exist already */
 ){
   int eCreate;
   sqlite3_pcache_page *pRes;

   assert( pCache!=0 );
   assert( pCache->pCache!=0 );
   assert( createFlag==3 || createFlag==0 );
   assert( pCache->eCreate==((pCache->bPurgeable && pCache->pDirty) ? 1 : 2) );

   /* eCreate defines what to do if the page does not exist.
   **    0     Do not allocate a new page.  (createFlag==0)
...
...
@@ -369,11 +377,12 @@ sqlite3_pcache_page *sqlite3PcacheFetch(PCache *pCache, /* Obtain the page fr
   **           (createFlag==1 AND !(bPurgeable AND pDirty)
   */
   eCreate = createFlag & pCache->eCreate;
   assert( eCreate==0 || eCreate==1 || eCreate==2 );
   assert( createFlag==0 || pCache->eCreate==eCreate );
   assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) );
   pRes = pcache2.xFetch(pCache->pCache, pgno, eCreate);
   pcacheTrace(("%p.FETCH %d%s (result: %p)\n", pCache, pgno,
                createFlag ? " create" : "", pRes));
   return pRes;
 }
...
...
@@ -388,14 +397,15 @@ sqlite3_pcache_page *sqlite3PcacheFetch(PCache *pCache, /* Obtain the page fr
 **
 ** This routine should be invoked only after sqlite3PcacheFetch() fails.
 */
 int sqlite3PcacheFetchStress(
   PCache *pCache,                 /* Obtain the page from this cache */
   Pgno pgno,                      /* Page number to obtain */
   sqlite3_pcache_page **ppPage    /* Write result here */
 ){
   PgHdr *pPg;
   if( pCache->eCreate==2 ) return 0;

   if( sqlite3PcachePagecount(pCache)>pCache->szSpill ){
     /* Find a dirty page to write-out and recycle.  First try to find a
     ** page that does not require a journal-sync (one with PGHDR_NEED_SYNC
     ** cleared), but if that is not possible settle for any other
...
...
@@ -405,29 +415,33 @@ int sqlite3PcacheFetchStress(PCache * pCache, /* Obtain the page fr
     ** flag is currently referenced, then the following may leave pSynced
     ** set incorrectly (pointing to other than the LRU page with NEED_SYNC
     ** cleared). This is Ok, as pSynced is just an optimization.  */
     for(pPg=pCache->pSynced;
         pPg && (pPg->nRef || (pPg->flags&PGHDR_NEED_SYNC));
         pPg=pPg->pDirtyPrev);
     pCache->pSynced = pPg;
     if( !pPg ){
       for(pPg=pCache->pDirtyTail; pPg && pPg->nRef; pPg=pPg->pDirtyPrev);
     }
     if( pPg ){
       int rc;
 #ifdef SQLITE_LOG_CACHE_SPILL
       sqlite3_log(SQLITE_FULL,
                   "spill page %d making room for %d - cache used: %d/%d",
                   pPg->pgno, pgno, pcache2.xPagecount(pCache->pCache),
                   numberOfCachePages(pCache));
 #endif
       pcacheTrace(("%p.SPILL %d\n", pCache, pPg->pgno));
       rc = pCache->xStress(pCache->pStress, pPg);
       pcacheDump(pCache);
-      if( rc!=SQLITE_OK && rc!=SQLITE_BUSY ){
+      if( rc!=0 && rc!=SQLITE_BUSY ){
         return rc;
       }
     }
   }
   *ppPage = pcache2.xFetch(pCache->pCache, pgno, 2);
-  return *ppPage==0 ? SQLITE_NOMEM_BKPT : SQLITE_OK;
+  return *ppPage==0 ? SQLITE_NOMEM_BKPT : 0;
 }
/*
...
...
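The line eCreate = createFlag & pCache->eCreate in sqlite3PcacheFetch() above relies on createFlag being 0 or 3 and pCache->eCreate being 1 or 2, so a single bitwise AND selects the xFetch() mode without branching. A standalone worked example of that truth table (not part of the commit):

#include <stdio.h>

int main(void) {
  int createFlags[] = {0, 3};          /* the only values the caller passes */
  int eCreates[] = {1, 2};             /* 1: purgeable cache with dirty pages, 2: otherwise */
  for (int i = 0; i < 2; i++)
    for (int j = 0; j < 2; j++)
      printf("createFlag=%d eCreate=%d -> xFetch mode %d\n",
             createFlags[i], eCreates[j], createFlags[i] & eCreates[j]);
  return 0;   /* prints modes 0,0,1,2: never allocate when createFlag==0 */
}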
@@ -440,15 +454,15 @@ int sqlite3PcacheFetchStress(PCache * pCache, /* Obtain the page fr
 ** case.
 */
 static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit(
   PCache *pCache,             /* Obtain the page from this cache */
   Pgno pgno,                  /* Page number obtained */
   sqlite3_pcache_page *pPage  /* Page obtained by prior PcacheFetch() call */
 ){
   PgHdr *pPgHdr;
   assert( pPage!=0 );
   pPgHdr = (PgHdr*)pPage->pExtra;
   assert( pPgHdr->pPage==0 );
   memset(&pPgHdr->pDirty, 0, sizeof(PgHdr)-offsetof(PgHdr,pDirty));
   pPgHdr->pPage = pPage;
   pPgHdr->pData = pPage->pBuf;
   pPgHdr->pExtra = (void *)&pPgHdr[1];
...
...
@@ -456,7 +470,7 @@ static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit(
   pPgHdr->pCache = pCache;
   pPgHdr->pgno = pgno;
   pPgHdr->flags = PGHDR_CLEAN;
   return sqlite3PcacheFetchFinish(pCache, pgno, pPage);
 }
...
...
@@ -465,21 +479,22 @@ static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit(
 ** must be called after sqlite3PcacheFetch() in order to get a usable
 ** result.
 */
 PgHdr *sqlite3PcacheFetchFinish(
   PCache *pCache,             /* Obtain the page from this cache */
   Pgno pgno,                  /* Page number obtained */
   sqlite3_pcache_page *pPage  /* Page obtained by prior PcacheFetch() call */
 ){
   PgHdr *pPgHdr;

   assert( pPage!=0 );
   pPgHdr = (PgHdr *)pPage->pExtra;

   if( !pPgHdr->pPage ){
     return pcacheFetchFinishWithInit(pCache, pgno, pPage);
   }
   pCache->nRefSum++;
   pPgHdr->nRef++;
   assert( sqlite3PcachePageSanity(pPgHdr) );
   return pPgHdr;
 }
...
...
@@ -487,13 +502,13 @@ PgHdr *sqlite3PcacheFetchFinish(PCache * pCache, /* Obtain the page
 ** Decrement the reference count on a page.  If the page is clean and the
 ** reference count drops to 0, then it is made eligible for recycling.
 */
 void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){
   assert( p->nRef>0 );
   p->pCache->nRefSum--;
   if( (--p->nRef)==0 ){
     if( p->flags&PGHDR_CLEAN ){
       pcacheUnpin(p);
     }else{
       pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
     }
   }
...
...
@@ -502,9 +517,9 @@ void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p) {
 /*
 ** Increase the reference count of a supplied page by 1.
 */
 void sqlite3PcacheRef(PgHdr *p){
   assert( p->nRef>0 );
   assert( sqlite3PcachePageSanity(p) );
   p->nRef++;
   p->pCache->nRefSum++;
 }
...
...
@@ -514,10 +529,10 @@ void sqlite3PcacheRef(PgHdr *p) {
 ** page.  This function deletes that reference, so after it returns the
 ** page pointed to by p is invalid.
 */
 void sqlite3PcacheDrop(PgHdr *p){
   assert( p->nRef==1 );
   assert( sqlite3PcachePageSanity(p) );
   if( p->flags&PGHDR_DIRTY ){
     pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
   }
   p->pCache->nRefSum--;
...
...
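sqlite3PcacheRelease()/Ref()/Drop() above keep two counters in step: the per-page nRef and the cache-wide nRefSum, and a page only becomes recyclable when its own count reaches zero. A tiny standalone model of that release path (field names here are simplified stand-ins, not the real structs):

#include <assert.h>
#include <stdio.h>

typedef struct { int nRef; int nRefSum; } Counts;

static void release(Counts *c) {
  assert(c->nRef > 0);
  c->nRefSum--;                        /* cache-wide total of all references */
  if (--c->nRef == 0)
    printf("page is now unpinned\n");  /* eligible for recycling */
}

int main(void) {
  Counts c = {2, 2};
  release(&c);                         /* still pinned: nRef == 1 */
  release(&c);                         /* prints the unpinned message */
  return 0;
}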
@@ -528,18 +543,18 @@ void sqlite3PcacheDrop(PgHdr *p) {
 ** Make sure the page is marked as dirty. If it isn't dirty already,
 ** make it so.
 */
 void sqlite3PcacheMakeDirty(PgHdr *p){
   assert( p->nRef>0 );
   assert( sqlite3PcachePageSanity(p) );
   if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){    /*OPTIMIZATION-IF-FALSE*/
     p->flags &= ~PGHDR_DONT_WRITE;
     if( p->flags & PGHDR_CLEAN ){
       p->flags ^= (PGHDR_DIRTY|PGHDR_CLEAN);
       pcacheTrace(("%p.DIRTY %d\n", p->pCache, p->pgno));
       assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY );
       pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD);
     }
     assert( sqlite3PcachePageSanity(p) );
   }
 }
...
...
@@ -547,16 +562,16 @@ void sqlite3PcacheMakeDirty(PgHdr *p) {
 ** Make sure the page is marked as clean. If it isn't clean already,
 ** make it so.
 */
 void sqlite3PcacheMakeClean(PgHdr *p){
   assert( sqlite3PcachePageSanity(p) );
   assert( (p->flags & PGHDR_DIRTY)!=0 );
   assert( (p->flags & PGHDR_CLEAN)==0 );
   pcacheManageDirtyList(p, PCACHE_DIRTYLIST_REMOVE);
   p->flags &= ~(PGHDR_DIRTY|PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
   p->flags |= PGHDR_CLEAN;
   pcacheTrace(("%p.CLEAN %d\n", p->pCache, p->pgno));
   assert( sqlite3PcachePageSanity(p) );
   if( p->nRef==0 ){
     pcacheUnpin(p);
   }
...
...
@@ -564,10 +579,10 @@ void sqlite3PcacheMakeClean(PgHdr *p) {
 /*
 ** Make every page in the cache clean.
 */
 void sqlite3PcacheCleanAll(PCache *pCache){
   PgHdr *p;
   pcacheTrace(("%p.CLEAN-ALL\n", pCache));
   while( (p = pCache->pDirty)!=0 ){
     sqlite3PcacheMakeClean(p);
   }
 }
...
...
@@ -575,11 +590,11 @@ void sqlite3PcacheCleanAll(PCache *pCache) {
 /*
 ** Clear the PGHDR_NEED_SYNC and PGHDR_WRITEABLE flag from all dirty pages.
 */
 void sqlite3PcacheClearWritable(PCache *pCache){
   PgHdr *p;

   pcacheTrace(("%p.CLEAR-WRITEABLE\n", pCache));
   for(p=pCache->pDirty; p; p=p->pDirtyNext){
     p->flags &= ~(PGHDR_NEED_SYNC|PGHDR_WRITEABLE);
   }
   pCache->pSynced = pCache->pDirtyTail;
 }
...
...
@@ -587,9 +602,9 @@ void sqlite3PcacheClearWritable(PCache *pCache) {
 /*
 ** Clear the PGHDR_NEED_SYNC flag from all dirty pages.
 */
 void sqlite3PcacheClearSyncFlags(PCache *pCache){
   PgHdr *p;
   for(p=pCache->pDirty; p; p=p->pDirtyNext){
     p->flags &= ~PGHDR_NEED_SYNC;
   }
   pCache->pSynced = pCache->pDirtyTail;
...
...
@@ -598,15 +613,15 @@ void sqlite3PcacheClearSyncFlags(PCache *pCache) {
 /*
 ** Change the page number of page p to newPgno.
 */
 void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){
   PCache *pCache = p->pCache;
   assert( p->nRef>0 );
   assert( newPgno>0 );
   assert( sqlite3PcachePageSanity(p) );
   pcacheTrace(("%p.MOVE %d -> %d\n", pCache, p->pgno, newPgno));
   pcache2.xRekey(pCache->pCache, p->pPage, p->pgno, newPgno);
   p->pgno = newPgno;
   if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){
     pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
   }
 }
...
...
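sqlite3PcacheMakeDirty() above flips a page from CLEAN to DIRTY with a single XOR against (PGHDR_DIRTY|PGHDR_CLEAN), since exactly one of the two bits is ever set. A standalone sketch of that bit trick with toy flag values (the real PGHDR_* constants live in the pager headers):

#include <assert.h>
#include <stdio.h>

#define FLAG_CLEAN 0x001   /* illustrative values, not the real PGHDR_* bits */
#define FLAG_DIRTY 0x002

int main(void) {
  unsigned flags = FLAG_CLEAN;
  flags ^= (FLAG_DIRTY | FLAG_CLEAN);           /* CLEAN -> DIRTY in one op */
  assert((flags & (FLAG_DIRTY | FLAG_CLEAN)) == FLAG_DIRTY);
  flags ^= (FLAG_DIRTY | FLAG_CLEAN);           /* DIRTY -> CLEAN again */
  printf("flags=0x%x\n", flags);
  return 0;
}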
@@ -620,72 +635,74 @@ void sqlite3PcacheMove(PgHdr *p, Pgno newPgno) {
 ** function is 0, then the data area associated with page 1 is zeroed, but
 ** the page object is not dropped.
 */
 void sqlite3PcacheTruncate(PCache *pCache, Pgno pgno){
   if( pCache->pCache ){
     PgHdr *p;
     PgHdr *pNext;
     pcacheTrace(("%p.TRUNCATE %d\n", pCache, pgno));
     for(p=pCache->pDirty; p; p=pNext){
       pNext = p->pDirtyNext;
       /* This routine never gets call with a positive pgno except right
       ** after sqlite3PcacheCleanAll().  So if there are dirty pages,
       ** it must be that pgno==0.
       */
       assert( p->pgno>0 );
       if( p->pgno>pgno ){
         assert( p->flags&PGHDR_DIRTY );
         sqlite3PcacheMakeClean(p);
       }
     }
     if( pgno==0 && pCache->nRefSum ){
       sqlite3_pcache_page *pPage1;
       pPage1 = pcache2.xFetch(pCache->pCache, 1, 0);
       if( ALWAYS(pPage1) ){  /* Page 1 is always available in cache, because
                              ** pCache->nRefSum>0 */
         memset(pPage1->pBuf, 0, pCache->szPage);
         pgno = 1;
       }
     }
     pcache2.xTruncate(pCache->pCache, pgno+1);
   }
 }

 /*
 ** Close a cache.
 */
 void sqlite3PcacheClose(PCache *pCache){
   assert( pCache->pCache!=0 );
   pcacheTrace(("%p.CLOSE\n", pCache));
   pcache2.xDestroy(pCache->pCache);
 }

 /*
 ** Discard the contents of the cache.
 */
 void sqlite3PcacheClear(PCache *pCache){
   sqlite3PcacheTruncate(pCache, 0);
 }

 /*
 ** Merge two lists of pages connected by pDirty and in pgno order.
 ** Do not bother fixing the pDirtyPrev pointers.
 */
 static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){
   PgHdr result, *pTail;
   pTail = &result;
   assert( pA!=0 && pB!=0 );
   for(;;){
     if( pA->pgno<pB->pgno ){
       pTail->pDirty = pA;
       pTail = pA;
       pA = pA->pDirty;
       if( pA==0 ){
         pTail->pDirty = pB;
         break;
       }
     }else{
       pTail->pDirty = pB;
       pTail = pB;
       pB = pB->pDirty;
       if( pB==0 ){
         pTail->pDirty = pA;
         break;
       }
...
...
@@ -705,24 +722,24 @@ static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB) {
 ** ever changes to make the previous sentence incorrect.
 */
 #define N_SORT_BUCKET  32
 static PgHdr *pcacheSortDirtyList(PgHdr *pIn){
   PgHdr *a[N_SORT_BUCKET], *p;
   int i;
   memset(a, 0, sizeof(a));
   while( pIn ){
     p = pIn;
     pIn = p->pDirty;
     p->pDirty = 0;
     for(i=0; ALWAYS(i<N_SORT_BUCKET-1); i++){
       if( a[i]==0 ){
         a[i] = p;
         break;
       }else{
         p = pcacheMergeDirtyList(a[i], p);
         a[i] = 0;
       }
     }
     if( NEVER(i==N_SORT_BUCKET-1) ){
       /* To get here, there need to be 2^(N_SORT_BUCKET) elements in
       ** the input list.  But that is impossible.
       */
...
...
@@ -730,8 +747,8 @@ static PgHdr *pcacheSortDirtyList(PgHdr *pIn) {
     }
   }
   p = a[0];
   for(i=1; i<N_SORT_BUCKET; i++){
     if( a[i]==0 ) continue;
     p = p ? pcacheMergeDirtyList(p, a[i]) : a[i];
   }
   return p;
...
...
@@ -740,9 +757,9 @@ static PgHdr *pcacheSortDirtyList(PgHdr *pIn) {
 /*
 ** Return a list of all dirty pages in the cache, sorted by page number.
 */
 PgHdr *sqlite3PcacheDirtyList(PCache *pCache){
   PgHdr *p;
   for(p=pCache->pDirty; p; p=p->pDirtyNext){
     p->pDirty = p->pDirtyNext;
   }
   return pcacheSortDirtyList(pCache->pDirty);
...
...
@@ -754,18 +771,22 @@ PgHdr *sqlite3PcacheDirtyList(PCache *pCache) {
 ** This is not the total number of pages referenced, but the sum of the
 ** reference count for all pages.
 */
 int sqlite3PcacheRefCount(PCache *pCache){
   return pCache->nRefSum;
 }

 /*
 ** Return the number of references to the page supplied as an argument.
 */
 int sqlite3PcachePageRefcount(PgHdr *p){
   return p->nRef;
 }

 /*
 ** Return the total number of pages in the cache.
 */
 int sqlite3PcachePagecount(PCache *pCache){
   assert( pCache->pCache!=0 );
   return pcache2.xPagecount(pCache->pCache);
 }
...
...
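pcacheMergeDirtyList()/pcacheSortDirtyList() above implement an iterative merge sort over a singly linked list using power-of-two buckets. A compact standalone sketch of the same scheme (integer keys instead of page headers; not part of the commit):

#include <stdio.h>
#include <string.h>

typedef struct Node Node;
struct Node { int key; Node *next; };

static Node *merge(Node *a, Node *b) {            /* merge two sorted lists */
  Node head, *tail = &head;
  while (a && b) {
    if (a->key < b->key) { tail->next = a; tail = a; a = a->next; }
    else                 { tail->next = b; tail = b; b = b->next; }
  }
  tail->next = a ? a : b;
  return head.next;
}

#define NBUCKET 32
static Node *sortList(Node *in) {                 /* bucketed, non-recursive merge sort */
  Node *a[NBUCKET], *p;
  int i;
  memset(a, 0, sizeof(a));
  while (in) {
    p = in; in = in->next; p->next = 0;
    for (i = 0; i < NBUCKET - 1; i++) {
      if (a[i] == 0) { a[i] = p; break; }         /* bucket i holds 2^i sorted nodes */
      p = merge(a[i], p); a[i] = 0;               /* carry into the next bucket */
    }
  }
  for (p = a[0], i = 1; i < NBUCKET; i++)
    if (a[i]) p = p ? merge(p, a[i]) : a[i];
  return p;
}

int main(void) {
  Node n3 = {3, 0}, n1 = {1, &n3}, n2 = {2, &n1};  /* unsorted input: 2, 1, 3 */
  for (Node *p = sortList(&n2); p; p = p->next) printf("%d ", p->key);
  printf("\n");
  return 0;
}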
@@ -773,16 +794,19 @@ int sqlite3PcachePagecount(PCache *pCache) {
 /*
 ** Get the suggested cache-size value.
 */
 int sqlite3PcacheGetCachesize(PCache *pCache){
   return numberOfCachePages(pCache);
 }
 #endif

 /*
 ** Set the suggested cache-size value.
 */
 void sqlite3PcacheSetCachesize(PCache *pCache, int mxPage){
   assert( pCache->pCache!=0 );
   pCache->szCache = mxPage;
   pcache2.xCachesize(pCache->pCache, numberOfCachePages(pCache));
 }
...
...
@@ -790,25 +814,25 @@ void sqlite3PcacheSetCachesize(PCache *pCache, int mxPage) {
 ** argument is zero.  Return the effective cache-spill size, which will
 ** be the larger of the szSpill and szCache.
 */
 int sqlite3PcacheSetSpillsize(PCache *p, int mxPage){
   int res;
   assert( p->pCache!=0 );
   if( mxPage ){
     if( mxPage<0 ){
       mxPage = (int)((-1024*(i64)mxPage)/(p->szPage+p->szExtra));
     }
     p->szSpill = mxPage;
   }
   res = numberOfCachePages(p);
   if( res<p->szSpill ) res = p->szSpill;
   return res;
 }

 /*
 ** Free up as much memory as possible from the page cache.
 */
 void sqlite3PcacheShrink(PCache *pCache){
   assert( pCache->pCache!=0 );
   pcache2.xShrink(pCache->pCache);
 }
...
...
@@ -816,17 +840,17 @@ void sqlite3PcacheShrink(PCache *pCache) {
 ** Return the size of the header added by this middleware layer
 ** in the page-cache hierarchy.
 */
 int sqlite3HeaderSizePcache(void){ return ROUND8(sizeof(PgHdr)); }

 /*
 ** Return the number of dirty pages currently in the cache, as a percentage
 ** of the configured cache size.
 */
 int sqlite3PCachePercentDirty(PCache *pCache){
   PgHdr *pDirty;
   int nDirty = 0;
   int nCache = numberOfCachePages(pCache);
   for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext) nDirty++;
   return nCache ? (int)(((i64)nDirty * 100) / nCache) : 0;
 }
...
...
@@ -834,7 +858,9 @@ int sqlite3PCachePercentDirty(PCache *pCache) {
 /*
 ** Return true if there are one or more dirty pages in the cache. Else false.
 */
 int sqlite3PCacheIsDirty(PCache *pCache){
   return (pCache->pDirty!=0);
 }
 #endif

 #if defined(SQLITE_CHECK_PAGES) || defined(SQLITE_DEBUG)
...
...
@@ -843,9 +869,9 @@ int sqlite3PCacheIsDirty(PCache *pCache) { return (pCache->pDirty != 0); }
 ** callback. This is only used if the SQLITE_CHECK_PAGES macro is
 ** defined.
 */
 void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHdr *)){
   PgHdr *pDirty;
   for(pDirty=pCache->pDirty; pDirty; pDirty=pDirty->pDirtyNext){
     xIter(pDirty);
   }
 }
...
...
source/libs/tdb/src/sqlite/pcache1.c
...
...
@@ -108,10 +108,10 @@ struct PgHdr1 {
   unsigned int iKey;             /* Key value (page number) */
   u16 isBulkLocal;               /* This page from bulk local storage */
   u16 isAnchor;                  /* This is the PGroup.lru element */
   PgHdr1 *pNext;                 /* Next in hash table chain */
   PCache1 *pCache;               /* Cache that currently owns this page */
   PgHdr1 *pLruNext;              /* Next in LRU list of unpinned pages */
   PgHdr1 *pLruPrev;              /* Previous in LRU list of unpinned pages */
                                  /* NB: pLruPrev is only valid if pLruNext!=0 */
 };
...
...
@@ -119,8 +119,8 @@ struct PgHdr1 {
 ** A page is pinned if it is not on the LRU list.  To be "pinned" means
 ** that the page is in active use and must not be deallocated.
 */
 #define PAGE_IS_PINNED(p)    ((p)->pLruNext == 0)
 #define PAGE_IS_UNPINNED(p)  ((p)->pLruNext != 0)

 /* Each page cache (or PCache) belongs to a PGroup.  A PGroup is a set
 ** of one or more PCaches that are able to recycle each other's unpinned
...
...
@@ -145,7 +145,7 @@ struct PgHdr1 {
 ** SQLITE_MUTEX_STATIC_LRU.
 */
 struct PGroup {
-  sqlite3_mutex *mutex;          /* MUTEX_STATIC_LRU or NULL */
+  pthread_mutex_t mutex;         /* MUTEX_STATIC_LRU or NULL */
   unsigned int nMaxPage;         /* Sum of nMax for purgeable caches */
   unsigned int nMinPage;         /* Sum of nMin for purgeable caches */
   unsigned int mxPinned;         /* nMaxpage + 10 - nMinPage */
...
...
@@ -168,7 +168,7 @@ struct PCache1 {
 ** modified at any time by a call to the pcache1Cachesize() method.
 ** The PGroup mutex must be held when accessing nMax.
 */
   PGroup *pGroup;                /* PGroup this cache belongs to */
   unsigned int *pnPurgeable;     /* Pointer to pGroup->nPurgeable */
   int szPage;                    /* Size of database content section */
   int szExtra;                   /* sizeof(MemPage)+sizeof(PgHdr) */
...
...
@@ -186,9 +186,9 @@ struct PCache1 {
   unsigned int nRecyclable;      /* Number of pages in the LRU list */
   unsigned int nPage;            /* Total number of pages in apHash */
   unsigned int nHash;            /* Number of slots in apHash[] */
   PgHdr1 **apHash;               /* Hash table for fast lookup by key */
   PgHdr1 *pFree;                 /* List of unused pcache-local pages */
   void *pBulk;                   /* Bulk memory used by pcache-local */
 };

 /*
...
...
@@ -199,22 +199,6 @@ struct PgFreeslot {
   PgFreeslot *pNext;             /* Next free slot */
 };

-sqlite3_pcache_methods2 pcache2 = {
-    1,                 /* iVersion */
-    0,                 /* pArg */
-    pcache1Init,       /* xInit */
-    pcache1Shutdown,   /* xShutdown */
-    pcache1Create,     /* xCreate */
-    pcache1Cachesize,  /* xCachesize */
-    pcache1Pagecount,  /* xPagecount */
-    pcache1Fetch,      /* xFetch */
-    pcache1Unpin,      /* xUnpin */
-    pcache1Rekey,      /* xRekey */
-    pcache1Truncate,   /* xTruncate */
-    pcache1Destroy,    /* xDestroy */
-    pcache1Shrink      /* xShrink */
-};

 /*
 ** Global data used by this cache.
 */
...
...
@@ -234,8 +218,8 @@ static struct PCacheGlobal {
   int nReserve;                  /* Try to keep nFreeSlot above this */
   void *pStart, *pEnd;           /* Bounds of global page cache memory */
   /* Above requires no mutex.  Use mutex below for variable that follow. */
-  sqlite3_mutex *mutex;          /* Mutex for accessing the following: */
+  pthread_mutex_t mutex;         /* Mutex for accessing the following: */
   PgFreeslot *pFree;             /* Free page blocks */
   int nFreeSlot;                 /* Number of unused pcache slots */
   /* The following value requires a mutex to change.  We skip the mutex on
   ** reading because (1) most platforms read a 32-bit integer atomically and
...
...
@@ -247,20 +231,19 @@ static struct PCacheGlobal {
 /*
 ** Macros to enter and leave the PCache LRU mutex.
 */
 #if !defined(SQLITE_ENABLE_MEMORY_MANAGEMENT) || SQLITE_THREADSAFE == 0
 # define pcache1EnterMutex(X)  assert((X)->mutex == 0)
 # define pcache1LeaveMutex(X)  assert((X)->mutex == 0)
 #define PCACHE1_MIGHT_USE_GROUP_MUTEX 0
 #else
 # define pcache1EnterMutex(X)  sqlite3_mutex_enter((X)->mutex)
 # define pcache1LeaveMutex(X)  sqlite3_mutex_leave((X)->mutex)
 #define PCACHE1_MIGHT_USE_GROUP_MUTEX 1
 #endif

 /******************************************************************************/
 /******** Page Allocation/SQLITE_CONFIG_PCACHE Related Functions **************/

 /*
 ** This function is called during initialization if a static buffer is
 ** supplied to use for the page-cache by passing the SQLITE_CONFIG_PAGECACHE
...
...
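The struct hunks above switch the PGroup and PCacheGlobal mutex fields from sqlite3_mutex pointers to plain pthread_mutex_t values. A standalone sketch of the direction this takes, guarding a shared counter with a pthread mutex (the names cacheGlobal, nFreeSlot and takeSlot are illustrative, not the real TDengine symbols; link with -lpthread):

#include <pthread.h>
#include <stdio.h>

static struct {
  pthread_mutex_t mutex;     /* protects the fields below */
  int nFreeSlot;
} cacheGlobal;

static void cacheGlobalInit(void) {
  pthread_mutex_init(&cacheGlobal.mutex, NULL);
  cacheGlobal.nFreeSlot = 4;
}

static void takeSlot(void) {
  pthread_mutex_lock(&cacheGlobal.mutex);
  cacheGlobal.nFreeSlot--;                 /* mutate shared state only under the lock */
  pthread_mutex_unlock(&cacheGlobal.mutex);
}

int main(void) {
  cacheGlobalInit();
  takeSlot();
  printf("nFreeSlot=%d\n", cacheGlobal.nFreeSlot);
  return 0;
}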
@@ -270,23 +253,23 @@ static struct PCacheGlobal {
 ** This routine is called from sqlite3_initialize() and so it is guaranteed
 ** to be serialized already.  There is no need for further mutexing.
 */
 void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n) {
   if (pcache1.isInit) {
     PgFreeslot *p;
     if (pBuf == 0) sz = n = 0;
     if (n == 0) sz = 0;
     sz = ROUNDDOWN8(sz);
     pcache1.szSlot = sz;
     pcache1.nSlot = pcache1.nFreeSlot = n;
     pcache1.nReserve = n > 90 ? 10 : (n / 10 + 1);
     pcache1.pStart = pBuf;
     pcache1.pFree = 0;
     pcache1.bUnderPressure = 0;
     while (n--) {
       p = (PgFreeslot *)pBuf;
       p->pNext = pcache1.pFree;
       pcache1.pFree = p;
       pBuf = (void *)&((char *)pBuf)[sz];
     }
     pcache1.pEnd = pBuf;
   }
...
...
@@ -296,27 +279,27 @@ void sqlite3PCacheBufferSetup(void *pBuf, int sz, int n){
 ** Try to initialize the pCache->pFree and pCache->pBulk fields.  Return
 ** true if pCache->pFree ends up containing one or more free pages.
 */
 static int pcache1InitBulk(PCache1 *pCache) {
   i64 szBulk;
   char *zBulk;
   if (pcache1.nInitPage == 0) return 0;
   /* Do not bother with a bulk allocation if the cache size very small */
   if (pCache->nMax < 3) return 0;
   sqlite3BeginBenignMalloc();
   if (pcache1.nInitPage > 0) {
     szBulk = pCache->szAlloc * (i64)pcache1.nInitPage;
   } else {
     szBulk = -1024 * (i64)pcache1.nInitPage;
   }
   if (szBulk > pCache->szAlloc * (i64)pCache->nMax) {
     szBulk = pCache->szAlloc * (i64)pCache->nMax;
   }
   zBulk = pCache->pBulk = sqlite3Malloc(szBulk);
   sqlite3EndBenignMalloc();
   if (zBulk) {
     int nBulk = sqlite3MallocSize(zBulk) / pCache->szAlloc;
     do {
       PgHdr1 *pX = (PgHdr1 *)&zBulk[pCache->szPage];
       pX->page.pBuf = zBulk;
       pX->page.pExtra = &pX[1];
       pX->isBulkLocal = 1;
...
...
@@ -325,9 +308,9 @@ static int pcache1InitBulk(PCache1 *pCache){
       pX->pLruPrev = 0;           /* Initializing this saves a valgrind error */
       pCache->pFree = pX;
       zBulk += pCache->szAlloc;
     } while (--nBulk);
   }
   return pCache->pFree != 0;
 }
/*
...
...
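sqlite3PCacheBufferSetup() above threads a caller-supplied flat buffer of n fixed-size slots into a singly linked free list. A minimal standalone sketch of the same carving loop (sizes are made up for illustration):

#include <stdio.h>
#include <stdlib.h>

typedef struct Slot Slot;
struct Slot { Slot *pNext; };

int main(void) {
  int sz = 64, n = 4;                              /* slot size and count */
  char *pBuf = malloc((size_t)sz * n);
  if (pBuf == 0) return 1;
  Slot *pFree = 0;
  for (int i = 0; i < n; i++) {                    /* push each slot onto the free list */
    Slot *p = (Slot *)(pBuf + (size_t)i * sz);
    p->pNext = pFree;
    pFree = p;
  }
  int count = 0;
  for (Slot *p = pFree; p; p = p->pNext) count++;
  printf("free slots: %d\n", count);               /* 4 */
  free(pBuf);
  return 0;
}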
@@ -339,29 +322,29 @@ static int pcache1InitBulk(PCache1 *pCache){
 ** Multiple threads can run this routine at the same time.  Global variables
 ** in pcache1 need to be protected via mutex.
 */
 static void *pcache1Alloc(int nByte) {
   void *p = 0;
   assert(sqlite3_mutex_notheld(pcache1.grp.mutex));
   if (nByte <= pcache1.szSlot) {
     sqlite3_mutex_enter(pcache1.mutex);
     p = (PgHdr1 *)pcache1.pFree;
     if (p) {
       pcache1.pFree = pcache1.pFree->pNext;
       pcache1.nFreeSlot--;
       pcache1.bUnderPressure = pcache1.nFreeSlot < pcache1.nReserve;
       assert(pcache1.nFreeSlot >= 0);
       sqlite3StatusHighwater(SQLITE_STATUS_PAGECACHE_SIZE, nByte);
       sqlite3StatusUp(SQLITE_STATUS_PAGECACHE_USED, 1);
     }
     sqlite3_mutex_leave(pcache1.mutex);
   }
   if (p == 0) {
     /* Memory is not available in the SQLITE_CONFIG_PAGECACHE pool.  Get
     ** it from sqlite3Malloc instead.
     */
     p = sqlite3Malloc(nByte);
 #ifndef SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS
     if (p) {
       int sz = sqlite3MallocSize(p);
       sqlite3_mutex_enter(pcache1.mutex);
       sqlite3StatusHighwater(SQLITE_STATUS_PAGECACHE_SIZE, nByte);
...
...
@@ -377,21 +360,21 @@ static void *pcache1Alloc(int nByte){
 /*
 ** Free an allocated buffer obtained from pcache1Alloc().
 */
 static void pcache1Free(void *p) {
   if (p == 0) return;
   if (SQLITE_WITHIN(p, pcache1.pStart, pcache1.pEnd)) {
     PgFreeslot *pSlot;
     sqlite3_mutex_enter(pcache1.mutex);
     sqlite3StatusDown(SQLITE_STATUS_PAGECACHE_USED, 1);
     pSlot = (PgFreeslot *)p;
     pSlot->pNext = pcache1.pFree;
     pcache1.pFree = pSlot;
     pcache1.nFreeSlot++;
     pcache1.bUnderPressure = pcache1.nFreeSlot < pcache1.nReserve;
     assert(pcache1.nFreeSlot <= pcache1.nSlot);
     sqlite3_mutex_leave(pcache1.mutex);
   } else {
     assert(sqlite3MemdebugHasType(p, MEMTYPE_PCACHE));
     sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
 #ifndef SQLITE_DISABLE_PAGECACHE_OVERFLOW_STATS
     {
...
...
@@ -410,12 +393,12 @@ static void pcache1Free(void *p){
 /*
 ** Return the size of a pcache allocation
 */
 static int pcache1MemSize(void *p) {
   if (p >= pcache1.pStart && p < pcache1.pEnd) {
     return pcache1.szSlot;
   } else {
     int iSize;
     assert(sqlite3MemdebugHasType(p, MEMTYPE_PCACHE));
     sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
     iSize = sqlite3MallocSize(p);
     sqlite3MemdebugSetType(p, MEMTYPE_PCACHE);
...
...
@@ -427,30 +410,32 @@ static int pcache1MemSize(void *p){
 /*
 ** Allocate a new page object initially associated with cache pCache.
 */
 static PgHdr1 *pcache1AllocPage(PCache1 *pCache, int benignMalloc) {
   PgHdr1 *p = 0;
   void   *pPg;

   assert(sqlite3_mutex_held(pCache->pGroup->mutex));
   if (pCache->pFree || (pCache->nPage == 0 && pcache1InitBulk(pCache))) {
     assert(pCache->pFree != 0);
     p = pCache->pFree;
     pCache->pFree = p->pNext;
     p->pNext = 0;
   } else {
 #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
     /* The group mutex must be released before pcache1Alloc() is called. This
     ** is because it might call sqlite3_release_memory(), which assumes that
     ** this mutex is not held. */
     assert(pcache1.separateCache == 0);
     assert(pCache->pGroup == &pcache1.grp);
     pcache1LeaveMutex(pCache->pGroup);
 #endif
     if (benignMalloc) {
       sqlite3BeginBenignMalloc();
     }
 #ifdef SQLITE_PCACHE_SEPARATE_HEADER
     pPg = pcache1Alloc(pCache->szPage);
     p = sqlite3Malloc(sizeof(PgHdr1) + pCache->szExtra);
     if (!pPg || !p) {
       pcache1Free(pPg);
       sqlite3_free(p);
       pPg = 0;
...
...
@@ -458,11 +443,13 @@ static PgHdr1 *pcache1AllocPage(PCache1 *pCache, int benignMalloc){
 #else
     pPg = pcache1Alloc(pCache->szAlloc);
 #endif
     if (benignMalloc) {
       sqlite3EndBenignMalloc();
     }
 #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
     pcache1EnterMutex(pCache->pGroup);
 #endif
     if (pPg == 0) return 0;
 #ifndef SQLITE_PCACHE_SEPARATE_HEADER
     p = (PgHdr1 *)&((u8 *)pPg)[pCache->szPage];
 #endif
...
...
@@ -479,15 +466,15 @@ static PgHdr1 *pcache1AllocPage(PCache1 *pCache, int benignMalloc){
 /*
 ** Free a page object allocated by pcache1AllocPage().
 */
 static void pcache1FreePage(PgHdr1 *p) {
   PCache1 *pCache;
   assert(p != 0);
   pCache = p->pCache;
   assert(sqlite3_mutex_held(p->pCache->pGroup->mutex));
   if (p->isBulkLocal) {
     p->pNext = pCache->pFree;
     pCache->pFree = p;
   } else {
     pcache1Free(p->page.pBuf);
 #ifdef SQLITE_PCACHE_SEPARATE_HEADER
     sqlite3_free(p);
...
...
@@ -501,18 +488,15 @@ static void pcache1FreePage(PgHdr1 *p){
 ** using sqlite3_config(SQLITE_CONFIG_PAGECACHE) option. If no such buffer
 ** exists, this function falls back to sqlite3Malloc().
 */
 void *sqlite3PageMalloc(int sz) {
   assert(sz <= 65536 + 8);  /* These allocations are never very large */
   return pcache1Alloc(sz);
 }

 /*
 ** Free an allocated buffer obtained from sqlite3PageMalloc().
 */
 void sqlite3PageFree(void *p) { pcache1Free(p); }
/*
** Return true if it desirable to avoid allocating a new page cache
...
...
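When SQLITE_PCACHE_SEPARATE_HEADER is not defined, pcache1AllocPage() above makes one allocation that holds the page buffer followed by its header, and derives the header pointer from the buffer address. A standalone sketch of that layout trick (sizes and the PgLayoutHdr name are illustrative only):

#include <stdio.h>
#include <stdlib.h>

typedef struct { void *pBuf; void *pExtra; } PgLayoutHdr;   /* toy stand-in for PgHdr1 */

int main(void) {
  int szPage = 4096;
  int szAlloc = szPage + (int)sizeof(PgLayoutHdr) + 128;    /* page + header + extra */
  unsigned char *pPg = malloc((size_t)szAlloc);
  if (pPg == 0) return 1;
  PgLayoutHdr *p = (PgLayoutHdr *)&pPg[szPage];   /* header lives right after the page */
  p->pBuf = pPg;
  p->pExtra = &p[1];                              /* extra space follows the header */
  printf("page=%p header=%p extra=%p\n", (void *)pPg, (void *)p, p->pExtra);
  free(pPg);
  return 0;
}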
@@ -530,10 +514,10 @@ void sqlite3PageFree(void *p){
 ** allocating a new page cache entry in order to avoid stressing
 ** the heap even further.
 */
 static int pcache1UnderMemoryPressure(PCache1 *pCache) {
   if (pcache1.nSlot && (pCache->szPage + pCache->szExtra) <= pcache1.szSlot) {
     return pcache1.bUnderPressure;
   } else {
     return sqlite3HeapNearlyFull();
   }
 }
...
...
@@ -547,28 +531,32 @@ static int pcache1UnderMemoryPressure(PCache1 *pCache){
 **
 ** The PCache mutex must be held when this function is called.
 */
 static void pcache1ResizeHash(PCache1 *p) {
   PgHdr1 **apNew;
   unsigned int nNew;
   unsigned int i;

   assert(sqlite3_mutex_held(p->pGroup->mutex));

   nNew = p->nHash * 2;
   if (nNew < 256) {
     nNew = 256;
   }

   pcache1LeaveMutex(p->pGroup);
   if (p->nHash) {
     sqlite3BeginBenignMalloc();
   }
   apNew = (PgHdr1 **)sqlite3MallocZero(sizeof(PgHdr1 *) * nNew);
   if (p->nHash) {
     sqlite3EndBenignMalloc();
   }
   pcache1EnterMutex(p->pGroup);
   if (apNew) {
     for (i = 0; i < p->nHash; i++) {
       PgHdr1 *pPage;
       PgHdr1 *pNext = p->apHash[i];
       while ((pPage = pNext) != 0) {
         unsigned int h = pPage->iKey % nNew;
         pNext = pPage->pNext;
         pPage->pNext = apNew[h];
...
...
@@ -588,24 +576,23 @@ static void pcache1ResizeHash(PCache1 *p){
 **
 ** The PGroup mutex must be held when this function is called.
 */
 static PgHdr1 *pcache1PinPage(PgHdr1 *pPage) {
   assert(pPage != 0);
   assert(PAGE_IS_UNPINNED(pPage));
   assert(pPage->pLruNext);
   assert(pPage->pLruPrev);
   assert(sqlite3_mutex_held(pPage->pCache->pGroup->mutex));
   pPage->pLruPrev->pLruNext = pPage->pLruNext;
   pPage->pLruNext->pLruPrev = pPage->pLruPrev;
   pPage->pLruNext = 0;
   /* pPage->pLruPrev = 0;
   ** No need to clear pLruPrev as it is never accessed if pLruNext is 0 */
   assert(pPage->isAnchor == 0);
   assert(pPage->pCache->pGroup->lru.isAnchor == 1);
   pPage->pCache->nRecyclable--;
   return pPage;
 }
/*
** Remove the page supplied as an argument from the hash table
** (PCache1.apHash structure) that it is currently stored in.
...
...
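pcache1ResizeHash() above doubles the hash table and rechains every entry into its new bucket, computed as key % nNew. A self-contained sketch of that rehashing loop (simplified Entry type and a heap-allocated table instead of the real PCache1 fields):

#include <stdio.h>
#include <stdlib.h>

typedef struct Entry Entry;
struct Entry { unsigned key; Entry *pNext; };

static Entry **rehash(Entry **apOld, unsigned nOld, unsigned nNew) {
  Entry **apNew = calloc(nNew, sizeof(Entry *));
  if (apNew == 0) return apOld;                  /* keep the old table on OOM */
  for (unsigned i = 0; i < nOld; i++) {
    Entry *p, *pNext = apOld[i];
    while ((p = pNext) != 0) {                   /* move each chained entry */
      unsigned h = p->key % nNew;
      pNext = p->pNext;
      p->pNext = apNew[h];
      apNew[h] = p;
    }
  }
  free(apOld);
  return apNew;
}

int main(void) {
  Entry **ap = calloc(2, sizeof(Entry *));
  Entry e1 = {5, 0}, e2 = {7, 0};
  e1.pNext = ap[5 % 2]; ap[5 % 2] = &e1;
  e2.pNext = ap[7 % 2]; ap[7 % 2] = &e2;
  ap = rehash(ap, 2, 4);
  printf("bucket of key 5: %s\n", ap[5 % 4] && ap[5 % 4]->key == 5 ? "ok" : "miss");
  free(ap);
  return 0;
}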
@@ -613,37 +600,36 @@ static PgHdr1 *pcache1PinPage(PgHdr1 *pPage){
 **
 ** The PGroup mutex must be held when this function is called.
 */
 static void pcache1RemoveFromHash(PgHdr1 *pPage, int freeFlag) {
   unsigned int h;
   PCache1 *pCache = pPage->pCache;
   PgHdr1 **pp;

   assert(sqlite3_mutex_held(pCache->pGroup->mutex));
   h = pPage->iKey % pCache->nHash;
   for (pp = &pCache->apHash[h]; (*pp) != pPage; pp = &(*pp)->pNext)
     ;
   *pp = (*pp)->pNext;

   pCache->nPage--;
   if (freeFlag) pcache1FreePage(pPage);
 }

 /*
 ** If there are currently more than nMaxPage pages allocated, try
 ** to recycle pages to reduce the number allocated to nMaxPage.
 */
 static void pcache1EnforceMaxPage(PCache1 *pCache) {
   PGroup *pGroup = pCache->pGroup;
   PgHdr1 *p;
   assert(sqlite3_mutex_held(pGroup->mutex));
   while (pGroup->nPurgeable > pGroup->nMaxPage &&
          (p = pGroup->lru.pLruPrev)->isAnchor == 0) {
     assert(p->pCache->pGroup == pGroup);
     assert(PAGE_IS_UNPINNED(p));
     pcache1PinPage(p);
     pcache1RemoveFromHash(p, 1);
   }
   if (pCache->nPage == 0 && pCache->pBulk) {
     sqlite3_free(pCache->pBulk);
     pCache->pBulk = pCache->pFree = 0;
   }
...
...
@@ -656,49 +642,48 @@ static void pcache1EnforceMaxPage(PCache1 *pCache){
 **
 ** The PCache mutex must be held when this function is called.
 */
 static void pcache1TruncateUnsafe(PCache1 *pCache,     /* The cache to truncate */
                                   unsigned int iLimit  /* Drop pages with this pgno or larger */
 ) {
   TESTONLY(int nPage = 0;)  /* To assert pCache->nPage is correct */
   unsigned int h, iStop;
   assert(sqlite3_mutex_held(pCache->pGroup->mutex));
   assert(pCache->iMaxKey >= iLimit);
   assert(pCache->nHash > 0);
   if (pCache->iMaxKey - iLimit < pCache->nHash) {
     /* If we are just shaving the last few pages off the end of the
     ** cache, then there is no point in scanning the entire hash table.
     ** Only scan those hash slots that might contain pages that need to
     ** be removed. */
     h = iLimit % pCache->nHash;
     iStop = pCache->iMaxKey % pCache->nHash;
     TESTONLY(nPage = -10;)  /* Disable the pCache->nPage validity check */
   } else {
     /* This is the general case where many pages are being removed.
     ** It is necessary to scan the entire hash table */
     h = pCache->nHash / 2;
     iStop = h - 1;
   }
   for (;;) {
     PgHdr1 **pp;
     PgHdr1 *pPage;
     assert(h < pCache->nHash);
     pp = &pCache->apHash[h];
     while ((pPage = *pp) != 0) {
       if (pPage->iKey >= iLimit) {
         pCache->nPage--;
         *pp = pPage->pNext;
         if (PAGE_IS_UNPINNED(pPage)) pcache1PinPage(pPage);
         pcache1FreePage(pPage);
       } else {
         pp = &pPage->pNext;
         TESTONLY(if (nPage >= 0) nPage++;)
       }
     }
     if (h == iStop) break;
     h = (h + 1) % pCache->nHash;
   }
   assert(nPage < 0 || pCache->nPage == (unsigned)nPage);
 }
/******************************************************************************/
...
...
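The first branch of pcache1TruncateUnsafe() above avoids a full hash-table scan when only the tail of the key range is being dropped: it walks just the slots iLimit % nHash through iMaxKey % nHash. A standalone worked example of how few slots that visits (the numbers are invented for illustration):

#include <stdio.h>

int main(void) {
  unsigned nHash = 256, iMaxKey = 1000, iLimit = 990;
  if (iMaxKey - iLimit < nHash) {                 /* the "shave the end" shortcut */
    unsigned h = iLimit % nHash, iStop = iMaxKey % nHash, nScan = 0;
    for (;;) {
      nScan++;                                    /* visit slot h */
      if (h == iStop) break;
      h = (h + 1) % nHash;
    }
    printf("scanned %u of %u hash slots\n", nScan, nHash);   /* 11 of 256 */
  }
  return 0;
}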
@@ -707,52 +692,45 @@ static void pcache1TruncateUnsafe(
/*
** Implementation of the sqlite3_pcache.xInit method.
*/
static int pcache1Init(void *NotUsed) {
  UNUSED_PARAMETER(NotUsed);
  assert(pcache1.isInit == 0);
  memset(&pcache1, 0, sizeof(pcache1));

-  /*
-  ** The pcache1.separateCache variable is true if each PCache has its own
-  ** private PGroup (mode-1).  pcache1.separateCache is false if the single
-  ** PGroup in pcache1.grp is used for all page caches (mode-2).
-  **
-  **   *  Always use a unified cache (mode-2) if ENABLE_MEMORY_MANAGEMENT
-  **
-  **   *  Use a unified cache in single-threaded applications that have
-  **      configured a start-time buffer for use as page-cache memory using
-  **      sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, sz, N) with non-NULL
-  **      pBuf argument.
-  **
-  **   *  Otherwise use separate caches (mode-1)
-  */
-#if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT)
-  pcache1.separateCache = 0;
-#elif SQLITE_THREADSAFE
-  pcache1.separateCache = sqlite3GlobalConfig.pPage==0 || sqlite3GlobalConfig.bCoreMutex>0;
-#else
-  pcache1.separateCache = sqlite3GlobalConfig.pPage==0;
-#endif
-#if SQLITE_THREADSAFE
-  if( sqlite3GlobalConfig.bCoreMutex ){
-    pcache1.grp.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_LRU);
-    pcache1.mutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_PMEM);
-  }
-#endif
-  if( pcache1.separateCache && sqlite3GlobalConfig.nPage!=0 && sqlite3GlobalConfig.pPage==0 ){
-    pcache1.nInitPage = sqlite3GlobalConfig.nPage;
-  }else{
+  // /*
+  // ** The pcache1.separateCache variable is true if each PCache has its own
+  // ** private PGroup (mode-1).  pcache1.separateCache is false if the single
+  // ** PGroup in pcache1.grp is used for all page caches (mode-2).
+  // **
+  // **   *  Always use a unified cache (mode-2) if ENABLE_MEMORY_MANAGEMENT
+  // **
+  // **   *  Use a unified cache in single-threaded applications that have
+  // **      configured a start-time buffer for use as page-cache memory using
+  // **      sqlite3_config(SQLITE_CONFIG_PAGECACHE, pBuf, sz, N) with non-NULL
+  // **      pBuf argument.
+  // **
+  // **   *  Otherwise use separate caches (mode-1)
+  // */
+  // #if defined(SQLITE_ENABLE_MEMORY_MANAGEMENT)
+  //   pcache1.separateCache = 0;
+  // #elif SQLITE_THREADSAFE
+  //   pcache1.separateCache = sqlite3GlobalConfig.pPage==0 || sqlite3GlobalConfig.bCoreMutex>0;
+  // #else
+  //   pcache1.separateCache = sqlite3GlobalConfig.pPage==0;
+  // #endif
+  pcache1.separateCache = 1;
+
+  pthread_mutex_init(&pcache1.grp.mutex, NULL);
+  pthread_mutex_init(&pcache1.mutex, NULL);
+
+  // if (pcache1.separateCache && sqlite3GlobalConfig.nPage != 0 && sqlite3GlobalConfig.pPage == 0) {
+  //   pcache1.nInitPage = sqlite3GlobalConfig.nPage;
+  // } else {
  pcache1.nInitPage = 0;
-  }
+  // }

  pcache1.grp.mxPinned = 10;
  pcache1.isInit = 1;
-  return SQLITE_OK;
+  return 0;
}
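Compared with stock SQLite, the initialization above drops the SQLITE_MUTEX_STATIC_* machinery: both global mutexes become plain pthread mutexes that are always created, and separateCache is pinned to 1 so every cache gets a private PGroup. A minimal standalone sketch of that pattern (toy names only, not the tdb globals; the matching destroy in the shutdown hook is left out):

#include <pthread.h>

/* Toy stand-in for the process-wide page-cache bookkeeping. */
static struct {
  pthread_mutex_t grpMutex; /* would guard the shared LRU group */
  pthread_mutex_t memMutex; /* would guard allocator statistics */
  int             isInit;
} gCacheGlobal;

int cacheGlobalInit(void) {
  if (gCacheGlobal.isInit) return 0;
  /* Unlike SQLite's static mutex objects, pthread mutexes must be
  ** initialized explicitly before first use. */
  pthread_mutex_init(&gCacheGlobal.grpMutex, NULL);
  pthread_mutex_init(&gCacheGlobal.memMutex, NULL);
  gCacheGlobal.isInit = 1;
  return 0;
}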
/*
...
...
@@ -760,9 +738,8 @@ static int pcache1Init(void *NotUsed){
** Note that the static mutex allocated in xInit does
** not need to be freed.
*/
static void pcache1Shutdown(void *NotUsed) {
  UNUSED_PARAMETER(NotUsed);
  assert(pcache1.isInit != 0);
  memset(&pcache1, 0, sizeof(pcache1));
}
...
...
@@ -774,25 +751,25 @@ static void pcache1Destroy(sqlite3_pcache *p);
**
** Allocate a new cache.
*/
static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable) {
  PCache1 *pCache;  /* The newly created page cache */
  PGroup  *pGroup;  /* The group the new page cache will belong to */
  int      sz;      /* Bytes of memory required to allocate the new cache */

  assert((szPage & (szPage - 1)) == 0 && szPage >= 512 && szPage <= 65536);
  assert(szExtra < 300);

  sz = sizeof(PCache1) + sizeof(PGroup) * pcache1.separateCache;
  pCache = (PCache1 *)sqlite3MallocZero(sz);
  if (pCache) {
    if (pcache1.separateCache) {
      pGroup = (PGroup *)&pCache[1];
      pGroup->mxPinned = 10;
    } else {
      pGroup = &pcache1.grp;
    }
    pcache1EnterMutex(pGroup);
    if (pGroup->lru.isAnchor == 0) {
      pGroup->lru.isAnchor = 1;
      pGroup->lru.pLruPrev = pGroup->lru.pLruNext = &pGroup->lru;
    }
...
...
@@ -802,17 +779,17 @@ static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable){
    pCache->szAlloc = szPage + szExtra + ROUND8(sizeof(PgHdr1));
    pCache->bPurgeable = (bPurgeable ? 1 : 0);
    pcache1ResizeHash(pCache);
    if (bPurgeable) {
      pCache->nMin = 10;
      pGroup->nMinPage += pCache->nMin;
      pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage;
      pCache->pnPurgeable = &pGroup->nPurgeable;
    } else {
      pCache->pnPurgeable = &pCache->nPurgeableDummy;
    }
    pcache1LeaveMutex(pGroup);
    if (pCache->nHash == 0) {
      pcache1Destroy((sqlite3_pcache *)pCache);
      pCache = 0;
    }
  }
...
...
@@ -824,21 +801,21 @@ static sqlite3_pcache *pcache1Create(int szPage, int szExtra, int bPurgeable){
**
** Configure the cache_size limit for a cache.
*/
static void pcache1Cachesize(sqlite3_pcache *p, int nMax) {
  PCache1 *pCache = (PCache1 *)p;
  u32      n;
  assert(nMax >= 0);
  if (pCache->bPurgeable) {
    PGroup *pGroup = pCache->pGroup;
    pcache1EnterMutex(pGroup);
    n = (u32)nMax;
    if (n > 0x7fff0000 - pGroup->nMaxPage + pCache->nMax) {
      n = 0x7fff0000 - pGroup->nMaxPage + pCache->nMax;
    }
    pGroup->nMaxPage += (n - pCache->nMax);
    pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage;
    pCache->nMax = n;
    pCache->n90pct = pCache->nMax * 9 / 10;
    pcache1EnforceMaxPage(pCache);
    pcache1LeaveMutex(pGroup);
  }
...
...
@@ -849,10 +826,10 @@ static void pcache1Cachesize(sqlite3_pcache *p, int nMax){
**
** Free up as much memory as possible.
*/
static void pcache1Shrink(sqlite3_pcache *p) {
  PCache1 *pCache = (PCache1 *)p;
  if (pCache->bPurgeable) {
    PGroup      *pGroup = pCache->pGroup;
    unsigned int savedMaxPage;
    pcache1EnterMutex(pGroup);
    savedMaxPage = pGroup->nMaxPage;
...
...
@@ -866,16 +843,15 @@ static void pcache1Shrink(sqlite3_pcache *p){
/*
** Implementation of the sqlite3_pcache.xPagecount method.
*/
static int pcache1Pagecount(sqlite3_pcache *p) {
  int      n;
  PCache1 *pCache = (PCache1 *)p;
  pcache1EnterMutex(pCache->pGroup);
  n = pCache->nPage;
  pcache1LeaveMutex(pCache->pGroup);
  return n;
}
/*
** Implement steps 3, 4, and 5 of the pcache1Fetch() algorithm described
** in the header of the pcache1Fetch() procedure.
...
...
@@ -884,46 +860,37 @@ static int pcache1Pagecount(sqlite3_pcache *p){
** usually not needed, and by avoiding the stack initialization required
** for these steps, the main pcache1Fetch() procedure can run faster.
*/
static SQLITE_NOINLINE PgHdr1 *pcache1FetchStage2(PCache1 *pCache, unsigned int iKey, int createFlag) {
  unsigned int nPinned;
  PGroup      *pGroup = pCache->pGroup;
  PgHdr1      *pPage = 0;

  /* Step 3: Abort if createFlag is 1 but the cache is nearly full */
  assert(pCache->nPage >= pCache->nRecyclable);
  nPinned = pCache->nPage - pCache->nRecyclable;
  assert(pGroup->mxPinned == pGroup->nMaxPage + 10 - pGroup->nMinPage);
  assert(pCache->n90pct == pCache->nMax * 9 / 10);
  if (createFlag == 1 && (nPinned >= pGroup->mxPinned || nPinned >= pCache->n90pct ||
                          (pcache1UnderMemoryPressure(pCache) && pCache->nRecyclable < nPinned))) {
    return 0;
  }

  if (pCache->nPage >= pCache->nHash) pcache1ResizeHash(pCache);
  assert(pCache->nHash > 0 && pCache->apHash);

  /* Step 4. Try to recycle a page. */
  if (pCache->bPurgeable && !pGroup->lru.pLruPrev->isAnchor &&
      ((pCache->nPage + 1 >= pCache->nMax) || pcache1UnderMemoryPressure(pCache))) {
    PCache1 *pOther;
    pPage = pGroup->lru.pLruPrev;
    assert(PAGE_IS_UNPINNED(pPage));
    pcache1RemoveFromHash(pPage, 0);
    pcache1PinPage(pPage);
    pOther = pPage->pCache;
    if (pOther->szAlloc != pCache->szAlloc) {
      pcache1FreePage(pPage);
      pPage = 0;
    } else {
      pGroup->nPurgeable -= (pOther->bPurgeable - pCache->bPurgeable);
    }
  }
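Step 3 is a pure arithmetic guard: with mxPinned = nMaxPage + 10 - nMinPage and n90pct = nMax * 9 / 10, a createFlag==1 request ("create only if easy") is refused once the count of pinned pages reaches either limit. A toy version of the core check, ignoring the memory-pressure term and using made-up numbers:

#include <stdio.h>

/* Toy version of the "is it still easy to create a page?" admission test. */
static int easyCreateAllowed(unsigned nPage, unsigned nRecyclable,
                             unsigned nMax, unsigned nMaxPage, unsigned nMinPage) {
  unsigned nPinned = nPage - nRecyclable;
  unsigned mxPinned = nMaxPage + 10 - nMinPage;
  unsigned n90pct = nMax * 9 / 10;
  return nPinned < mxPinned && nPinned < n90pct;
}

int main(void) {
  /* 95 of 100 pages pinned: past the 90% soft limit, so refuse (prints 0). */
  printf("%d\n", easyCreateAllowed(100, 5, 100, 100, 10));
  /* 50 of 80 pages pinned: well inside both limits (prints 1). */
  printf("%d\n", easyCreateAllowed(80, 30, 100, 100, 10));
  return 0;
}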
...
...
@@ -931,11 +898,11 @@ static SQLITE_NOINLINE PgHdr1 *pcache1FetchStage2(
/* Step 5. If a usable page buffer has still not been found,
** attempt to allocate a new one.
*/
  if (!pPage) {
    pPage = pcache1AllocPage(pCache, createFlag == 1);
  }

  if (pPage) {
    unsigned int h = iKey % pCache->nHash;
    pCache->nPage++;
    pPage->iKey = iKey;
...
...
@@ -946,7 +913,7 @@ static SQLITE_NOINLINE PgHdr1 *pcache1FetchStage2(
** No need to clear pLruPrev since it is not accessed when pLruNext==0 */
    *(void **)pPage->page.pExtra = 0;
    pCache->apHash[h] = pPage;
    if (iKey > pCache->iMaxKey) {
      pCache->iMaxKey = iKey;
    }
  }
...
...
@@ -1012,103 +979,88 @@ static SQLITE_NOINLINE PgHdr1 *pcache1FetchStage2(
** the common case where pGroup->mutex is NULL. The pcache1Fetch() wrapper
** invokes the appropriate routine.
*/
static PgHdr1 *pcache1FetchNoMutex(sqlite3_pcache *p, unsigned int iKey, int createFlag) {
  PCache1 *pCache = (PCache1 *)p;
  PgHdr1  *pPage = 0;

  /* Step 1: Search the hash table for an existing entry. */
  pPage = pCache->apHash[iKey % pCache->nHash];
  while (pPage && pPage->iKey != iKey) {
    pPage = pPage->pNext;
  }

  /* Step 2: If the page was found in the hash table, then return it.
  ** If the page was not in the hash table and createFlag is 0, abort.
  ** Otherwise (page not in hash and createFlag!=0) continue with
  ** subsequent steps to try to create the page. */
  if (pPage) {
    if (PAGE_IS_UNPINNED(pPage)) {
      return pcache1PinPage(pPage);
    } else {
      return pPage;
    }
  } else if (createFlag) {
    /* Steps 3, 4, and 5 implemented by this subroutine */
    return pcache1FetchStage2(pCache, iKey, createFlag);
  } else {
    return 0;
  }
}

#if PCACHE1_MIGHT_USE_GROUP_MUTEX
static PgHdr1 *pcache1FetchWithMutex(sqlite3_pcache *p, unsigned int iKey, int createFlag) {
  PCache1 *pCache = (PCache1 *)p;
  PgHdr1  *pPage;

  pcache1EnterMutex(pCache->pGroup);
  pPage = pcache1FetchNoMutex(p, iKey, createFlag);
  assert(pPage == 0 || pCache->iMaxKey >= iKey);
  pcache1LeaveMutex(pCache->pGroup);
  return pPage;
}
#endif

static sqlite3_pcache_page *pcache1Fetch(sqlite3_pcache *p, unsigned int iKey, int createFlag) {
#if PCACHE1_MIGHT_USE_GROUP_MUTEX || defined(SQLITE_DEBUG)
  PCache1 *pCache = (PCache1 *)p;
#endif

  assert(offsetof(PgHdr1, page) == 0);
  assert(pCache->bPurgeable || createFlag != 1);
  assert(pCache->bPurgeable || pCache->nMin == 0);
  assert(pCache->bPurgeable == 0 || pCache->nMin == 10);
  assert(pCache->nMin == 0 || pCache->bPurgeable);
  assert(pCache->nHash > 0);
#if PCACHE1_MIGHT_USE_GROUP_MUTEX
  if (pCache->pGroup->mutex) {
    return (sqlite3_pcache_page *)pcache1FetchWithMutex(p, iKey, createFlag);
  } else
#endif
  {
    return (sqlite3_pcache_page *)pcache1FetchNoMutex(p, iKey, createFlag);
  }
}
/*
** Implementation of the sqlite3_pcache.xUnpin method.
**
** Mark a page as unpinned (eligible for asynchronous recycling).
*/
static void pcache1Unpin(sqlite3_pcache *p, sqlite3_pcache_page *pPg, int reuseUnlikely) {
  PCache1 *pCache = (PCache1 *)p;
  PgHdr1  *pPage = (PgHdr1 *)pPg;
  PGroup  *pGroup = pCache->pGroup;

  assert(pPage->pCache == pCache);
  pcache1EnterMutex(pGroup);

  /* It is an error to call this function if the page is already
  ** part of the PGroup LRU list.
  */
  assert(pPage->pLruNext == 0);
  assert(PAGE_IS_PINNED(pPage));

  if (reuseUnlikely || pGroup->nPurgeable > pGroup->nMaxPage) {
    pcache1RemoveFromHash(pPage, 1);
  } else {
    /* Add the page to the PGroup LRU list. */
    PgHdr1 **ppFirst = &pGroup->lru.pLruNext;
    pPage->pLruPrev = &pGroup->lru;
...
...
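The unpin path ends up doing a textbook intrusive-LRU insert: the PGroup keeps a sentinel node (lru, with isAnchor set), freshly unpinned pages are pushed right after it, and the recycling code always takes lru.pLruPrev, i.e. the page that has been unpinned the longest. A self-contained toy of that list discipline (toy node type, not PgHdr1):

#include <assert.h>
#include <stdio.h>

typedef struct Node {
  struct Node *pLruNext;
  struct Node *pLruPrev;
  int          isAnchor; /* 1 for the sentinel only */
  int          key;
} Node;

/* Push an unpinned node right after the sentinel (most recently unpinned). */
static void lruPush(Node *anchor, Node *p) {
  p->pLruNext = anchor->pLruNext;
  p->pLruPrev = anchor;
  anchor->pLruNext->pLruPrev = p;
  anchor->pLruNext = p;
}

/* Unlink the coldest node (the one just before the sentinel), i.e. "pin" it. */
static Node *lruPopCold(Node *anchor) {
  Node *p = anchor->pLruPrev;
  if (p->isAnchor) return 0; /* list is empty */
  p->pLruPrev->pLruNext = p->pLruNext;
  p->pLruNext->pLruPrev = p->pLruPrev;
  p->pLruNext = p->pLruPrev = 0;
  return p;
}

int main(void) {
  Node anchor = {&anchor, &anchor, 1, 0};
  Node a = {0, 0, 0, 1}, b = {0, 0, 0, 2};
  lruPush(&anchor, &a); /* LRU order (cold -> hot): a */
  lruPush(&anchor, &b); /* LRU order (cold -> hot): a, b */
  Node *cold = lruPopCold(&anchor);
  assert(cold == &a);   /* a was unpinned first, so it is recycled first */
  printf("recycled key %d\n", cold->key);
  return 0;
}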
@@ -1123,33 +1075,28 @@ static void pcache1Unpin(
/*
** Implementation of the sqlite3_pcache.xRekey method.
*/
static void pcache1Rekey(sqlite3_pcache *p, sqlite3_pcache_page *pPg, unsigned int iOld, unsigned int iNew) {
  PCache1  *pCache = (PCache1 *)p;
  PgHdr1   *pPage = (PgHdr1 *)pPg;
  PgHdr1  **pp;
  unsigned int h;
  assert(pPage->iKey == iOld);
  assert(pPage->pCache == pCache);

  pcache1EnterMutex(pCache->pGroup);

  h = iOld % pCache->nHash;
  pp = &pCache->apHash[h];
  while ((*pp) != pPage) {
    pp = &(*pp)->pNext;
  }
  *pp = pPage->pNext;

  h = iNew % pCache->nHash;
  pPage->iKey = iNew;
  pPage->pNext = pCache->apHash[h];
  pCache->apHash[h] = pPage;
  if (iNew > pCache->iMaxKey) {
    pCache->iMaxKey = iNew;
  }
...
...
@@ -1163,12 +1110,12 @@ static void pcache1Rekey(
** or greater than parameter iLimit. Any pinned pages with a page number
** equal to or greater than iLimit are implicitly unpinned.
*/
static void pcache1Truncate(sqlite3_pcache *p, unsigned int iLimit) {
  PCache1 *pCache = (PCache1 *)p;
  pcache1EnterMutex(pCache->pGroup);
  if (iLimit <= pCache->iMaxKey) {
    pcache1TruncateUnsafe(pCache, iLimit);
    pCache->iMaxKey = iLimit - 1;
  }
  pcache1LeaveMutex(pCache->pGroup);
}
...
...
@@ -1178,15 +1125,15 @@ static void pcache1Truncate(sqlite3_pcache *p, unsigned int iLimit){
**
** Destroy a cache allocated using pcache1Create().
*/
static void pcache1Destroy(sqlite3_pcache *p) {
  PCache1 *pCache = (PCache1 *)p;
  PGroup  *pGroup = pCache->pGroup;
  assert(pCache->bPurgeable || (pCache->nMax == 0 && pCache->nMin == 0));
  pcache1EnterMutex(pGroup);
  if (pCache->nPage) pcache1TruncateUnsafe(pCache, 0);
  assert(pGroup->nMaxPage >= pCache->nMax);
  pGroup->nMaxPage -= pCache->nMax;
  assert(pGroup->nMinPage >= pCache->nMin);
  pGroup->nMinPage -= pCache->nMin;
  pGroup->mxPinned = pGroup->nMaxPage + 10 - pGroup->nMinPage;
  pcache1EnforceMaxPage(pCache);
...
...
@@ -1196,42 +1143,16 @@ static void pcache1Destroy(sqlite3_pcache *p){
  sqlite3_free(pCache);
}
-/*
-** This function is called during initialization (sqlite3_initialize()) to
-** install the default pluggable cache module, assuming the user has not
-** already provided an alternative.
-*/
-void sqlite3PCacheSetDefault(void){
-  static const sqlite3_pcache_methods2 defaultMethods = {
-    1,                       /* iVersion */
-    0,                       /* pArg */
-    pcache1Init,             /* xInit */
-    pcache1Shutdown,         /* xShutdown */
-    pcache1Create,           /* xCreate */
-    pcache1Cachesize,        /* xCachesize */
-    pcache1Pagecount,        /* xPagecount */
-    pcache1Fetch,            /* xFetch */
-    pcache1Unpin,            /* xUnpin */
-    pcache1Rekey,            /* xRekey */
-    pcache1Truncate,         /* xTruncate */
-    pcache1Destroy,          /* xDestroy */
-    pcache1Shrink            /* xShrink */
-  };
-  sqlite3_config(SQLITE_CONFIG_PCACHE2, &defaultMethods);
-}
/*
** Return the size of the header on each page of this PCACHE implementation.
*/
int sqlite3HeaderSizePcache1(void) { return ROUND8(sizeof(PgHdr1)); }
/*
** Return the global mutex used by this PCACHE implementation. The
** sqlite3_status() routine needs access to this mutex.
*/
sqlite3_mutex *sqlite3Pcache1Mutex(void) { return pcache1.mutex; }
#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
/*
...
...
@@ -1243,22 +1164,19 @@ sqlite3_mutex *sqlite3Pcache1Mutex(void){
** been released, the function returns. The return value is the total number
** of bytes of memory released.
*/
int sqlite3PcacheReleaseMemory(int nReq) {
  int nFree = 0;
  assert(sqlite3_mutex_notheld(pcache1.grp.mutex));
  assert(sqlite3_mutex_notheld(pcache1.mutex));
  if (sqlite3GlobalConfig.pPage == 0) {
    PgHdr1 *p;
    pcache1EnterMutex(&pcache1.grp);
    while ((nReq < 0 || nFree < nReq) && (p = pcache1.grp.lru.pLruPrev) != 0 && p->isAnchor == 0) {
      nFree += pcache1MemSize(p->page.pBuf);
#ifdef SQLITE_PCACHE_SEPARATE_HEADER
      nFree += sqlite3MemSize(p);
#endif
      assert(PAGE_IS_UNPINNED(p));
      pcache1PinPage(p);
      pcache1RemoveFromHash(p, 1);
    }
...
...
@@ -1273,16 +1191,15 @@ int sqlite3PcacheReleaseMemory(int nReq){
** This function is used by test procedures to inspect the internal state
** of the global cache.
*/
void sqlite3PcacheStats(
    int *pnCurrent,    /* OUT: Total number of pages cached */
    int *pnMax,        /* OUT: Global maximum cache size */
    int *pnMin,        /* OUT: Sum of PCache1.nMin for purgeable caches */
    int *pnRecyclable  /* OUT: Total number of pages available for recycling */
) {
  PgHdr1 *p;
  int     nRecyclable = 0;
  for (p = pcache1.grp.lru.pLruNext; p && !p->isAnchor; p = p->pLruNext) {
    assert(PAGE_IS_UNPINNED(p));
    nRecyclable++;
  }
  *pnCurrent = pcache1.grp.nPurgeable;
...
...
@@ -1291,3 +1208,19 @@ void sqlite3PcacheStats(
  *pnRecyclable = nRecyclable;
}
#endif
+sqlite3_pcache_methods2 pcache2 = {
+    1,                /* iVersion */
+    0,                /* pArg */
+    pcache1Init,      /* xInit */
+    pcache1Shutdown,  /* xShutdown */
+    pcache1Create,    /* xCreate */
+    pcache1Cachesize, /* xCachesize */
+    pcache1Pagecount, /* xPagecount */
+    pcache1Fetch,     /* xFetch */
+    pcache1Unpin,     /* xUnpin */
+    pcache1Rekey,     /* xRekey */
+    pcache1Truncate,  /* xTruncate */
+    pcache1Destroy,   /* xDestroy */
+    pcache1Shrink     /* xShrink */
+};
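The exported pcache2 table above is the hook the rest of tdb calls through. As a rough sketch only -- assuming it is compiled inside the tdb tree with sqliteInt.h on the include path, that pcache2 is declared extern for the caller, and that the 4096-byte page / 16-byte extra geometry is an arbitrary choice -- one fetch/unpin round trip through the vtable would look like this:

#include "sqliteInt.h"

/* Sketch: drive the pluggable cache through its method table once. */
int pcache2Smoke(void) {
  sqlite3_pcache      *pCache;
  sqlite3_pcache_page *pPg;

  if (pcache2.xInit(pcache2.pArg) != 0) return -1;

  pCache = pcache2.xCreate(4096, 16, 1); /* szPage, szExtra, bPurgeable */
  if (pCache == 0) return -1;
  pcache2.xCachesize(pCache, 100);       /* allow up to 100 purgeable pages */

  pPg = pcache2.xFetch(pCache, 1, 2);    /* page 1, createFlag 2: always create */
  if (pPg) {
    /* ... page content lives in pPg->pBuf, per-page state in pPg->pExtra ... */
    pcache2.xUnpin(pCache, pPg, 0);      /* 0: the page may well be needed again */
  }

  pcache2.xDestroy(pCache);
  pcache2.xShutdown(pcache2.pArg);
  return 0;
}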
source/libs/tdb/src/sqliteinc/sqliteInt.h
...
...
@@ -13,7 +13,10 @@
**
*/
#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <string.h>
#ifndef SQLITEINT_H
#define SQLITEINT_H
...
...
@@ -32,6 +35,10 @@ typedef struct sqlite3_pcache_page {
  void *pExtra; /* Extra information associated with the page */
} sqlite3_pcache_page;

#define ROUNDDOWN8(x) ((x) & ~7)
#define ROUND8(x) (((x) + 7) & ~7)

typedef u32 Pgno;

typedef struct Pager Pager;
...
...
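A quick self-check of the two alignment macros added here; the macro bodies are copied from the hunk above, and the main() is only an illustration:

#include <assert.h>

/* Snap a size down/up to a multiple of 8, as pcache1 does when it places
** the ROUND8(sizeof(PgHdr1)) header in front of each page buffer. */
#define ROUNDDOWN8(x) ((x) & ~7)
#define ROUND8(x) (((x) + 7) & ~7)

int main(void) {
  assert(ROUND8(1) == 8);
  assert(ROUND8(8) == 8);
  assert(ROUND8(13) == 16);
  assert(ROUNDDOWN8(13) == 8);
  return 0;
}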