Overview
Comment: Add support for read-only clients reading from dormant databases using the checkpointer lock.
SHA1: 45e447261816c9bdf59eee73b6b31de0
User & Date: dan 2013-02-18 19:46:02.459
Context
2013-02-19 19:35  Changes to allow read-only clients to safely work with live databases. (check-in: 08cc3604cf, user: dan, tags: read-only-clients)
2013-02-18 19:46  Add support for read-only clients reading from dormant databases using the checkpointer lock. (check-in: 45e4472618, user: dan, tags: read-only-clients)
2013-02-17 14:19  Merge trunk changes into this branch. (check-in: 29390891c5, user: dan, tags: read-only-clients)
Changes
Changes to lsm-test/lsmtest_main.c.
︙ | ︙ | |||
1329 1330 1331 1332 1333 1334 1335 | if( strcmp(azArg[0], "-")==0 ){ pInput = stdin; }else{ pClose = pInput = fopen(azArg[0], "r"); } zDb = azArg[1]; pEnv = tdb_lsm_env(); | | | 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 | if( strcmp(azArg[0], "-")==0 ){ pInput = stdin; }else{ pClose = pInput = fopen(azArg[0], "r"); } zDb = azArg[1]; pEnv = tdb_lsm_env(); rc = pEnv->xOpen(pEnv, zDb, 0, &pOut); if( rc!=LSM_OK ) return rc; while( feof(pInput)==0 ){ char zLine[80]; fgets(zLine, sizeof(zLine)-1, pInput); zLine[sizeof(zLine)-1] = '\0'; |
︙ | ︙ |
Changes to lsm-test/lsmtest_tdb3.c.
︙ | ︙ | |||
152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 | lsm_env *pRealEnv = tdb_lsm_env(); return pRealEnv->xFullpath(pRealEnv, zFile, zOut, pnOut); } static int testEnvOpen( lsm_env *pEnv, /* Environment for current LsmDb */ const char *zFile, /* Name of file to open */ lsm_file **ppFile /* OUT: New file handle object */ ){ lsm_env *pRealEnv = tdb_lsm_env(); LsmDb *pDb = (LsmDb *)pEnv->pVfsCtx; int rc; /* Return Code */ LsmFile *pRet; /* The new file handle */ int nFile; /* Length of string zFile in bytes */ nFile = strlen(zFile); pRet = (LsmFile *)testMalloc(sizeof(LsmFile)); pRet->pDb = pDb; pRet->bLog = (nFile > 4 && 0==memcmp("-log", &zFile[nFile-4], 4)); | > | | 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 | lsm_env *pRealEnv = tdb_lsm_env(); return pRealEnv->xFullpath(pRealEnv, zFile, zOut, pnOut); } static int testEnvOpen( lsm_env *pEnv, /* Environment for current LsmDb */ const char *zFile, /* Name of file to open */ int flags, lsm_file **ppFile /* OUT: New file handle object */ ){ lsm_env *pRealEnv = tdb_lsm_env(); LsmDb *pDb = (LsmDb *)pEnv->pVfsCtx; int rc; /* Return Code */ LsmFile *pRet; /* The new file handle */ int nFile; /* Length of string zFile in bytes */ nFile = strlen(zFile); pRet = (LsmFile *)testMalloc(sizeof(LsmFile)); pRet->pDb = pDb; pRet->bLog = (nFile > 4 && 0==memcmp("-log", &zFile[nFile-4], 4)); rc = pRealEnv->xOpen(pRealEnv, zFile, flags, &pRet->pReal); if( rc!=LSM_OK ){ testFree(pRet); pRet = 0; } *ppFile = (lsm_file *)pRet; return rc; |
︙ | ︙ | |||
376 377 378 379 380 381 382 | char *zFile = pDb->zName; char *zFree = 0; for(iFile=0; iFile<2; iFile++){ lsm_file *pFile = 0; int i; | | | 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 | char *zFile = pDb->zName; char *zFree = 0; for(iFile=0; iFile<2; iFile++){ lsm_file *pFile = 0; int i; pEnv->xOpen(pEnv, zFile, 0, &pFile); for(i=0; i<pDb->aFile[iFile].nSector; i++){ u8 *aOld = pDb->aFile[iFile].aSector[i].aOld; if( aOld ){ int iOpt = testPrngValue(iSeed++) % 3; switch( iOpt ){ case 0: break; |
︙ | ︙ |
Changes to src/lsm.h.
︙ | ︙ | |||
35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 | typedef long long int lsm_i64; /* 64-bit signed integer type */ /* Candidate values for the 3rd argument to lsm_env.xLock() */ #define LSM_LOCK_UNLOCK 0 #define LSM_LOCK_SHARED 1 #define LSM_LOCK_EXCL 2 /* ** CAPI: Database Runtime Environment ** ** Run-time environment used by LSM */ struct lsm_env { int nByte; /* Size of this structure in bytes */ int iVersion; /* Version number of this structure (1) */ /****** file i/o ***********************************************/ void *pVfsCtx; int (*xFullpath)(lsm_env*, const char *, char *, int *); | > > > | | 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 | typedef long long int lsm_i64; /* 64-bit signed integer type */ /* Candidate values for the 3rd argument to lsm_env.xLock() */ #define LSM_LOCK_UNLOCK 0 #define LSM_LOCK_SHARED 1 #define LSM_LOCK_EXCL 2 /* Flags for lsm_env.xOpen() */ #define LSM_OPEN_READONLY 0x0001 /* ** CAPI: Database Runtime Environment ** ** Run-time environment used by LSM */ struct lsm_env { int nByte; /* Size of this structure in bytes */ int iVersion; /* Version number of this structure (1) */ /****** file i/o ***********************************************/ void *pVfsCtx; int (*xFullpath)(lsm_env*, const char *, char *, int *); int (*xOpen)(lsm_env*, const char *, int flags, lsm_file **); int (*xRead)(lsm_file *, lsm_i64, void *, int); int (*xWrite)(lsm_file *, lsm_i64, void *, int); int (*xTruncate)(lsm_file *, lsm_i64); int (*xSync)(lsm_file *); int (*xSectorSize)(lsm_file *); int (*xRemap)(lsm_file *, lsm_i64, void **, lsm_i64*); int (*xFileid)(lsm_file *, void *pBuf, int *pnBuf); |
︙ | ︙ | |||
96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 | /* ** CAPI: LSM Error Codes */ #define LSM_OK 0 #define LSM_ERROR 1 #define LSM_BUSY 5 #define LSM_NOMEM 7 #define LSM_IOERR 10 #define LSM_CORRUPT 11 #define LSM_FULL 13 #define LSM_CANTOPEN 14 #define LSM_PROTOCOL 15 #define LSM_MISUSE 21 #define LSM_MISMATCH 50 /* ** CAPI: Creating and Destroying Database Connection Handles ** ** Open and close a database connection handle. */ int lsm_new(lsm_env*, lsm_db **ppDb); int lsm_close(lsm_db *pDb); | > > > > | 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 | /* ** CAPI: LSM Error Codes */ #define LSM_OK 0 #define LSM_ERROR 1 #define LSM_BUSY 5 #define LSM_NOMEM 7 #define LSM_READONLY 8 #define LSM_IOERR 10 #define LSM_CORRUPT 11 #define LSM_FULL 13 #define LSM_CANTOPEN 14 #define LSM_PROTOCOL 15 #define LSM_MISUSE 21 #define LSM_MISMATCH 50 #define LSM_IOERR_NOENT (LSM_IOERR | (1<<8)) /* ** CAPI: Creating and Destroying Database Connection Handles ** ** Open and close a database connection handle. */ int lsm_new(lsm_env*, lsm_db **ppDb); int lsm_close(lsm_db *pDb); |
︙ | ︙ | |||
258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 | ** LSM_CONFIG_GET_COMPRESSION: ** Query the compression methods used to compress and decompress database ** content. ** ** LSM_CONFIG_SET_COMPRESSION_FACTORY: ** Configure a factory method to be invoked in case of an LSM_MISMATCH ** error. */ #define LSM_CONFIG_AUTOFLUSH 1 #define LSM_CONFIG_PAGE_SIZE 2 #define LSM_CONFIG_SAFETY 3 #define LSM_CONFIG_BLOCK_SIZE 4 #define LSM_CONFIG_AUTOWORK 5 #define LSM_CONFIG_MMAP 7 #define LSM_CONFIG_USE_LOG 8 #define LSM_CONFIG_AUTOMERGE 9 #define LSM_CONFIG_MAX_FREELIST 10 #define LSM_CONFIG_MULTIPLE_PROCESSES 11 #define LSM_CONFIG_AUTOCHECKPOINT 12 #define LSM_CONFIG_SET_COMPRESSION 13 #define LSM_CONFIG_GET_COMPRESSION 14 #define LSM_CONFIG_SET_COMPRESSION_FACTORY 15 #define LSM_SAFETY_OFF 0 #define LSM_SAFETY_NORMAL 1 #define LSM_SAFETY_FULL 2 /* ** CAPI: Compression and/or Encryption Hooks | > > > > > | 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 | ** LSM_CONFIG_GET_COMPRESSION: ** Query the compression methods used to compress and decompress database ** content. ** ** LSM_CONFIG_SET_COMPRESSION_FACTORY: ** Configure a factory method to be invoked in case of an LSM_MISMATCH ** error. ** ** LSM_CONFIG_READONLY: ** A read/write boolean parameter. This parameter may only be set before ** lsm_open() is called. */ #define LSM_CONFIG_AUTOFLUSH 1 #define LSM_CONFIG_PAGE_SIZE 2 #define LSM_CONFIG_SAFETY 3 #define LSM_CONFIG_BLOCK_SIZE 4 #define LSM_CONFIG_AUTOWORK 5 #define LSM_CONFIG_MMAP 7 #define LSM_CONFIG_USE_LOG 8 #define LSM_CONFIG_AUTOMERGE 9 #define LSM_CONFIG_MAX_FREELIST 10 #define LSM_CONFIG_MULTIPLE_PROCESSES 11 #define LSM_CONFIG_AUTOCHECKPOINT 12 #define LSM_CONFIG_SET_COMPRESSION 13 #define LSM_CONFIG_GET_COMPRESSION 14 #define LSM_CONFIG_SET_COMPRESSION_FACTORY 15 #define LSM_CONFIG_READONLY 16 #define LSM_SAFETY_OFF 0 #define LSM_SAFETY_NORMAL 1 #define LSM_SAFETY_FULL 2 /* ** CAPI: Compression and/or Encryption Hooks |
︙ | ︙ |
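
The public-interface additions above (the LSM_OPEN_READONLY flag for lsm_env.xOpen(), the LSM_READONLY and LSM_IOERR_NOENT error codes, and the LSM_CONFIG_READONLY parameter) are enough to sketch how a read-only client is expected to use the new code. The fragment below is illustrative only and is not part of this check-in; the database name and key are placeholders and error handling is abbreviated.

#include <stdio.h>
#include "lsm.h"

/* Open database file zDb read-only and print the value stored under
** key "k", if any. A sketch only - not part of this check-in. */
static int read_one_key(const char *zDb){
  lsm_db *db = 0;
  lsm_cursor *csr = 0;
  int iRo = 1;                  /* True to request a read-only connection */
  int rc;

  rc = lsm_new(0, &db);         /* 0 -> use the default environment */
  if( rc==LSM_OK ) rc = lsm_config(db, LSM_CONFIG_READONLY, &iRo);
  if( rc==LSM_OK ) rc = lsm_open(db, zDb);

  /* On a read-only handle lsm_csr_open() starts a read-only transaction
  ** (see lsmBeginRoTrans() in the lsm_shared.c diff below) instead of a
  ** normal read transaction. */
  if( rc==LSM_OK ) rc = lsm_csr_open(db, &csr);
  if( rc==LSM_OK ) rc = lsm_csr_seek(csr, "k", 1, LSM_SEEK_EQ);
  if( rc==LSM_OK && lsm_csr_valid(csr) ){
    const void *pVal; int nVal;
    rc = lsm_csr_value(csr, &pVal, &nVal);
    if( rc==LSM_OK ) printf("k -> %.*s\n", nVal, (const char *)pVal);
  }

  if( csr ) lsm_csr_close(csr);
  if( db ) lsm_close(db);
  return rc;
}
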
Changes to src/lsmInt.h.
︙ | ︙ | |||
110 111 112 113 114 115 116 | #ifdef LSM_DEBUG int lsmErrorBkpt(int); #else # define lsmErrorBkpt(x) (x) #endif | > | | | | | < | | | | 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 | #ifdef LSM_DEBUG int lsmErrorBkpt(int); #else # define lsmErrorBkpt(x) (x) #endif #define LSM_PROTOCOL_BKPT lsmErrorBkpt(LSM_PROTOCOL) #define LSM_IOERR_BKPT lsmErrorBkpt(LSM_IOERR) #define LSM_NOMEM_BKPT lsmErrorBkpt(LSM_NOMEM) #define LSM_CORRUPT_BKPT lsmErrorBkpt(LSM_CORRUPT) #define LSM_MISUSE_BKPT lsmErrorBkpt(LSM_MISUSE) #define unused_parameter(x) (void)(x) #define array_size(x) (sizeof(x)/sizeof(x[0])) /* The size of each shared-memory chunk */ #define LSM_SHM_CHUNK_SIZE (32*1024) /* The number of bytes reserved at the start of each shm chunk for MM. */ #define LSM_SHM_CHUNK_HDR (sizeof(ShmChunk)) /* The number of available read locks. */ #define LSM_LOCK_NREADER 6 /* The number of available read-write client locks. */ #define LSM_LOCK_NRWCLIENT 16 /* Lock definitions. */ #define LSM_LOCK_DMS1 1 #define LSM_LOCK_DMS2 2 #define LSM_LOCK_WRITER 3 #define LSM_LOCK_WORKER 4 #define LSM_LOCK_CHECKPOINTER 5 #define LSM_LOCK_READER(i) ((i) + LSM_LOCK_CHECKPOINTER + 1) #define LSM_LOCK_RWCLIENT(i) ((i) + LSM_LOCK_READER(LSM_LOCK_NREADER)) /* ** Hard limit on the number of free-list entries that may be stored in ** a checkpoint (the remainder are stored as a system record in the LSM). ** See also LSM_CONFIG_MAX_FREELIST. |
︙ | ︙ | |||
316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 | int bUseLog; /* Configured by LSM_CONFIG_USE_LOG */ int nDfltPgsz; /* Configured by LSM_CONFIG_PAGE_SIZE */ int nDfltBlksz; /* Configured by LSM_CONFIG_BLOCK_SIZE */ int nMaxFreelist; /* Configured by LSM_CONFIG_MAX_FREELIST */ int bMmap; /* Configured by LSM_CONFIG_MMAP */ i64 nAutockpt; /* Configured by LSM_CONFIG_AUTOCHECKPOINT */ int bMultiProc; /* Configured by L_C_MULTIPLE_PROCESSES */ lsm_compress compress; /* Compression callbacks */ lsm_compress_factory factory; /* Compression callback factory */ /* Sub-system handles */ FileSystem *pFS; /* On-disk portion of database */ Database *pDatabase; /* Database shared data */ int iRwclient; /* Read-write client lock held (-1 == none) */ /* Client transaction context */ Snapshot *pClient; /* Client snapshot */ int iReader; /* Read lock held (-1 == unlocked) */ MultiCursor *pCsr; /* List of all open cursors */ LogWriter *pLogWriter; /* Context for writing to the log file */ int nTransOpen; /* Number of opened write transactions */ int nTransAlloc; /* Allocated size of aTrans[] array */ TransMark *aTrans; /* Array of marks for transaction rollback */ IntArray rollback; /* List of tree-nodes to roll back */ int bDiscardOld; /* True if lsmTreeDiscardOld() was called */ | > > > | 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 | int bUseLog; /* Configured by LSM_CONFIG_USE_LOG */ int nDfltPgsz; /* Configured by LSM_CONFIG_PAGE_SIZE */ int nDfltBlksz; /* Configured by LSM_CONFIG_BLOCK_SIZE */ int nMaxFreelist; /* Configured by LSM_CONFIG_MAX_FREELIST */ int bMmap; /* Configured by LSM_CONFIG_MMAP */ i64 nAutockpt; /* Configured by LSM_CONFIG_AUTOCHECKPOINT */ int bMultiProc; /* Configured by L_C_MULTIPLE_PROCESSES */ int bReadonly; /* Configured by LSM_CONFIG_READONLY */ lsm_compress compress; /* Compression callbacks */ lsm_compress_factory factory; /* Compression callback factory */ /* Sub-system handles */ FileSystem *pFS; /* On-disk portion of database */ Database *pDatabase; /* Database shared data */ int iRwclient; /* Read-write client lock held (-1 == none) */ /* Client transaction context */ Snapshot *pClient; /* Client snapshot */ int iReader; /* Read lock held (-1 == unlocked) */ int bRoTrans; /* True if a read-only db trans is open */ MultiCursor *pCsr; /* List of all open cursors */ LogWriter *pLogWriter; /* Context for writing to the log file */ int nTransOpen; /* Number of opened write transactions */ int nTransAlloc; /* Allocated size of aTrans[] array */ TransMark *aTrans; /* Array of marks for transaction rollback */ IntArray rollback; /* List of tree-nodes to roll back */ int bDiscardOld; /* True if lsmTreeDiscardOld() was called */ |
︙ | ︙ | |||
651 652 653 654 655 656 657 | int lsmMutexHeld(lsm_env *, lsm_mutex *); int lsmMutexNotHeld(lsm_env *, lsm_mutex *); #endif /************************************************************************** ** Start of functions from "lsm_file.c". */ | | > > | 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 | int lsmMutexHeld(lsm_env *, lsm_mutex *); int lsmMutexNotHeld(lsm_env *, lsm_mutex *); #endif /************************************************************************** ** Start of functions from "lsm_file.c". */ int lsmFsOpen(lsm_db *, const char *, int); int lsmFsOpenLog(lsm_db *, int *); void lsmFsCloseLog(lsm_db *); void lsmFsClose(FileSystem *); int lsmFsConfigure(lsm_db *db); int lsmFsBlockSize(FileSystem *); void lsmFsSetBlockSize(FileSystem *, int); |
︙ | ︙ | |||
725 726 727 728 729 730 731 | void lsmFsFlushWaiting(FileSystem *, int *); /* Used by lsm_info(ARRAY_STRUCTURE) and lsm_config(MMAP) */ int lsmInfoArrayStructure(lsm_db *pDb, int bBlock, Pgno iFirst, char **pzOut); int lsmInfoArrayPages(lsm_db *pDb, Pgno iFirst, char **pzOut); int lsmConfigMmap(lsm_db *pDb, int *piParam); | | | 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 | void lsmFsFlushWaiting(FileSystem *, int *); /* Used by lsm_info(ARRAY_STRUCTURE) and lsm_config(MMAP) */ int lsmInfoArrayStructure(lsm_db *pDb, int bBlock, Pgno iFirst, char **pzOut); int lsmInfoArrayPages(lsm_db *pDb, Pgno iFirst, char **pzOut); int lsmConfigMmap(lsm_db *pDb, int *piParam); int lsmEnvOpen(lsm_env *, const char *, int, lsm_file **); int lsmEnvClose(lsm_env *pEnv, lsm_file *pFile); int lsmEnvLock(lsm_env *pEnv, lsm_file *pFile, int iLock, int eLock); int lsmEnvShmMap(lsm_env *, lsm_file *, int, int, void **); void lsmEnvShmBarrier(lsm_env *); void lsmEnvShmUnmap(lsm_env *, lsm_file *, int); |
︙ | ︙ |
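
For reference, the lock definitions above produce the layout printed by the small standalone program below (not part of the check-in). The byte offsets assume the posix implementation, which maps lock number iLock to file offset 4096-iLock (see the lsm_unix.c diff below).

#include <stdio.h>

/* Copied from the lsmInt.h diff above. */
#define LSM_LOCK_NREADER        6
#define LSM_LOCK_NRWCLIENT     16
#define LSM_LOCK_DMS1           1
#define LSM_LOCK_DMS2           2
#define LSM_LOCK_WRITER         3
#define LSM_LOCK_WORKER         4
#define LSM_LOCK_CHECKPOINTER   5
#define LSM_LOCK_READER(i)    ((i) + LSM_LOCK_CHECKPOINTER + 1)
#define LSM_LOCK_RWCLIENT(i)  ((i) + LSM_LOCK_READER(LSM_LOCK_NREADER))

int main(void){
  /* CHECKPOINTER is lock 5, the READER locks are 6..11 and the new
  ** RWCLIENT locks are 12..27 - still within the 32-lock limit asserted
  ** in the lsm_shared.c diff below. */
  printf("CHECKPOINTER  %2d (posix byte %d)\n",
         LSM_LOCK_CHECKPOINTER, 4096-LSM_LOCK_CHECKPOINTER);
  printf("READER        %2d..%2d\n",
         LSM_LOCK_READER(0), LSM_LOCK_READER(LSM_LOCK_NREADER-1));
  printf("RWCLIENT      %2d..%2d\n",
         LSM_LOCK_RWCLIENT(0), LSM_LOCK_RWCLIENT(LSM_LOCK_NRWCLIENT-1));
  return 0;
}
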
Changes to src/lsm_ckpt.c.
︙ | ︙ | |||
879 880 881 882 883 884 885 | if( piRead ) *piRead = 2; return LSM_OK; } } lsmShmBarrier(pDb); } | | | 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 | if( piRead ) *piRead = 2; return LSM_OK; } } lsmShmBarrier(pDb); } return LSM_PROTOCOL_BKPT; } int lsmInfoCompressionId(lsm_db *db, u32 *piCmpId){ int rc; assert( db->pClient==0 && db->pWorker==0 ); rc = lsmCheckpointLoad(db, 0); |
︙ | ︙ | |||
930 931 932 933 934 935 936 | nInt2 = pShm->aSnap2[CKPT_HDR_NCKPT]; if( nInt1!=nInt2 || memcmp(pShm->aSnap1, pShm->aSnap2, nInt2*sizeof(u32)) ){ if( ckptChecksumOk(pShm->aSnap1) ){ memcpy(pShm->aSnap2, pShm->aSnap1, sizeof(u32)*nInt1); }else if( ckptChecksumOk(pShm->aSnap2) ){ memcpy(pShm->aSnap1, pShm->aSnap2, sizeof(u32)*nInt2); }else{ | | | 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 | nInt2 = pShm->aSnap2[CKPT_HDR_NCKPT]; if( nInt1!=nInt2 || memcmp(pShm->aSnap1, pShm->aSnap2, nInt2*sizeof(u32)) ){ if( ckptChecksumOk(pShm->aSnap1) ){ memcpy(pShm->aSnap2, pShm->aSnap1, sizeof(u32)*nInt1); }else if( ckptChecksumOk(pShm->aSnap2) ){ memcpy(pShm->aSnap1, pShm->aSnap2, sizeof(u32)*nInt2); }else{ return LSM_PROTOCOL_BKPT; } } rc = lsmCheckpointDeserialize(pDb, 1, pShm->aSnap1, &pDb->pWorker); if( pDb->pWorker ) pDb->pWorker->pDatabase = pDb->pDatabase; if( rc==LSM_OK ){ |
︙ | ︙ |
Changes to src/lsm_file.c.
︙ | ︙ | |||
299 300 301 302 303 304 305 | ** lsmEnvSync() ** lsmEnvSectorSize() ** lsmEnvClose() ** lsmEnvTruncate() ** lsmEnvUnlink() ** lsmEnvRemap() */ | | | | 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 | ** lsmEnvSync() ** lsmEnvSectorSize() ** lsmEnvClose() ** lsmEnvTruncate() ** lsmEnvUnlink() ** lsmEnvRemap() */ int lsmEnvOpen(lsm_env *pEnv, const char *zFile, int flags, lsm_file **ppNew){ return pEnv->xOpen(pEnv, zFile, flags, ppNew); } static int lsmEnvRead( lsm_env *pEnv, lsm_file *pFile, lsm_i64 iOff, void *pRead, int nRead |
︙ | ︙ | |||
455 456 457 458 459 460 461 462 463 464 465 466 | /* ** This is a helper function for lsmFsOpen(). It opens a single file on ** disk (either the database or log file). */ static lsm_file *fsOpenFile( FileSystem *pFS, /* File system object */ int bLog, /* True for log, false for db */ int *pRc /* IN/OUT: Error code */ ){ lsm_file *pFile = 0; if( *pRc==LSM_OK ){ | > > > > | | > > | > > > > > > > > > > > > > > > > > > > > > > > > | > > > > | 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 | /* ** This is a helper function for lsmFsOpen(). It opens a single file on ** disk (either the database or log file). */ static lsm_file *fsOpenFile( FileSystem *pFS, /* File system object */ int bReadonly, /* True to open this file read-only */ int bLog, /* True for log, false for db */ int *pRc /* IN/OUT: Error code */ ){ lsm_file *pFile = 0; if( *pRc==LSM_OK ){ int flags = (bReadonly ? LSM_OPEN_READONLY : 0); const char *zPath = (bLog ? pFS->zLog : pFS->zDb); *pRc = lsmEnvOpen(pFS->pEnv, zPath, flags, &pFile); } return pFile; } /* ** If it is not already open, this function opens the log file. It returns ** LSM_OK if successful (or if the log file was already open) or an LSM ** error code otherwise. ** ** The log file must be opened before any of the following may be called: ** ** lsmFsWriteLog ** lsmFsSyncLog ** lsmFsReadLog */ int lsmFsOpenLog(lsm_db *db, int *pbOpen){ int rc = LSM_OK; FileSystem *pFS = db->pFS; if( 0==pFS->fdLog ){ pFS->fdLog = fsOpenFile(pFS, db->bReadonly, 1, &rc); if( rc==LSM_IOERR_NOENT && db->bReadonly ){ rc = LSM_OK; } } if( pbOpen ) *pbOpen = (pFS->fdLog!=0); return rc; } void lsmFsCloseLog(lsm_db *db){ FileSystem *pFS = db->pFS; if( pFS->fdLog ){ lsmEnvClose(pFS->pEnv, pFS->fdLog); pFS->fdLog = 0; } } /* ** Open a connection to a database stored within the file-system (the ** "system of files"). ** ** If parameter bReadonly is true, then open a read-only file-descriptor ** on the database file. It is possible that bReadonly will be false even ** if the user requested that pDb be opened read-only. This is because the ** file-descriptor may later on be recycled by a read-write connection. ** If the db file can be opened for read-write access, it always is. Parameter ** bReadonly is only ever true if it has already been determined that the ** db can only be opened for read-only access. */ int lsmFsOpen( lsm_db *pDb, /* Database connection to open fd for */ const char *zDb, /* Full path to database file */ int bReadonly /* True to open db file read-only */ ){ FileSystem *pFS; int rc = LSM_OK; int nDb = strlen(zDb); int nByte; assert( pDb->pFS==0 ); assert( pDb->pWorker==0 && pDb->pClient==0 ); |
︙ | ︙ | |||
527 528 529 530 531 532 533 | if( pLsmFile ){ pFS->pLsmFile = pLsmFile; pFS->fdDb = pLsmFile->pFile; memset(pLsmFile, 0, sizeof(LsmFile)); }else{ pFS->pLsmFile = lsmMallocZeroRc(pDb->pEnv, sizeof(LsmFile), &rc); if( rc==LSM_OK ){ | | | 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 | if( pLsmFile ){ pFS->pLsmFile = pLsmFile; pFS->fdDb = pLsmFile->pFile; memset(pLsmFile, 0, sizeof(LsmFile)); }else{ pFS->pLsmFile = lsmMallocZeroRc(pDb->pEnv, sizeof(LsmFile), &rc); if( rc==LSM_OK ){ pFS->fdDb = fsOpenFile(pFS, bReadonly, 0, &rc); } } if( rc!=LSM_OK ){ lsmFsClose(pFS); pFS = 0; }else{ |
︙ | ︙ |
Changes to src/lsm_log.c.
︙ | ︙ | |||
356 357 358 359 360 361 362 | LogWriter *pNew; LogRegion *aReg; if( pDb->bUseLog==0 ) return LSM_OK; /* If the log file has not yet been opened, open it now. Also allocate ** the LogWriter structure, if it has not already been allocated. */ | | | 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 | LogWriter *pNew; LogRegion *aReg; if( pDb->bUseLog==0 ) return LSM_OK; /* If the log file has not yet been opened, open it now. Also allocate ** the LogWriter structure, if it has not already been allocated. */ rc = lsmFsOpenLog(pDb, 0); if( pDb->pLogWriter==0 ){ pNew = lsmMallocZeroRc(pDb->pEnv, sizeof(LogWriter), &rc); if( pNew ){ lsmStringInit(&pNew->buf, pDb->pEnv); rc = lsmStringExtend(&pNew->buf, 2); } }else{ |
︙ | ︙ | |||
954 955 956 957 958 959 960 961 | LsmString buf2; /* Value buffer */ LogReader reader; /* Log reader object */ int rc = LSM_OK; /* Return code */ int nCommit = 0; /* Number of transactions to recover */ int iPass; int nJump = 0; /* Number of LSM_LOG_JUMP records in pass 0 */ DbLog *pLog; | > | > | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | > > > > > | 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 | LsmString buf2; /* Value buffer */ LogReader reader; /* Log reader object */ int rc = LSM_OK; /* Return code */ int nCommit = 0; /* Number of transactions to recover */ int iPass; int nJump = 0; /* Number of LSM_LOG_JUMP records in pass 0 */ DbLog *pLog; int bOpen; rc = lsmFsOpenLog(pDb, &bOpen); if( rc!=LSM_OK ) return rc; rc = lsmTreeInit(pDb); if( rc!=LSM_OK ) return rc; pLog = &pDb->treehdr.log; lsmCheckpointLogoffset(pDb->pShmhdr->aSnap2, pLog); logReaderInit(pDb, pLog, 1, &reader); lsmStringInit(&buf1, pDb->pEnv); lsmStringInit(&buf2, pDb->pEnv); /* The outer for() loop runs at most twice. The first iteration is to ** count the number of committed transactions in the log. The second ** iterates through those transactions and updates the in-memory tree ** structure with their contents. 
*/ if( bOpen ){ for(iPass=0; iPass<2 && rc==LSM_OK; iPass++){ int bEof = 0; while( rc==LSM_OK && !bEof ){ u8 eType = 0; logReaderByte(&reader, &eType, &rc); switch( eType ){ case LSM_LOG_PAD1: break; case LSM_LOG_PAD2: { int nPad; logReaderVarint(&reader, &buf1, &nPad, &rc); logReaderBlob(&reader, &buf1, nPad, 0, &rc); break; } case LSM_LOG_WRITE: case LSM_LOG_WRITE_CKSUM: { int nKey; int nVal; u8 *aVal; logReaderVarint(&reader, &buf1, &nKey, &rc); logReaderVarint(&reader, &buf2, &nVal, &rc); if( eType==LSM_LOG_WRITE_CKSUM ){ logReaderCksum(&reader, &buf1, &bEof, &rc); }else{ bEof = logRequireCksum(&reader, nKey+nVal); } if( bEof ) break; logReaderBlob(&reader, &buf1, nKey, 0, &rc); logReaderBlob(&reader, &buf2, nVal, &aVal, &rc); if( iPass==1 && rc==LSM_OK ){ rc = lsmTreeInsert(pDb, (u8 *)buf1.z, nKey, aVal, nVal); } break; } case LSM_LOG_DELETE: case LSM_LOG_DELETE_CKSUM: { int nKey; u8 *aKey; logReaderVarint(&reader, &buf1, &nKey, &rc); if( eType==LSM_LOG_DELETE_CKSUM ){ logReaderCksum(&reader, &buf1, &bEof, &rc); }else{ bEof = logRequireCksum(&reader, nKey); } if( bEof ) break; logReaderBlob(&reader, &buf1, nKey, &aKey, &rc); if( iPass==1 && rc==LSM_OK ){ rc = lsmTreeInsert(pDb, aKey, nKey, NULL, -1); } break; } case LSM_LOG_COMMIT: logReaderCksum(&reader, &buf1, &bEof, &rc); if( bEof==0 ){ nCommit++; assert( nCommit>0 || iPass==1 ); if( nCommit==0 ) bEof = 1; } break; case LSM_LOG_JUMP: { int iOff = 0; logReaderVarint(&reader, &buf1, &iOff, &rc); if( rc==LSM_OK ){ if( iPass==1 ){ if( pLog->aRegion[2].iStart==0 ){ assert( pLog->aRegion[1].iStart==0 ); pLog->aRegion[1].iEnd = reader.iOff; }else{ assert( pLog->aRegion[0].iStart==0 ); pLog->aRegion[0].iStart = pLog->aRegion[2].iStart; pLog->aRegion[0].iEnd = reader.iOff - reader.buf.n+reader.iBuf; } pLog->aRegion[2].iStart = iOff; }else{ if( (nJump++)==2 ){ bEof = 1; } } reader.iOff = iOff; reader.buf.n = reader.iBuf; } break; } default: /* Including LSM_LOG_EOF */ bEof = 1; break; } } if( rc==LSM_OK && iPass==0 ){ if( nCommit==0 ){ if( pLog->aRegion[2].iStart==0 ){ iPass = 1; }else{ pLog->aRegion[2].iStart = 0; iPass = -1; lsmCheckpointZeroLogoffset(pDb); } } logReaderInit(pDb, pLog, 0, &reader); nCommit = nCommit * -1; } } } /* Initialize DbLog object */ if( rc==LSM_OK ){ pLog->aRegion[2].iEnd = reader.iOff - reader.buf.n + reader.iBuf; pLog->cksum0 = reader.cksum0; pLog->cksum1 = reader.cksum1; } if( rc==LSM_OK ){ rc = lsmFinishRecovery(pDb); }else{ lsmFinishRecovery(pDb); } if( pDb->bRoTrans ){ lsmFsCloseLog(pDb); } lsmStringClear(&buf1); lsmStringClear(&buf2); lsmStringClear(&reader.buf); return rc; } |
︙ | ︙ |
Changes to src/lsm_main.c.
︙ | ︙ | |||
35 36 37 38 39 40 41 | */ static void assert_db_state(lsm_db *pDb){ /* If there is at least one cursor or a write transaction open, the database ** handle must be holding a pointer to a client snapshot. And the reverse ** - if there are no open cursors and no write transactions then there must ** not be a client snapshot. */ | > | | | 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 | */ static void assert_db_state(lsm_db *pDb){ /* If there is at least one cursor or a write transaction open, the database ** handle must be holding a pointer to a client snapshot. And the reverse ** - if there are no open cursors and no write transactions then there must ** not be a client snapshot. */ assert( (pDb->pCsr!=0||pDb->nTransOpen>0)==(pDb->iReader>=0||pDb->bRoTrans) ); assert( (pDb->iReader<0 && pDb->bRoTrans==0) || pDb->pClient!=0 ); assert( pDb->nTransOpen>=0 ); } #else # define assert_db_state(x) #endif |
︙ | ︙ | |||
159 160 161 162 163 164 165 | ** than one purpose - to open both the database and log files, and ** perhaps to unlink the log file during disconnection. An absolute ** path is required to ensure that the correct files are operated ** on even if the application changes the cwd. */ rc = getFullpathname(pDb->pEnv, zFilename, &zFull); assert( rc==LSM_OK || zFull==0 ); | | > | | | | > | | | > > > > | 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 | ** than one purpose - to open both the database and log files, and ** perhaps to unlink the log file during disconnection. An absolute ** path is required to ensure that the correct files are operated ** on even if the application changes the cwd. */ rc = getFullpathname(pDb->pEnv, zFilename, &zFull); assert( rc==LSM_OK || zFull==0 ); /* Connect to the database. */ if( rc==LSM_OK ){ rc = lsmDbDatabaseConnect(pDb, zFull); } if( pDb->bReadonly==0 ){ /* Configure the file-system connection with the page-size and block-size ** of this database. Even if the database file is zero bytes in size ** on disk, these values have been set in shared-memory by now, and so ** are guaranteed not to change during the lifetime of this connection. */ if( rc==LSM_OK && LSM_OK==(rc = lsmCheckpointLoad(pDb, 0)) ){ lsmFsSetPageSize(pDb->pFS, lsmCheckpointPgsz(pDb->aSnapshot)); lsmFsSetBlockSize(pDb->pFS, lsmCheckpointBlksz(pDb->aSnapshot)); } } lsmFree(pDb->pEnv, zFull); } assert( pDb->bReadonly==0 || pDb->bReadonly==1 ); assert( rc!=LSM_OK || (pDb->pShmhdr==0)==(pDb->bReadonly==1) ); return rc; } int lsm_close(lsm_db *pDb){ int rc = LSM_OK; if( pDb ){ |
︙ | ︙ | |||
341 342 343 344 345 346 347 348 349 350 351 352 353 354 | ** in multi-process mode. */ *piVal = lsmDbMultiProc(pDb); }else{ pDb->bMultiProc = *piVal = (*piVal!=0); } break; } case LSM_CONFIG_SET_COMPRESSION: { lsm_compress *p = va_arg(ap, lsm_compress *); if( pDb->iReader>=0 && pDb->bInFactory==0 ){ /* May not change compression schemes with an open transaction */ rc = LSM_MISUSE_BKPT; }else{ | > > > > > > > > > > | 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 | ** in multi-process mode. */ *piVal = lsmDbMultiProc(pDb); }else{ pDb->bMultiProc = *piVal = (*piVal!=0); } break; } case LSM_CONFIG_READONLY: { int *piVal = va_arg(ap, int *); /* If lsm_open() has been called, this is a read-only parameter. */ if( pDb->pDatabase==0 && *piVal>=0 ){ pDb->bReadonly = *piVal = (*piVal!=0); } *piVal = pDb->bReadonly; break; } case LSM_CONFIG_SET_COMPRESSION: { lsm_compress *p = va_arg(ap, lsm_compress *); if( pDb->iReader>=0 && pDb->bInFactory==0 ){ /* May not change compression schemes with an open transaction */ rc = LSM_MISUSE_BKPT; }else{ |
︙ | ︙ | |||
718 719 720 721 722 723 724 | */ int lsm_csr_open(lsm_db *pDb, lsm_cursor **ppCsr){ int rc; /* Return code */ MultiCursor *pCsr = 0; /* New cursor object */ /* Open a read transaction if one is not already open. */ assert_db_state(pDb); | > > > > > > | > | 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 | */ int lsm_csr_open(lsm_db *pDb, lsm_cursor **ppCsr){ int rc; /* Return code */ MultiCursor *pCsr = 0; /* New cursor object */ /* Open a read transaction if one is not already open. */ assert_db_state(pDb); if( pDb->pShmhdr==0 ){ assert( pDb->bReadonly ); rc = lsmBeginRoTrans(pDb); }else{ assert( pDb->bRoTrans==0 ); rc = lsmBeginReadTrans(pDb); } /* Allocate the multi-cursor. */ if( rc==LSM_OK ) rc = lsmMCursorNew(pDb, &pCsr); /* If an error has occured, set the output to NULL and delete any partially ** allocated cursor. If this means there are no open cursors, release the ** client snapshot. */ |
︙ | ︙ | |||
821 822 823 824 825 826 827 | va_end(ap2); pDb->xLog(pDb->pLogCtx, rc, s.z); lsmStringClear(&s); } } int lsm_begin(lsm_db *pDb, int iLevel){ | | > < | 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 | va_end(ap2); pDb->xLog(pDb->pLogCtx, rc, s.z); lsmStringClear(&s); } } int lsm_begin(lsm_db *pDb, int iLevel){ int rc; assert_db_state( pDb ); rc = (pDb->bReadonly ? LSM_READONLY : LSM_OK); /* A value less than zero means open one more transaction. */ if( iLevel<0 ) iLevel = pDb->nTransOpen + 1; if( iLevel>pDb->nTransOpen ){ int i; /* Extend the pDb->aTrans[] array if required. */ if( rc==LSM_OK && pDb->nTransAlloc<iLevel ){ TransMark *aNew; /* New allocation */ int nByte = sizeof(TransMark) * (iLevel+1); |
︙ | ︙ |
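
A consequence of the lsm_main.c changes above: on a handle opened with LSM_CONFIG_READONLY set, cursor opens are routed through lsmBeginRoTrans() (the handle never connects to shared memory, so pShmhdr is NULL), while lsm_begin() fails immediately. The fragment below illustrates the expected behaviour; it is a sketch, not part of the check-in.

#include "lsm.h"

/* db is a connection opened with LSM_CONFIG_READONLY=1, as in the
** earlier sketch. Illustrative only. */
static int check_readonly_behaviour(lsm_db *db){
  lsm_cursor *csr = 0;
  int rc;

  rc = lsm_begin(db, 1);            /* Expected: LSM_READONLY */
  if( rc!=LSM_READONLY ) return rc ? rc : LSM_ERROR;

  /* Expected: LSM_OK - opens a read-only transaction via
  ** lsmBeginRoTrans() rather than a normal read transaction. */
  rc = lsm_csr_open(db, &csr);
  if( rc==LSM_OK ) lsm_csr_close(csr);
  return rc;
}
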
Changes to src/lsm_shared.c.
︙ | ︙ | |||
45 46 47 48 49 50 51 52 53 54 55 56 57 58 | /* Protected by the global mutex (enterGlobalMutex/leaveGlobalMutex): */ char *zName; /* Canonical path to database file */ int nName; /* strlen(zName) */ int nDbRef; /* Number of associated lsm_db handles */ Database *pDbNext; /* Next Database structure in global list */ /* Protected by the local mutex (pClientMutex) */ int bMultiProc; /* True if running in multi-process mode */ lsm_file *pFile; /* Used for locks/shm in multi-proc mode */ LsmFile *pLsmFile; /* List of deferred closes */ lsm_mutex *pClientMutex; /* Protects the apShmChunk[] and pConn */ int nShmChunk; /* Number of entries in apShmChunk[] array */ void **apShmChunk; /* Array of "shared" memory regions */ lsm_db *pConn; /* List of connections to this db. */ | > | 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 | /* Protected by the global mutex (enterGlobalMutex/leaveGlobalMutex): */ char *zName; /* Canonical path to database file */ int nName; /* strlen(zName) */ int nDbRef; /* Number of associated lsm_db handles */ Database *pDbNext; /* Next Database structure in global list */ /* Protected by the local mutex (pClientMutex) */ int bReadonly; /* True if Database.pFile is read-only */ int bMultiProc; /* True if running in multi-process mode */ lsm_file *pFile; /* Used for locks/shm in multi-proc mode */ LsmFile *pLsmFile; /* List of deferred closes */ lsm_mutex *pClientMutex; /* Protects the apShmChunk[] and pConn */ int nShmChunk; /* Number of entries in apShmChunk[] array */ void **apShmChunk; /* Array of "shared" memory regions */ lsm_db *pConn; /* List of connections to this db. */ |
︙ | ︙ | |||
357 358 359 360 361 362 363 364 365 366 367 368 369 370 | break; } } } lsmShmLock(pDb, LSM_LOCK_DMS1, LSM_LOCK_UNLOCK, 0); return rc; } /* ** Return a reference to the shared Database handle for the database ** identified by canonical path zName. If this is the first connection to ** the named database, a new Database object is allocated. Otherwise, a ** pointer to an existing object is returned. ** | > > > > > > > > > > > > | 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 | break; } } } lsmShmLock(pDb, LSM_LOCK_DMS1, LSM_LOCK_UNLOCK, 0); return rc; } static int dbOpenSharedFd(lsm_env *pEnv, Database *p, int bRoOk){ int rc; rc = lsmEnvOpen(pEnv, p->zName, 0, &p->pFile); if( rc==LSM_IOERR && bRoOk ){ rc = lsmEnvOpen(pEnv, p->zName, LSM_OPEN_READONLY, &p->pFile); p->bReadonly = 1; } return rc; } /* ** Return a reference to the shared Database handle for the database ** identified by canonical path zName. If this is the first connection to ** the named database, a new Database object is allocated. Otherwise, a ** pointer to an existing object is returned. ** |
︙ | ︙ | |||
409 410 411 412 413 414 415 | rc = lsmMutexNew(pEnv, &p->pClientMutex); } /* If nothing has gone wrong so far, open the shared fd. And if that ** succeeds and this connection requested single-process mode, ** attempt to take the exclusive lock on DMS2. */ if( rc==LSM_OK ){ | > | > > | 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 | rc = lsmMutexNew(pEnv, &p->pClientMutex); } /* If nothing has gone wrong so far, open the shared fd. And if that ** succeeds and this connection requested single-process mode, ** attempt to take the exclusive lock on DMS2. */ if( rc==LSM_OK ){ int bReadonly = (pDb->bReadonly && pDb->bMultiProc); rc = dbOpenSharedFd(pDb->pEnv, p, bReadonly); } if( rc==LSM_OK && p->bMultiProc==0 ){ assert( p->bReadonly==0 ); rc = lsmEnvLock(pDb->pEnv, p->pFile, LSM_LOCK_DMS2, LSM_LOCK_EXCL); } if( rc==LSM_OK ){ p->pDbNext = gShared.pDatabase; gShared.pDatabase = p; }else{ |
︙ | ︙ | |||
440 441 442 443 444 445 446 | lsmMutexLeave(pDb->pEnv, p->pClientMutex); } } pDb->pDatabase = p; if( rc==LSM_OK ){ assert( p ); | | > > > > > > | | | | | > | 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 | lsmMutexLeave(pDb->pEnv, p->pClientMutex); } } pDb->pDatabase = p; if( rc==LSM_OK ){ assert( p ); rc = lsmFsOpen(pDb, zName, p->bReadonly); } /* If the db handle is read-write, then connect to the system now. Run ** recovery as necessary. Or, if this is a read-only database handle, ** defer attempting to connect to the system until a read-transaction ** is opened. */ if( pDb->bReadonly==0 ){ if( rc==LSM_OK ){ rc = doDbConnect(pDb); } if( rc==LSM_OK ){ rc = lsmFsConfigure(pDb); } } return rc; } static void dbDeferClose(lsm_db *pDb){ if( pDb->pFS ){ |
︙ | ︙ | |||
904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 | if( p ){ lsmSortedFreeLevel(pEnv, p->pLevel); lsmFree(pEnv, p->freelist.aEntry); lsmFree(pEnv, p->redirect.a); lsmFree(pEnv, p); } } /* ** Argument bFlush is true if the contents of the in-memory tree has just ** been flushed to disk. The significance of this is that once the snapshot ** created to hold the updated state of the database is synced to disk, log ** file space can be recycled. */ void lsmFinishWork(lsm_db *pDb, int bFlush, int *pRc){ int rc = *pRc; assert( rc!=0 || pDb->pWorker ); if( pDb->pWorker ){ /* If no error has occurred, serialize the worker snapshot and write ** it to shared memory. */ if( rc==LSM_OK ){ rc = lsmSaveWorker(pDb, bFlush); } /* Assuming no error has occurred, update a read lock slot with the | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | | | 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 | if( p ){ lsmSortedFreeLevel(pEnv, p->pLevel); lsmFree(pEnv, p->freelist.aEntry); lsmFree(pEnv, p->redirect.a); lsmFree(pEnv, p); } } /* ** Attempt to populate one of the read-lock slots to contain lock values ** iLsm/iShm. Or, if such a slot exists already, this function is a no-op. ** ** It is not an error if no slot can be populated because the write-lock ** cannot be obtained. If any other error occurs, return an LSM error code. ** Otherwise, LSM_OK. ** ** This function is called at various points to try to ensure that there ** always exists at least one read-lock slot that can be used by a read-only ** client. And so that, in the usual case, there is an "exact match" available ** whenever a read transaction is opened by any client. At present this ** function is called when: ** ** * A write transaction that called lsmTreeDiscardOld() is committed, and ** * Whenever the working snapshot is updated (i.e. lsmFinishWork()). */ static int dbSetReadLock(lsm_db *db, i64 iLsm, u32 iShm){ int rc = LSM_OK; ShmHeader *pShm = db->pShmhdr; int i; /* Check if there is already a slot containing the required values. */ for(i=0; i<LSM_LOCK_NREADER; i++){ ShmReader *p = &pShm->aReader[i]; if( p->iLsmId==iLsm && p->iTreeId==iShm ) return LSM_OK; } /* Iterate through all read-lock slots, attempting to take a write-lock ** on each of them. If a write-lock succeeds, populate the locked slot ** with the required values and break out of the loop. */ for(i=0; rc==LSM_OK && i<LSM_LOCK_NREADER; i++){ rc = lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_EXCL, 0); if( rc==LSM_BUSY ){ rc = LSM_OK; }else{ ShmReader *p = &pShm->aReader[i]; p->iLsmId = iLsm; p->iTreeId = iShm; lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_UNLOCK, 0); break; } } return rc; } /* ** Argument bFlush is true if the contents of the in-memory tree has just ** been flushed to disk. The significance of this is that once the snapshot ** created to hold the updated state of the database is synced to disk, log ** file space can be recycled. */ void lsmFinishWork(lsm_db *pDb, int bFlush, int *pRc){ int rc = *pRc; assert( rc!=0 || pDb->pWorker ); if( pDb->pWorker ){ /* If no error has occurred, serialize the worker snapshot and write ** it to shared memory. 
*/ if( rc==LSM_OK ){ rc = lsmSaveWorker(pDb, bFlush); } /* Assuming no error has occurred, update a read lock slot with the ** new snapshot id (see comments above function dbSetReadLock()). */ if( rc==LSM_OK ){ if( pDb->iReader<0 ){ rc = lsmTreeLoadHeader(pDb, 0); } if( rc==LSM_OK ){ rc = dbSetReadLock(pDb, pDb->pWorker->iId, pDb->treehdr.iUsedShmid); } } /* Free the snapshot object. */ lsmFreeSnapshot(pDb->pEnv, pDb->pWorker); pDb->pWorker = 0; } |
︙ | ︙ | |||
985 986 987 988 989 990 991 992 993 994 995 996 | /* ** Begin a read transaction. This function is a no-op if the connection ** passed as the only argument already has an open read transaction. */ int lsmBeginReadTrans(lsm_db *pDb){ const int MAX_READLOCK_ATTEMPTS = 10; int rc = LSM_OK; /* Return code */ int iAttempt = 0; assert( pDb->pWorker==0 ); | > > > | | 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 | /* ** Begin a read transaction. This function is a no-op if the connection ** passed as the only argument already has an open read transaction. */ int lsmBeginReadTrans(lsm_db *pDb){ const int MAX_READLOCK_ATTEMPTS = 10; const int nMaxAttempt = (pDb->bRoTrans ? 1 : MAX_READLOCK_ATTEMPTS); int rc = LSM_OK; /* Return code */ int iAttempt = 0; assert( pDb->pWorker==0 ); while( rc==LSM_OK && pDb->iReader<0 && (iAttempt++)<nMaxAttempt ){ int iTreehdr = 0; int iSnap = 0; assert( pDb->pCsr==0 && pDb->nTransOpen==0 ); /* Load the in-memory tree header. */ rc = lsmTreeLoadHeader(pDb, &iTreehdr); |
︙ | ︙ | |||
1031 1032 1033 1034 1035 1036 1037 | ** checkpoint just loaded. TODO: This will be removed after ** lsm_sorted.c is changed to work directly from the serialized ** version of the snapshot. */ if( pDb->pClient==0 ){ rc = lsmCheckpointDeserialize(pDb, 0, pDb->aSnapshot,&pDb->pClient); } assert( (rc==LSM_OK)==(pDb->pClient!=0) ); | | | 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 | ** checkpoint just loaded. TODO: This will be removed after ** lsm_sorted.c is changed to work directly from the serialized ** version of the snapshot. */ if( pDb->pClient==0 ){ rc = lsmCheckpointDeserialize(pDb, 0, pDb->aSnapshot,&pDb->pClient); } assert( (rc==LSM_OK)==(pDb->pClient!=0) ); assert( pDb->iReader>=0 || pDb->bRoTrans ); /* Check that the client has the right compression hooks loaded. ** If not, set rc to LSM_MISMATCH. */ if( rc==LSM_OK ){ rc = lsmCheckCompressionId(pDb, pDb->pClient->iCmpId); } }else{ |
︙ | ︙ | |||
1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 | } if( rc!=LSM_OK ){ lsmReleaseReadlock(pDb); } if( pDb->pClient==0 && rc==LSM_OK ) rc = LSM_BUSY; return rc; } /* ** Close the currently open read transaction. */ void lsmFinishReadTrans(lsm_db *pDb){ /* Worker connections should not be closing read transactions. And ** read transactions should only be closed after all cursors and write ** transactions have been closed. Finally pClient should be non-NULL ** only iff pDb->iReader>=0. */ assert( pDb->pWorker==0 ); assert( pDb->pCsr==0 && pDb->nTransOpen==0 ); | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | | > | < | > > | > > | < < < < | < < | 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 | } if( rc!=LSM_OK ){ lsmReleaseReadlock(pDb); } if( pDb->pClient==0 && rc==LSM_OK ) rc = LSM_BUSY; return rc; } /* ** db is a read-only database handle in the disconnected state. This function ** attempts to open a read-transaction on the database. This may involve ** connecting to the database system (opening shared memory etc.). */ int lsmBeginRoTrans(lsm_db *db){ int rc = LSM_OK; assert( db->bReadonly && db->pShmhdr==0 ); assert( db->iReader<0 ); if( db->bRoTrans==0 ){ if( 1 ){ rc = lsmShmLock(db, LSM_LOCK_CHECKPOINTER, LSM_LOCK_SHARED, 0); if( rc==LSM_OK ){ db->bRoTrans = 1; rc = lsmShmCacheChunks(db, 1); if( rc==LSM_OK ){ db->pShmhdr = (ShmHeader *)db->apShm[0]; memset(db->pShmhdr, 0, sizeof(ShmHeader)); rc = lsmCheckpointRecover(db); if( rc==LSM_OK ){ rc = lsmLogRecover(db); } } } }else{ /* lock(DMS2, SHARED) etc. */ } if( rc==LSM_OK ){ rc = lsmBeginReadTrans(db); } } return rc; } /* ** Close the currently open read transaction. */ void lsmFinishReadTrans(lsm_db *pDb){ /* Worker connections should not be closing read transactions. And ** read transactions should only be closed after all cursors and write ** transactions have been closed. Finally pClient should be non-NULL ** only iff pDb->iReader>=0. */ assert( pDb->pWorker==0 ); assert( pDb->pCsr==0 && pDb->nTransOpen==0 ); lsmReleaseReadlock(pDb); if( pDb->bRoTrans ){ int i; for(i=0; i<pDb->nShm; i++){ lsmFree(pDb->pEnv, pDb->apShm[i]); } lsmFree(pDb->pEnv, pDb->apShm); pDb->apShm = 0; pDb->nShm = 0; pDb->pShmhdr = 0; lsmShmLock(pDb, LSM_LOCK_CHECKPOINTER, LSM_LOCK_UNLOCK, 0); pDb->bRoTrans = 0; } } /* ** Open a write transaction. */ int lsmBeginWriteTrans(lsm_db *pDb){ int rc; /* Return code */ |
︙ | ︙ | |||
1179 1180 1181 1182 1183 1184 1185 | } lsmTreeEndTransaction(pDb, bCommit); if( rc==LSM_OK ){ if( bFlush && pDb->bAutowork ){ rc = lsmSortedAutoWork(pDb, 1); }else if( bCommit && pDb->bDiscardOld ){ | | | 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 | } lsmTreeEndTransaction(pDb, bCommit); if( rc==LSM_OK ){ if( bFlush && pDb->bAutowork ){ rc = lsmSortedAutoWork(pDb, 1); }else if( bCommit && pDb->bDiscardOld ){ rc = dbSetReadLock(pDb, pDb->pClient->iId, pDb->treehdr.iUsedShmid); } } pDb->bDiscardOld = 0; lsmShmLock(pDb, LSM_LOCK_WRITER, LSM_LOCK_UNLOCK, 0); if( bFlush && pDb->bAutowork==0 && pDb->xWork ){ pDb->xWork(pDb, pDb->pWorkCtx); |
︙ | ︙ | |||
1209 1210 1211 1212 1213 1214 1215 | return( p->iLsmId && p->iLsmId<=iLsm && shm_sequence_ge(iShmMax, p->iTreeId) && shm_sequence_ge(p->iTreeId, iShmMin) ); } | < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < > > > | 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 | return( p->iLsmId && p->iLsmId<=iLsm && shm_sequence_ge(iShmMax, p->iTreeId) && shm_sequence_ge(p->iTreeId, iShmMin) ); } /* ** Obtain a read-lock on database version identified by the combination ** of snapshot iLsm and tree iTree. Return LSM_OK if successful, or ** an LSM error code otherwise. */ int lsmReadlock(lsm_db *db, i64 iLsm, u32 iShmMin, u32 iShmMax){ int rc = LSM_OK; ShmHeader *pShm = db->pShmhdr; int i; assert( db->iReader<0 ); assert( shm_sequence_ge(iShmMax, iShmMin) ); /* This is a no-op if the read-only transaction flag is set. */ if( db->bRoTrans ) return LSM_OK; /* Search for an exact match. */ for(i=0; db->iReader<0 && rc==LSM_OK && i<LSM_LOCK_NREADER; i++){ ShmReader *p = &pShm->aReader[i]; if( p->iLsmId==iLsm && p->iTreeId==iShmMax ){ rc = lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_SHARED, 0); if( rc==LSM_OK && p->iLsmId==iLsm && p->iTreeId==iShmMax ){ |
︙ | ︙ | |||
1466 1467 1468 1469 1470 1471 1472 | void **apShm; nAlloc += NINCR; apShm = lsmRealloc(pEnv, db->apShm, sizeof(void*)*nAlloc); if( !apShm ) return LSM_NOMEM_BKPT; db->apShm = apShm; } | > > > > > > > > | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | > | 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 | void **apShm; nAlloc += NINCR; apShm = lsmRealloc(pEnv, db->apShm, sizeof(void*)*nAlloc); if( !apShm ) return LSM_NOMEM_BKPT; db->apShm = apShm; } if( db->bRoTrans ){ for(i=db->nShm; rc==LSM_OK && i<nChunk; i++){ db->apShm[i] = lsmMallocZeroRc(pEnv, LSM_SHM_CHUNK_SIZE, &rc); db->nShm++; } }else{ /* Enter the client mutex */ lsmMutexEnter(pEnv, p->pClientMutex); /* Extend the Database objects apShmChunk[] array if necessary. Using the ** same pattern as for the lsm_db.apShm[] array above. */ nAlloc = ((p->nShmChunk + NINCR - 1) / NINCR) * NINCR; while( nChunk>=nAlloc ){ void **apShm; nAlloc += NINCR; apShm = lsmRealloc(pEnv, p->apShmChunk, sizeof(void*)*nAlloc); if( !apShm ){ rc = LSM_NOMEM_BKPT; break; } p->apShmChunk = apShm; } for(i=db->nShm; rc==LSM_OK && i<nChunk; i++){ if( i>=p->nShmChunk ){ void *pChunk = 0; if( p->bMultiProc==0 ){ /* Single process mode */ pChunk = lsmMallocZeroRc(pEnv, LSM_SHM_CHUNK_SIZE, &rc); }else{ /* Multi-process mode */ rc = lsmEnvShmMap(pEnv, p->pFile, i, LSM_SHM_CHUNK_SIZE, &pChunk); } if( rc==LSM_OK ){ p->apShmChunk[i] = pChunk; p->nShmChunk++; } } if( rc==LSM_OK ){ db->apShm[i] = p->apShmChunk[i]; db->nShm++; } } /* Release the client mutex */ lsmMutexLeave(pEnv, p->pClientMutex); } } return rc; } static int lockSharedFile(lsm_env *pEnv, Database *p, int iLock, int eOp){ int rc = LSM_OK; |
︙ | ︙ | |||
1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 | ){ lsm_db *pIter; const u64 me = ((u64)1 << (iLock-1)); const u64 ms = ((u64)1 << (iLock+32-1)); int rc = LSM_OK; Database *p = db->pDatabase; assert( iLock>=1 && iLock<=LSM_LOCK_RWCLIENT(LSM_LOCK_NRWCLIENT-1) ); assert( LSM_LOCK_RWCLIENT(LSM_LOCK_NRWCLIENT-1)<=32 ); assert( eOp==LSM_LOCK_UNLOCK || eOp==LSM_LOCK_SHARED || eOp==LSM_LOCK_EXCL ); /* Check for a no-op. Proceed only if this is not one of those. */ if( (eOp==LSM_LOCK_UNLOCK && (db->mLock & (me|ms))!=0) || (eOp==LSM_LOCK_SHARED && (db->mLock & (me|ms))!=ms) | > | 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 | ){ lsm_db *pIter; const u64 me = ((u64)1 << (iLock-1)); const u64 ms = ((u64)1 << (iLock+32-1)); int rc = LSM_OK; Database *p = db->pDatabase; assert( eOp!=LSM_LOCK_EXCL || db->bReadonly==0 ); assert( iLock>=1 && iLock<=LSM_LOCK_RWCLIENT(LSM_LOCK_NRWCLIENT-1) ); assert( LSM_LOCK_RWCLIENT(LSM_LOCK_NRWCLIENT-1)<=32 ); assert( eOp==LSM_LOCK_UNLOCK || eOp==LSM_LOCK_SHARED || eOp==LSM_LOCK_EXCL ); /* Check for a no-op. Proceed only if this is not one of those. */ if( (eOp==LSM_LOCK_UNLOCK && (db->mLock & (me|ms))!=0) || (eOp==LSM_LOCK_SHARED && (db->mLock & (me|ms))!=ms) |
︙ | ︙ |
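
The new lsmBeginRoTrans() above is the heart of this check-in. The outline below restates it in condensed form with the locking steps annotated; it is a description of the code in the diff (and only compiles in the context of lsm_shared.c), not a separate implementation. How read-only clients interact with live, non-dormant databases is deferred to the follow-up check-in 08cc3604cf.

/* Condensed outline of lsmBeginRoTrans() as added by this check-in.
** All identifiers are from the diffs above. */
static int roTransOutline(lsm_db *db){
  int rc;

  /* 1. Take a SHARED lock on the CHECKPOINTER byte. A read-write client
  **    takes this lock EXCLUSIVE while checkpointing (copying a snapshot
  **    from shared memory into the database file header), so holding it
  **    SHARED keeps the on-disk snapshot stable while it is read. */
  rc = lsmShmLock(db, LSM_LOCK_CHECKPOINTER, LSM_LOCK_SHARED, 0);

  if( rc==LSM_OK ){
    /* 2. Do not map the shared-memory file. With bRoTrans set,
    **    lsmShmCacheChunks() allocates private heap memory instead, and
    **    the system state is rebuilt there by running checkpoint and
    **    log recovery, much as the first read-write connection would. */
    db->bRoTrans = 1;
    rc = lsmShmCacheChunks(db, 1);
    if( rc==LSM_OK ){
      db->pShmhdr = (ShmHeader *)db->apShm[0];
      memset(db->pShmhdr, 0, sizeof(ShmHeader));
      rc = lsmCheckpointRecover(db);
      if( rc==LSM_OK ) rc = lsmLogRecover(db);
    }
  }

  /* 3. Open an ordinary read transaction on the recovered state. With
  **    bRoTrans set, lsmReadlock() is a no-op, so no read-lock slot is
  **    needed. lsmFinishReadTrans() later frees the private chunks and
  **    drops the CHECKPOINTER lock. */
  if( rc==LSM_OK ) rc = lsmBeginReadTrans(db);
  return rc;
}
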
Changes to src/lsm_tree.c.
︙ | ︙ | |||
2353 2354 2355 2356 2357 2358 2359 | if( treeHeaderChecksumOk(&pDb->treehdr) ){ if( piRead ) *piRead = 2; return LSM_OK; } lsmShmBarrier(pDb); } | | | 2353 2354 2355 2356 2357 2358 2359 2360 2361 2362 2363 2364 2365 2366 2367 | if( treeHeaderChecksumOk(&pDb->treehdr) ){ if( piRead ) *piRead = 2; return LSM_OK; } lsmShmBarrier(pDb); } return LSM_PROTOCOL_BKPT; } int lsmTreeLoadHeaderOk(lsm_db *pDb, int iRead){ TreeHeader *p = (iRead==1) ? &pDb->pShmhdr->hdr1 : &pDb->pShmhdr->hdr2; assert( iRead==1 || iRead==2 ); return (0==memcmp(pDb->treehdr.aCksum, p->aCksum, sizeof(u32)*2)); } |
︙ | ︙ |
Changes to src/lsm_unix.c.
︙ | ︙ | |||
54 55 56 57 58 59 60 | int shmfd; /* Shared memory file-descriptor */ void *pMap; /* Pointer to mapping of file fd */ off_t nMap; /* Size of mapping at pMap in bytes */ int nShm; /* Number of entries in array apShm[] */ void **apShm; /* Array of 32K shared memory segments */ }; | < < | > > > | > | > > > | | | | | | | 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 | int shmfd; /* Shared memory file-descriptor */ void *pMap; /* Pointer to mapping of file fd */ off_t nMap; /* Size of mapping at pMap in bytes */ int nShm; /* Number of entries in array apShm[] */ void **apShm; /* Array of 32K shared memory segments */ }; static char *posixShmFile(PosixFile *p){ char *zShm; int nName = strlen(p->zName); zShm = (char *)lsmMalloc(p->pEnv, nName+4+1); if( zShm ){ memcpy(zShm, p->zName, nName); memcpy(&zShm[nName], "-shm", 5); } return zShm; } static int lsmPosixOsOpen( lsm_env *pEnv, const char *zFile, int flags, lsm_file **ppFile ){ int rc = LSM_OK; PosixFile *p; p = lsm_malloc(pEnv, sizeof(PosixFile)); if( p==0 ){ rc = LSM_NOMEM; }else{ int bReadonly = (flags & LSM_OPEN_READONLY); int oflags = (bReadonly ? O_RDONLY : (O_RDWR|O_CREAT)); memset(p, 0, sizeof(PosixFile)); p->zName = zFile; p->pEnv = pEnv; p->fd = open(zFile, oflags, 0644); if( p->fd<0 ){ lsm_free(pEnv, p); p = 0; if( errno==ENOENT ){ rc = lsmErrorBkpt(LSM_IOERR_NOENT); }else{ rc = LSM_IOERR_BKPT; } } } *ppFile = (lsm_file *)p; return rc; } static int lsmPosixOsWrite( lsm_file *pFile, /* File to write to */ lsm_i64 iOff, /* Offset to write to */ void *pData, /* Write data from this buffer */ int nData /* Bytes of data to write */ ){ int rc = LSM_OK; PosixFile *p = (PosixFile *)pFile; off_t offset; offset = lseek(p->fd, (off_t)iOff, SEEK_SET); if( offset!=iOff ){ rc = LSM_IOERR_BKPT; }else{ ssize_t prc = write(p->fd, pData, (size_t)nData); if( prc<0 ) rc = LSM_IOERR_BKPT; } return rc; } static int lsmPosixOsTruncate( lsm_file *pFile, /* File to write to */ lsm_i64 nSize /* Size to truncate file to */ ){ PosixFile *p = (PosixFile *)pFile; int rc = LSM_OK; /* Return code */ int prc; /* Posix Return Code */ struct stat sStat; /* Result of fstat() invocation */ prc = fstat(p->fd, &sStat); if( prc==0 && sStat.st_size>nSize ){ prc = ftruncate(p->fd, (off_t)nSize); } if( prc<0 ) rc = LSM_IOERR_BKPT; return rc; } static int lsmPosixOsRead( lsm_file *pFile, /* File to read from */ lsm_i64 iOff, /* Offset to read from */ void *pData, /* Read data into this buffer */ int nData /* Bytes of data to read */ ){ int rc = LSM_OK; PosixFile *p = (PosixFile *)pFile; off_t offset; offset = lseek(p->fd, (off_t)iOff, SEEK_SET); if( offset!=iOff ){ rc = LSM_IOERR_BKPT; }else{ ssize_t prc = read(p->fd, pData, (size_t)nData); if( prc<0 ){ rc = LSM_IOERR_BKPT; }else if( prc<nData ){ memset(&((u8 *)pData)[prc], 0, nData - prc); } } return rc; } static int lsmPosixOsSync(lsm_file *pFile){ int rc = LSM_OK; #ifndef LSM_NO_SYNC PosixFile *p = (PosixFile *)pFile; int prc = 0; if( p->pMap ){ prc = msync(p->pMap, p->nMap, MS_SYNC); } if( prc==0 ) prc = fdatasync(p->fd); if( prc<0 ) rc = LSM_IOERR_BKPT; #else (void)pFile; 
#endif return rc; } |
︙ | ︙ | |||
315 316 317 318 319 320 321 | lock.l_start = (4096-iLock); if( fcntl(p->fd, F_SETLK, &lock) ){ int e = errno; if( e==EACCES || e==EAGAIN ){ rc = LSM_BUSY; }else{ | | | 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 | lock.l_start = (4096-iLock); if( fcntl(p->fd, F_SETLK, &lock) ){ int e = errno; if( e==EACCES || e==EAGAIN ){ rc = LSM_BUSY; }else{ rc = LSM_IOERR_BKPT; } } return rc; } int lsmPosixOsShmMap(lsm_file *pFile, int iChunk, int sz, void **ppShm){ |
︙ | ︙ | |||
369 370 371 372 373 374 375 | p->nShm = nNew; } if( p->apShm[iChunk]==0 ){ p->apShm[iChunk] = mmap(0, LSM_SHM_CHUNK_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, p->shmfd, iChunk*LSM_SHM_CHUNK_SIZE ); | | | 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 | p->nShm = nNew; } if( p->apShm[iChunk]==0 ){ p->apShm[iChunk] = mmap(0, LSM_SHM_CHUNK_SIZE, PROT_READ|PROT_WRITE, MAP_SHARED, p->shmfd, iChunk*LSM_SHM_CHUNK_SIZE ); if( p->apShm[iChunk]==0 ) return LSM_IOERR_BKPT; } *ppShm = p->apShm[iChunk]; return LSM_OK; } void lsmPosixOsShmBarrier(void){ |
︙ | ︙ |
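
The lsm_unix.c changes above also show, by example, what the new flags argument means for an application-supplied lsm_env: honour LSM_OPEN_READONLY (the only flag defined so far) and report LSM_IOERR_NOENT when a read-only open fails because the file does not exist, since lsmFsOpenLog() uses that code to tolerate a missing log file on read-only connections. The fragment below is a purely illustrative stdio-based sketch of such an xOpen() method; the MyFile type and function name are invented for the example and the remaining lsm_env methods are omitted.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include "lsm.h"

typedef struct MyFile MyFile;
struct MyFile { FILE *pf; };          /* Hypothetical file handle */

static int myEnvOpen(lsm_env *pEnv, const char *zFile, int flags, lsm_file **ppFile){
  MyFile *p = (MyFile *)malloc(sizeof(MyFile));
  (void)pEnv;
  if( p==0 ) return LSM_NOMEM;

  if( flags & LSM_OPEN_READONLY ){
    p->pf = fopen(zFile, "rb");       /* Read-only: never create */
  }else{
    p->pf = fopen(zFile, "r+b");      /* Read-write: create if missing */
    if( p->pf==0 ) p->pf = fopen(zFile, "w+b");
  }

  if( p->pf==0 ){
    int rc = (errno==ENOENT) ? LSM_IOERR_NOENT : LSM_IOERR;
    free(p);
    *ppFile = 0;
    return rc;
  }
  *ppFile = (lsm_file *)p;
  return LSM_OK;
}
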
Changes to test/lsm5.test.
︙ | ︙ | |||
12 13 14 15 16 17 18 19 20 21 22 23 24 25 | # The focus of this file is testing the LSM library. # set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix lsm5 db close #------------------------------------------------------------------------- # When the database system is shut down (i.e. when the last connection # disconnects), an attempt is made to truncate the database file to the # minimum number of blocks required. # # This test case checks that this process does not actually cause the | > > > > > > > > > > > > > > > > > > > | 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 | # The focus of this file is testing the LSM library. # set testdir [file dirname $argv0] source $testdir/tester.tcl set testprefix lsm5 db close proc db_fetch {db key} { db csr_open csr csr seek $key eq set ret [csr value] csr close set ret } # Create a new database with file name $file. # proc create_abc_db {file} { forcedelete $file lsm_open db $file db write a alpha db write b bravo db write c charlie db close } #------------------------------------------------------------------------- # When the database system is shut down (i.e. when the last connection # disconnects), an attempt is made to truncate the database file to the # minimum number of blocks required. # # This test case checks that this process does not actually cause the |
︙ | ︙ | |||
33 34 35 36 37 38 39 40 41 42 | db write 1 one db write 2 two db close } {} do_test 1.3 { expr [file size test.db] < (64*1024) } 1 finish_test | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 | db write 1 one db write 2 two db close } {} do_test 1.3 { expr [file size test.db] < (64*1024) } 1 #------------------------------------------------------------------------- # Test that if an attempt is made to open a read-write connection to a # database that the client does not have permission to write to is attempted # an error is reported. In order to open a read-write connection to a # database, the client requires: # # * read-write access to the db file, # * read-write access to the log file, # * for multi-process mode, read-write access to the shm file. # # In the above, "read-write access" includes the ability to create the db, # log or shm file if it does not exist. # # These tests verify that the lsm_open() command returns LSM_IOERR. At some # point in the future this will be improved. Likely when sqlite4 level tests # for opening read-only databases are added. # foreach {tn filename setup} { 1 test.dir/test.db { # Create a directory "test.dir". forcedelete test.dir file mkdir test.dir # Create a database within test.dir create_abc_db test.dir/test.db # Now make the db and its directory read-only. file attr test.dir/test.db -perm r--r--r-- file attr test.dir -perm r-xr-xr-x } 2 test.db { # Create a database test.db and set its permissions to read-only create_abc_db test.db file attr test.db -perm r--r--r-- } 3 test.dir/test.db { # Create a directory "test.dir". forcedelete test.dir file mkdir test.dir # Create a database within test.dir create_abc_db test.dir/test.db # Now make test.dir read-only. file attr test.dir -perm r-xr-xr-x } } { do_test 2.$tn.1 { eval $setup set rc [catch {lsm_open db $filename} msg] list $rc $msg } {1 {error in lsm_open() - 10}} do_test 2.$tn.2 { eval $setup lsm_open db $filename {readonly 1} set res [list [db_fetch db a] [db_fetch db b] [db_fetch db c]] db close set res } {alpha bravo charlie} } finish_test |
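The db_fetch helper used by these tests reads a single value through a cursor. A roughly equivalent routine in C might look like the sketch below. This is an illustration only; it assumes the cursor interface declared in lsm.h (lsm_csr_open(), lsm_csr_seek() with LSM_SEEK_EQ, lsm_csr_valid(), lsm_csr_value(), lsm_csr_close()), and it copies the value out on the assumption that the pointer returned by lsm_csr_value() is valid only while the cursor remains open and unmoved.

<pre>
  #include <stdlib.h>
  #include <string.h>
  #include "lsm.h"

  /* Look up zKey in db. Return a nul-terminated copy of the value
  ** (to be released with free()), or NULL if the key is not present
  ** or an error occurs. */
  static char *dbFetch(lsm_db *db, const char *zKey){
    lsm_cursor *csr = 0;
    char *zRet = 0;
    if( lsm_csr_open(db, &csr)==LSM_OK ){
      if( lsm_csr_seek(csr, zKey, (int)strlen(zKey), LSM_SEEK_EQ)==LSM_OK
       && lsm_csr_valid(csr)
      ){
        const void *pVal; int nVal;
        if( lsm_csr_value(csr, &pVal, &nVal)==LSM_OK ){
          zRet = (char *)malloc(nVal+1);
          if( zRet ){
            memcpy(zRet, pVal, nVal);
            zRet[nVal] = '\0';
          }
        }
      }
      lsm_csr_close(csr);
    }
    return zRet;
  }
</pre>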
Changes to test/test_lsm.c.
︙ | ︙ | |||
494 495 496 497 498 499 500 501 502 503 504 505 506 507 | { "mmap", LSM_CONFIG_MMAP }, { "use_log", LSM_CONFIG_USE_LOG }, { "automerge", LSM_CONFIG_AUTOMERGE }, { "max_freelist", LSM_CONFIG_MAX_FREELIST }, { "multi_proc", LSM_CONFIG_MULTIPLE_PROCESSES }, { "set_compression", LSM_CONFIG_SET_COMPRESSION }, { "set_compression_factory", LSM_CONFIG_SET_COMPRESSION_FACTORY }, { 0, 0 } }; int nElem; int i; Tcl_Obj **apElem; int rc; | > | 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 | { "mmap", LSM_CONFIG_MMAP }, { "use_log", LSM_CONFIG_USE_LOG }, { "automerge", LSM_CONFIG_AUTOMERGE }, { "max_freelist", LSM_CONFIG_MAX_FREELIST }, { "multi_proc", LSM_CONFIG_MULTIPLE_PROCESSES }, { "set_compression", LSM_CONFIG_SET_COMPRESSION }, { "set_compression_factory", LSM_CONFIG_SET_COMPRESSION_FACTORY }, { "readonly", LSM_CONFIG_READONLY }, { 0, 0 } }; int nElem; int i; Tcl_Obj **apElem; int rc; |
︙ | ︙ |
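The new "readonly" option in the Tcl wrapper above maps to LSM_CONFIG_READONLY. At the C level the same request can presumably be made with lsm_config() on a handle allocated by lsm_new(), before lsm_open() is called, in the same way as the other connection-level options. The sketch below assumes exactly that ordering and is not code from this check-in.

<pre>
  #include "lsm.h"

  /* Open zDb as a read-only connection using environment pEnv (or the
  ** default environment if pEnv is NULL). On success, set *ppDb to the
  ** new handle and return LSM_OK. */
  static int openReadonly(lsm_env *pEnv, const char *zDb, lsm_db **ppDb){
    lsm_db *db = 0;
    int iRo = 1;                   /* True to request read-only access */
    int rc = lsm_new(pEnv, &db);
    if( rc==LSM_OK ){
      rc = lsm_config(db, LSM_CONFIG_READONLY, &iRo);
      if( rc==LSM_OK ) rc = lsm_open(db, zDb);
      if( rc!=LSM_OK ){
        lsm_close(db);
        db = 0;
      }
    }
    *ppDb = db;
    return rc;
  }
</pre>

This mirrors the read-only open performed by test case 2.$tn.2 above via the {readonly 1} option.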
Changes to www/lsm.wiki.
1 2 3 4 | <title>LSM Design Overview</title> <nowiki> | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 | <title>LSM Design Overview</title> <nowiki> <div id=start_of_toc></div> <a href=#summary style=text-decoration:none>1. Summary </a><br> <a href=#data_structures style=text-decoration:none>2. Data Structures </a><br> <a href=#locks style=text-decoration:none>2.1. Locks</a><br> <a href=#database_file style=text-decoration:none>2.2. Database file</a><br> <a href=#sorted_runs style=text-decoration:none>2.2.1. Sorted Runs</a><br> <a href=#levels style=text-decoration:none>2.2.2. Levels</a><br> <a href=#snapshots style=text-decoration:none>2.2.3. Snapshots</a><br> <a href=#in-memory_tree style=text-decoration:none>2.3. In-Memory Tree</a><br> <a href=#memory_allocation style=text-decoration:none>2.3.1. Memory Allocation</a><br> <a href=#header_fields style=text-decoration:none>2.3.2. Header Fields</a><br> <a href=#other_shared-memory_fields style=text-decoration:none>2.4. Other Shared-Memory Fields</a><br> <a href=#log_file style=text-decoration:none>2.5. Log file</a><br> <a href=#database_recovery_and_shutdown style=text-decoration:none>3. Database Recovery and Shutdown</a><br> <a href=#read-write_clients style=text-decoration:none>3.1. Read-write clients</a><br> <a href=#database_operations style=text-decoration:none>4. Database Operations </a><br> <a href=#reading style=text-decoration:none>4.1. Reading</a><br> <a href=#writing style=text-decoration:none>4.2. Writing</a><br> <a href=#flushing_the_in-memory_tree_to_disk style=text-decoration:none>4.2.1. Flushing the in-memory tree to disk</a><br> <a href=#shared-memory_management style=text-decoration:none>4.2.2. Shared-memory management</a><br> <a href=#log_file_management style=text-decoration:none>4.2.3. Log file management</a><br> <a href=#working style=text-decoration:none>4.3. Working</a><br> <a href=#free-block_list_management style=text-decoration:none>4.3.1. Free-block list management</a><br> <a href=#checkpoint_operations style=text-decoration:none>4.4. Checkpoint Operations</a><br> <a href=#scheduling_policies style=text-decoration:none>5. Scheduling Policies</a><br> <div id=end_of_toc></div> <h1 id=summary>1. Summary </h1> The LSM embedded database software stores data in three distinct data structures: <ul> <li> <p>The <b>shared-memory region</b>. This may actually be allocated in either shared or heap memory, depending on whether LSM is running in |
︙ | ︙ | |||
42 43 44 45 46 47 48 | <p> When an application writes to the database, the new data is written to the in-memory tree. Once the in-memory tree has grown large enough, its contents are written into the database file as a new sorted run. To reduce the number of sorted runs in the database file, chronologically adjacent sorted runs may be merged together into a single run, either automatically or on demand. | | | 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 | <p> When an application writes to the database, the new data is written to the in-memory tree. Once the in-memory tree has grown large enough, its contents are written into the database file as a new sorted run. To reduce the number of sorted runs in the database file, chronologically adjacent sorted runs may be merged together into a single run, either automatically or on demand. <h1 id=data_structures>2. Data Structures </h1> <h2 id=locks>2.1. Locks</h2> <p> Read/write (shared/exclusive) file locks are used to control concurrent access. LSM uses the following file-locks: <ul> <li> <p>The <b>DMS1</b>, <b>DMS2</b> locking regions. These are used to implement the "dead-man-switch" mechanism copied from SQLite's WAL mode for safely connecting to and disconnecting from a database. See "Database Recovery and Shutdown" below. <li> <p>Several (say 3) <b>READER</b> locking regions. Database clients hold a SHARED lock on one of the READER locking regions while reading the database. As in SQLite WAL mode, each reader lock is paired with a
︙ | ︙ | |||
88 89 90 91 92 93 94 | <p> In the following sections, "the WRITER lock", refers to an exclusive lock on the WRITER locking region. For example "holding the WRITER lock" is equivalent to "holding an exclusive lock on the WRITER locking region". Similar interpretations apply to "the WORKER lock" and "the CHECKPOINTER lock". | | | 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 | <p> In the following sections, "the WRITER lock", refers to an exclusive lock on the WRITER locking region. For example "holding the WRITER lock" is equivalent to "holding an exclusive lock on the WRITER locking region". Similar interpretations apply to "the WORKER lock" and "the CHECKPOINTER lock". <h2 id=database_file>2.2. Database file</h2> <p> This section summarizes the contents of the database file informally. A detailed description is found in the header comments for source code files <a href="../src/lsm_file.c">lsm_file.c</a> (blocks, pages etc.), <a href="../src/lsm_sorted.c">lsm_sorted.c</a> (sorted run format) and <a href="../src/lsm_ckpt.c">lsm_ckpt.c</a> (database snapshot format). |
︙ | ︙ | |||
122 123 124 125 126 127 128 | <p> As with an SQLite database file, each page in the database may be addressed by its 32-bit page number. This means the maximum database size is roughly (pgsz * 2^32) bytes. The first and last pages in each block are 4 bytes smaller than the others. This is to make room for a single page-number. | | | 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 | <p> As with an SQLite database file, each page in the database may be addressed by its 32-bit page number. This means the maximum database size is roughly (pgsz * 2^32) bytes. The first and last pages in each block are 4 bytes smaller than the others. This is to make room for a single page-number. <h3 id=sorted_runs>2.2.1. Sorted Runs</h3> <p> A single sorted run is spread across one or more database pages (each page is a part of at most one sorted run). Given the page number of a page in a sorted run the following statements are true: <ul> |
︙ | ︙ | |||
164 165 166 167 168 169 170 | In other words, given the page numbers of the first and last pages of a sorted run and the page number of the root page for the embedded b-tree, it is possible to traverse the entire run in either direction or query for arbitrary values. <p><span style="color:red"> TODO: Embedded pointers. </span> | | | 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 | In other words, given the page numbers of the first and last pages of a sorted run and the page number of the root page for the embedded b-tree, it is possible to traverse the entire run in either direction or query for arbitrary values. <p><span style="color:red"> TODO: Embedded pointers. </span> <h3 id=levels>2.2.2. Levels</h3> <p> Each sorted run is assigned to a "level". Normally, a level consists of a single sorted run. However, a level may also consist of a set of sorted runs being incrementally merged into a single run. <p> |
︙ | ︙ | |||
225 226 227 228 229 230 231 | time for all entries. | | | 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 | time for all entries. <h3 id=snapshots>2.2.3. Snapshots</h3> <p> Each meta page may contain a database <b>snapshot</b>. A snapshot contains all the information required to interpret the remainder of the database file (the sorted runs and free space). Specifically, it contains: <ul> |
︙ | ︙ | |||
250 251 252 253 254 255 256 | Recovery and Shutdown" below). </ul> <p> A more detailed description is available in the header comments in source code file <a href="../src/lsm_ckpt.c">lsm_ckpt.c</a> | | | | 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 | Recovery and Shutdown" below). </ul> <p> A more detailed description is available in the header comments in source code file <a href="../src/lsm_ckpt.c">lsm_ckpt.c</a> <h2 id=in-memory_tree>2.3. In-Memory Tree</h2> <p> The in-memory tree is an append-only b-tree of order 4 (each node may have up to 4 children), which is more or less equivalent to a red-black tree. An append-only tree is convenient, as it naturally supports the single-writer/many-readers MVCC concurrency model. <p> The implementation includes some optimizations to reduce the number of interior nodes that are updated when a leaf node is written that are not described here. See header comments in source code file <a href=../src/lsm_tree.c>lsm_tree.c</a> for details. <h3 id=memory_allocation>2.3.1. Memory Allocation</h3> <p> More than one in-memory tree may exist in shared-memory at any time. For example in the following scenario: <ol> |
︙ | ︙ | |||
330 331 332 333 334 335 336 | but the values that connect the linked list together are not. The writer that detects the failure must scan the entire shared-memory region to reconstruct the linked list. Any sequence ids assigned by the failed writer are reverted (perhaps not to their original values, but to values that put them at the start of the linked list - before those chunks that may still be in use by existing readers). | | | | | > > | | 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 | but the values that connect the linked list together are not. The writer that detects the failure must scan the entire shared-memory region to reconstruct the linked list. Any sequence ids assigned by the failed writer are reverted (perhaps not to their original values, but to values that put them at the start of the linked list - before those chunks that may still be in use by existing readers). <h3 id=header_fields>2.3.2. Header Fields</h3> <p> As well as the in-memory tree data, the following fixed-size fields stored in well-known locations in shared-memory are part of the in-memory tree. Like the in-memory tree data, outside of recovery these fields are only ever written to by clients holding the WRITER lock. <ul> <li> Two copies of a data structure called a "tree-header". Tree-header-1 and tree-header 2. A tree-header structure contains all the information required to read or write to a particular version of the append only b-tree. It also contains a 64-bit checksum. <li> A boolean flag set to true at the beginning of every write transaction and cleared after that transaction is successfully concluded - the "writer flag". This is used to detect failures that occur mid-transaction. It is only ever read (or written) by clients that hold the WRITER lock. </ul> <h2 id=other_shared-memory_fields>2.4. Other Shared-Memory Fields</h2> <ul> <li> Snapshot 1. <li> Snapshot 2. <li> The meta-page pointer. This value is either 1 or 2. It indicates which of the two meta-pages contains the most recent database snapshot. <li> READER lock values. </ul> <h2 id=log_file>2.5. Log file</h2> <a href=../src/lsm_log.c>lsm_log.c</a>. <h1 id=database_recovery_and_shutdown>3. Database Recovery and Shutdown</h1> <h2 id=read-write_clients>3.1. Read-write clients</h2> <p> Exclusive locks on locking region DMS1 are used to serialize all connect and disconnect operations performed by read-write clients. <p>When an LSM database connection is opened (i.e. lsm_open() is called): <pre> lock(DMS1, EXCLUSIVE) # Block until successful lock(DMS2, EXCLUSIVE) # Abandon if not immediately successful if( DMS2 successfully locked ){ |
︙ | ︙ | |||
415 416 417 418 419 420 421 | ...TODO... delete *-shm file (or equivalent) } unlock(DMS2) unlock(DMS1) </pre> | > > > > > > > > | | 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 | ...TODO... delete *-shm file (or equivalent) } unlock(DMS2) unlock(DMS1) </pre> <h2 id=read-only_clients>3.2. Read-only clients</h2> <p>It is assumed that read-only clients may take SHARED locks only, and that a read-only client may not run database recovery when a db is opened in multi-process mode. <p> <h1 id=database_operations>4. Database Operations </h1> <h2 id=reading>4.1. Reading</h2> <p> Opening a read transaction: <ol> <li> <p>Load the current tree-header from shared-memory.
︙ | ︙ | |||
512 513 514 515 516 517 518 | Once a read transaction is opened, the reader may continue to read the versions of the in-memory tree and database file for as long as the READER lock is held. <p> To close a read transaction all that is required is to drop the SHARED lock held on the READER slot. | | | 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 | Once a read transaction is opened, the reader may continue to read the versions of the in-memory tree and database file for as long as the READER lock is held. <p> To close a read transaction all that is required is to drop the SHARED lock held on the READER slot. <h2 id=writing>4.2. Writing</h2> <p> To open a write transaction: <ol> <li> <p>Open a read transaction, if one is not already open. <li> <p>Obtain the WRITER lock. |
︙ | ︙ | |||
560 561 562 563 564 565 566 | <li> Sweep the shared-memory area to rebuild the linked list of chunks so that it is consistent with the current tree-header. <li> Clear the writer flag. </ol> | | | 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 | <li> Sweep the shared-memory area to rebuild the linked list of chunks so that it is consistent with the current tree-header. <li> Clear the writer flag. </ol> <h3 id=flushing_the_in-memory_tree_to_disk>4.2.1. Flushing the in-memory tree to disk</h3> <p> For the purposes of writing, the database file and the in-memory tree are largely independent. Processes holding the WRITER lock write to the in-memory tree, and processes holding the WORKER lock write to the database file. <ol> |
︙ | ︙ | |||
582 583 584 585 586 587 588 | <li> Update the private copy of the tree-header to reflect a new, empty tree. <li> Commit the write transaction, writing the new, empty tree to shared-memory. </ol> | | | 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 | <li> Update the private copy of the tree-header to reflect a new, empty tree. <li> Commit the write transaction, writing the new, empty tree to shared-memory. </ol> <h3 id=shared-memory_management>4.2.2. Shared-memory management</h3> <p> A writer client may have to allocate new shared-memory chunks. This can be done either by extending the shared-memory region or by recycling the first chunk in the linked-list. To check if the first chunk in the linked-list may be reused, the writer must check that: <ul> <li> The chunk is not part of the current in-memory tree (the one being appended to by the writer). A writer can check this by examining its private copy of the tree-header. <li> The chunk is not part of an in-memory tree being used by an existing reader. A writer checks this by scanning (and possibly updating) the values associated with the READER locks - similar to the way SQLite does in WAL mode. </ul> <h3 id=log_file_management>4.2.3. Log file management</h3> <p> A writer client also writes to the log file. All information required to write to the log file (the offset to write to and the initial checksum values) is embedded in the tree-header. However, in order to reuse log file space (wrap around to the start of the log file), a writer needs to know that the space being recycled will not be required by any recovery process in the future. In other words, it needs to know that the information contained in the transactions being overwritten has been written into the database file and is part of the snapshot written into the database file by a checkpointer (see "Checkpoint Operations" below). <p> To determine whether or not the log file can be wrapped, the writer requires access to information stored in the newest snapshot written into the database header. There exists a shared-memory variable indicating which of the two meta-pages contains this snapshot, but the writer process still has to read the snapshot data and verify its checksum from disk. <h2 id=working>4.3. Working</h2> <p> Working is similar to writing. The difference is that a "writer" modifies the in-memory tree. A "worker" modifies the contents of the database file. <ol> <li> <p>Take the WORKER lock.
︙ | ︙ | |||
645 646 647 648 649 650 651 | <li> <p>Invoke xShmBarrier(). <li> <p>Update snapshot-1 in shared-memory. <li> <p>Release the WORKER lock. </ol> | | | 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 | <li> <p>Invoke xShmBarrier(). <li> <p>Update snapshot-1 in shared-memory. <li> <p>Release the WORKER lock. </ol> <h3 id=free-block_list_management>4.3.1. Free-block list management</h3> <p> Worker clients occasionally need to allocate new database blocks or move existing blocks to the free-block list. Along with the block number of each free block, the free-block list contains the snapshot-id of the first snapshot created after the block was moved to the free list. The free-block list is always stored in order of snapshot-id, so that the first block in |
︙ | ︙ | |||
675 676 677 678 679 680 681 | header. This is done by reading (and verifying the checksum) of the snapshot currently stored in the database meta-page indicated by the shared-memory variable. </ul> | | | 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 | header. This is done by reading (and verifying the checksum) of the snapshot currently stored in the database meta-page indicated by the shared-memory variable. </ul> <h2 id=checkpoint_operations>4.4. Checkpoint Operations</h2> <ol> <li> Take CHECKPOINTER lock. <li> Load snapshot-1 from shared-memory. If the checksum does not match the content here, release the CHECKPOINTER lock and abandon the attempt to checkpoint the database. |
︙ | ︙ | |||
701 702 703 704 705 706 707 | <li> Update the shared-memory variable to indicate the meta-page written in step 5. <li> Drop the CHECKPOINTER lock. </ol> | | | 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 | <li> Update the shared-memory variable to indicate the meta-page written in step 5. <li> Drop the CHECKPOINTER lock. </ol> <h1 id=scheduling_policies>5. Scheduling Policies</h1> <p> When a client writes to a database, the in-memory tree and log file are updated by the client itself before the lsm_write() call returns. Eventually, once sufficient writes have accumulated in memory, the client marks the current tree as "old", and subsequent writes are accumulated in a new tree. |
︙ | ︙ | |||
744 745 746 747 748 749 750 751 752 753 754 | write both the old and new tree to a new database level. <p> If the WORKER lock cannot be obtained immediately, block until it can be obtained. </ul> <p><b> Auto work </b> | > > | 783 784 785 786 787 788 789 790 791 792 793 794 795 | write both the old and new tree to a new database level. <p> If the WORKER lock cannot be obtained immediately, block until it can be obtained. </ul> <p><b> Auto work </b>