Many hyperlinks are disabled.
Use anonymous login
to enable hyperlinks.
Overview
Comment: | Move RowHashBlock.nUsed to RowHash.nUsed. Fix a typo in a comment in test_async.c. (CVS 6533) |
---|---|
Downloads: | Tarball | ZIP archive |
Timelines: | family | ancestors | descendants | both | trunk |
Files: | files | file ages | folders |
SHA1: |
799d31d99fd18a6f99862433384e37d6 |
User & Date: | danielk1977 2009-04-21 18:20:45.000 |
Context
2009-04-22
| ||
00:47 | Extend the Rowset object to contain all the capabilities of Rowhash in addition to its legacy capabilities. Use Rowset to replace Rowhash. In addition to requiring less code, this removes the 2^32 result row limitation, uses less memory, and gives better bounds on worst-case performance. The Rowhash implementation has yet to be removed. (CVS 6534) (check-in: b101cf70b7 user: drh tags: trunk) | |
2009-04-21
| ||
18:20 | Move RowHashBlock.nUsed to RowHash.nUsed. Fix a typo in a comment in test_async.c. (CVS 6533) (check-in: 799d31d99f user: danielk1977 tags: trunk) | |
17:23 | Fix a segfault that followed a malloc failure introduced by (6527). (CVS 6532) (check-in: 08e71b1140 user: danielk1977 tags: trunk) | |
Changes
Changes to src/rowhash.c.
︙ | ︙ | |||
27 28 29 30 31 32 33 | ** The insert batch number is a parameter to the TEST primitive. The ** hash table is rebuilt whenever the batch number increases. TEST ** operations only look for INSERTs that occurred in prior batches. ** ** The caller is responsible for insuring that there are no duplicate ** INSERTs. ** | | | 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 | ** The insert batch number is a parameter to the TEST primitive. The ** hash table is rebuilt whenever the batch number increases. TEST ** operations only look for INSERTs that occurred in prior batches. ** ** The caller is responsible for insuring that there are no duplicate ** INSERTs. ** ** $Id: rowhash.c,v 1.4 2009/04/21 18:20:45 danielk1977 Exp $ */ #include "sqliteInt.h" /* ** An upper bound on the size of heap allocations made by this module. ** Limiting the size of allocations helps to avoid memory fragmentation. */ |
︙ | ︙ | |||
121 122 123 124 125 126 127 | ** The linked list of RowHashBlock objects also provides a way to sequentially ** scan all elements in the RowHash. This sequential scan is used when ** rebuilding the hash table. The hash table is rebuilt after every ** batch of inserts. */ struct RowHashBlock { struct RowHashBlockData { | < > | 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 | ** The linked list of RowHashBlock objects also provides a way to sequentially ** scan all elements in the RowHash. This sequential scan is used when ** rebuilding the hash table. The hash table is rebuilt after every ** batch of inserts. */ struct RowHashBlock { struct RowHashBlockData { RowHashBlock *pNext; /* Next RowHashBlock object in list of them all */ } data; RowHashElem aElem[ROWHASH_ELEM_PER_BLOCK]; /* Available RowHashElem objects */ }; /* ** RowHash structure. References to a structure of this type are passed ** around and used as opaque handles by code in other modules. */ struct RowHash { int nUsed; /* Number of used entries in first RowHashBlock */ int nEntry; /* Number of used entries over all RowHashBlocks */ int iBatch; /* The current insert batch number */ u8 nHeight; /* Height of tree of hash pages */ u8 nLinearLimit; /* Linear search limit (used if pHash==0) */ int nBucket; /* Number of buckets in hash table */ RowHashPage *pHash; /* Pointer to root of hash table tree */ RowHashBlock *pBlock; /* Linked list of RowHashBlocks */ |
︙ | ︙ | |||
266 267 268 269 270 271 272 | /* Allocate the hash-table. */ if( allocHashTable(&p->pHash, p->nHeight, &nLeaf) ){ return SQLITE_NOMEM; } /* Insert all values into the hash-table. */ for(pBlock=p->pBlock; pBlock; pBlock=pBlock->data.pNext){ | | > > | 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 | /* Allocate the hash-table. */ if( allocHashTable(&p->pHash, p->nHeight, &nLeaf) ){ return SQLITE_NOMEM; } /* Insert all values into the hash-table. */ for(pBlock=p->pBlock; pBlock; pBlock=pBlock->data.pNext){ RowHashElem * const pEnd = &pBlock->aElem[ pBlock==p->pBlock?p->nUsed:ROWHASH_ELEM_PER_BLOCK ]; RowHashElem *pIter; for(pIter=pBlock->aElem; pIter<pEnd; pIter++){ RowHashElem **ppElem = findHashBucket(p, pIter->iVal); pIter->pNext = *ppElem; *ppElem = pIter; } } |
︙ | ︙ | |||
350 351 352 353 354 355 356 | } p->db = db; *pp = p; } /* If the current RowHashBlock is full, or if the first RowHashBlock has ** not yet been allocated, allocate one now. */ | | < > > | | | 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 | } p->db = db; *pp = p; } /* If the current RowHashBlock is full, or if the first RowHashBlock has ** not yet been allocated, allocate one now. */ if( !p->pBlock || p->nUsed==ROWHASH_ELEM_PER_BLOCK ){ RowHashBlock *pBlock = (RowHashBlock*)sqlite3Malloc(sizeof(RowHashBlock)); if( !pBlock ){ return SQLITE_NOMEM; } pBlock->data.pNext = p->pBlock; p->pBlock = pBlock; p->nUsed = 0; } assert( p->nUsed==(p->nEntry % ROWHASH_ELEM_PER_BLOCK) ); /* Add iVal to the current RowHashBlock. */ p->pBlock->aElem[p->nUsed].iVal = iVal; p->nUsed++; p->nEntry++; return SQLITE_OK; } /* ** Destroy the RowHash object passed as the first argument. */ |
︙ | ︙ |
Changes to src/test_async.c.
1 2 3 4 5 6 7 8 9 10 11 12 | /* ** 2005 December 14 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** | | | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 | /* ** 2005 December 14 ** ** The author disclaims copyright to this source code. In place of ** a legal notice, here is a blessing: ** ** May you do good and not evil. ** May you find forgiveness for yourself and forgive others. ** May you share freely, never taking more than you give. ** ************************************************************************* ** ** $Id: test_async.c,v 1.58 2009/04/21 18:20:45 danielk1977 Exp $ ** ** This file contains an example implementation of an asynchronous IO ** backend for SQLite. ** ** WHAT IS ASYNCHRONOUS I/O? ** ** With asynchronous I/O, write requests are handled by a separate thread |
︙ | ︙ | |||
69 70 71 72 73 74 75 | ** Multiple connections from within a single process that use this ** implementation of asynchronous IO may access a single database ** file concurrently. From the point of view of the user, if all ** connections are from within a single process, there is no difference ** between the concurrency offered by "normal" SQLite and SQLite ** using the asynchronous backend. ** | | | 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 | ** Multiple connections from within a single process that use this ** implementation of asynchronous IO may access a single database ** file concurrently. From the point of view of the user, if all ** connections are from within a single process, there is no difference ** between the concurrency offered by "normal" SQLite and SQLite ** using the asynchronous backend. ** ** If connections from within multiple processes may access the ** database file, the ENABLE_FILE_LOCKING symbol (see below) must be ** defined. If it is not defined, then no locks are established on ** the database file. In this case, if multiple processes access ** the database file, corruption will quickly result. ** ** If ENABLE_FILE_LOCKING is defined (the default), then connections ** from within multiple processes may access a single database file |
︙ | ︙ |