Many hyperlinks are disabled. Use anonymous login to enable hyperlinks.
Overview
Comment: | Fix some problems causing multi-threaded btree tests to fail. Some still remain. |
---|---|
Downloads: | Tarball | ZIP archive |
Timelines: | family | ancestors | descendants | both | trunk |
Files: | files | file ages | folders |
SHA1: | 67b28147ea037a363bee73dc006b6ead |
User & Date: | dan 2013-10-31 16:31:45.145 |
Context
2013-11-01
| ||
19:54 | Use the log to store the page-size, database size and user cookie value instead of writing these directly to the database header. check-in: 37983095fd user: dan tags: trunk | |
2013-10-31
| ||
16:31 | Fix some problems causing multi-threaded btree tests to fail. Some still remain. check-in: 67b28147ea user: dan tags: trunk | |
2013-10-30
| ||
19:57 | Btree fixes related to multiple client tests. check-in: 58f7282211 user: dan tags: trunk | |
Changes
Changes to main.mk.
︙ | ︙ | |||
39 40 41 42 43 44 45 | # # Once the macros above are defined, the rest of this make script will # build the SQLite library and testing tools. ################################################################################ # FIXME: Required options for now. # | | | | 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 | # # Once the macros above are defined, the rest of this make script will # build the SQLite library and testing tools. ################################################################################ # FIXME: Required options for now. # #OPTS += -DLSM_MUTEX_NONE #OPTS += -DSQLITE4_DEBUG=1 -DLSM_DEBUG=1 OPTS += -DHAVE_GMTIME_R OPTS += -DHAVE_LOCALTIME_R OPTS += -DHAVE_MALLOC_USABLE_SIZE OPTS += -DHAVE_USLEEP #OPTS += -DSQLITE4_MEMDEBUG=1 #OPTS += -DSQLITE4_NO_SYNC=1 -DLSM_NO_SYNC=1 #OPTS += -DSQLITE4_OMIT_ANALYZE #OPTS += -DSQLITE4_OMIT_AUTOMATIC_INDEX OPTS += -DSQLITE4_OMIT_VIRTUALTABLE=1 OPTS += -DSQLITE4_OMIT_XFER_OPT #OPTS += -DSQLITE4_THREADSAFE=0 # This is how we compile # TCCX = $(TCC) $(OPTS) -I. -I$(TOP)/src -I$(TOP) TCCX += -I$(TOP)/ext/rtree -I$(TOP)/ext/icu -I$(TOP)/ext/fts3 TCCX += -I$(TOP)/ext/async |
︙ | ︙ |
Changes to src/bt_lock.c.
︙ | ︙ | |||
71 72 73 74 75 76 77 | BtFile *pBtFile; /* List of deferred closes */ }; /* ** Grab the global mutex that protects the linked list of BtShared ** objects. */ | | | | | < < < < < < < < < | | < | 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 | BtFile *pBtFile; /* List of deferred closes */ }; /* ** Grab the global mutex that protects the linked list of BtShared ** objects. */ static void btLockMutexEnter(sqlite4_env *pEnv){ sqlite4_mutex_enter(sqlite4_mutex_alloc(pEnv, SQLITE4_MUTEX_STATIC_KV)); } /* ** Relinquish the mutex obtained by calling btLockMutexEnter(). */ static void btLockMutexLeave(sqlite4_env *pEnv){ sqlite4_mutex_leave(sqlite4_mutex_alloc(pEnv, SQLITE4_MUTEX_STATIC_KV)); } static int btLockLockopNonblocking( BtLock *p, /* BtLock handle */ int iLock, /* Slot to lock */ int eOp /* One of BT_LOCK_UNLOCK, SHARED or EXCL */ ){ const u32 mask = ((u32)1 << iLock); int rc = SQLITE4_OK; BtShared *pShared = p->pShared; assert( iLock>=0 && iLock<(BT_LOCK_READER0 + BT_NREADER) ); assert( (BT_LOCK_READER0+BT_NREADER)<=32 ); |
︙ | ︙ | |||
175 176 177 178 179 180 181 | p->mSharedLock &= ~mask; p->mExclLock |= mask; } } break; } | | > > > > > > > > > > > > > > > > > > > > > > > > > | | 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 | p->mSharedLock &= ~mask; p->mExclLock |= mask; } } break; } sqlite4_mutex_leave(pShared->pClientMutex); } return rc; } /* ** Attempt to obtain the lock identified by the iLock and bExcl parameters. ** If successful, return SQLITE4_OK. If the lock cannot be obtained because ** there exists some other conflicting lock, return SQLITE4_BUSY. If some ** other error occurs, return an SQLite4 error code. ** ** Parameter iLock must be one of BT_LOCK_WRITER, WORKER or CHECKPOINTER, ** or else a value returned by the BT_LOCK_READER macro. */ static int btLockLockop( BtLock *p, /* BtLock handle */ int iLock, /* Slot to lock */ int eOp, /* One of BT_LOCK_UNLOCK, SHARED or EXCL */ int bBlock /* True for a blocking lock */ ){ int rc; while( 1 ){ rc = btLockLockopNonblocking(p, iLock, eOp); if( rc!=SQLITE4_BUSY || bBlock==0 ) break; /* todo: Fix blocking locks */ usleep(10000); } return rc; } /* ** Connect to the database as a read/write connection. If recovery ** is required (i.e. if this is the first connection to the db), invoke ** the xRecover() method. ** ** Return SQLITE4_OK if successful, or an SQLite4 error code if an ** error occurs. 
*/ int sqlite4BtLockConnect(BtLock *p, int (*xRecover)(BtLock*)){ sqlite4_env *pEnv = p->pEnv; int rc = SQLITE4_OK; const char *zName; int nName; BtShared *pShared; zName = sqlite4BtPagerFilename((BtPager*)p, BT_PAGERFILE_DATABASE); nName = strlen(zName); btLockMutexEnter(p->pEnv); for(pShared=gShared.pDatabase; pShared; pShared=pShared->pNext){ if( pShared->nName==nName && 0==memcmp(zName, pShared->zName, nName) ){ break; } } if( pShared==0 ){ |
︙ | ︙ | |||
232 233 234 235 236 237 238 | gShared.pDatabase = pShared; } } if( rc==SQLITE4_OK ){ pShared->nRef++; } | | | 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 | gShared.pDatabase = pShared; } } if( rc==SQLITE4_OK ){ pShared->nRef++; } btLockMutexLeave(p->pEnv); /* Add this connection to the linked list at BtShared.pLock */ if( rc==SQLITE4_OK ){ sqlite4_mutex_enter(pShared->pClientMutex); p->pNext = pShared->pLock; pShared->pLock = p; sqlite4_mutex_leave(pShared->pClientMutex); |
︙ | ︙ | |||
312 313 314 315 316 317 318 | } if( rc==SQLITE4_BUSY ) rc = SQLITE4_OK; btLockLockop(p, BT_LOCK_DMS2_RW, BT_LOCK_UNLOCK, 0); btLockLockop(p, BT_LOCK_DMS2_RO, BT_LOCK_UNLOCK, 0); btLockLockop(p, BT_LOCK_DMS1, BT_LOCK_UNLOCK, 0); } | | | | 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 | } if( rc==SQLITE4_BUSY ) rc = SQLITE4_OK; btLockLockop(p, BT_LOCK_DMS2_RW, BT_LOCK_UNLOCK, 0); btLockLockop(p, BT_LOCK_DMS2_RO, BT_LOCK_UNLOCK, 0); btLockLockop(p, BT_LOCK_DMS1, BT_LOCK_UNLOCK, 0); } btLockMutexEnter(p->pEnv); pShared->nRef--; if( pShared->nRef==0 ){ int i; BtShared **ppS; for(ppS=&gShared.pDatabase; *ppS!=pShared; ppS=&(*ppS)->pNext); *ppS = (*ppS)->pNext; sqlite4_mutex_free(pShared->pClientMutex); for(i=0; i<pShared->nShmChunk; i++){ sqlite4_free(p->pEnv, pShared->apShmChunk[i]); } sqlite4_free(p->pEnv, pShared->apShmChunk); sqlite4_free(p->pEnv, pShared); } btLockMutexLeave(p->pEnv); return rc; } /* ** Obtain a READER lock. ** ** Argument aLog points to an array of 6 frame addresses. These are the |
︙ | ︙ |
Changes to src/bt_log.c.
︙ | ︙ | |||
218 219 220 221 222 223 224 225 226 227 228 229 230 231 | static void btDebugTopology(char *zStr, u32 *aLog){ fprintf(stderr, "%s: %d..%d %d..%d %d..%d\n", zStr, (int)aLog[0], (int)aLog[1], (int)aLog[2], (int)aLog[3], (int)aLog[4], (int)aLog[5] ); fflush(stderr); } static void btDebugCkptPage(u32 pgno, u8 *aData, int pgsz){ #if 0 static nCall = 0; u32 aCksum[2]; btLogChecksum(1, aData, pgsz, 0, aCksum); fprintf(stderr, "%d: Ckpt page %d (cksum=%08x%08x)\n", nCall++, | > > > > > > > > > > > | 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 | static void btDebugTopology(char *zStr, u32 *aLog){ fprintf(stderr, "%s: %d..%d %d..%d %d..%d\n", zStr, (int)aLog[0], (int)aLog[1], (int)aLog[2], (int)aLog[3], (int)aLog[4], (int)aLog[5] ); fflush(stderr); } #ifndef NDEBUG static void btDebugCheckSnapshot(BtShmHdr *pHdr){ u32 *aLog = pHdr->aLog; assert( pHdr->iNextFrame!=1 || (aLog[0]==0 && aLog[1]==0 && aLog[2]==0 && aLog[3]==0) ); } #else #define btDebugCheckSnapshot(x,y) #endif static void btDebugCkptPage(u32 pgno, u8 *aData, int pgsz){ #if 0 static nCall = 0; u32 aCksum[2]; btLogChecksum(1, aData, pgsz, 0, aCksum); fprintf(stderr, "%d: Ckpt page %d (cksum=%08x%08x)\n", nCall++, |
︙ | ︙ | |||
925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 | rc = btLogWriteHeader(pLog, 0, &hdr); if( rc!=SQLITE4_OK ) return rc; pLog->snapshot.aFrameCksum[0] = hdr.iSalt1; pLog->snapshot.aFrameCksum[1] = hdr.iSalt2; pLog->snapshot.iNextFrame = 1; } /* Figure out the offset to write the current frame to. */ iFrame = pLog->snapshot.iNextFrame; iOff = btLogFrameOffset(pLog, pgsz, iFrame); /* The current frame will be written to location pLog->snapshot.iNextFrame. ** This code determines where the following frame will be stored. There ** are three possibilities: ** ** 1) The next frame follows the current frame (this is the usual case). ** 2) The next frame is frame 1 - the log wraps around. ** 3) Following the current frame is a block of frames still in use. ** So the next frame will immediately follow this block. */ iNextFrame = pLog->snapshot.iNextFrame + 1; | > | > | 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 | rc = btLogWriteHeader(pLog, 0, &hdr); if( rc!=SQLITE4_OK ) return rc; pLog->snapshot.aFrameCksum[0] = hdr.iSalt1; pLog->snapshot.aFrameCksum[1] = hdr.iSalt2; pLog->snapshot.iNextFrame = 1; } btDebugCheckSnapshot(&pLog->snapshot); /* Figure out the offset to write the current frame to. */ iFrame = pLog->snapshot.iNextFrame; iOff = btLogFrameOffset(pLog, pgsz, iFrame); /* The current frame will be written to location pLog->snapshot.iNextFrame. ** This code determines where the following frame will be stored. There ** are three possibilities: ** ** 1) The next frame follows the current frame (this is the usual case). ** 2) The next frame is frame 1 - the log wraps around. ** 3) Following the current frame is a block of frames still in use. ** So the next frame will immediately follow this block. 
*/ iNextFrame = pLog->snapshot.iNextFrame + 1; if( iFrame!=1 && iFrame==aLog[5]+1 && aLog[0]==0 && aLog[2]==0 && aLog[4]!=0 && aLog[4]>pLog->nWrapLog ){ /* Case 2) It is possible to wrap the log around */ iNextFrame = 1; }else if( iNextFrame==aLog[0] ){ /* Case 3) It is necessary to jump over some existing log. */ iNextFrame = aLog[1]+1; assert( iNextFrame!=1 ); } if( iNextFrame & 0x80000000 ){ rc = SQLITE4_FULL; }else{ /* Populate the frame header object. */ |
︙ | ︙ | |||
984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 | if( iFrame==1 ){ pLog->snapshot.iHashSide = (pLog->snapshot.iHashSide+1) %2; } rc = btLogHashInsert(pLog, pgno, iFrame); } /* Update the private copy of the shm-header */ if( rc==SQLITE4_OK ){ if( btLogIsEmpty(pLog) ){ assert( iFrame==1 ); aLog[4] = iFrame; }else if( iFrame==1 ){ assert( aLog[0]==0 && aLog[1]==0 && aLog[2]==0 && aLog[3]==0 ); aLog[0] = aLog[4]; aLog[1] = aLog[5]; aLog[4] = iFrame; }else if( iFrame!=aLog[5]+1 ){ assert( iFrame>aLog[5] ); assert( aLog[2]==0 && aLog[3]==0 ); aLog[2] = aLog[4]; aLog[3] = aLog[5]; aLog[4] = iFrame; } aLog[5] = iFrame; memcpy(pLog->snapshot.aFrameCksum, frame.aCksum, sizeof(frame.aCksum)); } /* If this is a COMMIT, also update the shared shm-header. */ if( bCommit ){ rc = btLogUpdateSharedHdr(pLog); } return rc; | > > > > | 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 | if( iFrame==1 ){ pLog->snapshot.iHashSide = (pLog->snapshot.iHashSide+1) %2; } rc = btLogHashInsert(pLog, pgno, iFrame); } /* Update the private copy of the shm-header */ btDebugCheckSnapshot(&pLog->snapshot); BtShmHdr hdr; memcpy(&hdr, &pLog->snapshot, sizeof(BtShmHdr)); if( rc==SQLITE4_OK ){ if( btLogIsEmpty(pLog) ){ assert( iFrame==1 ); aLog[4] = iFrame; }else if( iFrame==1 ){ assert( aLog[0]==0 && aLog[1]==0 && aLog[2]==0 && aLog[3]==0 ); aLog[0] = aLog[4]; aLog[1] = aLog[5]; aLog[4] = iFrame; }else if( iFrame!=aLog[5]+1 ){ assert( iFrame>aLog[5] ); assert( aLog[2]==0 && aLog[3]==0 ); aLog[2] = aLog[4]; aLog[3] = aLog[5]; aLog[4] = iFrame; } aLog[5] = iFrame; memcpy(pLog->snapshot.aFrameCksum, frame.aCksum, sizeof(frame.aCksum)); } btDebugCheckSnapshot(&pLog->snapshot); /* If this is a COMMIT, also update the shared shm-header. 
*/ if( bCommit ){ rc = btLogUpdateSharedHdr(pLog); } return rc; |
︙ | ︙ | |||
1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 | u32 iFirstRead = 0; while( rc==SQLITE4_NOTFOUND ){ BtShm *pShm; /* Attempt to read a copy of the BtShmHdr from shared-memory. */ rc = btLogSnapshot(pLog, &pLog->snapshot); /* Take a read lock on the database */ if( rc==SQLITE4_OK ){ BtReadSlot *aReadlock; pShm = btLogShm(pLog); aReadlock = pShm->aReadlock; | > | 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 | u32 iFirstRead = 0; while( rc==SQLITE4_NOTFOUND ){ BtShm *pShm; /* Attempt to read a copy of the BtShmHdr from shared-memory. */ rc = btLogSnapshot(pLog, &pLog->snapshot); btDebugCheckSnapshot(&pLog->snapshot); /* Take a read lock on the database */ if( rc==SQLITE4_OK ){ BtReadSlot *aReadlock; pShm = btLogShm(pLog); aReadlock = pShm->aReadlock; |
︙ | ︙ | |||
1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 | ** that it contains a map of all frames that are currently in use ** by any reader, or may be used by any future reader or recovery ** process. */ if( rc==SQLITE4_OK ){ u32 *aLog = shmhdr.aLog; u32 iRecover = pShm->ckpt.iFirstRecover; u32 iRead = 0; rc = sqlite4BtLockReaderQuery(pLock, aLog, pShm->aReadlock, &iRead, 0); if( rc==SQLITE4_OK ){ /* Now "trim" the snapshot so that it accesses nothing earlier than ** either iRecover or iRead (whichever occurs first in the log). */ u32 iTrim = iRecover; | > | 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 | ** that it contains a map of all frames that are currently in use ** by any reader, or may be used by any future reader or recovery ** process. */ if( rc==SQLITE4_OK ){ u32 *aLog = shmhdr.aLog; u32 iRecover = pShm->ckpt.iFirstRecover; u32 iRead = 0; btDebugCheckSnapshot(&pLog->snapshot); rc = sqlite4BtLockReaderQuery(pLock, aLog, pShm->aReadlock, &iRead, 0); if( rc==SQLITE4_OK ){ /* Now "trim" the snapshot so that it accesses nothing earlier than ** either iRecover or iRead (whichever occurs first in the log). */ u32 iTrim = iRecover; |
︙ | ︙ | |||
1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 | } } } if( rc==SQLITE4_OK ){ memcpy(pLog->snapshot.aLog, aLog, sizeof(u32)*6); } } } return rc; } int sqlite4BtLogSnapshotEndWrite(BtLog *pLog){ | > | 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 | } } } if( rc==SQLITE4_OK ){ memcpy(pLog->snapshot.aLog, aLog, sizeof(u32)*6); } btDebugCheckSnapshot(&pLog->snapshot); } } return rc; } int sqlite4BtLogSnapshotEndWrite(BtLog *pLog){ |
︙ | ︙ |
Changes to src/mutex_unix.c.
︙ | ︙ | |||
31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 | */ #if defined(SQLITE4_DEBUG) || defined(SQLITE4_HOMEGROWN_RECURSIVE_MUTEX) # define SQLITE4_MUTEX_NREF 1 #else # define SQLITE4_MUTEX_NREF 0 #endif /* ** Each recursive mutex is an instance of the following structure. */ typedef struct sqlite4UnixMutex { sqlite4_mutex base; /* Base class. Must be first */ pthread_mutex_t mutex; /* Mutex controlling the lock */ #if SQLITE4_MUTEX_NREF int id; /* Mutex type */ volatile int nRef; /* Number of entrances */ volatile pthread_t owner; /* Thread that is within this mutex */ int trace; /* True to trace changes */ #endif } sqlite4UnixMutex; #if SQLITE4_MUTEX_NREF | > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > | | > | | 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 | */ #if defined(SQLITE4_DEBUG) || defined(SQLITE4_HOMEGROWN_RECURSIVE_MUTEX) # define SQLITE4_MUTEX_NREF 1 #else # define SQLITE4_MUTEX_NREF 0 #endif static int pthreadMutexInit(void *p); static int pthreadMutexEnd(void *p); static sqlite4_mutex *pthreadMutexAlloc(void *pMutexEnv, int iType); static void pthreadMutexFree(sqlite4_mutex *pMutex); static void pthreadMutexEnter(sqlite4_mutex *pMutex); static int pthreadMutexTry(sqlite4_mutex *pMutex); static void pthreadMutexLeave(sqlite4_mutex *pMutex); #ifdef SQLITE4_DEBUG static int pthreadMutexHeld(sqlite4_mutex *pMutex); static int pthreadMutexNotheld(sqlite4_mutex *pMutex); #endif static const sqlite4_mutex_methods sMutexMethods = { pthreadMutexInit, pthreadMutexEnd, pthreadMutexAlloc, pthreadMutexFree, pthreadMutexEnter, pthreadMutexTry, pthreadMutexLeave, #ifdef SQLITE4_DEBUG pthreadMutexHeld, pthreadMutexNotheld, #else 0, 0, #endif 0 }; /* ** Each recursive mutex is an instance of the following structure. */ typedef struct sqlite4UnixMutex { sqlite4_mutex base; /* Base class. 
Must be first */ pthread_mutex_t mutex; /* Mutex controlling the lock */ #if SQLITE4_MUTEX_NREF int id; /* Mutex type */ volatile int nRef; /* Number of entrances */ volatile pthread_t owner; /* Thread that is within this mutex */ int trace; /* True to trace changes */ #endif } sqlite4UnixMutex; #if SQLITE4_MUTEX_NREF #define SQLITE4_MUTEX_INITIALIZER \ { {&sMutexMethods}, PTHREAD_MUTEX_INITIALIZER, 0, 0, (pthread_t)0, 0 } #else #define SQLITE4_MUTEX_INITIALIZER \ { {&sMutexMethods}, PTHREAD_MUTEX_INITIALIZER } #endif /* ** The sqlite4_mutex_held() and sqlite4_mutex_notheld() routine are ** intended for use only inside assert() statements. On some platforms, ** there might be race conditions that can cause these routines to ** deliver incorrect results. In particular, if pthread_equal() is |
︙ | ︙ | |||
129 130 131 132 133 134 135 136 137 138 139 140 141 142 | ** returns a different mutex on every call. But for the static ** mutex types, the same mutex is returned on every call that has ** the same type number. */ static sqlite4_mutex *pthreadMutexAlloc(void *pMutexEnv, int iType){ sqlite4_env *pEnv = (sqlite4_env*)pMutexEnv; sqlite4UnixMutex *p; switch( iType ){ case SQLITE4_MUTEX_RECURSIVE: { p = sqlite4MallocZero(pEnv, sizeof(*p) ); if( p ){ #ifdef SQLITE4_HOMEGROWN_RECURSIVE_MUTEX /* If recursive mutexes are not available, we will have to ** build our own. See below. */ | > > > > > | 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 | ** returns a different mutex on every call. But for the static ** mutex types, the same mutex is returned on every call that has ** the same type number. */ static sqlite4_mutex *pthreadMutexAlloc(void *pMutexEnv, int iType){ sqlite4_env *pEnv = (sqlite4_env*)pMutexEnv; sqlite4UnixMutex *p; static sqlite4UnixMutex aStaticMutex[] = { SQLITE4_MUTEX_INITIALIZER }; switch( iType ){ case SQLITE4_MUTEX_RECURSIVE: { p = sqlite4MallocZero(pEnv, sizeof(*p) ); if( p ){ #ifdef SQLITE4_HOMEGROWN_RECURSIVE_MUTEX /* If recursive mutexes are not available, we will have to ** build our own. See below. */ |
︙ | ︙ | |||
165 166 167 168 169 170 171 | pthread_mutex_init(&p->mutex, 0); p->base.pMutexMethods = &pEnv->mutex; assert( p->base.pMutexMethods->pMutexEnv==(void*)pEnv ); } break; } default: { | > > | | 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 | pthread_mutex_init(&p->mutex, 0); p->base.pMutexMethods = &pEnv->mutex; assert( p->base.pMutexMethods->pMutexEnv==(void*)pEnv ); } break; } default: { assert( SQLITE4_MUTEX_RECURSIVE==1 && SQLITE4_MUTEX_FAST==0 ); assert( (iType-2)<ArraySize(aStaticMutex) ); p = &aStaticMutex[iType-2]; break; } } return (sqlite4_mutex*)p; } |
︙ | ︙ | |||
325 326 327 328 329 330 331 | if( p->trace ){ printf("leave mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); } #endif } sqlite4_mutex_methods const *sqlite4DefaultMutex(void){ | < < < < < < < < < < < < < < < < < < | | 363 364 365 366 367 368 369 370 371 372 373 | if( p->trace ){ printf("leave mutex %p (%d) with nRef=%d\n", p, p->trace, p->nRef); } #endif } sqlite4_mutex_methods const *sqlite4DefaultMutex(void){ return &sMutexMethods; } #endif /* SQLITE4_MUTEX_PTHREADS */ |
Changes to src/sqlite.h.in.
︙ | ︙ | |||
3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 | ** ** The set of static mutexes may change from one SQLite release to the ** next. Applications that override the built-in mutex logic must be ** prepared to accommodate additional static mutexes. */ #define SQLITE4_MUTEX_FAST 0 #define SQLITE4_MUTEX_RECURSIVE 1 /* ** CAPIREF: Retrieve the mutex for a database connection ** ** ^This interface returns a pointer the [sqlite4_mutex] object that ** serializes access to the [database connection] given in the argument ** when the [threading mode] is Serialized. | > | 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 | ** ** The set of static mutexes may change from one SQLite release to the ** next. Applications that override the built-in mutex logic must be ** prepared to accommodate additional static mutexes. */ #define SQLITE4_MUTEX_FAST 0 #define SQLITE4_MUTEX_RECURSIVE 1 #define SQLITE4_MUTEX_STATIC_KV 2 /* For use by KV layers*/ /* ** CAPIREF: Retrieve the mutex for a database connection ** ** ^This interface returns a pointer the [sqlite4_mutex] object that ** serializes access to the [database connection] given in the argument ** when the [threading mode] is Serialized. |
︙ | ︙ |