From bb12c1fd00eb51118749bbbc69c5596835fcbd3b Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 4 May 2024 19:31:02 +0200
Subject: Adding upstream version 5:7.0.15.

Signed-off-by: Daniel Baumann
---
 src/debug.c | 2212 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 2212 insertions(+)
 create mode 100644 src/debug.c

diff --git a/src/debug.c b/src/debug.c
new file mode 100644
index 0000000..dffa986
--- /dev/null
+++ b/src/debug.c
@@ -0,0 +1,2212 @@
+/*
+ * Copyright (c) 2009-2020, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2020, Redis Labs, Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *   * Redistributions of source code must retain the above copyright notice,
+ *     this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of Redis nor the names of its contributors may be used
+ *     to endorse or promote products derived from this software without
+ *     specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "server.h"
+#include "util.h"
+#include "sha1.h" /* SHA1 is used for DEBUG DIGEST */
+#include "crc64.h"
+#include "bio.h"
+#include "quicklist.h"
+#include "cluster.h"
+
+#include <arpa/inet.h>
+#include <signal.h>
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+#ifdef HAVE_BACKTRACE
+#include <execinfo.h>
+#ifndef __OpenBSD__
+#include <ucontext.h>
+#else
+typedef ucontext_t sigcontext_t;
+#endif
+#endif /* HAVE_BACKTRACE */
+
+#ifdef __CYGWIN__
+#ifndef SA_ONSTACK
+#define SA_ONSTACK 0x08000000
+#endif
+#endif
+
+#if defined(__APPLE__) && defined(__arm64__)
+#include <mach/mach.h>
+#endif
+
+/* Globals */
+static int bug_report_start = 0; /* True if bug report header was already logged. */
+static pthread_mutex_t bug_report_start_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* Forward declarations */
+void bugReportStart(void);
+void printCrashReport(void);
+void bugReportEnd(int killViaSignal, int sig);
+void logStackTrace(void *eip, int uplevel);
+
+/* ================================= Debugging ============================== */
+
+/* Compute the sha1 of string at 's' with 'len' bytes long.
+ * The SHA1 is then xored against the string pointed by digest.
+ * Since xor is commutative, this operation is used in order to
+ * "add" digests relative to unordered elements.
+ * + * So digest(a,b,c,d) will be the same of digest(b,a,c,d) */ +void xorDigest(unsigned char *digest, const void *ptr, size_t len) { + SHA1_CTX ctx; + unsigned char hash[20]; + int j; + + SHA1Init(&ctx); + SHA1Update(&ctx,ptr,len); + SHA1Final(hash,&ctx); + + for (j = 0; j < 20; j++) + digest[j] ^= hash[j]; +} + +void xorStringObjectDigest(unsigned char *digest, robj *o) { + o = getDecodedObject(o); + xorDigest(digest,o->ptr,sdslen(o->ptr)); + decrRefCount(o); +} + +/* This function instead of just computing the SHA1 and xoring it + * against digest, also perform the digest of "digest" itself and + * replace the old value with the new one. + * + * So the final digest will be: + * + * digest = SHA1(digest xor SHA1(data)) + * + * This function is used every time we want to preserve the order so + * that digest(a,b,c,d) will be different than digest(b,c,d,a) + * + * Also note that mixdigest("foo") followed by mixdigest("bar") + * will lead to a different digest compared to "fo", "obar". + */ +void mixDigest(unsigned char *digest, const void *ptr, size_t len) { + SHA1_CTX ctx; + + xorDigest(digest,ptr,len); + SHA1Init(&ctx); + SHA1Update(&ctx,digest,20); + SHA1Final(digest,&ctx); +} + +void mixStringObjectDigest(unsigned char *digest, robj *o) { + o = getDecodedObject(o); + mixDigest(digest,o->ptr,sdslen(o->ptr)); + decrRefCount(o); +} + +/* This function computes the digest of a data structure stored in the + * object 'o'. It is the core of the DEBUG DIGEST command: when taking the + * digest of a whole dataset, we take the digest of the key and the value + * pair, and xor all those together. + * + * Note that this function does not reset the initial 'digest' passed, it + * will continue mixing this object digest to anything that was already + * present. 
*/ +void xorObjectDigest(redisDb *db, robj *keyobj, unsigned char *digest, robj *o) { + uint32_t aux = htonl(o->type); + mixDigest(digest,&aux,sizeof(aux)); + long long expiretime = getExpire(db,keyobj); + char buf[128]; + + /* Save the key and associated value */ + if (o->type == OBJ_STRING) { + mixStringObjectDigest(digest,o); + } else if (o->type == OBJ_LIST) { + listTypeIterator *li = listTypeInitIterator(o,0,LIST_TAIL); + listTypeEntry entry; + while(listTypeNext(li,&entry)) { + robj *eleobj = listTypeGet(&entry); + mixStringObjectDigest(digest,eleobj); + decrRefCount(eleobj); + } + listTypeReleaseIterator(li); + } else if (o->type == OBJ_SET) { + setTypeIterator *si = setTypeInitIterator(o); + sds sdsele; + while((sdsele = setTypeNextObject(si)) != NULL) { + xorDigest(digest,sdsele,sdslen(sdsele)); + sdsfree(sdsele); + } + setTypeReleaseIterator(si); + } else if (o->type == OBJ_ZSET) { + unsigned char eledigest[20]; + + if (o->encoding == OBJ_ENCODING_LISTPACK) { + unsigned char *zl = o->ptr; + unsigned char *eptr, *sptr; + unsigned char *vstr; + unsigned int vlen; + long long vll; + double score; + + eptr = lpSeek(zl,0); + serverAssert(eptr != NULL); + sptr = lpNext(zl,eptr); + serverAssert(sptr != NULL); + + while (eptr != NULL) { + vstr = lpGetValue(eptr,&vlen,&vll); + score = zzlGetScore(sptr); + + memset(eledigest,0,20); + if (vstr != NULL) { + mixDigest(eledigest,vstr,vlen); + } else { + ll2string(buf,sizeof(buf),vll); + mixDigest(eledigest,buf,strlen(buf)); + } + + snprintf(buf,sizeof(buf),"%.17g",score); + mixDigest(eledigest,buf,strlen(buf)); + xorDigest(digest,eledigest,20); + zzlNext(zl,&eptr,&sptr); + } + } else if (o->encoding == OBJ_ENCODING_SKIPLIST) { + zset *zs = o->ptr; + dictIterator *di = dictGetIterator(zs->dict); + dictEntry *de; + + while((de = dictNext(di)) != NULL) { + sds sdsele = dictGetKey(de); + double *score = dictGetVal(de); + + snprintf(buf,sizeof(buf),"%.17g",*score); + memset(eledigest,0,20); + mixDigest(eledigest,sdsele,sdslen(sdsele)); + mixDigest(eledigest,buf,strlen(buf)); + xorDigest(digest,eledigest,20); + } + dictReleaseIterator(di); + } else { + serverPanic("Unknown sorted set encoding"); + } + } else if (o->type == OBJ_HASH) { + hashTypeIterator *hi = hashTypeInitIterator(o); + while (hashTypeNext(hi) != C_ERR) { + unsigned char eledigest[20]; + sds sdsele; + + memset(eledigest,0,20); + sdsele = hashTypeCurrentObjectNewSds(hi,OBJ_HASH_KEY); + mixDigest(eledigest,sdsele,sdslen(sdsele)); + sdsfree(sdsele); + sdsele = hashTypeCurrentObjectNewSds(hi,OBJ_HASH_VALUE); + mixDigest(eledigest,sdsele,sdslen(sdsele)); + sdsfree(sdsele); + xorDigest(digest,eledigest,20); + } + hashTypeReleaseIterator(hi); + } else if (o->type == OBJ_STREAM) { + streamIterator si; + streamIteratorStart(&si,o->ptr,NULL,NULL,0); + streamID id; + int64_t numfields; + + while(streamIteratorGetID(&si,&id,&numfields)) { + sds itemid = sdscatfmt(sdsempty(),"%U.%U",id.ms,id.seq); + mixDigest(digest,itemid,sdslen(itemid)); + sdsfree(itemid); + + while(numfields--) { + unsigned char *field, *value; + int64_t field_len, value_len; + streamIteratorGetField(&si,&field,&value, + &field_len,&value_len); + mixDigest(digest,field,field_len); + mixDigest(digest,value,value_len); + } + } + streamIteratorStop(&si); + } else if (o->type == OBJ_MODULE) { + RedisModuleDigest md = {{0},{0},keyobj,db->id}; + moduleValue *mv = o->ptr; + moduleType *mt = mv->type; + moduleInitDigestContext(md); + if (mt->digest) { + mt->digest(&md,mv->value); + xorDigest(digest,md.x,sizeof(md.x)); + } + } else { + 
serverPanic("Unknown object type"); + } + /* If the key has an expire, add it to the mix */ + if (expiretime != -1) xorDigest(digest,"!!expire!!",10); +} + +/* Compute the dataset digest. Since keys, sets elements, hashes elements + * are not ordered, we use a trick: every aggregate digest is the xor + * of the digests of their elements. This way the order will not change + * the result. For list instead we use a feedback entering the output digest + * as input in order to ensure that a different ordered list will result in + * a different digest. */ +void computeDatasetDigest(unsigned char *final) { + unsigned char digest[20]; + dictIterator *di = NULL; + dictEntry *de; + int j; + uint32_t aux; + + memset(final,0,20); /* Start with a clean result */ + + for (j = 0; j < server.dbnum; j++) { + redisDb *db = server.db+j; + + if (dictSize(db->dict) == 0) continue; + di = dictGetSafeIterator(db->dict); + + /* hash the DB id, so the same dataset moved in a different + * DB will lead to a different digest */ + aux = htonl(j); + mixDigest(final,&aux,sizeof(aux)); + + /* Iterate this DB writing every entry */ + while((de = dictNext(di)) != NULL) { + sds key; + robj *keyobj, *o; + + memset(digest,0,20); /* This key-val digest */ + key = dictGetKey(de); + keyobj = createStringObject(key,sdslen(key)); + + mixDigest(digest,key,sdslen(key)); + + o = dictGetVal(de); + xorObjectDigest(db,keyobj,digest,o); + + /* We can finally xor the key-val digest to the final digest */ + xorDigest(final,digest,20); + decrRefCount(keyobj); + } + dictReleaseIterator(di); + } +} + +#ifdef USE_JEMALLOC +void mallctl_int(client *c, robj **argv, int argc) { + int ret; + /* start with the biggest size (int64), and if that fails, try smaller sizes (int32, bool) */ + int64_t old = 0, val; + if (argc > 1) { + long long ll; + if (getLongLongFromObjectOrReply(c, argv[1], &ll, NULL) != C_OK) + return; + val = ll; + } + size_t sz = sizeof(old); + while (sz > 0) { + if ((ret=je_mallctl(argv[0]->ptr, &old, &sz, argc > 1? &val: NULL, argc > 1?sz: 0))) { + if (ret == EPERM && argc > 1) { + /* if this option is write only, try just writing to it. */ + if (!(ret=je_mallctl(argv[0]->ptr, NULL, 0, &val, sz))) { + addReply(c, shared.ok); + return; + } + } + if (ret==EINVAL) { + /* size might be wrong, try a smaller one */ + sz /= 2; +#if BYTE_ORDER == BIG_ENDIAN + val <<= 8*sz; +#endif + continue; + } + addReplyErrorFormat(c,"%s", strerror(ret)); + return; + } else { +#if BYTE_ORDER == BIG_ENDIAN + old >>= 64 - 8*sz; +#endif + addReplyLongLong(c, old); + return; + } + } + addReplyErrorFormat(c,"%s", strerror(EINVAL)); +} + +void mallctl_string(client *c, robj **argv, int argc) { + int rret, wret; + char *old; + size_t sz = sizeof(old); + /* for strings, it seems we need to first get the old value, before overriding it. */ + if ((rret=je_mallctl(argv[0]->ptr, &old, &sz, NULL, 0))) { + /* return error unless this option is write only. 
*/
+        if (!(rret == EPERM && argc > 1)) {
+            addReplyErrorFormat(c,"%s", strerror(rret));
+            return;
+        }
+    }
+    if(argc > 1) {
+        char *val = argv[1]->ptr;
+        char **valref = &val;
+        if ((!strcmp(val,"VOID")))
+            valref = NULL, sz = 0;
+        wret = je_mallctl(argv[0]->ptr, NULL, 0, valref, sz);
+    }
+    if (!rret)
+        addReplyBulkCString(c, old);
+    else if (wret)
+        addReplyErrorFormat(c,"%s", strerror(wret));
+    else
+        addReply(c, shared.ok);
+}
+#endif
+
+void debugCommand(client *c) {
+    if (c->argc == 2 && !strcasecmp(c->argv[1]->ptr,"help")) {
+        const char *help[] = {
+"AOF-FLUSH-SLEEP <microsec>",
+"    Server will sleep before flushing the AOF, this is used for testing.",
+"ASSERT",
+"    Crash by assertion failed.",
+"CHANGE-REPL-ID",
+"    Change the replication IDs of the instance.",
+"    Dangerous: should be used only for testing the replication subsystem.",
+"CONFIG-REWRITE-FORCE-ALL",
+"    Like CONFIG REWRITE but writes all configuration options, including",
+"    keywords not listed in original configuration file or default values.",
+"CRASH-AND-RECOVER [<milliseconds>]",
+"    Hard crash and restart after a <milliseconds> delay (default 0).",
+"DIGEST",
+"    Output a hex signature representing the current DB content.",
+"DIGEST-VALUE <key> [<key> ...]",
+"    Output a hex signature of the values of all the specified keys.",
+"ERROR <string>",
+"    Return a Redis protocol error with <string> as message. Useful for clients",
+"    unit tests to simulate Redis errors.",
+"LEAK <string>",
+"    Create a memory leak of the input string.",
+"LOG <message>",
+"    Write <message> to the server log.",
+"HTSTATS <dbid>",
+"    Return hash table statistics of the specified Redis database.",
+"HTSTATS-KEY <key>",
+"    Like HTSTATS but for the hash table stored at <key>'s value.",
+"LOADAOF",
+"    Flush the AOF buffers on disk and reload the AOF in memory.",
+"REPLICATE <string>",
+"    Replicates the provided string to replicas, allowing data divergence.",
+#ifdef USE_JEMALLOC
+"MALLCTL <key> [<val>]",
+"    Get or set a malloc tuning integer.",
+"MALLCTL-STR <key> [<val>]",
+"    Get or set a malloc tuning string.",
+#endif
+"OBJECT <key>",
+"    Show low level info about `key` and associated value.",
+"DROP-CLUSTER-PACKET-FILTER <packet-type>",
+"    Drop all packets that match the filtered type. Set to -1 allow all packets.",
+"OOM",
+"    Crash the server simulating an out-of-memory error.",
+"PANIC",
+"    Crash the server simulating a panic.",
+"POPULATE <count> [<prefix>] [<size>]",
+"    Create <count> string keys named key:<num>. If <prefix> is specified then",
+"    it is used instead of the 'key' prefix. These are not propagated to",
+"    replicas. Cluster slots are not respected so keys not belonging to the",
+"    current node can be created in cluster mode.",
+"PROTOCOL <type>",
+"    Reply with a test value of the specified type. <type> can be: string,",
+"    integer, double, bignum, null, array, set, map, attrib, push, verbatim,",
+"    true, false.",
+"RELOAD [option ...]",
+"    Save the RDB on disk and reload it back to memory. Valid