+** ^(If the sqlite3_vtab_distinct() interface returns 3, that means
+** that the query planner needs only distinct rows but it does need the
+** rows to be sorted.)^ ^The virtual table implementation is free to omit
+** rows that are identical in all aOrderBy columns, if it wants to, but
+** it is not required to omit any rows. This mode is used for queries
+** that have both DISTINCT and ORDER BY clauses.
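+**
+** For example (an illustrative query, not a normative requirement): a
+** statement such as "SELECT DISTINCT a, b FROM vtab ORDER BY a, b" may
+** cause sqlite3_vtab_distinct() to return 3 for the scan of vtab.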
**
**
** ^For the purposes of comparing virtual table output values to see if the
@@ -13120,12 +13207,17 @@ struct fts5_api {
/************** End of sqlite3.h *********************************************/
/************** Continuing where we left off in sqliteInt.h ******************/
+/*
+** Reuse the STATIC_LRU for mutex access to sqlite3_temp_directory.
+*/
+#define SQLITE_MUTEX_STATIC_TEMPDIR SQLITE_MUTEX_STATIC_VFS1
+
/*
** Include the configuration header output by 'configure' if we're using the
** autoconf-based build
*/
#if defined(_HAVE_SQLITE_CONFIG_H) && !defined(SQLITECONFIG_H)
-#include "config.h"
+#include "sqlite_cfg.h"
#define SQLITECONFIG_H 1
#endif
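
/* For example (a sketch, not part of the amalgamation): an autoconf-style
** build might compile with "cc -D_HAVE_SQLITE_CONFIG_H -c sqlite3.c" so that
** the generated sqlite_cfg.h is picked up here. */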
@@ -14363,8 +14455,19 @@ typedef INT16_TYPE LogEst;
/*
** Round up a number to the next larger multiple of 8. This is used
** to force 8-byte alignment on 64-bit architectures.
+**
+** ROUND8() always does the rounding, for any argument.
+**
+** ROUND8P() assumes that the argument is already an integer number of
+** pointers in size, and so it is a no-op on systems where the pointer
+** size is 8.
*/
#define ROUND8(x) (((x)+7)&~7)
+#if SQLITE_PTRSIZE==8
+# define ROUND8P(x) (x)
+#else
+# define ROUND8P(x) (((x)+7)&~7)
+#endif
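+
+/* Illustrative values (a sketch, not part of the build): ROUND8(0)==0,
+** ROUND8(1)==8, ROUND8(9)==16, and ROUND8(16)==16. On a build where
+** SQLITE_PTRSIZE==8, ROUND8P(x) simply returns x unchanged. */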
/*
** Round down to the nearest multiple of 8
@@ -14427,22 +14530,23 @@ typedef INT16_TYPE LogEst;
#endif
/*
-** SELECTTRACE_ENABLED will be either 1 or 0 depending on whether or not
-** the Select query generator tracing logic is turned on.
+** TREETRACE_ENABLED will be either 1 or 0 depending on whether or not
+** the Abstract Syntax Tree tracing logic is turned on.
*/
#if !defined(SQLITE_AMALGAMATION)
-SQLITE_PRIVATE u32 sqlite3SelectTrace;
+SQLITE_PRIVATE u32 sqlite3TreeTrace;
#endif
#if defined(SQLITE_DEBUG) \
- && (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_SELECTTRACE))
-# define SELECTTRACE_ENABLED 1
+ && (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_SELECTTRACE) \
+ || defined(SQLITE_ENABLE_TREETRACE))
+# define TREETRACE_ENABLED 1
# define SELECTTRACE(K,P,S,X) \
- if(sqlite3SelectTrace&(K)) \
+ if(sqlite3TreeTrace&(K)) \
sqlite3DebugPrintf("%u/%d/%p: ",(S)->selId,(P)->addrExplain,(S)),\
sqlite3DebugPrintf X
#else
# define SELECTTRACE(K,P,S,X)
-# define SELECTTRACE_ENABLED 0
+# define TREETRACE_ENABLED 0
#endif
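
/* Illustrative call site (a sketch; assumes a Parse *pParse and Select *p
** are in scope, as at the call sites in the SELECT code generator). The
** message is printed only in a TREETRACE_ENABLED build when the matching
** bit of sqlite3TreeTrace is set:
**
**   SELECTTRACE(0x1,pParse,p,("begin processing\n"));
*/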
/*
@@ -14527,7 +14631,7 @@ struct BusyHandler {
** pointer will work here as long as it is distinct from SQLITE_STATIC
** and SQLITE_TRANSIENT.
*/
-#define SQLITE_DYNAMIC ((sqlite3_destructor_type)sqlite3OomFault)
+#define SQLITE_DYNAMIC ((sqlite3_destructor_type)sqlite3OomClear)
/*
** When SQLITE_OMIT_WSD is defined, it means that the target platform does
@@ -14596,6 +14700,7 @@ typedef struct FuncDef FuncDef;
typedef struct FuncDefHash FuncDefHash;
typedef struct IdList IdList;
typedef struct Index Index;
+typedef struct IndexedExpr IndexedExpr;
typedef struct IndexSample IndexSample;
typedef struct KeyClass KeyClass;
typedef struct KeyInfo KeyInfo;
@@ -14603,6 +14708,7 @@ typedef struct Lookaside Lookaside;
typedef struct LookasideSlot LookasideSlot;
typedef struct Module Module;
typedef struct NameContext NameContext;
+typedef struct OnOrUsing OnOrUsing;
typedef struct Parse Parse;
typedef struct ParseCleanup ParseCleanup;
typedef struct PreUpdate PreUpdate;
@@ -14660,6 +14766,7 @@ typedef struct With With;
#define MASKBIT32(n) (((unsigned int)1)<<(n))
#define SMASKBIT32(n) ((n)<=31?((unsigned int)1)<<(n):0)
#define ALLBITS ((Bitmask)-1)
+#define TOPBIT (((Bitmask)1)<<(BMS-1))
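+
+/* Illustrative values (a sketch): with the usual 64-bit Bitmask, BMS is 64,
+** so TOPBIT is 1<<63. MASKBIT32(3) is 0x00000008, while SMASKBIT32(40) is 0
+** because 40 is outside the range of a 32-bit mask. */
+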
/* A VList object records a mapping between parameters/variables/wildcards
** in the SQL statement (such as $abc, @pqr, or :xyz) and the integer
@@ -14674,6 +14781,331 @@ typedef int VList;
** "BusyHandler" typedefs. vdbe.h also requires a few of the opaque
** pointer types (i.e. FuncDef) defined above.
*/
+/************** Include os.h in the middle of sqliteInt.h ********************/
+/************** Begin file os.h **********************************************/
+/*
+** 2001 September 16
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This header file (together with its companion C source-code file
+** "os.c") attempts to abstract the underlying operating system so that
+** the SQLite library will work on both POSIX and windows systems.
+**
+** This header file is #include-ed by sqliteInt.h and thus ends up
+** being included by every source file.
+*/
+#ifndef _SQLITE_OS_H_
+#define _SQLITE_OS_H_
+
+/*
+** Attempt to automatically detect the operating system and setup the
+** necessary pre-processor macros for it.
+*/
+/************** Include os_setup.h in the middle of os.h *********************/
+/************** Begin file os_setup.h ****************************************/
+/*
+** 2013 November 25
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains pre-processor directives related to operating system
+** detection and/or setup.
+*/
+#ifndef SQLITE_OS_SETUP_H
+#define SQLITE_OS_SETUP_H
+
+/*
+** Figure out if we are dealing with Unix, Windows, or some other operating
+** system.
+**
+** After the following block of preprocessor macros, all of
+**
+** SQLITE_OS_KV
+** SQLITE_OS_OTHER
+** SQLITE_OS_UNIX
+** SQLITE_OS_WIN
+**
+** will be defined to either 1 or 0. One of them will be 1. The others will be 0.
+** If none of the macros are initially defined, then select either
+** SQLITE_OS_UNIX or SQLITE_OS_WIN depending on the target platform.
+**
+** If SQLITE_OS_OTHER=1 is specified at compile-time, then the application
+** must provide its own VFS implementation together with sqlite3_os_init()
+** and sqlite3_os_end() routines.
+*/
+#if !defined(SQLITE_OS_KV) && !defined(SQLITE_OS_OTHER) && \
+ !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_WIN)
+# if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || \
+ defined(__MINGW32__) || defined(__BORLANDC__)
+# define SQLITE_OS_WIN 1
+# define SQLITE_OS_UNIX 0
+# else
+# define SQLITE_OS_WIN 0
+# define SQLITE_OS_UNIX 1
+# endif
+#endif
+#if SQLITE_OS_OTHER+1>1
+# undef SQLITE_OS_KV
+# define SQLITE_OS_KV 0
+# undef SQLITE_OS_UNIX
+# define SQLITE_OS_UNIX 0
+# undef SQLITE_OS_WIN
+# define SQLITE_OS_WIN 0
+#endif
+#if SQLITE_OS_KV+1>1
+# undef SQLITE_OS_OTHER
+# define SQLITE_OS_OTHER 0
+# undef SQLITE_OS_UNIX
+# define SQLITE_OS_UNIX 0
+# undef SQLITE_OS_WIN
+# define SQLITE_OS_WIN 0
+# define SQLITE_OMIT_LOAD_EXTENSION 1
+# define SQLITE_OMIT_WAL 1
+# define SQLITE_OMIT_DEPRECATED 1
+# undef SQLITE_TEMP_STORE
+# define SQLITE_TEMP_STORE 3 /* Always use memory for temporary storage */
+# define SQLITE_DQS 0
+# define SQLITE_OMIT_SHARED_CACHE 1
+# define SQLITE_OMIT_AUTOINIT 1
+#endif
+#if SQLITE_OS_UNIX+1>1
+# undef SQLITE_OS_KV
+# define SQLITE_OS_KV 0
+# undef SQLITE_OS_OTHER
+# define SQLITE_OS_OTHER 0
+# undef SQLITE_OS_WIN
+# define SQLITE_OS_WIN 0
+#endif
+#if SQLITE_OS_WIN+1>1
+# undef SQLITE_OS_KV
+# define SQLITE_OS_KV 0
+# undef SQLITE_OS_OTHER
+# define SQLITE_OS_OTHER 0
+# undef SQLITE_OS_UNIX
+# define SQLITE_OS_UNIX 0
+#endif
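+
+/* Sketch (not part of the amalgamation): a build compiled with
+** -DSQLITE_OS_OTHER=1 must supply its own VFS plus the two hooks below.
+** The function names are the real public entry points; "my_custom_vfs"
+** is a hypothetical application-defined VFS object.
+**
+**   int sqlite3_os_init(void){
+**     return sqlite3_vfs_register(&my_custom_vfs, 1);
+**   }
+**   int sqlite3_os_end(void){ return SQLITE_OK; }
+*/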
+
+
+#endif /* SQLITE_OS_SETUP_H */
+
+/************** End of os_setup.h ********************************************/
+/************** Continuing where we left off in os.h *************************/
+
+/* If the SET_FULLSYNC macro is not defined above, then make it
+** a no-op
+*/
+#ifndef SET_FULLSYNC
+# define SET_FULLSYNC(x,y)
+#endif
+
+/* Maximum pathname length. Note: FILENAME_MAX is defined by stdio.h
+*/
+#ifndef SQLITE_MAX_PATHLEN
+# define SQLITE_MAX_PATHLEN FILENAME_MAX
+#endif
+
+/* Maximum number of symlinks that will be resolved while trying to
+** expand a filename in xFullPathname() in the VFS.
+*/
+#ifndef SQLITE_MAX_SYMLINK
+# define SQLITE_MAX_SYMLINK 200
+#endif
+
+/*
+** The default size of a disk sector
+*/
+#ifndef SQLITE_DEFAULT_SECTOR_SIZE
+# define SQLITE_DEFAULT_SECTOR_SIZE 4096
+#endif
+
+/*
+** Temporary files are named starting with this prefix followed by 16 random
+** alphanumeric characters, and no file extension. They are stored in the
+** OS's standard temporary file directory, and are deleted prior to exit.
+** If sqlite is being embedded in another program, you may wish to change the
+** prefix to reflect your program's name, so that if your program exits
+** prematurely, old temporary files can be easily identified. This can be done
+** using -DSQLITE_TEMP_FILE_PREFIX=myprefix_ on the compiler command line.
+**
+** 2006-10-31: The default prefix used to be "sqlite_". But then
+** Mcafee started using SQLite in their anti-virus product and it
+** started putting files with the "sqlite" name in the c:/temp folder.
+** This annoyed many windows users. Those users would then do a
+** Google search for "sqlite", find the telephone numbers of the
+** developers and call to wake them up at night and complain.
+** For this reason, the default name prefix is changed to be "sqlite"
+** spelled backwards. So the temp files are still identified, but
+** anybody smart enough to figure out the code is also likely smart
+** enough to know that calling the developer will not help get rid
+** of the file.
+*/
+#ifndef SQLITE_TEMP_FILE_PREFIX
+# define SQLITE_TEMP_FILE_PREFIX "etilqs_"
+#endif
+
+/*
+** The following values may be passed as the second argument to
+** sqlite3OsLock(). The various locks exhibit the following semantics:
+**
+** SHARED: Any number of processes may hold a SHARED lock simultaneously.
+** RESERVED: A single process may hold a RESERVED lock on a file at
+** any time. Other processes may hold and obtain new SHARED locks.
+** PENDING: A single process may hold a PENDING lock on a file at
+** any one time. Existing SHARED locks may persist, but no new
+** SHARED locks may be obtained by other processes.
+** EXCLUSIVE: An EXCLUSIVE lock precludes all other locks.
+**
+** PENDING_LOCK may not be passed directly to sqlite3OsLock(). Instead, a
+** process that requests an EXCLUSIVE lock may actually obtain a PENDING
+** lock. This can be upgraded to an EXCLUSIVE lock by a subsequent call to
+** sqlite3OsLock().
+*/
+#define NO_LOCK 0
+#define SHARED_LOCK 1
+#define RESERVED_LOCK 2
+#define PENDING_LOCK 3
+#define EXCLUSIVE_LOCK 4
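+
+/* Sketch of the usual escalation path through these states for a write
+** transaction (error handling elided; pFd is an assumed open sqlite3_file
+** and sqlite3OsLock() is declared later in this file). Never compiled. */
+#if 0
+  rc = sqlite3OsLock(pFd, SHARED_LOCK);     /* begin reading               */
+  rc = sqlite3OsLock(pFd, RESERVED_LOCK);   /* intend to write             */
+  rc = sqlite3OsLock(pFd, EXCLUSIVE_LOCK);  /* may stop at PENDING first   */
+#endif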
+
+/*
+** File Locking Notes: (Mostly about windows but also some info for Unix)
+**
+** We cannot use LockFileEx() or UnlockFileEx() on Win95/98/ME because
+** those functions are not available. So we use only LockFile() and
+** UnlockFile().
+**
+** LockFile() prevents not just writing but also reading by other processes.
+** A SHARED_LOCK is obtained by locking a single randomly-chosen
+** byte out of a specific range of bytes. The lock byte is obtained at
+** random so two separate readers can probably access the file at the
+** same time, unless they are unlucky and choose the same lock byte.
+** An EXCLUSIVE_LOCK is obtained by locking all bytes in the range.
+** There can only be one writer. A RESERVED_LOCK is obtained by locking
+** a single byte of the file that is designated as the reserved lock byte.
+** A PENDING_LOCK is obtained by locking a designated byte different from
+** the RESERVED_LOCK byte.
+**
+** On WinNT/2K/XP systems, LockFileEx() and UnlockFileEx() are available,
+** which means we can use reader/writer locks. When reader/writer locks
+** are used, the lock is placed on the same range of bytes that is used
+** for probabilistic locking in Win95/98/ME. Hence, the locking scheme
+** will support two or more Win95 readers or two or more WinNT readers.
+** But a single Win95 reader will lock out all WinNT readers and a single
+** WinNT reader will lock out all other Win95 readers.
+**
+** The following #defines specify the range of bytes used for locking.
+** SHARED_SIZE is the number of bytes available in the pool from which
+** a random byte is selected for a shared lock. The pool of bytes for
+** shared locks begins at SHARED_FIRST.
+**
+** The same locking strategy and
+** byte ranges are used for Unix. This leaves open the possibility of having
+** clients on win95, winNT, and unix all talking to the same shared file
+** and all locking correctly. To do so would require that samba (or whatever
+** tool is being used for file sharing) implements locks correctly between
+** windows and unix. I'm guessing that isn't likely to happen, but by
+** using the same locking range we are at least open to the possibility.
+**
+** Locking in windows is mandatory. For this reason, we cannot store
+** actual data in the bytes used for locking. The pager therefore never
+** allocates the pages involved in locking. SHARED_SIZE is selected so
+** that all locks will fit on a single page even at the minimum page size.
+** PENDING_BYTE defines the beginning of the locks. By default PENDING_BYTE
+** is set high so that we don't have to allocate an unused page except
+** for very large databases. But one should test the page skipping logic
+** by setting PENDING_BYTE low and running the entire regression suite.
+**
+** Changing the value of PENDING_BYTE results in a subtly incompatible
+** file format. Depending on how it is changed, you might not notice
+** the incompatibility right away, even running a full regression test.
+** The default location of PENDING_BYTE is the first byte past the
+** 1GB boundary.
+**
+*/
+#ifdef SQLITE_OMIT_WSD
+# define PENDING_BYTE (0x40000000)
+#else
+# define PENDING_BYTE sqlite3PendingByte
+#endif
+#define RESERVED_BYTE (PENDING_BYTE+1)
+#define SHARED_FIRST (PENDING_BYTE+2)
+#define SHARED_SIZE 510
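+
+/* Worked example (a sketch using the defaults above): with PENDING_BYTE at
+** 0x40000000 (the first byte past the 1GB boundary), RESERVED_BYTE is
+** 0x40000001, SHARED_FIRST is 0x40000002, and the 510-byte pool of shared
+** lock bytes runs from 0x40000002 through 0x400001FF inclusive. */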
+
+/*
+** Wrapper around OS specific sqlite3_os_init() function.
+*/
+SQLITE_PRIVATE int sqlite3OsInit(void);
+
+/*
+** Functions for accessing sqlite3_file methods
+*/
+SQLITE_PRIVATE void sqlite3OsClose(sqlite3_file*);
+SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file*, void*, int amt, i64 offset);
+SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file*, const void*, int amt, i64 offset);
+SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file*, i64 size);
+SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file*, int);
+SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file*, i64 *pSize);
+SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file*, int);
+SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file*, int);
+SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut);
+SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file*,int,void*);
+SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file*,int,void*);
+#define SQLITE_FCNTL_DB_UNCHANGED 0xca093fa0
+SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id);
+SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id);
+#ifndef SQLITE_OMIT_WAL
+SQLITE_PRIVATE int sqlite3OsShmMap(sqlite3_file *,int,int,int,void volatile **);
+SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int, int, int);
+SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id);
+SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int);
+#endif /* SQLITE_OMIT_WAL */
+SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64, int, void **);
+SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *, i64, void *);
+
+
+/*
+** Functions for accessing sqlite3_vfs methods
+*/
+SQLITE_PRIVATE int sqlite3OsOpen(sqlite3_vfs *, const char *, sqlite3_file*, int, int *);
+SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *, const char *, int);
+SQLITE_PRIVATE int sqlite3OsAccess(sqlite3_vfs *, const char *, int, int *pResOut);
+SQLITE_PRIVATE int sqlite3OsFullPathname(sqlite3_vfs *, const char *, int, char *);
+#ifndef SQLITE_OMIT_LOAD_EXTENSION
+SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *, const char *);
+SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *, int, char *);
+SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *, void *, const char *))(void);
+SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *, void *);
+#endif /* SQLITE_OMIT_LOAD_EXTENSION */
+SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *, int, char *);
+SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *, int);
+SQLITE_PRIVATE int sqlite3OsGetLastError(sqlite3_vfs*);
+SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64*);
+
+/*
+** Convenience functions for opening and closing files using
+** sqlite3_malloc() to obtain space for the file-handle structure.
+*/
+SQLITE_PRIVATE int sqlite3OsOpenMalloc(sqlite3_vfs *, const char *, sqlite3_file **, int,int*);
+SQLITE_PRIVATE void sqlite3OsCloseFree(sqlite3_file *);
+
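+/* Illustrative sketch (never compiled): how a caller inside the library
+** might use the wrappers above to open a file, read its first 100 bytes,
+** and close it. The function name and buffer are invented for the example.
+*/
+#if 0
+static int exampleReadPrefix(sqlite3_vfs *pVfs, const char *zPath){
+  sqlite3_file *pFd = 0;
+  char aBuf[100];
+  int outFlags = 0;
+  int rc = sqlite3OsOpenMalloc(pVfs, zPath, &pFd,
+               SQLITE_OPEN_READONLY|SQLITE_OPEN_MAIN_DB, &outFlags);
+  if( rc==SQLITE_OK ){
+    rc = sqlite3OsRead(pFd, aBuf, (int)sizeof(aBuf), 0);
+    sqlite3OsCloseFree(pFd);
+  }
+  return rc;
+}
+#endif
+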
+#endif /* _SQLITE_OS_H_ */
+
+/************** End of os.h **************************************************/
+/************** Continuing where we left off in sqliteInt.h ******************/
/************** Include pager.h in the middle of sqliteInt.h *****************/
/************** Begin file pager.h *******************************************/
/*
@@ -14721,14 +15153,15 @@ typedef struct Pager Pager;
typedef struct PgHdr DbPage;
/*
-** Page number PAGER_MJ_PGNO is never used in an SQLite database (it is
+** Page number PAGER_SJ_PGNO is never used in an SQLite database (it is
** reserved for working around a windows/posix incompatibility). It is
** used in the journal to signify that the remainder of the journal file
** is devoted to storing a super-journal name - there are no more pages to
** roll back. See comments for function writeSuperJournal() in pager.c
** for details.
*/
-#define PAGER_MJ_PGNO(x) ((Pgno)((PENDING_BYTE/((x)->pageSize))+1))
+#define PAGER_SJ_PGNO_COMPUTED(x) ((Pgno)((PENDING_BYTE/((x)->pageSize))+1))
+#define PAGER_SJ_PGNO(x) ((x)->lckPgno)
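+
+/* For example (a sketch using the defaults): with PENDING_BYTE at
+** 0x40000000 and a 4096-byte page size, PAGER_SJ_PGNO_COMPUTED(x) evaluates
+** to 1073741824/4096 + 1 == 262145. */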
/*
** Allowed values for the flags parameter to sqlite3PagerOpen().
@@ -15293,6 +15726,8 @@ SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree*, int, int *, int *);
SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor*, BtCursor*, i64);
+SQLITE_PRIVATE void sqlite3BtreeClearCache(Btree*);
+
/*
** If we are not using shared cache, then there is no need to
** use mutexes to access the BtShared structures. So make the
@@ -15405,7 +15840,6 @@ struct VdbeOp {
#ifdef SQLITE_ENABLE_CURSOR_HINTS
Expr *pExpr; /* Used when p4type is P4_EXPR */
#endif
- int (*xAdvance)(BtCursor *, int);
} p4;
#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
char *zComment; /* Comment to improve readability */
@@ -15456,21 +15890,19 @@ typedef struct VdbeOpList VdbeOpList;
#define P4_COLLSEQ (-2) /* P4 is a pointer to a CollSeq structure */
#define P4_INT32 (-3) /* P4 is a 32-bit signed integer */
#define P4_SUBPROGRAM (-4) /* P4 is a pointer to a SubProgram structure */
-#define P4_ADVANCE (-5) /* P4 is a pointer to BtreeNext() or BtreePrev() */
-#define P4_TABLE (-6) /* P4 is a pointer to a Table structure */
+#define P4_TABLE (-5) /* P4 is a pointer to a Table structure */
/* Above do not own any resources. Must free those below */
-#define P4_FREE_IF_LE (-7)
-#define P4_DYNAMIC (-7) /* Pointer to memory from sqliteMalloc() */
-#define P4_FUNCDEF (-8) /* P4 is a pointer to a FuncDef structure */
-#define P4_KEYINFO (-9) /* P4 is a pointer to a KeyInfo structure */
-#define P4_EXPR (-10) /* P4 is a pointer to an Expr tree */
-#define P4_MEM (-11) /* P4 is a pointer to a Mem* structure */
-#define P4_VTAB (-12) /* P4 is a pointer to an sqlite3_vtab structure */
-#define P4_REAL (-13) /* P4 is a 64-bit floating point value */
-#define P4_INT64 (-14) /* P4 is a 64-bit signed integer */
-#define P4_INTARRAY (-15) /* P4 is a vector of 32-bit integers */
-#define P4_FUNCCTX (-16) /* P4 is a pointer to an sqlite3_context object */
-#define P4_DYNBLOB (-17) /* Pointer to memory from sqliteMalloc() */
+#define P4_FREE_IF_LE (-6)
+#define P4_DYNAMIC (-6) /* Pointer to memory from sqliteMalloc() */
+#define P4_FUNCDEF (-7) /* P4 is a pointer to a FuncDef structure */
+#define P4_KEYINFO (-8) /* P4 is a pointer to a KeyInfo structure */
+#define P4_EXPR (-9) /* P4 is a pointer to an Expr tree */
+#define P4_MEM (-10) /* P4 is a pointer to a Mem* structure */
+#define P4_VTAB (-11) /* P4 is a pointer to an sqlite3_vtab structure */
+#define P4_REAL (-12) /* P4 is a 64-bit floating point value */
+#define P4_INT64 (-13) /* P4 is a 64-bit signed integer */
+#define P4_INTARRAY (-14) /* P4 is a vector of 32-bit integers */
+#define P4_FUNCCTX (-15) /* P4 is a pointer to an sqlite3_context object */
/* Error message codes for OP_Halt */
#define P5_ConstraintNotNull 1
@@ -15515,53 +15947,53 @@ typedef struct VdbeOpList VdbeOpList;
#define OP_Savepoint 0
#define OP_AutoCommit 1
#define OP_Transaction 2
-#define OP_SorterNext 3 /* jump */
-#define OP_Prev 4 /* jump */
-#define OP_Next 5 /* jump */
-#define OP_Checkpoint 6
-#define OP_JournalMode 7
-#define OP_Vacuum 8
-#define OP_VFilter 9 /* jump, synopsis: iplan=r[P3] zplan='P4' */
-#define OP_VUpdate 10 /* synopsis: data=r[P3@P2] */
-#define OP_Goto 11 /* jump */
-#define OP_Gosub 12 /* jump */
-#define OP_InitCoroutine 13 /* jump */
-#define OP_Yield 14 /* jump */
-#define OP_MustBeInt 15 /* jump */
-#define OP_Jump 16 /* jump */
-#define OP_Once 17 /* jump */
-#define OP_If 18 /* jump */
+#define OP_Checkpoint 3
+#define OP_JournalMode 4
+#define OP_Vacuum 5
+#define OP_VFilter 6 /* jump, synopsis: iplan=r[P3] zplan='P4' */
+#define OP_VUpdate 7 /* synopsis: data=r[P3@P2] */
+#define OP_Init 8 /* jump, synopsis: Start at P2 */
+#define OP_Goto 9 /* jump */
+#define OP_Gosub 10 /* jump */
+#define OP_InitCoroutine 11 /* jump */
+#define OP_Yield 12 /* jump */
+#define OP_MustBeInt 13 /* jump */
+#define OP_Jump 14 /* jump */
+#define OP_Once 15 /* jump */
+#define OP_If 16 /* jump */
+#define OP_IfNot 17 /* jump */
+#define OP_IsType 18 /* jump, synopsis: if typeof(P1.P3) in P5 goto P2 */
#define OP_Not 19 /* same as TK_NOT, synopsis: r[P2]= !r[P1] */
-#define OP_IfNot 20 /* jump */
-#define OP_IsNullOrType 21 /* jump, synopsis: if typeof(r[P1]) IN (P3,5) goto P2 */
-#define OP_IfNullRow 22 /* jump, synopsis: if P1.nullRow then r[P3]=NULL, goto P2 */
-#define OP_SeekLT 23 /* jump, synopsis: key=r[P3@P4] */
-#define OP_SeekLE 24 /* jump, synopsis: key=r[P3@P4] */
-#define OP_SeekGE 25 /* jump, synopsis: key=r[P3@P4] */
-#define OP_SeekGT 26 /* jump, synopsis: key=r[P3@P4] */
-#define OP_IfNotOpen 27 /* jump, synopsis: if( !csr[P1] ) goto P2 */
-#define OP_IfNoHope 28 /* jump, synopsis: key=r[P3@P4] */
-#define OP_NoConflict 29 /* jump, synopsis: key=r[P3@P4] */
-#define OP_NotFound 30 /* jump, synopsis: key=r[P3@P4] */
-#define OP_Found 31 /* jump, synopsis: key=r[P3@P4] */
-#define OP_SeekRowid 32 /* jump, synopsis: intkey=r[P3] */
-#define OP_NotExists 33 /* jump, synopsis: intkey=r[P3] */
-#define OP_Last 34 /* jump */
-#define OP_IfSmaller 35 /* jump */
-#define OP_SorterSort 36 /* jump */
-#define OP_Sort 37 /* jump */
-#define OP_Rewind 38 /* jump */
-#define OP_IdxLE 39 /* jump, synopsis: key=r[P3@P4] */
-#define OP_IdxGT 40 /* jump, synopsis: key=r[P3@P4] */
-#define OP_IdxLT 41 /* jump, synopsis: key=r[P3@P4] */
-#define OP_IdxGE 42 /* jump, synopsis: key=r[P3@P4] */
+#define OP_IfNullRow 20 /* jump, synopsis: if P1.nullRow then r[P3]=NULL, goto P2 */
+#define OP_SeekLT 21 /* jump, synopsis: key=r[P3@P4] */
+#define OP_SeekLE 22 /* jump, synopsis: key=r[P3@P4] */
+#define OP_SeekGE 23 /* jump, synopsis: key=r[P3@P4] */
+#define OP_SeekGT 24 /* jump, synopsis: key=r[P3@P4] */
+#define OP_IfNotOpen 25 /* jump, synopsis: if( !csr[P1] ) goto P2 */
+#define OP_IfNoHope 26 /* jump, synopsis: key=r[P3@P4] */
+#define OP_NoConflict 27 /* jump, synopsis: key=r[P3@P4] */
+#define OP_NotFound 28 /* jump, synopsis: key=r[P3@P4] */
+#define OP_Found 29 /* jump, synopsis: key=r[P3@P4] */
+#define OP_SeekRowid 30 /* jump, synopsis: intkey=r[P3] */
+#define OP_NotExists 31 /* jump, synopsis: intkey=r[P3] */
+#define OP_Last 32 /* jump */
+#define OP_IfSmaller 33 /* jump */
+#define OP_SorterSort 34 /* jump */
+#define OP_Sort 35 /* jump */
+#define OP_Rewind 36 /* jump */
+#define OP_SorterNext 37 /* jump */
+#define OP_Prev 38 /* jump */
+#define OP_Next 39 /* jump */
+#define OP_IdxLE 40 /* jump, synopsis: key=r[P3@P4] */
+#define OP_IdxGT 41 /* jump, synopsis: key=r[P3@P4] */
+#define OP_IdxLT 42 /* jump, synopsis: key=r[P3@P4] */
#define OP_Or 43 /* same as TK_OR, synopsis: r[P3]=(r[P1] || r[P2]) */
#define OP_And 44 /* same as TK_AND, synopsis: r[P3]=(r[P1] && r[P2]) */
-#define OP_RowSetRead 45 /* jump, synopsis: r[P3]=rowset(P1) */
-#define OP_RowSetTest 46 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */
-#define OP_Program 47 /* jump */
-#define OP_FkIfZero 48 /* jump, synopsis: if fkctr[P1]==0 goto P2 */
-#define OP_IfPos 49 /* jump, synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */
+#define OP_IdxGE 45 /* jump, synopsis: key=r[P3@P4] */
+#define OP_RowSetRead 46 /* jump, synopsis: r[P3]=rowset(P1) */
+#define OP_RowSetTest 47 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */
+#define OP_Program 48 /* jump */
+#define OP_FkIfZero 49 /* jump, synopsis: if fkctr[P1]==0 goto P2 */
#define OP_IsNull 50 /* jump, same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */
#define OP_NotNull 51 /* jump, same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */
#define OP_Ne 52 /* jump, same as TK_NE, synopsis: IF r[P3]!=r[P1] */
@@ -15571,12 +16003,12 @@ typedef struct VdbeOpList VdbeOpList;
#define OP_Lt 56 /* jump, same as TK_LT, synopsis: IF r[P3]<r[P1] */
#define OP_Ge 57 /* jump, same as TK_GE, synopsis: IF r[P3]>=r[P1] */
#define OP_ElseEq 58 /* jump, same as TK_ESCAPE */
-#define OP_IfNotZero 59 /* jump, synopsis: if r[P1]!=0 then r[P1]--, goto P2 */
-#define OP_DecrJumpZero 60 /* jump, synopsis: if (--r[P1])==0 goto P2 */
-#define OP_IncrVacuum 61 /* jump */
-#define OP_VNext 62 /* jump */
-#define OP_Filter 63 /* jump, synopsis: if key(P3@P4) not in filter(P1) goto P2 */
-#define OP_Init 64 /* jump, synopsis: Start at P2 */
+#define OP_IfPos 59 /* jump, synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */
+#define OP_IfNotZero 60 /* jump, synopsis: if r[P1]!=0 then r[P1]--, goto P2 */
+#define OP_DecrJumpZero 61 /* jump, synopsis: if (--r[P1])==0 goto P2 */
+#define OP_IncrVacuum 62 /* jump */
+#define OP_VNext 63 /* jump */
+#define OP_Filter 64 /* jump, synopsis: if key(P3@P4) not in filter(P1) goto P2 */
#define OP_PureFunc 65 /* synopsis: r[P3]=func(r[P2@NP]) */
#define OP_Function 66 /* synopsis: r[P3]=func(r[P2@NP]) */
#define OP_Return 67
@@ -15586,34 +16018,34 @@ typedef struct VdbeOpList VdbeOpList;
#define OP_Integer 71 /* synopsis: r[P2]=P1 */
#define OP_Int64 72 /* synopsis: r[P2]=P4 */
#define OP_String 73 /* synopsis: r[P2]='P4' (len=P1) */
-#define OP_Null 74 /* synopsis: r[P2..P3]=NULL */
-#define OP_SoftNull 75 /* synopsis: r[P1]=NULL */
-#define OP_Blob 76 /* synopsis: r[P2]=P4 (len=P1) */
-#define OP_Variable 77 /* synopsis: r[P2]=parameter(P1,P4) */
-#define OP_Move 78 /* synopsis: r[P2@P3]=r[P1@P3] */
-#define OP_Copy 79 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */
-#define OP_SCopy 80 /* synopsis: r[P2]=r[P1] */
-#define OP_IntCopy 81 /* synopsis: r[P2]=r[P1] */
-#define OP_FkCheck 82
-#define OP_ResultRow 83 /* synopsis: output=r[P1@P2] */
-#define OP_CollSeq 84
-#define OP_AddImm 85 /* synopsis: r[P1]=r[P1]+P2 */
-#define OP_RealAffinity 86
-#define OP_Cast 87 /* synopsis: affinity(r[P1]) */
-#define OP_Permutation 88
-#define OP_Compare 89 /* synopsis: r[P1@P3] <-> r[P2@P3] */
-#define OP_IsTrue 90 /* synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 */
-#define OP_ZeroOrNull 91 /* synopsis: r[P2] = 0 OR NULL */
-#define OP_Offset 92 /* synopsis: r[P3] = sqlite_offset(P1) */
-#define OP_Column 93 /* synopsis: r[P3]=PX */
-#define OP_TypeCheck 94 /* synopsis: typecheck(r[P1@P2]) */
-#define OP_Affinity 95 /* synopsis: affinity(r[P1@P2]) */
-#define OP_MakeRecord 96 /* synopsis: r[P3]=mkrec(r[P1@P2]) */
-#define OP_Count 97 /* synopsis: r[P2]=count() */
-#define OP_ReadCookie 98
-#define OP_SetCookie 99
-#define OP_ReopenIdx 100 /* synopsis: root=P2 iDb=P3 */
-#define OP_OpenRead 101 /* synopsis: root=P2 iDb=P3 */
+#define OP_BeginSubrtn 74 /* synopsis: r[P2]=NULL */
+#define OP_Null 75 /* synopsis: r[P2..P3]=NULL */
+#define OP_SoftNull 76 /* synopsis: r[P1]=NULL */
+#define OP_Blob 77 /* synopsis: r[P2]=P4 (len=P1) */
+#define OP_Variable 78 /* synopsis: r[P2]=parameter(P1,P4) */
+#define OP_Move 79 /* synopsis: r[P2@P3]=r[P1@P3] */
+#define OP_Copy 80 /* synopsis: r[P2@P3+1]=r[P1@P3+1] */
+#define OP_SCopy 81 /* synopsis: r[P2]=r[P1] */
+#define OP_IntCopy 82 /* synopsis: r[P2]=r[P1] */
+#define OP_FkCheck 83
+#define OP_ResultRow 84 /* synopsis: output=r[P1@P2] */
+#define OP_CollSeq 85
+#define OP_AddImm 86 /* synopsis: r[P1]=r[P1]+P2 */
+#define OP_RealAffinity 87
+#define OP_Cast 88 /* synopsis: affinity(r[P1]) */
+#define OP_Permutation 89
+#define OP_Compare 90 /* synopsis: r[P1@P3] <-> r[P2@P3] */
+#define OP_IsTrue 91 /* synopsis: r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4 */
+#define OP_ZeroOrNull 92 /* synopsis: r[P2] = 0 OR NULL */
+#define OP_Offset 93 /* synopsis: r[P3] = sqlite_offset(P1) */
+#define OP_Column 94 /* synopsis: r[P3]=PX cursor P1 column P2 */
+#define OP_TypeCheck 95 /* synopsis: typecheck(r[P1@P2]) */
+#define OP_Affinity 96 /* synopsis: affinity(r[P1@P2]) */
+#define OP_MakeRecord 97 /* synopsis: r[P3]=mkrec(r[P1@P2]) */
+#define OP_Count 98 /* synopsis: r[P2]=count() */
+#define OP_ReadCookie 99
+#define OP_SetCookie 100
+#define OP_ReopenIdx 101 /* synopsis: root=P2 iDb=P3 */
#define OP_BitAnd 102 /* same as TK_BITAND, synopsis: r[P3]=r[P1]&r[P2] */
#define OP_BitOr 103 /* same as TK_BITOR, synopsis: r[P3]=r[P1]|r[P2] */
#define OP_ShiftLeft 104 /* same as TK_LSHIFT, synopsis: r[P3]=r[P2]<<r[P1] */
-#define OP_OffsetLimit 159 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */
-#define OP_AggInverse 160 /* synopsis: accum=r[P3] inverse(r[P2@P5]) */
-#define OP_AggStep 161 /* synopsis: accum=r[P3] step(r[P2@P5]) */
-#define OP_AggStep1 162 /* synopsis: accum=r[P3] step(r[P2@P5]) */
-#define OP_AggValue 163 /* synopsis: r[P3]=value N=P2 */
-#define OP_AggFinal 164 /* synopsis: accum=r[P1] N=P2 */
-#define OP_Expire 165
-#define OP_CursorLock 166
-#define OP_CursorUnlock 167
-#define OP_TableLock 168 /* synopsis: iDb=P1 root=P2 write=P3 */
-#define OP_VBegin 169
-#define OP_VCreate 170
-#define OP_VDestroy 171
-#define OP_VOpen 172
-#define OP_VInitIn 173 /* synopsis: r[P2]=ValueList(P1,P3) */
-#define OP_VColumn 174 /* synopsis: r[P3]=vcolumn(P2) */
-#define OP_VRename 175
-#define OP_Pagecount 176
-#define OP_MaxPgcnt 177
-#define OP_FilterAdd 178 /* synopsis: filter(P1) += key(P3@P4) */
-#define OP_Trace 179
-#define OP_CursorHint 180
-#define OP_ReleaseReg 181 /* synopsis: release r[P1@P2] mask P3 */
-#define OP_Noop 182
-#define OP_Explain 183
-#define OP_Abortable 184
+#define OP_DropTrigger 154
+#define OP_IntegrityCk 155
+#define OP_RowSetAdd 156 /* synopsis: rowset(P1)=r[P2] */
+#define OP_Param 157
+#define OP_FkCounter 158 /* synopsis: fkctr[P1]+=P2 */
+#define OP_MemMax 159 /* synopsis: r[P1]=max(r[P1],r[P2]) */
+#define OP_OffsetLimit 160 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */
+#define OP_AggInverse 161 /* synopsis: accum=r[P3] inverse(r[P2@P5]) */
+#define OP_AggStep 162 /* synopsis: accum=r[P3] step(r[P2@P5]) */
+#define OP_AggStep1 163 /* synopsis: accum=r[P3] step(r[P2@P5]) */
+#define OP_AggValue 164 /* synopsis: r[P3]=value N=P2 */
+#define OP_AggFinal 165 /* synopsis: accum=r[P1] N=P2 */
+#define OP_Expire 166
+#define OP_CursorLock 167
+#define OP_CursorUnlock 168
+#define OP_TableLock 169 /* synopsis: iDb=P1 root=P2 write=P3 */
+#define OP_VBegin 170
+#define OP_VCreate 171
+#define OP_VDestroy 172
+#define OP_VOpen 173
+#define OP_VInitIn 174 /* synopsis: r[P2]=ValueList(P1,P3) */
+#define OP_VColumn 175 /* synopsis: r[P3]=vcolumn(P2) */
+#define OP_VRename 176
+#define OP_Pagecount 177
+#define OP_MaxPgcnt 178
+#define OP_ClrSubtype 179 /* synopsis: r[P1].subtype = 0 */
+#define OP_FilterAdd 180 /* synopsis: filter(P1) += key(P3@P4) */
+#define OP_Trace 181
+#define OP_CursorHint 182
+#define OP_ReleaseReg 183 /* synopsis: release r[P1@P2] mask P3 */
+#define OP_Noop 184
+#define OP_Explain 185
+#define OP_Abortable 186
/* Properties such as "out2" or "jump" that are specified in
** comments following the "case" for each opcode in the vdbe.c
@@ -15709,30 +16143,30 @@ typedef struct VdbeOpList VdbeOpList;
#define OPFLG_OUT2 0x10 /* out2: P2 is an output */
#define OPFLG_OUT3 0x20 /* out3: P3 is an output */
#define OPFLG_INITIALIZER {\
-/* 0 */ 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x10,\
-/* 8 */ 0x00, 0x01, 0x00, 0x01, 0x01, 0x01, 0x03, 0x03,\
-/* 16 */ 0x01, 0x01, 0x03, 0x12, 0x03, 0x03, 0x01, 0x09,\
-/* 24 */ 0x09, 0x09, 0x09, 0x01, 0x09, 0x09, 0x09, 0x09,\
-/* 32 */ 0x09, 0x09, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,\
-/* 40 */ 0x01, 0x01, 0x01, 0x26, 0x26, 0x23, 0x0b, 0x01,\
-/* 48 */ 0x01, 0x03, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\
-/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x01, 0x01, 0x01,\
+/* 0 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x01, 0x00,\
+/* 8 */ 0x01, 0x01, 0x01, 0x01, 0x03, 0x03, 0x01, 0x01,\
+/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x09, 0x09, 0x09,\
+/* 24 */ 0x09, 0x01, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,\
+/* 32 */ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,\
+/* 40 */ 0x01, 0x01, 0x01, 0x26, 0x26, 0x01, 0x23, 0x0b,\
+/* 48 */ 0x01, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\
+/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01, 0x01,\
/* 64 */ 0x01, 0x00, 0x00, 0x02, 0x02, 0x08, 0x00, 0x10,\
-/* 72 */ 0x10, 0x10, 0x10, 0x00, 0x10, 0x10, 0x00, 0x00,\
-/* 80 */ 0x10, 0x10, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02,\
-/* 88 */ 0x00, 0x00, 0x12, 0x1e, 0x20, 0x00, 0x00, 0x00,\
-/* 96 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x26, 0x26,\
+/* 72 */ 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x10, 0x00,\
+/* 80 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x02, 0x02,\
+/* 88 */ 0x02, 0x00, 0x00, 0x12, 0x1e, 0x20, 0x00, 0x00,\
+/* 96 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x26, 0x26,\
/* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,\
/* 112 */ 0x00, 0x00, 0x12, 0x00, 0x00, 0x10, 0x00, 0x00,\
-/* 120 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00,\
-/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,\
-/* 136 */ 0x00, 0x04, 0x04, 0x00, 0x00, 0x10, 0x00, 0x10,\
-/* 144 */ 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 152 */ 0x00, 0x10, 0x00, 0x06, 0x10, 0x00, 0x04, 0x1a,\
-/* 160 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00,\
-/* 176 */ 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 184 */ 0x00,}
+/* 120 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10,\
+/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,\
+/* 136 */ 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x10, 0x00,\
+/* 144 */ 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,\
+/* 152 */ 0x00, 0x10, 0x00, 0x00, 0x06, 0x10, 0x00, 0x04,\
+/* 160 */ 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,\
+/* 176 */ 0x00, 0x10, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00,\
+/* 184 */ 0x00, 0x00, 0x00,}
/* The resolve3P2Values() routine is able to run faster if it knows
** the value of the largest JUMP opcode. The smaller the maximum
@@ -15778,8 +16212,10 @@ SQLITE_PRIVATE void sqlite3VdbeVerifyNoResultRow(Vdbe *p);
#endif
#if defined(SQLITE_DEBUG)
SQLITE_PRIVATE void sqlite3VdbeVerifyAbortable(Vdbe *p, int);
+SQLITE_PRIVATE void sqlite3VdbeNoJumpsOutsideSubrtn(Vdbe*,int,int,int);
#else
# define sqlite3VdbeVerifyAbortable(A,B)
+# define sqlite3VdbeNoJumpsOutsideSubrtn(A,B,C,D)
#endif
SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp,int iLineno);
#ifndef SQLITE_OMIT_EXPLAIN
@@ -15806,6 +16242,7 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe*, int addr, int P1);
SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, int addr, int P2);
SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe*, int addr, int P3);
SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe*, u16 P5);
+SQLITE_PRIVATE void sqlite3VdbeTypeofColumn(Vdbe*, int);
SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe*, int addr);
SQLITE_PRIVATE void sqlite3VdbeJumpHereOrPopInst(Vdbe*, int addr);
SQLITE_PRIVATE int sqlite3VdbeChangeToNoop(Vdbe*, int addr);
@@ -15820,11 +16257,11 @@ SQLITE_PRIVATE void sqlite3VdbeAppendP4(Vdbe*, void *pP4, int p4type);
SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse*, Index*);
SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe*, int);
SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe*, int);
+SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetLastOp(Vdbe*);
SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Parse*);
SQLITE_PRIVATE void sqlite3VdbeRunOnlyOnce(Vdbe*);
SQLITE_PRIVATE void sqlite3VdbeReusable(Vdbe*);
SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe*);
-SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3*,Vdbe*);
SQLITE_PRIVATE void sqlite3VdbeMakeReady(Vdbe*,Parse*);
SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe*);
SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe*, int);
@@ -16169,290 +16606,6 @@ SQLITE_PRIVATE int sqlite3PCacheIsDirty(PCache *pCache);
/************** End of pcache.h **********************************************/
/************** Continuing where we left off in sqliteInt.h ******************/
-/************** Include os.h in the middle of sqliteInt.h ********************/
-/************** Begin file os.h **********************************************/
-/*
-** 2001 September 16
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This header file (together with is companion C source-code file
-** "os.c") attempt to abstract the underlying operating system so that
-** the SQLite library will work on both POSIX and windows systems.
-**
-** This header file is #include-ed by sqliteInt.h and thus ends up
-** being included by every source file.
-*/
-#ifndef _SQLITE_OS_H_
-#define _SQLITE_OS_H_
-
-/*
-** Attempt to automatically detect the operating system and setup the
-** necessary pre-processor macros for it.
-*/
-/************** Include os_setup.h in the middle of os.h *********************/
-/************** Begin file os_setup.h ****************************************/
-/*
-** 2013 November 25
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains pre-processor directives related to operating system
-** detection and/or setup.
-*/
-#ifndef SQLITE_OS_SETUP_H
-#define SQLITE_OS_SETUP_H
-
-/*
-** Figure out if we are dealing with Unix, Windows, or some other operating
-** system.
-**
-** After the following block of preprocess macros, all of SQLITE_OS_UNIX,
-** SQLITE_OS_WIN, and SQLITE_OS_OTHER will defined to either 1 or 0. One of
-** the three will be 1. The other two will be 0.
-*/
-#if defined(SQLITE_OS_OTHER)
-# if SQLITE_OS_OTHER==1
-# undef SQLITE_OS_UNIX
-# define SQLITE_OS_UNIX 0
-# undef SQLITE_OS_WIN
-# define SQLITE_OS_WIN 0
-# else
-# undef SQLITE_OS_OTHER
-# endif
-#endif
-#if !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_OTHER)
-# define SQLITE_OS_OTHER 0
-# ifndef SQLITE_OS_WIN
-# if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || \
- defined(__MINGW32__) || defined(__BORLANDC__)
-# define SQLITE_OS_WIN 1
-# define SQLITE_OS_UNIX 0
-# else
-# define SQLITE_OS_WIN 0
-# define SQLITE_OS_UNIX 1
-# endif
-# else
-# define SQLITE_OS_UNIX 0
-# endif
-#else
-# ifndef SQLITE_OS_WIN
-# define SQLITE_OS_WIN 0
-# endif
-#endif
-
-#endif /* SQLITE_OS_SETUP_H */
-
-/************** End of os_setup.h ********************************************/
-/************** Continuing where we left off in os.h *************************/
-
-/* If the SET_FULLSYNC macro is not defined above, then make it
-** a no-op
-*/
-#ifndef SET_FULLSYNC
-# define SET_FULLSYNC(x,y)
-#endif
-
-/* Maximum pathname length. Note: FILENAME_MAX defined by stdio.h
-*/
-#ifndef SQLITE_MAX_PATHLEN
-# define SQLITE_MAX_PATHLEN FILENAME_MAX
-#endif
-
-/*
-** The default size of a disk sector
-*/
-#ifndef SQLITE_DEFAULT_SECTOR_SIZE
-# define SQLITE_DEFAULT_SECTOR_SIZE 4096
-#endif
-
-/*
-** Temporary files are named starting with this prefix followed by 16 random
-** alphanumeric characters, and no file extension. They are stored in the
-** OS's standard temporary file directory, and are deleted prior to exit.
-** If sqlite is being embedded in another program, you may wish to change the
-** prefix to reflect your program's name, so that if your program exits
-** prematurely, old temporary files can be easily identified. This can be done
-** using -DSQLITE_TEMP_FILE_PREFIX=myprefix_ on the compiler command line.
-**
-** 2006-10-31: The default prefix used to be "sqlite_". But then
-** Mcafee started using SQLite in their anti-virus product and it
-** started putting files with the "sqlite" name in the c:/temp folder.
-** This annoyed many windows users. Those users would then do a
-** Google search for "sqlite", find the telephone numbers of the
-** developers and call to wake them up at night and complain.
-** For this reason, the default name prefix is changed to be "sqlite"
-** spelled backwards. So the temp files are still identified, but
-** anybody smart enough to figure out the code is also likely smart
-** enough to know that calling the developer will not help get rid
-** of the file.
-*/
-#ifndef SQLITE_TEMP_FILE_PREFIX
-# define SQLITE_TEMP_FILE_PREFIX "etilqs_"
-#endif
-
-/*
-** The following values may be passed as the second argument to
-** sqlite3OsLock(). The various locks exhibit the following semantics:
-**
-** SHARED: Any number of processes may hold a SHARED lock simultaneously.
-** RESERVED: A single process may hold a RESERVED lock on a file at
-** any time. Other processes may hold and obtain new SHARED locks.
-** PENDING: A single process may hold a PENDING lock on a file at
-** any one time. Existing SHARED locks may persist, but no new
-** SHARED locks may be obtained by other processes.
-** EXCLUSIVE: An EXCLUSIVE lock precludes all other locks.
-**
-** PENDING_LOCK may not be passed directly to sqlite3OsLock(). Instead, a
-** process that requests an EXCLUSIVE lock may actually obtain a PENDING
-** lock. This can be upgraded to an EXCLUSIVE lock by a subsequent call to
-** sqlite3OsLock().
-*/
-#define NO_LOCK 0
-#define SHARED_LOCK 1
-#define RESERVED_LOCK 2
-#define PENDING_LOCK 3
-#define EXCLUSIVE_LOCK 4
-
-/*
-** File Locking Notes: (Mostly about windows but also some info for Unix)
-**
-** We cannot use LockFileEx() or UnlockFileEx() on Win95/98/ME because
-** those functions are not available. So we use only LockFile() and
-** UnlockFile().
-**
-** LockFile() prevents not just writing but also reading by other processes.
-** A SHARED_LOCK is obtained by locking a single randomly-chosen
-** byte out of a specific range of bytes. The lock byte is obtained at
-** random so two separate readers can probably access the file at the
-** same time, unless they are unlucky and choose the same lock byte.
-** An EXCLUSIVE_LOCK is obtained by locking all bytes in the range.
-** There can only be one writer. A RESERVED_LOCK is obtained by locking
-** a single byte of the file that is designated as the reserved lock byte.
-** A PENDING_LOCK is obtained by locking a designated byte different from
-** the RESERVED_LOCK byte.
-**
-** On WinNT/2K/XP systems, LockFileEx() and UnlockFileEx() are available,
-** which means we can use reader/writer locks. When reader/writer locks
-** are used, the lock is placed on the same range of bytes that is used
-** for probabilistic locking in Win95/98/ME. Hence, the locking scheme
-** will support two or more Win95 readers or two or more WinNT readers.
-** But a single Win95 reader will lock out all WinNT readers and a single
-** WinNT reader will lock out all other Win95 readers.
-**
-** The following #defines specify the range of bytes used for locking.
-** SHARED_SIZE is the number of bytes available in the pool from which
-** a random byte is selected for a shared lock. The pool of bytes for
-** shared locks begins at SHARED_FIRST.
-**
-** The same locking strategy and
-** byte ranges are used for Unix. This leaves open the possibility of having
-** clients on win95, winNT, and unix all talking to the same shared file
-** and all locking correctly. To do so would require that samba (or whatever
-** tool is being used for file sharing) implements locks correctly between
-** windows and unix. I'm guessing that isn't likely to happen, but by
-** using the same locking range we are at least open to the possibility.
-**
-** Locking in windows is manditory. For this reason, we cannot store
-** actual data in the bytes used for locking. The pager never allocates
-** the pages involved in locking therefore. SHARED_SIZE is selected so
-** that all locks will fit on a single page even at the minimum page size.
-** PENDING_BYTE defines the beginning of the locks. By default PENDING_BYTE
-** is set high so that we don't have to allocate an unused page except
-** for very large databases. But one should test the page skipping logic
-** by setting PENDING_BYTE low and running the entire regression suite.
-**
-** Changing the value of PENDING_BYTE results in a subtly incompatible
-** file format. Depending on how it is changed, you might not notice
-** the incompatibility right away, even running a full regression test.
-** The default location of PENDING_BYTE is the first byte past the
-** 1GB boundary.
-**
-*/
-#ifdef SQLITE_OMIT_WSD
-# define PENDING_BYTE (0x40000000)
-#else
-# define PENDING_BYTE sqlite3PendingByte
-#endif
-#define RESERVED_BYTE (PENDING_BYTE+1)
-#define SHARED_FIRST (PENDING_BYTE+2)
-#define SHARED_SIZE 510
-
-/*
-** Wrapper around OS specific sqlite3_os_init() function.
-*/
-SQLITE_PRIVATE int sqlite3OsInit(void);
-
-/*
-** Functions for accessing sqlite3_file methods
-*/
-SQLITE_PRIVATE void sqlite3OsClose(sqlite3_file*);
-SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file*, void*, int amt, i64 offset);
-SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file*, const void*, int amt, i64 offset);
-SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file*, i64 size);
-SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file*, int);
-SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file*, i64 *pSize);
-SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file*, int);
-SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file*, int);
-SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut);
-SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file*,int,void*);
-SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file*,int,void*);
-#define SQLITE_FCNTL_DB_UNCHANGED 0xca093fa0
-SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id);
-SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id);
-#ifndef SQLITE_OMIT_WAL
-SQLITE_PRIVATE int sqlite3OsShmMap(sqlite3_file *,int,int,int,void volatile **);
-SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int, int, int);
-SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id);
-SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int);
-#endif /* SQLITE_OMIT_WAL */
-SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64, int, void **);
-SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *, i64, void *);
-
-
-/*
-** Functions for accessing sqlite3_vfs methods
-*/
-SQLITE_PRIVATE int sqlite3OsOpen(sqlite3_vfs *, const char *, sqlite3_file*, int, int *);
-SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *, const char *, int);
-SQLITE_PRIVATE int sqlite3OsAccess(sqlite3_vfs *, const char *, int, int *pResOut);
-SQLITE_PRIVATE int sqlite3OsFullPathname(sqlite3_vfs *, const char *, int, char *);
-#ifndef SQLITE_OMIT_LOAD_EXTENSION
-SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *, const char *);
-SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *, int, char *);
-SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *, void *, const char *))(void);
-SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *, void *);
-#endif /* SQLITE_OMIT_LOAD_EXTENSION */
-SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *, int, char *);
-SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *, int);
-SQLITE_PRIVATE int sqlite3OsGetLastError(sqlite3_vfs*);
-SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64*);
-
-/*
-** Convenience functions for opening and closing files using
-** sqlite3_malloc() to obtain space for the file-handle structure.
-*/
-SQLITE_PRIVATE int sqlite3OsOpenMalloc(sqlite3_vfs *, const char *, sqlite3_file **, int,int*);
-SQLITE_PRIVATE void sqlite3OsCloseFree(sqlite3_file *);
-
-#endif /* _SQLITE_OS_H_ */
-
-/************** End of os.h **************************************************/
-/************** Continuing where we left off in sqliteInt.h ******************/
/************** Include mutex.h in the middle of sqliteInt.h *****************/
/************** Begin file mutex.h *******************************************/
/*
@@ -16698,6 +16851,7 @@ struct Lookaside {
#endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */
void *pStart; /* First byte of available memory space */
void *pEnd; /* First byte past end of available space */
+ void *pTrueEnd; /* True value of pEnd, when db->pnBytesFreed!=0 */
};
struct LookasideSlot {
LookasideSlot *pNext; /* Next buffer in the list of free buffers */
@@ -17039,6 +17193,10 @@ struct sqlite3 {
#define SQLITE_BloomFilter 0x00080000 /* Use a Bloom filter on searches */
#define SQLITE_BloomPulldown 0x00100000 /* Run Bloom filters early */
#define SQLITE_BalancedMerge 0x00200000 /* Balance multi-way merges */
+#define SQLITE_ReleaseReg 0x00400000 /* Use OP_ReleaseReg for testing */
+#define SQLITE_FlttnUnionAll 0x00800000 /* Disable the UNION ALL flattener */
+ /* TH3 expects this value ^^^^^^^^^^ See flatten04.test */
+#define SQLITE_IndexedExpr 0x01000000 /* Pull exprs from index when able */
#define SQLITE_AllOpts 0xffffffff /* All optimizations */
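
/* Sketch: a test harness can switch any of the above optimizations off for
** a single connection; for instance, to disable the new indexed-expression
** optimization one might call:
**
**   sqlite3_test_control(SQLITE_TESTCTRL_OPTIMIZATIONS, db, SQLITE_IndexedExpr);
*/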
/*
@@ -17141,7 +17299,7 @@ struct FuncDestructor {
#define SQLITE_FUNC_SLOCHNG 0x2000 /* "Slow Change". Value constant during a
** single query - might change over time */
#define SQLITE_FUNC_TEST 0x4000 /* Built-in testing functions */
-#define SQLITE_FUNC_OFFSET 0x8000 /* Built-in sqlite_offset() function */
+/* 0x8000 -- available for reuse */
#define SQLITE_FUNC_WINDOW 0x00010000 /* Built-in window-only function */
#define SQLITE_FUNC_INTERNAL 0x00040000 /* For use by NestedParse() only */
#define SQLITE_FUNC_DIRECT 0x00080000 /* Not for use in TRIGGERs or VIEWs */
@@ -17158,6 +17316,7 @@ struct FuncDestructor {
#define INLINEFUNC_expr_compare 3
#define INLINEFUNC_affinity 4
#define INLINEFUNC_iif 5
+#define INLINEFUNC_sqlite_offset 6
#define INLINEFUNC_unlikely 99 /* Default case */
/*
@@ -17384,6 +17543,7 @@ struct Column {
#define COLFLAG_NOTAVAIL 0x0080 /* STORED column not yet calculated */
#define COLFLAG_BUSY 0x0100 /* Blocks recursion on GENERATED columns */
#define COLFLAG_HASCOLL 0x0200 /* Has collating sequence name in zCnName */
+#define COLFLAG_NOEXPAND 0x0400 /* Omit this column when expanding "*" */
#define COLFLAG_GENERATED 0x0060 /* Combo: _STORED, _VIRTUAL */
#define COLFLAG_NOINSERT 0x0062 /* Combo: _HIDDEN, _STORED, _VIRTUAL */
@@ -17609,7 +17769,7 @@ struct Table {
#ifndef SQLITE_OMIT_VIRTUALTABLE
# define IsVirtual(X) ((X)->eTabType==TABTYP_VTAB)
# define ExprIsVtab(X) \
- ((X)->op==TK_COLUMN && (X)->y.pTab!=0 && (X)->y.pTab->eTabType==TABTYP_VTAB)
+ ((X)->op==TK_COLUMN && (X)->y.pTab->eTabType==TABTYP_VTAB)
#else
# define IsVirtual(X) 0
# define ExprIsVtab(X) 0
@@ -17790,6 +17950,11 @@ struct KeyInfo {
struct UnpackedRecord {
KeyInfo *pKeyInfo; /* Collation and sort-order information */
Mem *aMem; /* Values */
+ union {
+ char *z; /* Cache of aMem[0].z for vdbeRecordCompareString() */
+ i64 i; /* Cache of aMem[0].u.i for vdbeRecordCompareInt() */
+ } u;
+ int n; /* Cache of aMem[0].n used by vdbeRecordCompareString() */
u16 nField; /* Number of entries in apMem[] */
i8 default_rc; /* Comparison result if keys are equal */
u8 errCode; /* Error detected by xRecordCompare (CORRUPT or NOMEM) */
@@ -17821,10 +17986,22 @@ struct UnpackedRecord {
** The Index.onError field determines whether or not the indexed columns
** must be unique and what to do if they are not. When Index.onError=OE_None,
** it means this is not a unique index. Otherwise it is a unique index
-** and the value of Index.onError indicate the which conflict resolution
-** algorithm to employ whenever an attempt is made to insert a non-unique
+** and the value of Index.onError indicates which conflict resolution
+** algorithm to employ when an attempt is made to insert a non-unique
** element.
**
+** The colNotIdxed bitmask is used in combination with SrcItem.colUsed
+** for a fast test to see if an index can serve as a covering index.
+** colNotIdxed has a 1 bit for every column of the original table that
+** is *not* available in the index. Thus the expression
+** "colUsed & colNotIdxed" will be non-zero if the index is not a
+** covering index. The most significant bit of colNotIdxed will always
+** be true (note-20221022-a). If a column beyond the 63rd column of the
+** table is used, the "colUsed & colNotIdxed" test will always be non-zero
+** and we have to either assume that the index is not covering, or use
+** an alternative (slower) algorithm to determine whether or not
+** the index is covering.
+**
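+** For example (a sketch): in a 5-column table with an index on columns 0,
+** 2, and 3, colNotIdxed has bits 1 and 4 set (plus the always-set high bit).
+** A query with colUsed==0x05 (columns 0 and 2) gives colUsed&colNotIdxed==0,
+** so the index can serve as covering; a query that also reads column 4
+** cannot be covered by that index.
+**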
** While parsing a CREATE TABLE or CREATE INDEX statement in order to
** generate VDBE code (as opposed to parsing one read from an sqlite_schema
** table as part of parsing an existing database schema), transient instances
@@ -17860,6 +18037,8 @@ struct Index {
unsigned bNoQuery:1; /* Do not use this index to optimize queries */
unsigned bAscKeyBug:1; /* True if the bba7b69f9849b5bf bug applies */
unsigned bHasVCol:1; /* Index references one or more VIRTUAL columns */
+ unsigned bHasExpr:1; /* Index contains an expression, either a literal
+ ** expression, or a reference to a VIRTUAL column */
#ifdef SQLITE_ENABLE_STAT4
int nSample; /* Number of elements in aSample[] */
int nSampleCol; /* Size of IndexSample.anEq[] and so on */
@@ -17868,7 +18047,7 @@ struct Index {
tRowcnt *aiRowEst; /* Non-logarithmic stat1 data for this index */
tRowcnt nRowEst0; /* Non-logarithmic number of rows in the index */
#endif
- Bitmask colNotIdxed; /* 0 for unindexed columns in pTab */
+ Bitmask colNotIdxed; /* Unindexed columns in pTab */
};
/*
@@ -18098,7 +18277,7 @@ struct Expr {
** TK_SELECT_COLUMN: column of the result vector */
i16 iAgg; /* Which entry in pAggInfo->aCol[] or ->aFunc[] */
union {
- int iRightJoinTable; /* If EP_FromJoin, the right table of the join */
+ int iJoin; /* If EP_OuterON or EP_InnerON, the right table */
int iOfst; /* else: start of token from start of statement */
} w;
AggInfo *pAggInfo; /* Used by TK_AGG_COLUMN and TK_AGG_FUNCTION */
@@ -18119,29 +18298,29 @@ struct Expr {
** EP_Agg == NC_HasAgg == SF_HasAgg
** EP_Win == NC_HasWin
*/
-#define EP_FromJoin 0x000001 /* Originates in ON/USING clause of outer join */
-#define EP_Distinct 0x000002 /* Aggregate function with DISTINCT keyword */
-#define EP_HasFunc 0x000004 /* Contains one or more functions of any kind */
-#define EP_FixedCol 0x000008 /* TK_Column with a known fixed value */
+#define EP_OuterON 0x000001 /* Originates in ON/USING clause of outer join */
+#define EP_InnerON 0x000002 /* Originates in ON/USING of an inner join */
+#define EP_Distinct 0x000004 /* Aggregate function with DISTINCT keyword */
+#define EP_HasFunc 0x000008 /* Contains one or more functions of any kind */
#define EP_Agg 0x000010 /* Contains one or more aggregate functions */
-#define EP_VarSelect 0x000020 /* pSelect is correlated, not constant */
-#define EP_DblQuoted 0x000040 /* token.z was originally in "..." */
-#define EP_InfixFunc 0x000080 /* True for an infix function: LIKE, GLOB, etc */
-#define EP_Collate 0x000100 /* Tree contains a TK_COLLATE operator */
-#define EP_Commuted 0x000200 /* Comparison operator has been commuted */
-#define EP_IntValue 0x000400 /* Integer value contained in u.iValue */
-#define EP_xIsSelect 0x000800 /* x.pSelect is valid (otherwise x.pList is) */
-#define EP_Skip 0x001000 /* Operator does not contribute to affinity */
-#define EP_Reduced 0x002000 /* Expr struct EXPR_REDUCEDSIZE bytes only */
-#define EP_TokenOnly 0x004000 /* Expr struct EXPR_TOKENONLYSIZE bytes only */
+#define EP_FixedCol 0x000020 /* TK_Column with a known fixed value */
+#define EP_VarSelect 0x000040 /* pSelect is correlated, not constant */
+#define EP_DblQuoted 0x000080 /* token.z was originally in "..." */
+#define EP_InfixFunc 0x000100 /* True for an infix function: LIKE, GLOB, etc */
+#define EP_Collate 0x000200 /* Tree contains a TK_COLLATE operator */
+#define EP_Commuted 0x000400 /* Comparison operator has been commuted */
+#define EP_IntValue 0x000800 /* Integer value contained in u.iValue */
+#define EP_xIsSelect 0x001000 /* x.pSelect is valid (otherwise x.pList is) */
+#define EP_Skip 0x002000 /* Operator does not contribute to affinity */
+#define EP_Reduced 0x004000 /* Expr struct EXPR_REDUCEDSIZE bytes only */
#define EP_Win 0x008000 /* Contains window functions */
-#define EP_MemToken 0x010000 /* Need to sqlite3DbFree() Expr.zToken */
-#define EP_IfNullRow 0x020000 /* The TK_IF_NULL_ROW opcode */
-#define EP_Unlikely 0x040000 /* unlikely() or likelihood() function */
-#define EP_ConstFunc 0x080000 /* A SQLITE_FUNC_CONSTANT or _SLOCHNG function */
-#define EP_CanBeNull 0x100000 /* Can be null despite NOT NULL constraint */
-#define EP_Subquery 0x200000 /* Tree contains a TK_SELECT operator */
- /* 0x400000 // Available */
+#define EP_TokenOnly 0x010000 /* Expr struct EXPR_TOKENONLYSIZE bytes only */
+ /* 0x020000 // Available for reuse */
+#define EP_IfNullRow 0x040000 /* The TK_IF_NULL_ROW opcode */
+#define EP_Unlikely 0x080000 /* unlikely() or likelihood() function */
+#define EP_ConstFunc 0x100000 /* A SQLITE_FUNC_CONSTANT or _SLOCHNG function */
+#define EP_CanBeNull 0x200000 /* Can be null despite NOT NULL constraint */
+#define EP_Subquery 0x400000 /* Tree contains a TK_SELECT operator */
#define EP_Leaf 0x800000 /* Expr.pLeft, .pRight, .u.pSelect all NULL */
#define EP_WinFunc 0x1000000 /* TK_FUNCTION with Expr.y.pWin set */
#define EP_Subrtn 0x2000000 /* Uses Expr.y.sub. TK_IN, _SELECT, or _EXISTS */
@@ -18164,8 +18343,8 @@ struct Expr {
#define ExprHasAllProperty(E,P) (((E)->flags&(P))==(P))
#define ExprSetProperty(E,P) (E)->flags|=(P)
#define ExprClearProperty(E,P) (E)->flags&=~(P)
-#define ExprAlwaysTrue(E) (((E)->flags&(EP_FromJoin|EP_IsTrue))==EP_IsTrue)
-#define ExprAlwaysFalse(E) (((E)->flags&(EP_FromJoin|EP_IsFalse))==EP_IsFalse)
+#define ExprAlwaysTrue(E) (((E)->flags&(EP_OuterON|EP_IsTrue))==EP_IsTrue)
+#define ExprAlwaysFalse(E) (((E)->flags&(EP_OuterON|EP_IsFalse))==EP_IsFalse)
/* Macros used to ensure that the correct members of unions are accessed
** in Expr.
@@ -18252,12 +18431,18 @@ struct ExprList {
struct ExprList_item { /* For each expression in the list */
Expr *pExpr; /* The parse tree for this expression */
char *zEName; /* Token associated with this expression */
- u8 sortFlags; /* Mask of KEYINFO_ORDER_* flags */
- unsigned eEName :2; /* Meaning of zEName */
- unsigned done :1; /* A flag to indicate when processing is finished */
- unsigned reusable :1; /* Constant expression is reusable */
- unsigned bSorterRef :1; /* Defer evaluation until after sorting */
- unsigned bNulls: 1; /* True if explicit "NULLS FIRST/LAST" */
+ struct {
+ u8 sortFlags; /* Mask of KEYINFO_ORDER_* flags */
+ unsigned eEName :2; /* Meaning of zEName */
+ unsigned done :1; /* Indicates when processing is finished */
+ unsigned reusable :1; /* Constant expression is reusable */
+ unsigned bSorterRef :1; /* Defer evaluation until after sorting */
+ unsigned bNulls :1; /* True if explicit "NULLS FIRST/LAST" */
+ unsigned bUsed :1; /* This column used in a SF_NestedFrom subquery */
+ unsigned bUsingTerm:1; /* Term from the USING clause of a NestedFrom */
+ unsigned bNoExpand: 1; /* Term is an auxiliary in NestedFrom and should
+ ** not be expanded by "*" in parent queries */
+ } fg;
union {
struct { /* Used by any ExprList other than Parse.pConsExpr */
u16 iOrderByCol; /* For ORDER BY, column number in result set */
@@ -18292,17 +18477,37 @@ struct ExprList {
** If "a" is the k-th column of table "t", then IdList.a[0].idx==k.
*/
struct IdList {
+ int nId; /* Number of identifiers on the list */
+ u8 eU4; /* Which element of a.u4 is valid */
struct IdList_item {
char *zName; /* Name of the identifier */
- int idx; /* Index in some Table.aCol[] of a column named zName */
- } *a;
- int nId; /* Number of identifiers on the list */
+ union {
+ int idx; /* Index in some Table.aCol[] of a column named zName */
+ Expr *pExpr; /* Expr to implement a USING variable -- NOT USED */
+ } u4;
+ } a[1];
};
+/*
+** Allowed values for IdList.eU4, which determines which member of the
+** IdList_item.u4 union is valid.
+*/
+#define EU4_NONE 0 /* Does not use IdList.a.u4 */
+#define EU4_IDX 1 /* Uses IdList.a.u4.idx */
+#define EU4_EXPR 2 /* Uses IdList.a.u4.pExpr -- NOT CURRENTLY USED */
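/*
** Editor's note (not part of the upstream diff): a sketch of how eU4 is
** meant to gate access to the IdList_item union.  exampleIdListColumn()
** is a hypothetical helper, not an SQLite routine.
*/
#if 0  /* illustration only */
static int exampleIdListColumn(const IdList *pList, int i){
  assert( i>=0 && i<pList->nId );
  assert( pList->eU4==EU4_IDX );  /* u4.idx is only meaningful in this mode */
  return pList->a[i].u4.idx;
}
#endif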
+
/*
** The SrcItem object represents a single term in the FROM clause of a query.
** The SrcList object is mostly an array of SrcItems.
**
+** The jointype starts out showing the join type between the current table
+** and the next table on the list. The parser builds the list this way.
+** But sqlite3SrcListShiftJoinType() later shifts the jointypes so that each
+** jointype expresses the join between the table and the previous table.
+**
+** In the colUsed field, the high-order bit (bit 63) is set if the table
+** contains more than 63 columns and the 64-th or later column is used.
+**
** Union member validity:
**
** u1.zIndexedBy fg.isIndexedBy && !fg.isTabFunc
@@ -18326,44 +18531,48 @@ struct SrcItem {
unsigned isIndexedBy :1; /* True if there is an INDEXED BY clause */
unsigned isTabFunc :1; /* True if table-valued-function syntax */
unsigned isCorrelated :1; /* True if sub-query is correlated */
+ unsigned isMaterialized:1; /* This is a materialized view */
unsigned viaCoroutine :1; /* Implemented as a co-routine */
unsigned isRecursive :1; /* True for recursive reference in WITH */
unsigned fromDDL :1; /* Comes from sqlite_schema */
unsigned isCte :1; /* This is a CTE */
unsigned notCte :1; /* This item may not match a CTE */
+ unsigned isUsing :1; /* u3.pUsing is valid */
+ unsigned isOn :1; /* u3.pOn was once valid and non-NULL */
+ unsigned isSynthUsing :1; /* u3.pUsing is synthesized from NATURAL */
+ unsigned isNestedFrom :1; /* pSelect is a SF_NestedFrom subquery */
} fg;
int iCursor; /* The VDBE cursor number used to access this table */
- Expr *pOn; /* The ON clause of a join */
- IdList *pUsing; /* The USING clause of a join */
- Bitmask colUsed; /* Bit N (1<<N) set if column N of pTab is used */
+ union {
+ Expr *pOn; /* fg.isUsing==0 => The ON clause of a join */
+ IdList *pUsing; /* fg.isUsing==1 => The USING clause of a join */
+ } u3;
+ Bitmask colUsed; /* Bit N set if column N used. Details above for N>62 */
union {
char *zIndexedBy; /* Identifier from "INDEXED BY <zIndex>" clause */
ExprList *pFuncArg; /* Arguments to table-valued-function */
} u1;
union {
Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */
- CteUse *pCteUse; /* CTE Usage info info fg.isCte is true */
+ CteUse *pCteUse; /* CTE Usage info when fg.isCte is true */
} u2;
};
/*
-** The following structure describes the FROM clause of a SELECT statement.
-** Each table or subquery in the FROM clause is a separate element of
-** the SrcList.a[] array.
-**
-** With the addition of multiple database support, the following structure
-** can also be used to describe a particular table such as the table that
-** is modified by an INSERT, DELETE, or UPDATE statement. In standard SQL,
-** such a table must be a simple name: ID. But in SQLite, the table can
-** now be identified by a database name, a dot, then the table name: ID.ID.
-**
-** The jointype starts out showing the join type between the current table
-** and the next table on the list. The parser builds the list this way.
-** But sqlite3SrcListShiftJoinType() later shifts the jointypes so that each
-** jointype expresses the join between the table and the previous table.
+** The OnOrUsing object represents either an ON clause or a USING clause.
+** It can never be both at the same time, but it can be neither.
+*/
+struct OnOrUsing {
+ Expr *pOn; /* The ON clause of a join */
+ IdList *pUsing; /* The USING clause of a join */
+};
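/*
** Editor's note (not part of the upstream diff): the invariant stated in the
** comment above ("never both at the same time, possibly neither") expressed
** as a hypothetical debugging check.
*/
#if 0  /* illustration only */
#define exampleOnOrUsingOk(P)  ((P)->pOn==0 || (P)->pUsing==0)
#endif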
+
+/*
+** This object represents one or more tables that are the source of
+** content for an SQL statement. For example, a single SrcList object
+** is used to hold the FROM clause of a SELECT statement. SrcList also
+** represents the target tables for DELETE, INSERT, and UPDATE statements.
**
-** In the colUsed field, the high-order bit (bit 63) is set if the table
-** contains more than 63 columns and the 64-th or later column is used.
*/
struct SrcList {
int nSrc; /* Number of tables or subqueries in the FROM clause */
@@ -18374,14 +18583,15 @@ struct SrcList {
/*
** Permitted values of the SrcList.a.jointype field
*/
-#define JT_INNER 0x0001 /* Any kind of inner or cross join */
-#define JT_CROSS 0x0002 /* Explicit use of the CROSS keyword */
-#define JT_NATURAL 0x0004 /* True for a "natural" join */
-#define JT_LEFT 0x0008 /* Left outer join */
-#define JT_RIGHT 0x0010 /* Right outer join */
-#define JT_OUTER 0x0020 /* The "OUTER" keyword is present */
-#define JT_ERROR 0x0040 /* unknown or unsupported join type */
-
+#define JT_INNER 0x01 /* Any kind of inner or cross join */
+#define JT_CROSS 0x02 /* Explicit use of the CROSS keyword */
+#define JT_NATURAL 0x04 /* True for a "natural" join */
+#define JT_LEFT 0x08 /* Left outer join */
+#define JT_RIGHT 0x10 /* Right outer join */
+#define JT_OUTER 0x20 /* The "OUTER" keyword is present */
+#define JT_LTORJ 0x40 /* One of the LEFT operands of a RIGHT JOIN
+ ** Mnemonic: Left Table Of Right Join */
+#define JT_ERROR 0x80 /* unknown or unsupported join type */
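/*
** Editor's note (not part of the upstream diff): a sketch of how the JT_*
** bits above combine.  A FULL OUTER JOIN carries both JT_LEFT and JT_RIGHT,
** and JT_LTORJ marks tables to the left of a RIGHT JOIN.  The helper name
** is hypothetical.
*/
#if 0  /* illustration only */
static const char *exampleJoinTypeName(u8 jointype){
  if( (jointype & (JT_LEFT|JT_RIGHT))==(JT_LEFT|JT_RIGHT) ) return "FULL";
  if( jointype & JT_LEFT )  return "LEFT";
  if( jointype & JT_RIGHT ) return "RIGHT";
  if( jointype & JT_CROSS ) return "CROSS";
  return "INNER";
}
#endif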
/*
** Flags appropriate for the wctrlFlags parameter of sqlite3WhereBegin()
@@ -18404,7 +18614,7 @@ struct SrcList {
#define WHERE_SORTBYGROUP 0x0200 /* Support sqlite3WhereIsSorted() */
#define WHERE_AGG_DISTINCT 0x0400 /* Query is "SELECT agg(DISTINCT ...)" */
#define WHERE_ORDERBY_LIMIT 0x0800 /* ORDERBY+LIMIT on the inner loop */
- /* 0x1000 not currently used */
+#define WHERE_RIGHT_JOIN 0x1000 /* Processing a RIGHT JOIN */
/* 0x2000 not currently used */
#define WHERE_USE_LIMIT 0x4000 /* Use the LIMIT in cost estimates */
/* 0x8000 not currently used */
@@ -18600,6 +18810,9 @@ struct Select {
#define SF_CopyCte 0x4000000 /* SELECT statement is a copy of a CTE */
#define SF_OrderByReqd 0x8000000 /* The ORDER BY clause may not be omitted */
+/* True if S exists and has SF_NestedFrom */
+#define IsNestedFrom(S) ((S)!=0 && ((S)->selFlags&SF_NestedFrom)!=0)
+
/*
** The results of a SELECT can be distributed in several ways, as defined
** by one of the following macros. The "SRT" prefix means "SELECT Result
@@ -18704,7 +18917,7 @@ struct SelectDest {
int iSDParm2; /* A second parameter for the eDest disposal method */
int iSdst; /* Base register where results are written */
int nSdst; /* Number of registers allocated */
- char *zAffSdst; /* Affinity used when eDest==SRT_Set */
+ char *zAffSdst; /* Affinity used for SRT_Set, SRT_Table, and similar */
ExprList *pOrderBy; /* Key columns for SRT_Queue and SRT_DistQueue */
};
@@ -18769,6 +18982,28 @@ struct TriggerPrg {
# define DbMaskNonZero(M) (M)!=0
#endif
+/*
+** For each index X that has as one of its arguments either an expression
+** or the name of a virtual generated column, and if X is in scope such that
+** the value of the expression can simply be read from the index, then
+** there is an instance of this object on the Parse.pIdxExpr list.
+**
+** During code generation, while generating code to evaluate expressions,
+** this list is consulted and if a matching expression is found, the value
+** is read from the index rather than being recomputed.
+*/
+struct IndexedExpr {
+ Expr *pExpr; /* The expression contained in the index */
+ int iDataCur; /* The data cursor associated with the index */
+ int iIdxCur; /* The index cursor */
+ int iIdxCol; /* The index column that contains value of pExpr */
+ u8 bMaybeNullRow; /* True if we need an OP_IfNullRow check */
+ IndexedExpr *pIENext; /* Next in a list of all indexed expressions */
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ const char *zIdxName; /* Name of index, used only for bytecode comments */
+#endif
+};
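/*
** Editor's note (not part of the upstream diff): a sketch of the lookup the
** comment above describes -- walking Parse.pIdxExpr to see whether an
** expression's value can be read directly from an index column.  The helper
** name and the use of sqlite3ExprCompare() with these exact arguments are
** assumptions made for illustration only.
*/
#if 0  /* illustration only */
static const IndexedExpr *exampleFindIndexedExpr(Parse *pParse, Expr *pExpr){
  const IndexedExpr *pIE;
  for(pIE=pParse->pIdxExpr; pIE; pIE=pIE->pIENext){
    if( sqlite3ExprCompare(0, pIE->pExpr, pExpr, -1)==0 ){
      return pIE;   /* read column pIE->iIdxCol of cursor pIE->iIdxCur */
    }
  }
  return 0;         /* not available from any index; evaluate normally */
}
#endif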
+
/*
** An instance of the ParseCleanup object specifies an operation that
** should be performed after parsing to deallocate resources obtained
@@ -18810,7 +19045,8 @@ struct Parse {
u8 hasCompound; /* Need to invoke convertCompoundSelectToSubquery() */
u8 okConstFactor; /* OK to factor out constants */
u8 disableLookaside; /* Number of times lookaside has been disabled */
- u8 disableVtab; /* Disable all virtual tables for this parse */
+ u8 prepFlags; /* SQLITE_PREPARE_* flags */
+ u8 withinRJSubrtn; /* Nesting level for RIGHT JOIN body subroutines */
#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST)
u8 earlyCleanup; /* OOM inside sqlite3ParserAddCleanup() */
#endif
@@ -18826,6 +19062,7 @@ struct Parse {
int nLabelAlloc; /* Number of slots in aLabel */
int *aLabel; /* Space to hold the labels */
ExprList *pConstExpr;/* Constant expressions */
+ IndexedExpr *pIdxExpr;/* List of expressions used by active indexes */
Token constraintName;/* Name of the constraint currently being parsed */
yDbMask writeMask; /* Start a write transaction on these databases */
yDbMask cookieMask; /* Bitmask of schema verified databases */
@@ -18983,20 +19220,20 @@ struct AuthContext {
#define OPFLAG_PREFORMAT 0x80 /* OP_Insert uses preformatted cell */
/*
- * Each trigger present in the database schema is stored as an instance of
- * struct Trigger.
- *
- * Pointers to instances of struct Trigger are stored in two ways.
- * 1. In the "trigHash" hash table (part of the sqlite3* that represents the
- * database). This allows Trigger structures to be retrieved by name.
- * 2. All triggers associated with a single table form a linked list, using the
- * pNext member of struct Trigger. A pointer to the first element of the
- * linked list is stored as the "pTrigger" member of the associated
- * struct Table.
- *
- * The "step_list" member points to the first element of a linked list
- * containing the SQL statements specified as the trigger program.
- */
+** Each trigger present in the database schema is stored as an instance of
+** struct Trigger.
+**
+** Pointers to instances of struct Trigger are stored in two ways.
+** 1. In the "trigHash" hash table (part of the sqlite3* that represents the
+** database). This allows Trigger structures to be retrieved by name.
+** 2. All triggers associated with a single table form a linked list, using the
+** pNext member of struct Trigger. A pointer to the first element of the
+** linked list is stored as the "pTrigger" member of the associated
+** struct Table.
+**
+** The "step_list" member points to the first element of a linked list
+** containing the SQL statements specified as the trigger program.
+*/
struct Trigger {
char *zName; /* The name of the trigger */
char *table; /* The table or view to which the trigger applies */
@@ -19023,43 +19260,48 @@ struct Trigger {
#define TRIGGER_AFTER 2
/*
- * An instance of struct TriggerStep is used to store a single SQL statement
- * that is a part of a trigger-program.
- *
- * Instances of struct TriggerStep are stored in a singly linked list (linked
- * using the "pNext" member) referenced by the "step_list" member of the
- * associated struct Trigger instance. The first element of the linked list is
- * the first step of the trigger-program.
- *
- * The "op" member indicates whether this is a "DELETE", "INSERT", "UPDATE" or
- * "SELECT" statement. The meanings of the other members is determined by the
- * value of "op" as follows:
- *
- * (op == TK_INSERT)
- * orconf -> stores the ON CONFLICT algorithm
- * pSelect -> If this is an INSERT INTO ... SELECT ... statement, then
- * this stores a pointer to the SELECT statement. Otherwise NULL.
- * zTarget -> Dequoted name of the table to insert into.
- * pExprList -> If this is an INSERT INTO ... VALUES ... statement, then
- * this stores values to be inserted. Otherwise NULL.
- * pIdList -> If this is an INSERT INTO ... (<column-names>) VALUES ...
- * statement, then this stores the column-names to be
- * inserted into.
- *
- * (op == TK_DELETE)
- * zTarget -> Dequoted name of the table to delete from.
- * pWhere -> The WHERE clause of the DELETE statement if one is specified.
- * Otherwise NULL.
- *
- * (op == TK_UPDATE)
- * zTarget -> Dequoted name of the table to update.
- * pWhere -> The WHERE clause of the UPDATE statement if one is specified.
- * Otherwise NULL.
- * pExprList -> A list of the columns to update and the expressions to update
- * them to. See sqlite3Update() documentation of "pChanges"
- * argument.
- *
- */
+** An instance of struct TriggerStep is used to store a single SQL statement
+** that is a part of a trigger-program.
+**
+** Instances of struct TriggerStep are stored in a singly linked list (linked
+** using the "pNext" member) referenced by the "step_list" member of the
+** associated struct Trigger instance. The first element of the linked list is
+** the first step of the trigger-program.
+**
+** The "op" member indicates whether this is a "DELETE", "INSERT", "UPDATE" or
+** "SELECT" statement. The meanings of the other members is determined by the
+** value of "op" as follows:
+**
+** (op == TK_INSERT)
+** orconf -> stores the ON CONFLICT algorithm
+** pSelect -> The content to be inserted - either a SELECT statement or
+** a VALUES clause.
+** zTarget -> Dequoted name of the table to insert into.
+** pIdList -> If this is an INSERT INTO ... (<column-names>) VALUES ...
+** statement, then this stores the column-names to be
+** inserted into.
+** pUpsert -> The ON CONFLICT clauses for an Upsert
+**
+** (op == TK_DELETE)
+** zTarget -> Dequoted name of the table to delete from.
+** pWhere -> The WHERE clause of the DELETE statement if one is specified.
+** Otherwise NULL.
+**
+** (op == TK_UPDATE)
+** zTarget -> Dequoted name of the table to update.
+** pWhere -> The WHERE clause of the UPDATE statement if one is specified.
+** Otherwise NULL.
+** pExprList -> A list of the columns to update and the expressions to update
+** them to. See sqlite3Update() documentation of "pChanges"
+** argument.
+**
+** (op == TK_SELECT)
+** pSelect -> The SELECT statement
+**
+** (op == TK_RETURNING)
+** pExprList -> The list of expressions that follow the RETURNING keyword.
+**
+*/
struct TriggerStep {
u8 op; /* One of TK_DELETE, TK_UPDATE, TK_INSERT, TK_SELECT,
** or TK_RETURNING */
@@ -19256,15 +19498,15 @@ struct Walker {
struct RefSrcList *pRefSrcList; /* sqlite3ReferencesSrcList() */
int *aiCol; /* array of column indexes */
struct IdxCover *pIdxCover; /* Check for index coverage */
- struct IdxExprTrans *pIdxTrans; /* Convert idxed expr to column */
ExprList *pGroupBy; /* GROUP BY clause */
Select *pSelect; /* HAVING to WHERE clause ctx */
struct WindowRewrite *pRewrite; /* Window rewrite context */
struct WhereConst *pConst; /* WHERE clause constants */
struct RenameCtx *pRename; /* RENAME COLUMN context */
struct Table *pTab; /* Table of generated column */
+ struct CoveringIndexCheck *pCovIdxCk; /* Check for covering index */
SrcItem *pSrcItem; /* A single FROM clause item */
- DbFixer *pFix;
+ DbFixer *pFix; /* See sqlite3FixSelect() */
} u;
};
@@ -19414,7 +19656,7 @@ struct Window {
Window **ppThis; /* Pointer to this object in Select.pWin list */
Window *pNextWin; /* Next window function belonging to this SELECT */
Expr *pFilter; /* The FILTER expression */
- FuncDef *pFunc; /* The function */
+ FuncDef *pWFunc; /* The function */
int iEphCsr; /* Partition buffer or Peer buffer */
int regAccum; /* Accumulator */
int regResult; /* Interim result */
@@ -19570,6 +19812,7 @@ SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *, void *, u64);
SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *, void *, u64);
SQLITE_PRIVATE void sqlite3DbFree(sqlite3*, void*);
SQLITE_PRIVATE void sqlite3DbFreeNN(sqlite3*, void*);
+SQLITE_PRIVATE void sqlite3DbNNFreeNN(sqlite3*, void*);
SQLITE_PRIVATE int sqlite3MallocSize(const void*);
SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3*, const void*);
SQLITE_PRIVATE void *sqlite3PageMalloc(int);
@@ -19590,12 +19833,16 @@ SQLITE_PRIVATE int sqlite3HeapNearlyFull(void);
*/
#ifdef SQLITE_USE_ALLOCA
# define sqlite3StackAllocRaw(D,N) alloca(N)
+# define sqlite3StackAllocRawNN(D,N) alloca(N)
# define sqlite3StackAllocZero(D,N) memset(alloca(N), 0, N)
# define sqlite3StackFree(D,P)
+# define sqlite3StackFreeNN(D,P)
#else
# define sqlite3StackAllocRaw(D,N) sqlite3DbMallocRaw(D,N)
+# define sqlite3StackAllocRawNN(D,N) sqlite3DbMallocRawNN(D,N)
# define sqlite3StackAllocZero(D,N) sqlite3DbMallocZero(D,N)
# define sqlite3StackFree(D,P) sqlite3DbFree(D,P)
+# define sqlite3StackFreeNN(D,P) sqlite3DbFreeNN(D,P)
#endif
/* Do not allow both MEMSYS5 and MEMSYS3 to be defined together. If they
@@ -19669,18 +19916,53 @@ SQLITE_PRIVATE void *sqlite3TestTextToPtr(const char*);
#endif
#if defined(SQLITE_DEBUG)
+SQLITE_PRIVATE void sqlite3TreeViewLine(TreeView*, const char *zFormat, ...);
SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView*, const Expr*, u8);
SQLITE_PRIVATE void sqlite3TreeViewBareExprList(TreeView*, const ExprList*, const char*);
SQLITE_PRIVATE void sqlite3TreeViewExprList(TreeView*, const ExprList*, u8, const char*);
+SQLITE_PRIVATE void sqlite3TreeViewBareIdList(TreeView*, const IdList*, const char*);
+SQLITE_PRIVATE void sqlite3TreeViewIdList(TreeView*, const IdList*, u8, const char*);
+SQLITE_PRIVATE void sqlite3TreeViewColumnList(TreeView*, const Column*, int, u8);
SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView*, const SrcList*);
SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView*, const Select*, u8);
SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView*, const With*, u8);
+SQLITE_PRIVATE void sqlite3TreeViewUpsert(TreeView*, const Upsert*, u8);
+#if TREETRACE_ENABLED
+SQLITE_PRIVATE void sqlite3TreeViewDelete(const With*, const SrcList*, const Expr*,
+ const ExprList*,const Expr*, const Trigger*);
+SQLITE_PRIVATE void sqlite3TreeViewInsert(const With*, const SrcList*,
+ const IdList*, const Select*, const ExprList*,
+ int, const Upsert*, const Trigger*);
+SQLITE_PRIVATE void sqlite3TreeViewUpdate(const With*, const SrcList*, const ExprList*,
+ const Expr*, int, const ExprList*, const Expr*,
+ const Upsert*, const Trigger*);
+#endif
+#ifndef SQLITE_OMIT_TRIGGER
+SQLITE_PRIVATE void sqlite3TreeViewTriggerStep(TreeView*, const TriggerStep*, u8, u8);
+SQLITE_PRIVATE void sqlite3TreeViewTrigger(TreeView*, const Trigger*, u8, u8);
+#endif
#ifndef SQLITE_OMIT_WINDOWFUNC
SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView*, const Window*, u8);
SQLITE_PRIVATE void sqlite3TreeViewWinFunc(TreeView*, const Window*, u8);
#endif
+SQLITE_PRIVATE void sqlite3ShowExpr(const Expr*);
+SQLITE_PRIVATE void sqlite3ShowExprList(const ExprList*);
+SQLITE_PRIVATE void sqlite3ShowIdList(const IdList*);
+SQLITE_PRIVATE void sqlite3ShowSrcList(const SrcList*);
+SQLITE_PRIVATE void sqlite3ShowSelect(const Select*);
+SQLITE_PRIVATE void sqlite3ShowWith(const With*);
+SQLITE_PRIVATE void sqlite3ShowUpsert(const Upsert*);
+#ifndef SQLITE_OMIT_TRIGGER
+SQLITE_PRIVATE void sqlite3ShowTriggerStep(const TriggerStep*);
+SQLITE_PRIVATE void sqlite3ShowTriggerStepList(const TriggerStep*);
+SQLITE_PRIVATE void sqlite3ShowTrigger(const Trigger*);
+SQLITE_PRIVATE void sqlite3ShowTriggerList(const Trigger*);
+#endif
+#ifndef SQLITE_OMIT_WINDOWFUNC
+SQLITE_PRIVATE void sqlite3ShowWindow(const Window*);
+SQLITE_PRIVATE void sqlite3ShowWinFunc(const Window*);
+#endif
#endif
-
SQLITE_PRIVATE void sqlite3SetString(char **, sqlite3*, const char*);
SQLITE_PRIVATE void sqlite3ErrorMsg(Parse*, const char*, ...);
@@ -19829,13 +20111,14 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(Parse*, SrcList*, int, int);
SQLITE_PRIVATE SrcList *sqlite3SrcListAppendList(Parse *pParse, SrcList *p1, SrcList *p2);
SQLITE_PRIVATE SrcList *sqlite3SrcListAppend(Parse*, SrcList*, Token*, Token*);
SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm(Parse*, SrcList*, Token*, Token*,
- Token*, Select*, Expr*, IdList*);
+ Token*, Select*, OnOrUsing*);
SQLITE_PRIVATE void sqlite3SrcListIndexedBy(Parse *, SrcList *, Token *);
SQLITE_PRIVATE void sqlite3SrcListFuncArgs(Parse*, SrcList*, ExprList*);
SQLITE_PRIVATE int sqlite3IndexedByLookup(Parse *, SrcItem *);
-SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList*);
+SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(Parse*,SrcList*);
SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse*, SrcList*);
SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3*, IdList*);
+SQLITE_PRIVATE void sqlite3ClearOnOrUsing(sqlite3*, OnOrUsing*);
SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3*, SrcList*);
SQLITE_PRIVATE Index *sqlite3AllocateIndexObject(sqlite3*,i16,int,char**);
SQLITE_PRIVATE void sqlite3CreateIndex(Parse*,Token*,Token*,SrcList*,ExprList*,int,Token*,
@@ -20033,7 +20316,8 @@ SQLITE_PRIVATE SrcList *sqlite3TriggerStepSrc(Parse*, TriggerStep*);
SQLITE_PRIVATE int sqlite3JoinType(Parse*, Token*, Token*, Token*);
SQLITE_PRIVATE int sqlite3ColumnIndex(Table *pTab, const char *zCol);
-SQLITE_PRIVATE void sqlite3SetJoinExpr(Expr*,int);
+SQLITE_PRIVATE void sqlite3SrcItemColumnUsed(SrcItem*,int);
+SQLITE_PRIVATE void sqlite3SetJoinExpr(Expr*,int,u32);
SQLITE_PRIVATE void sqlite3CreateForeignKey(Parse*, ExprList*, Token*, ExprList*, int);
SQLITE_PRIVATE void sqlite3DeferForeignKey(Parse*, int);
#ifndef SQLITE_OMIT_AUTHORIZATION
@@ -20057,6 +20341,7 @@ SQLITE_PRIVATE int sqlite3FixSelect(DbFixer*, Select*);
SQLITE_PRIVATE int sqlite3FixExpr(DbFixer*, Expr*);
SQLITE_PRIVATE int sqlite3FixTriggerStep(DbFixer*, TriggerStep*);
SQLITE_PRIVATE int sqlite3RealSameAsInt(double,sqlite3_int64);
+SQLITE_PRIVATE i64 sqlite3RealToI64(double);
SQLITE_PRIVATE void sqlite3Int64ToText(i64,char*);
SQLITE_PRIVATE int sqlite3AtoF(const char *z, double*, int, u8);
SQLITE_PRIVATE int sqlite3GetInt32(const char *, int*);
@@ -20102,6 +20387,7 @@ SQLITE_PRIVATE int sqlite3VarintLen(u64 v);
SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3*, Index*);
+SQLITE_PRIVATE char *sqlite3TableAffinityStr(sqlite3*,const Table*);
SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe*, Table*, int);
SQLITE_PRIVATE char sqlite3CompareAffinity(const Expr *pExpr, char aff2);
SQLITE_PRIVATE int sqlite3IndexAffinityOk(const Expr *pExpr, char idx_affinity);
@@ -20173,7 +20459,6 @@ SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[];
SQLITE_PRIVATE const char sqlite3StrBINARY[];
SQLITE_PRIVATE const unsigned char sqlite3StdTypeLen[];
SQLITE_PRIVATE const char sqlite3StdTypeAffinity[];
-SQLITE_PRIVATE const char sqlite3StdTypeMap[];
SQLITE_PRIVATE const char *sqlite3StdType[];
SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[];
SQLITE_PRIVATE const unsigned char *sqlite3aLTb;
@@ -20379,7 +20664,7 @@ SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *, VTable *);
SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction(sqlite3 *,FuncDef*, int nArg, Expr*);
#if (defined(SQLITE_ENABLE_DBPAGE_VTAB) || defined(SQLITE_TEST)) \
&& !defined(SQLITE_OMIT_VIRTUALTABLE)
-SQLITE_PRIVATE void sqlite3VtabWriteAll(sqlite3_index_info*);
+SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(sqlite3_index_info*);
#endif
SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context*);
SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe*, const char*, int);
@@ -20617,6 +20902,10 @@ SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse*, Expr*);
SQLITE_PRIVATE const char **sqlite3CompileOptions(int *pnOpt);
#endif
+#if SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL)
+SQLITE_PRIVATE int sqlite3KvvfsInit(void);
+#endif
+
#endif /* SQLITEINT_H */
/************** End of sqliteInt.h *******************************************/
@@ -20848,7 +21137,7 @@ SQLITE_API extern int sqlite3_open_file_count;
** autoconf-based build
*/
#if defined(_HAVE_SQLITE_CONFIG_H) && !defined(SQLITECONFIG_H)
-/* #include "config.h" */
+/* #include "sqlite_cfg.h" */
#define SQLITECONFIG_H 1
#endif
@@ -21013,6 +21302,9 @@ static const char * const sqlite3azCompileOpt[] = {
#ifdef SQLITE_DISABLE_SKIPAHEAD_DISTINCT
"DISABLE_SKIPAHEAD_DISTINCT",
#endif
+#ifdef SQLITE_DQS
+ "DQS=" CTIMEOPT_VAL(SQLITE_DQS),
+#endif
#ifdef SQLITE_ENABLE_8_3_NAMES
"ENABLE_8_3_NAMES=" CTIMEOPT_VAL(SQLITE_ENABLE_8_3_NAMES),
#endif
@@ -21127,9 +21419,6 @@ static const char * const sqlite3azCompileOpt[] = {
#ifdef SQLITE_ENABLE_RTREE
"ENABLE_RTREE",
#endif
-#ifdef SQLITE_ENABLE_SELECTTRACE
- "ENABLE_SELECTTRACE",
-#endif
#ifdef SQLITE_ENABLE_SESSION
"ENABLE_SESSION",
#endif
@@ -21151,6 +21440,9 @@ static const char * const sqlite3azCompileOpt[] = {
#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
"ENABLE_STMT_SCANSTATUS",
#endif
+#ifdef SQLITE_ENABLE_TREETRACE
+ "ENABLE_TREETRACE",
+#endif
#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION
"ENABLE_UNKNOWN_SQL_FUNCTION",
#endif
@@ -21503,9 +21795,6 @@ static const char * const sqlite3azCompileOpt[] = {
#ifdef SQLITE_OMIT_XFER_OPT
"OMIT_XFER_OPT",
#endif
-#ifdef SQLITE_PCACHE_SEPARATE_HEADER
- "PCACHE_SEPARATE_HEADER",
-#endif
#ifdef SQLITE_PERFORMANCE_TRACE
"PERFORMANCE_TRACE",
#endif
@@ -21900,6 +22189,9 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = {
0x7ffffffe, /* iOnceResetThreshold */
SQLITE_DEFAULT_SORTERREF_SIZE, /* szSorterRef */
0, /* iPrngSeed */
+#ifdef SQLITE_DEBUG
+ {0,0,0,0,0,0} /* aTune */
+#endif
};
/*
@@ -21954,7 +22246,7 @@ SQLITE_PRIVATE int sqlite3PendingByte = 0x40000000;
/*
** Tracing flags set by SQLITE_TESTCTRL_TRACEFLAGS.
*/
-SQLITE_PRIVATE u32 sqlite3SelectTrace = 0;
+SQLITE_PRIVATE u32 sqlite3TreeTrace = 0;
SQLITE_PRIVATE u32 sqlite3WhereTrace = 0;
/* #include "opcodes.h" */
@@ -21982,10 +22274,6 @@ SQLITE_PRIVATE const char sqlite3StrBINARY[] = "BINARY";
**
** sqlite3StdTypeAffinity[] The affinity associated with each entry
** in sqlite3StdType[].
-**
-** sqlite3StdTypeMap[] The type value (as returned from
-** sqlite3_column_type() or sqlite3_value_type())
-** for each entry in sqlite3StdType[].
*/
SQLITE_PRIVATE const unsigned char sqlite3StdTypeLen[] = { 3, 4, 3, 7, 4, 4 };
SQLITE_PRIVATE const char sqlite3StdTypeAffinity[] = {
@@ -21996,14 +22284,6 @@ SQLITE_PRIVATE const char sqlite3StdTypeAffinity[] = {
SQLITE_AFF_REAL,
SQLITE_AFF_TEXT
};
-SQLITE_PRIVATE const char sqlite3StdTypeMap[] = {
- 0,
- SQLITE_BLOB,
- SQLITE_INTEGER,
- SQLITE_INTEGER,
- SQLITE_FLOAT,
- SQLITE_TEXT
-};
SQLITE_PRIVATE const char *sqlite3StdType[] = {
"ANY",
"BLOB",
@@ -22121,7 +22401,7 @@ struct VdbeCursor {
Bool isEphemeral:1; /* True for an ephemeral table */
Bool useRandomRowid:1; /* Generate new record numbers semi-randomly */
Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */
- Bool hasBeenDuped:1; /* This cursor was source or target of OP_OpenDup */
+ Bool noReuse:1; /* OpenEphemeral may not reuse this cursor */
u16 seekHit; /* See the OP_SeekHit and OP_IfNoHope opcodes */
union { /* pBtx for isEphemeral. pAltMap otherwise */
Btree *pBtx; /* Separate file holding temporary table */
@@ -22169,6 +22449,11 @@ struct VdbeCursor {
u32 aType[1]; /* Type values record decode. MUST BE LAST */
};
+/* Return true if P is a null-only cursor
+*/
+#define IsNullCursor(P) \
+ ((P)->eCurType==CURTYPE_PSEUDO && (P)->nullRow && (P)->seekResult==0)
+
/*
** A value for VdbeCursor.cacheStatus that means the cache is always invalid.
@@ -22243,16 +22528,16 @@ struct sqlite3_value {
const char *zPType; /* Pointer type when MEM_Term|MEM_Subtype|MEM_Null */
FuncDef *pDef; /* Used only when flags==MEM_Agg */
} u;
+ char *z; /* String or BLOB value */
+ int n; /* Number of characters in string value, excluding '\0' */
u16 flags; /* Some combination of MEM_Null, MEM_Str, MEM_Dyn, etc. */
u8 enc; /* SQLITE_UTF8, SQLITE_UTF16BE, SQLITE_UTF16LE */
u8 eSubtype; /* Subtype for this value */
- int n; /* Number of characters in string value, excluding '\0' */
- char *z; /* String or BLOB value */
/* ShallowCopy only needs to copy the information above */
- char *zMalloc; /* Space to hold MEM_Str or MEM_Blob if szMalloc>0 */
+ sqlite3 *db; /* The associated database connection */
int szMalloc; /* Size of the zMalloc allocation */
u32 uTemp; /* Transient storage for serial_type in OP_MakeRecord */
- sqlite3 *db; /* The associated database connection */
+ char *zMalloc; /* Space to hold MEM_Str or MEM_Blob if szMalloc>0 */
void (*xDel)(void*);/* Destructor for Mem.z - only valid if MEM_Dyn */
#ifdef SQLITE_DEBUG
Mem *pScopyFrom; /* This Mem is a shallow copy of pScopyFrom */
@@ -22264,11 +22549,43 @@ struct sqlite3_value {
** Size of struct Mem not including the Mem.zMalloc member or anything that
** follows.
*/
-#define MEMCELLSIZE offsetof(Mem,zMalloc)
+#define MEMCELLSIZE offsetof(Mem,db)
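/*
** Editor's note (not part of the upstream diff): the field reordering above
** lets a shallow copy stop at MEMCELLSIZE bytes.  A minimal sketch with a
** hypothetical helper name; the real logic lives in
** sqlite3VdbeMemShallowCopy().
*/
#if 0  /* illustration only */
static void exampleShallowCopy(Mem *pTo, const Mem *pFrom){
  memcpy(pTo, pFrom, MEMCELLSIZE);  /* copies u, z, n, flags, enc, eSubtype */
}
#endif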
-/* One or more of the following flags are set to indicate the validOK
+/* One or more of the following flags are set to indicate the
** representations of the value stored in the Mem struct.
**
+** * MEM_Null An SQL NULL value
+**
+** * MEM_Null|MEM_Zero An SQL NULL with the virtual table
+** UPDATE no-change flag set
+**
+** * MEM_Null|MEM_Term| An SQL NULL, but also contains a
+** MEM_Subtype pointer accessible using
+** sqlite3_value_pointer().
+**
+** * MEM_Null|MEM_Cleared Special SQL NULL that compares non-equal
+** to other NULLs even using the IS operator.
+**
+** * MEM_Str A string, stored in Mem.z with
+** length Mem.n. Zero-terminated if
+** MEM_Term is set. This flag is
+** incompatible with MEM_Blob and
+** MEM_Null, but can appear with MEM_Int,
+** MEM_Real, and MEM_IntReal.
+**
+** * MEM_Blob A blob, stored in Mem.z length Mem.n.
+** Incompatible with MEM_Str, MEM_Null,
+** MEM_Int, MEM_Real, and MEM_IntReal.
+**
+** * MEM_Blob|MEM_Zero A blob in Mem.z of length Mem.n plus
+** Mem.u.nZero extra 0x00 bytes at the end.
+**
+** * MEM_Int Integer stored in Mem.u.i.
+**
+** * MEM_Real Real stored in Mem.u.r.
+**
+** * MEM_IntReal Real stored as an integer in Mem.u.i.
+**
** If the MEM_Null flag is set, then the value is an SQL NULL value.
** For a pointer type created using sqlite3_bind_pointer() or
** sqlite3_result_pointer() the MEM_Term and MEM_Subtype flags are also set.
@@ -22279,6 +22596,7 @@ struct sqlite3_value {
** set, then the string is nul terminated. The MEM_Int and MEM_Real
** flags may coexist with the MEM_Str flag.
*/
+#define MEM_Undefined 0x0000 /* Value is undefined */
#define MEM_Null 0x0001 /* Value is NULL (or a pointer) */
#define MEM_Str 0x0002 /* Value is a string */
#define MEM_Int 0x0004 /* Value is an integer */
@@ -22286,28 +22604,24 @@ struct sqlite3_value {
#define MEM_Blob 0x0010 /* Value is a BLOB */
#define MEM_IntReal 0x0020 /* MEM_Int that stringifies like MEM_Real */
#define MEM_AffMask 0x003f /* Mask of affinity bits */
+
+/* Extra bits that modify the meanings of the core datatypes above
+*/
#define MEM_FromBind 0x0040 /* Value originates from sqlite3_bind() */
-#define MEM_Undefined 0x0080 /* Value is undefined */
+ /* 0x0080 // Available */
#define MEM_Cleared 0x0100 /* NULL set by OP_Null, not from data */
-#define MEM_TypeMask 0xc1bf /* Mask of type bits */
-
+#define MEM_Term 0x0200 /* String in Mem.z is zero terminated */
+#define MEM_Zero 0x0400 /* Mem.i contains count of 0s appended to blob */
+#define MEM_Subtype 0x0800 /* Mem.eSubtype is valid */
+#define MEM_TypeMask 0x0dbf /* Mask of type bits */
-/* Whenever Mem contains a valid string or blob representation, one of
-** the following flags must be set to determine the memory management
-** policy for Mem.z. The MEM_Term flag tells us whether or not the
-** string is \000 or \u0000 terminated
+/* Bits that determine the storage for Mem.z for a string or blob or
+** aggregate accumulator.
*/
-#define MEM_Term 0x0200 /* String in Mem.z is zero terminated */
-#define MEM_Dyn 0x0400 /* Need to call Mem.xDel() on Mem.z */
-#define MEM_Static 0x0800 /* Mem.z points to a static string */
-#define MEM_Ephem 0x1000 /* Mem.z points to an ephemeral string */
-#define MEM_Agg 0x2000 /* Mem.z points to an agg function context */
-#define MEM_Zero 0x4000 /* Mem.i contains count of 0s appended to blob */
-#define MEM_Subtype 0x8000 /* Mem.eSubtype is valid */
-#ifdef SQLITE_OMIT_INCRBLOB
- #undef MEM_Zero
- #define MEM_Zero 0x0000
-#endif
+#define MEM_Dyn 0x1000 /* Need to call Mem.xDel() on Mem.z */
+#define MEM_Static 0x2000 /* Mem.z points to a static string */
+#define MEM_Ephem 0x4000 /* Mem.z points to an ephemeral string */
+#define MEM_Agg 0x8000 /* Mem.z points to an agg function context */
/* Return TRUE if Mem X contains dynamically allocated content - anything
** that needs to be deallocated to avoid a leak.
@@ -22329,11 +22643,15 @@ struct sqlite3_value {
&& (X)->n==0 && (X)->u.nZero==0)
/*
-** Return true if a memory cell is not marked as invalid. This macro
+** Return true if a memory cell has been initialized and is valid. This macro
** is for use inside assert() statements only.
+**
+** A Memory cell is initialized if at least one of the
+** MEM_Null, MEM_Str, MEM_Int, MEM_Real, MEM_Blob, or MEM_IntReal bits
+** is set. It is "undefined" if all those bits are zero.
*/
#ifdef SQLITE_DEBUG
-#define memIsValid(M) ((M)->flags & MEM_Undefined)==0
+#define memIsValid(M) ((M)->flags & MEM_AffMask)!=0
#endif
/*
@@ -22371,6 +22689,7 @@ struct sqlite3_context {
Vdbe *pVdbe; /* The VM that owns this context */
int iOp; /* Instruction number of OP_Function */
int isError; /* Error code returned by the function. */
+ u8 enc; /* Encoding to use for results */
u8 skipFlag; /* Skip accumulator loading if true */
u8 argc; /* Number of arguments */
sqlite3_value *argv[1]; /* Argument set */
@@ -22416,10 +22735,9 @@ struct DblquoteStr {
*/
struct Vdbe {
sqlite3 *db; /* The database connection that owns this statement */
- Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */
+ Vdbe **ppVPrev,*pVNext; /* Linked list of VDBEs with the same Vdbe.db */
Parse *pParse; /* Parsing context used to create this Vdbe */
ynVar nVar; /* Number of entries in aVar[] */
- u32 iVdbeMagic; /* Magic number defining state of the SQL statement */
int nMem; /* Number of memory locations currently allocated */
int nCursor; /* Number of slots in apCsr[] */
u32 cacheCtr; /* VdbeCursor row cache generation counter */
@@ -22457,11 +22775,10 @@ struct Vdbe {
u8 errorAction; /* Recovery action to do in case of an error */
u8 minWriteFileFormat; /* Minimum file format for writable database files */
u8 prepFlags; /* SQLITE_PREPARE_* flags */
- u8 doingRerun; /* True if rerunning after an auto-reprepare */
+ u8 eVdbeState; /* One of the VDBE_*_STATE values */
bft expired:2; /* 1: recompile VM immediately 2: when convenient */
bft explain:2; /* True if EXPLAIN present on SQL command */
bft changeCntOn:1; /* True to update the change-counter */
- bft runOnlyOnce:1; /* Automatically expire on reset */
bft usesStmtJournal:1; /* True if uses a statement journal */
bft readOnly:1; /* True for statements that do not write */
bft bIsReader:1; /* True for statements that read */
@@ -22488,13 +22805,12 @@ struct Vdbe {
};
/*
-** The following are allowed values for Vdbe.magic
+** The following are allowed values for Vdbe.eVdbeState
*/
-#define VDBE_MAGIC_INIT 0x16bceaa5 /* Building a VDBE program */
-#define VDBE_MAGIC_RUN 0x2df20da3 /* VDBE is ready to execute */
-#define VDBE_MAGIC_HALT 0x319c2973 /* VDBE has completed execution */
-#define VDBE_MAGIC_RESET 0x48fa9f76 /* Reset and ready to run again */
-#define VDBE_MAGIC_DEAD 0x5606c3c8 /* The VDBE has been deallocated */
+#define VDBE_INIT_STATE 0 /* Prepared statement under construction */
+#define VDBE_READY_STATE 1 /* Ready to run but not yet started */
+#define VDBE_RUN_STATE 2 /* Run in progress */
+#define VDBE_HALT_STATE 3 /* Finished. Need reset() or finalize() */
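/*
** Editor's note (not part of the upstream diff): the lifecycle implied by
** the state names and comments above, shown as a sketch:
**
**   VDBE_INIT_STATE --> VDBE_READY_STATE --> VDBE_RUN_STATE --> VDBE_HALT_STATE
**                             ^                                       |
**                             +--------------- reset -----------------+
*/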
/*
** Structure used to store the context required by the
@@ -22535,18 +22851,31 @@ struct ValueList {
sqlite3_value *pOut; /* Register to hold each decoded output value */
};
+/* Size of content associated with serial types that fit into a
+** single-byte varint.
+*/
+#ifndef SQLITE_AMALGAMATION
+SQLITE_PRIVATE const u8 sqlite3SmallTypeSizes[];
+#endif
+
/*
** Function prototypes
*/
SQLITE_PRIVATE void sqlite3VdbeError(Vdbe*, const char *, ...);
SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *, VdbeCursor*);
+SQLITE_PRIVATE void sqlite3VdbeFreeCursorNN(Vdbe*,VdbeCursor*);
void sqliteVdbePopStack(Vdbe*,int);
+SQLITE_PRIVATE int SQLITE_NOINLINE sqlite3VdbeHandleMovedCursor(VdbeCursor *p);
SQLITE_PRIVATE int SQLITE_NOINLINE sqlite3VdbeFinishMoveto(VdbeCursor*);
-SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor**, u32*);
SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor*);
SQLITE_PRIVATE u32 sqlite3VdbeSerialTypeLen(u32);
SQLITE_PRIVATE u8 sqlite3VdbeOneByteSerialTypeLen(u8);
-SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(unsigned char*, Mem*, u32);
+#ifdef SQLITE_MIXED_ENDIAN_64BIT_FLOAT
+SQLITE_PRIVATE u64 sqlite3FloatSwap(u64 in);
+# define swapMixedEndianFloat(X) X = sqlite3FloatSwap(X)
+#else
+# define swapMixedEndianFloat(X)
+#endif
SQLITE_PRIVATE void sqlite3VdbeSerialGet(const unsigned char*, u32, Mem*);
SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(sqlite3*, AuxData**, int, int);
@@ -22604,6 +22933,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemCast(Mem*,u8,u8);
SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(BtCursor*,u32,u32,Mem*);
SQLITE_PRIVATE int sqlite3VdbeMemFromBtreeZeroOffset(BtCursor*,u32,Mem*);
SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p);
+SQLITE_PRIVATE void sqlite3VdbeMemReleaseMalloc(Mem*p);
SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem*, FuncDef*);
#ifndef SQLITE_OMIT_WINDOWFUNC
SQLITE_PRIVATE int sqlite3VdbeMemAggValue(Mem*, Mem*, FuncDef*);
@@ -22963,6 +23293,8 @@ SQLITE_API int sqlite3_db_status(
sqlite3BtreeEnterAll(db);
db->pnBytesFreed = &nByte;
+ assert( db->lookaside.pEnd==db->lookaside.pTrueEnd );
+ db->lookaside.pEnd = db->lookaside.pStart;
for(i=0; i<db->nDb; i++){
Schema *pSchema = db->aDb[i].pSchema;
if( ALWAYS(pSchema!=0) ){
@@ -22988,6 +23320,7 @@ SQLITE_API int sqlite3_db_status(
}
}
db->pnBytesFreed = 0;
+ db->lookaside.pEnd = db->lookaside.pTrueEnd;
sqlite3BtreeLeaveAll(db);
*pHighwater = 0;
@@ -23005,10 +23338,12 @@ SQLITE_API int sqlite3_db_status(
int nByte = 0; /* Used to accumulate return value */
db->pnBytesFreed = &nByte;
- for(pVdbe=db->pVdbe; pVdbe; pVdbe=pVdbe->pNext){
- sqlite3VdbeClearObject(db, pVdbe);
- sqlite3DbFree(db, pVdbe);
+ assert( db->lookaside.pEnd==db->lookaside.pTrueEnd );
+ db->lookaside.pEnd = db->lookaside.pStart;
+ for(pVdbe=db->pVdbe; pVdbe; pVdbe=pVdbe->pVNext){
+ sqlite3VdbeDelete(pVdbe);
}
+ db->lookaside.pEnd = db->lookaside.pTrueEnd;
db->pnBytesFreed = 0;
*pHighwater = 0; /* IMP: R-64479-57858 */
@@ -23344,7 +23679,7 @@ static void computeJD(DateTime *p){
p->iJD = (sqlite3_int64)((X1 + X2 + D + B - 1524.5 ) * 86400000);
p->validJD = 1;
if( p->validHMS ){
- p->iJD += p->h*3600000 + p->m*60000 + (sqlite3_int64)(p->s*1000);
+ p->iJD += p->h*3600000 + p->m*60000 + (sqlite3_int64)(p->s*1000 + 0.5);
if( p->validTZ ){
p->iJD -= p->tz*60000;
p->validYMD = 0;
@@ -23853,7 +24188,7 @@ static int parseModifier(
*/
if( sqlite3_strnicmp(z, "weekday ", 8)==0
&& sqlite3AtoF(&z[8], &r, sqlite3Strlen30(&z[8]), SQLITE_UTF8)>0
- && (n=(int)r)==r && n>=0 && r<7 ){
+ && r>=0.0 && r<7.0 && (n=(int)r)==r ){
sqlite3_int64 Z;
computeYMD_HMS(p);
p->validTZ = 0;
@@ -24534,9 +24869,11 @@ SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file *id, i64 *pSize){
}
SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file *id, int lockType){
DO_OS_MALLOC_TEST(id);
+ assert( lockType>=SQLITE_LOCK_SHARED && lockType<=SQLITE_LOCK_EXCLUSIVE );
return id->pMethods->xLock(id, lockType);
}
SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file *id, int lockType){
+ assert( lockType==SQLITE_LOCK_NONE || lockType==SQLITE_LOCK_SHARED );
return id->pMethods->xUnlock(id, lockType);
}
SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut){
@@ -24651,6 +24988,7 @@ SQLITE_PRIVATE int sqlite3OsOpen(
** down into the VFS layer. Some SQLITE_OPEN_ flags (for example,
** SQLITE_OPEN_FULLMUTEX or SQLITE_OPEN_SHAREDCACHE) are blocked before
** reaching the VFS. */
+ assert( zPath || (flags & SQLITE_OPEN_EXCLUSIVE) );
rc = pVfs->xOpen(pVfs, zPath, pFile, flags & 0x1087f7f, pFlagsOut);
assert( rc==SQLITE_OK || pFile->pMethods==0 );
return rc;
@@ -26962,8 +27300,17 @@ static void *memsys5Realloc(void *pPrior, int nBytes){
*/
static int memsys5Roundup(int n){
int iFullSz;
- if( n > 0x40000000 ) return 0;
- for(iFullSz=mem5.szAtom; iFullSz<n; iFullSz *= 2);
+ if( n<=mem5.szAtom*2 ){
+ if( n<=mem5.szAtom ) return mem5.szAtom;
+ return mem5.szAtom*2;
+ }
+ if( n>0x10000000 ){
+ if( n>0x40000000 ) return 0;
+ if( n>0x20000000 ) return 0x40000000;
+ return 0x20000000;
+ }
+ for(iFullSz=mem5.szAtom*8; iFullSz<n; iFullSz *= 4);
+ if( (iFullSz/2)>=(i64)n ) return iFullSz/2;
return iFullSz;
}
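/*
** Editor's note (not part of the upstream diff): a worked example of the
** rounding shown in the hunk above, assuming mem5.szAtom==8; the exact
** figures are illustrative only.
**
**    n = 5           ->  8           (n <= szAtom)
**    n = 13          -> 16           (n <= szAtom*2)
**    n = 100         -> 128          (loop reaches 256, then 256/2 >= 100)
**    n = 0x30000000  -> 0x40000000   (large requests snap to fixed sizes)
**    n > 0x40000000  ->  0           (request refused)
*/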
@@ -28864,18 +29211,34 @@ static void mallocWithAlarm(int n, void **pp){
*pp = p;
}
+/*
+** Maximum size of any single memory allocation.
+**
+** This is not a limit on the total amount of memory used. This is
+** a limit on the size parameter to sqlite3_malloc() and sqlite3_realloc().
+**
+** The upper bound is slightly less than 2GiB: 0x7ffffeff == 2,147,483,391
+** This provides a 256-byte safety margin for defense against 32-bit
+** signed integer overflow bugs when computing memory allocation sizes.
+** Paranoid applications might want to reduce the maximum allocation size
+** further for an even larger safety margin. 0x3fffffff or 0x0fffffff
+** or even smaller would be reasonable upper bounds on the size of memory
+** allocations for most applications.
+*/
+#ifndef SQLITE_MAX_ALLOCATION_SIZE
+# define SQLITE_MAX_ALLOCATION_SIZE 2147483391
+#endif
+#if SQLITE_MAX_ALLOCATION_SIZE>2147483391
+# error Maximum size for SQLITE_MAX_ALLOCATION_SIZE is 2147483391
+#endif
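/*
** Editor's note (not part of the upstream diff): the safety-margin arithmetic
** from the comment above, spelled out:
**
**    0x7fffffff - 0x7ffffeff == 0x100 == 256 bytes of headroom below the
**    largest positive 32-bit signed integer
**    (2,147,483,647 - 256 == 2,147,483,391).
*/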
+
/*
** Allocate memory. This routine is like sqlite3_malloc() except that it
** assumes the memory subsystem has already been initialized.
*/
SQLITE_PRIVATE void *sqlite3Malloc(u64 n){
void *p;
- if( n==0 || n>=0x7fffff00 ){
- /* A memory allocation of a number of bytes which is near the maximum
- ** signed integer value might cause an integer overflow inside of the
- ** xMalloc(). Hence we limit the maximum size to 0x7fffff00, giving
- ** 255 bytes of overhead. SQLite itself will never use anything near
- ** this amount. The only way to reach the limit is with sqlite3_malloc() */
+ if( n==0 || n>SQLITE_MAX_ALLOCATION_SIZE ){
p = 0;
}else if( sqlite3GlobalConfig.bMemstat ){
sqlite3_mutex_enter(mem0.mutex);
@@ -28911,7 +29274,7 @@ SQLITE_API void *sqlite3_malloc64(sqlite3_uint64 n){
*/
#ifndef SQLITE_OMIT_LOOKASIDE
static int isLookaside(sqlite3 *db, const void *p){
- return SQLITE_WITHIN(p, db->lookaside.pStart, db->lookaside.pEnd);
+ return SQLITE_WITHIN(p, db->lookaside.pStart, db->lookaside.pTrueEnd);
}
#else
#define isLookaside(A,B) 0
@@ -28935,18 +29298,16 @@ static int lookasideMallocSize(sqlite3 *db, const void *p){
SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3 *db, const void *p){
assert( p!=0 );
#ifdef SQLITE_DEBUG
- if( db==0 || !isLookaside(db,p) ){
- if( db==0 ){
- assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) );
- assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
- }else{
- assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
- assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
- }
+ if( db==0 ){
+ assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) );
+ assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) );
+ }else if( !isLookaside(db,p) ){
+ assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
+ assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
}
#endif
if( db ){
- if( ((uptr)p)<(uptr)(db->lookaside.pEnd) ){
+ if( ((uptr)p)<(uptr)(db->lookaside.pTrueEnd) ){
#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
if( ((uptr)p)>=(uptr)(db->lookaside.pMiddle) ){
assert( sqlite3_mutex_held(db->mutex) );
@@ -29002,14 +29363,11 @@ SQLITE_PRIVATE void sqlite3DbFreeNN(sqlite3 *db, void *p){
assert( db==0 || sqlite3_mutex_held(db->mutex) );
assert( p!=0 );
if( db ){
- if( db->pnBytesFreed ){
- measureAllocationSize(db, p);
- return;
- }
if( ((uptr)p)<(uptr)(db->lookaside.pEnd) ){
#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
if( ((uptr)p)>=(uptr)(db->lookaside.pMiddle) ){
LookasideSlot *pBuf = (LookasideSlot*)p;
+ assert( db->pnBytesFreed==0 );
#ifdef SQLITE_DEBUG
memset(p, 0xaa, LOOKASIDE_SMALL); /* Trash freed content */
#endif
@@ -29020,6 +29378,7 @@ SQLITE_PRIVATE void sqlite3DbFreeNN(sqlite3 *db, void *p){
#endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */
if( ((uptr)p)>=(uptr)(db->lookaside.pStart) ){
LookasideSlot *pBuf = (LookasideSlot*)p;
+ assert( db->pnBytesFreed==0 );
#ifdef SQLITE_DEBUG
memset(p, 0xaa, db->lookaside.szTrue); /* Trash freed content */
#endif
@@ -29028,6 +29387,10 @@ SQLITE_PRIVATE void sqlite3DbFreeNN(sqlite3 *db, void *p){
return;
}
}
+ if( db->pnBytesFreed ){
+ measureAllocationSize(db, p);
+ return;
+ }
}
assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
@@ -29035,6 +29398,43 @@ SQLITE_PRIVATE void sqlite3DbFreeNN(sqlite3 *db, void *p){
sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
sqlite3_free(p);
}
+SQLITE_PRIVATE void sqlite3DbNNFreeNN(sqlite3 *db, void *p){
+ assert( db!=0 );
+ assert( sqlite3_mutex_held(db->mutex) );
+ assert( p!=0 );
+ if( ((uptr)p)<(uptr)(db->lookaside.pEnd) ){
+#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
+ if( ((uptr)p)>=(uptr)(db->lookaside.pMiddle) ){
+ LookasideSlot *pBuf = (LookasideSlot*)p;
+ assert( db->pnBytesFreed==0 );
+#ifdef SQLITE_DEBUG
+ memset(p, 0xaa, LOOKASIDE_SMALL); /* Trash freed content */
+#endif
+ pBuf->pNext = db->lookaside.pSmallFree;
+ db->lookaside.pSmallFree = pBuf;
+ return;
+ }
+#endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */
+ if( ((uptr)p)>=(uptr)(db->lookaside.pStart) ){
+ LookasideSlot *pBuf = (LookasideSlot*)p;
+ assert( db->pnBytesFreed==0 );
+#ifdef SQLITE_DEBUG
+ memset(p, 0xaa, db->lookaside.szTrue); /* Trash freed content */
+#endif
+ pBuf->pNext = db->lookaside.pFree;
+ db->lookaside.pFree = pBuf;
+ return;
+ }
+ }
+ if( db->pnBytesFreed ){
+ measureAllocationSize(db, p);
+ return;
+ }
+ assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
+ assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) );
+ sqlite3MemdebugSetType(p, MEMTYPE_HEAP);
+ sqlite3_free(p);
+}
SQLITE_PRIVATE void sqlite3DbFree(sqlite3 *db, void *p){
assert( db==0 || sqlite3_mutex_held(db->mutex) );
if( p ) sqlite3DbFreeNN(db, p);
@@ -29370,8 +29770,13 @@ SQLITE_PRIVATE void *sqlite3OomFault(sqlite3 *db){
}
DisableLookaside;
if( db->pParse ){
+ Parse *pParse;
sqlite3ErrorMsg(db->pParse, "out of memory");
db->pParse->rc = SQLITE_NOMEM_BKPT;
+ for(pParse=db->pParse->pOuterParse; pParse; pParse = pParse->pOuterParse){
+ pParse->nErr++;
+ pParse->rc = SQLITE_NOMEM;
+ }
}
}
return 0;
@@ -30237,8 +30642,8 @@ SQLITE_API void sqlite3_str_vappendf(
case etSQLESCAPE: /* %q: Escape ' characters */
case etSQLESCAPE2: /* %Q: Escape ' and enclose in '...' */
case etSQLESCAPE3: { /* %w: Escape " characters */
- int i, j, k, n, isnull;
- int needQuote;
+ i64 i, j, k, n;
+ int needQuote, isnull;
char ch;
char q = ((xtype==etSQLESCAPE3)?'"':'\''); /* Quote character */
char *escarg;
@@ -30318,8 +30723,14 @@ SQLITE_API void sqlite3_str_vappendf(
sqlite3_str_appendall(pAccum, pItem->zName);
}else if( pItem->zAlias ){
sqlite3_str_appendall(pAccum, pItem->zAlias);
- }else if( ALWAYS(pItem->pSelect) ){
- sqlite3_str_appendf(pAccum, "SUBQUERY %u", pItem->pSelect->selId);
+ }else{
+ Select *pSel = pItem->pSelect;
+ assert( pSel!=0 );
+ if( pSel->selFlags & SF_NestedFrom ){
+ sqlite3_str_appendf(pAccum, "(join-%u)", pSel->selId);
+ }else{
+ sqlite3_str_appendf(pAccum, "(subquery-%u)", pSel->selId);
+ }
}
length = width = 0;
break;
@@ -30382,7 +30793,9 @@ SQLITE_PRIVATE void sqlite3RecordErrorByteOffset(sqlite3 *db, const char *z){
** as the error offset.
*/
SQLITE_PRIVATE void sqlite3RecordErrorOffsetOfExpr(sqlite3 *db, const Expr *pExpr){
- while( pExpr && (ExprHasProperty(pExpr,EP_FromJoin) || pExpr->w.iOfst<=0) ){
+ while( pExpr
+ && (ExprHasProperty(pExpr,EP_OuterON|EP_InnerON) || pExpr->w.iOfst<=0)
+ ){
pExpr = pExpr->pLeft;
}
if( pExpr==0 ) return;
@@ -30842,40 +31255,44 @@ SQLITE_API void sqlite3_str_appendf(StrAccum *p, const char *zFormat, ...){
** Add a new subitem to the tree. The moreToFollow flag indicates that this
** is not the last item in the tree.
*/
-static TreeView *sqlite3TreeViewPush(TreeView *p, u8 moreToFollow){
+static void sqlite3TreeViewPush(TreeView **pp, u8 moreToFollow){
+ TreeView *p = *pp;
if( p==0 ){
- p = sqlite3_malloc64( sizeof(*p) );
- if( p==0 ) return 0;
+ *pp = p = sqlite3_malloc64( sizeof(*p) );
+ if( p==0 ) return;
memset(p, 0, sizeof(*p));
}else{
p->iLevel++;
}
assert( moreToFollow==0 || moreToFollow==1 );
- if( p->iLevel<sizeof(p->bLine) ) p->bLine[p->iLevel] = moreToFollow;
- return p;
+ if( p->iLevel<(int)sizeof(p->bLine) ) p->bLine[p->iLevel] = moreToFollow;
}
/*
** Finished with one layer of the tree
*/
-static void sqlite3TreeViewPop(TreeView *p){
+static void sqlite3TreeViewPop(TreeView **pp){
+ TreeView *p = *pp;
if( p==0 ) return;
p->iLevel--;
- if( p->iLevel<0 ) sqlite3_free(p);
+ if( p->iLevel<0 ){
+ sqlite3_free(p);
+ *pp = 0;
+ }
}
/*
** Generate a single line of output for the tree, with a prefix that contains
** all the appropriate tree lines
*/
-static void sqlite3TreeViewLine(TreeView *p, const char *zFormat, ...){
+SQLITE_PRIVATE void sqlite3TreeViewLine(TreeView *p, const char *zFormat, ...){
va_list ap;
int i;
StrAccum acc;
- char zBuf[500];
+ char zBuf[1000];
sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0);
if( p ){
- for(i=0; i<p->iLevel && i<sizeof(p->bLine)-1; i++){
+ for(i=0; i<p->iLevel && i<(int)sizeof(p->bLine)-1; i++){
sqlite3_str_append(&acc, p->bLine[i] ? "| " : " ", 4);
}
sqlite3_str_append(&acc, p->bLine[i] ? "|-- " : "'-- ", 4);
@@ -30896,10 +31313,57 @@ static void sqlite3TreeViewLine(TreeView *p, const char *zFormat, ...){
** Shorthand for starting a new tree item that consists of a single label
*/
static void sqlite3TreeViewItem(TreeView *p, const char *zLabel,u8 moreFollows){
- p = sqlite3TreeViewPush(p, moreFollows);
+ sqlite3TreeViewPush(&p, moreFollows);
sqlite3TreeViewLine(p, "%s", zLabel);
}
+/*
+** Show a list of Column objects in tree format.
+*/
+SQLITE_PRIVATE void sqlite3TreeViewColumnList(
+ TreeView *pView,
+ const Column *aCol,
+ int nCol,
+ u8 moreToFollow
+){
+ int i;
+ sqlite3TreeViewPush(&pView, moreToFollow);
+ sqlite3TreeViewLine(pView, "COLUMNS");
+ for(i=0; i<nCol; i++){
+ sqlite3TreeViewPush(&pView, i<nCol-1);
+ sqlite3TreeViewLine(pView, "%s", aCol[i].zCnName);
+ sqlite3TreeViewPop(&pView);
+ }
+ sqlite3TreeViewPop(&pView);
+}
 if( pWith->nCte>0 ){
- pView = sqlite3TreeViewPush(pView, 1);
+ sqlite3TreeViewPush(&pView, moreToFollow);
for(i=0; i<pWith->nCte; i++){
StrAccum x;
char zLine[1000];
@@ -30929,6 +31393,10 @@ SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView *pView, const With *pWith, u8 m
}
sqlite3_str_appendf(&x, ")");
}
+ if( pCte->eM10d!=M10d_Any ){
+ sqlite3_str_appendf(&x, " %sMATERIALIZED",
+ pCte->eM10d==M10d_No ? "NOT " : "");
+ }
if( pCte->pUse ){
sqlite3_str_appendf(&x, " (pUse=0x%p, nUse=%d)", pCte->pUse,
pCte->pUse->nUse);
@@ -30936,9 +31404,9 @@ SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView *pView, const With *pWith, u8 m
sqlite3StrAccumFinish(&x);
sqlite3TreeViewItem(pView, zLine, i<pWith->nCte-1);
sqlite3TreeViewSelect(pView, pCte->pSelect, 0);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
}
@@ -30947,10 +31415,12 @@ SQLITE_PRIVATE void sqlite3TreeViewWith(TreeView *pView, const With *pWith, u8 m
*/
SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc){
int i;
+ if( pSrc==0 ) return;
for(i=0; i<pSrc->nSrc; i++){
const SrcItem *pItem = &pSrc->a[i];
StrAccum x;
- char zLine[100];
+ int n = 0;
+ char zLine[1000];
sqlite3StrAccumInit(&x, 0, zLine, sizeof(zLine), 0);
x.printfFlags |= SQLITE_PRINTF_INTERNAL;
sqlite3_str_appendf(&x, "{%d:*} %!S", pItem->iCursor, pItem);
@@ -30958,26 +31428,48 @@ SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc)
sqlite3_str_appendf(&x, " tab=%Q nCol=%d ptr=%p used=%llx",
pItem->pTab->zName, pItem->pTab->nCol, pItem->pTab, pItem->colUsed);
}
- if( pItem->fg.jointype & JT_LEFT ){
+ if( (pItem->fg.jointype & (JT_LEFT|JT_RIGHT))==(JT_LEFT|JT_RIGHT) ){
+ sqlite3_str_appendf(&x, " FULL-OUTER-JOIN");
+ }else if( pItem->fg.jointype & JT_LEFT ){
sqlite3_str_appendf(&x, " LEFT-JOIN");
+ }else if( pItem->fg.jointype & JT_RIGHT ){
+ sqlite3_str_appendf(&x, " RIGHT-JOIN");
}else if( pItem->fg.jointype & JT_CROSS ){
sqlite3_str_appendf(&x, " CROSS-JOIN");
}
+ if( pItem->fg.jointype & JT_LTORJ ){
+ sqlite3_str_appendf(&x, " LTORJ");
+ }
if( pItem->fg.fromDDL ){
sqlite3_str_appendf(&x, " DDL");
}
if( pItem->fg.isCte ){
sqlite3_str_appendf(&x, " CteUse=0x%p", pItem->u2.pCteUse);
}
+ if( pItem->fg.isOn || (pItem->fg.isUsing==0 && pItem->u3.pOn!=0) ){
+ sqlite3_str_appendf(&x, " ON");
+ }
sqlite3StrAccumFinish(&x);
    sqlite3TreeViewItem(pView, zLine, i<pSrc->nSrc-1);
+ n = 0;
+ if( pItem->pSelect ) n++;
+ if( pItem->fg.isTabFunc ) n++;
+ if( pItem->fg.isUsing ) n++;
+ if( pItem->fg.isUsing ){
+ sqlite3TreeViewIdList(pView, pItem->u3.pUsing, (--n)>0, "USING");
+ }
if( pItem->pSelect ){
- sqlite3TreeViewSelect(pView, pItem->pSelect, 0);
+ if( pItem->pTab ){
+ Table *pTab = pItem->pTab;
+ sqlite3TreeViewColumnList(pView, pTab->aCol, pTab->nCol, 1);
+ }
+ assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem->pSelect) );
+ sqlite3TreeViewSelect(pView, pItem->pSelect, (--n)>0);
}
if( pItem->fg.isTabFunc ){
sqlite3TreeViewExprList(pView, pItem->u1.pFuncArg, 0, "func-args:");
}
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
}
@@ -30991,11 +31483,11 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m
sqlite3TreeViewLine(pView, "nil-SELECT");
return;
}
- pView = sqlite3TreeViewPush(pView, moreToFollow);
+ sqlite3TreeViewPush(&pView, moreToFollow);
if( p->pWith ){
sqlite3TreeViewWith(pView, p->pWith, 1);
cnt = 1;
- sqlite3TreeViewPush(pView, 1);
+ sqlite3TreeViewPush(&pView, 1);
}
do{
if( p->selFlags & SF_WhereBegin ){
@@ -31009,7 +31501,7 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m
(int)p->nSelectRow
);
}
- if( cnt++ ) sqlite3TreeViewPop(pView);
+ if( cnt++ ) sqlite3TreeViewPop(&pView);
if( p->pPrior ){
n = 1000;
}else{
@@ -31032,24 +31524,24 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m
#ifndef SQLITE_OMIT_WINDOWFUNC
if( p->pWin ){
Window *pX;
- pView = sqlite3TreeViewPush(pView, (n--)>0);
+ sqlite3TreeViewPush(&pView, (n--)>0);
sqlite3TreeViewLine(pView, "window-functions");
for(pX=p->pWin; pX; pX=pX->pNextWin){
sqlite3TreeViewWinFunc(pView, pX, pX->pNextWin!=0);
}
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
#endif
if( p->pSrc && p->pSrc->nSrc ){
- pView = sqlite3TreeViewPush(pView, (n--)>0);
+ sqlite3TreeViewPush(&pView, (n--)>0);
sqlite3TreeViewLine(pView, "FROM");
sqlite3TreeViewSrcList(pView, p->pSrc);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
if( p->pWhere ){
sqlite3TreeViewItem(pView, "WHERE", (n--)>0);
sqlite3TreeViewExpr(pView, p->pWhere, 0);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
if( p->pGroupBy ){
sqlite3TreeViewExprList(pView, p->pGroupBy, (n--)>0, "GROUPBY");
@@ -31057,7 +31549,7 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m
if( p->pHaving ){
sqlite3TreeViewItem(pView, "HAVING", (n--)>0);
sqlite3TreeViewExpr(pView, p->pHaving, 0);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
#ifndef SQLITE_OMIT_WINDOWFUNC
if( p->pWinDefn ){
@@ -31066,7 +31558,7 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m
for(pX=p->pWinDefn; pX; pX=pX->pNextWin){
sqlite3TreeViewWindow(pView, pX, pX->pNextWin!=0);
}
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
#endif
if( p->pOrderBy ){
@@ -31078,9 +31570,9 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m
if( p->pLimit->pRight ){
sqlite3TreeViewItem(pView, "OFFSET", (n--)>0);
sqlite3TreeViewExpr(pView, p->pLimit->pRight, 0);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
if( p->pPrior ){
const char *zOp = "UNION";
@@ -31093,7 +31585,7 @@ SQLITE_PRIVATE void sqlite3TreeViewSelect(TreeView *pView, const Select *p, u8 m
}
p = p->pPrior;
}while( p!=0 );
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
#ifndef SQLITE_OMIT_WINDOWFUNC
@@ -31109,24 +31601,24 @@ SQLITE_PRIVATE void sqlite3TreeViewBound(
switch( eBound ){
case TK_UNBOUNDED: {
sqlite3TreeViewItem(pView, "UNBOUNDED", moreToFollow);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
break;
}
case TK_CURRENT: {
sqlite3TreeViewItem(pView, "CURRENT", moreToFollow);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
break;
}
case TK_PRECEDING: {
sqlite3TreeViewItem(pView, "PRECEDING", moreToFollow);
sqlite3TreeViewExpr(pView, pExpr, 0);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
break;
}
case TK_FOLLOWING: {
sqlite3TreeViewItem(pView, "FOLLOWING", moreToFollow);
sqlite3TreeViewExpr(pView, pExpr, 0);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
break;
}
}
@@ -31139,12 +31631,13 @@ SQLITE_PRIVATE void sqlite3TreeViewBound(
*/
SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u8 more){
int nElement = 0;
+ if( pWin==0 ) return;
if( pWin->pFilter ){
sqlite3TreeViewItem(pView, "FILTER", 1);
sqlite3TreeViewExpr(pView, pWin->pFilter, 0);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
- pView = sqlite3TreeViewPush(pView, more);
+ sqlite3TreeViewPush(&pView, more);
if( pWin->zName ){
sqlite3TreeViewLine(pView, "OVER %s (%p)", pWin->zName, pWin);
}else{
@@ -31155,9 +31648,9 @@ SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u
if( pWin->eFrmType ) nElement++;
if( pWin->eExclude ) nElement++;
if( pWin->zBase ){
- sqlite3TreeViewPush(pView, (--nElement)>0);
+ sqlite3TreeViewPush(&pView, (--nElement)>0);
sqlite3TreeViewLine(pView, "window: %s", pWin->zBase);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
if( pWin->pPartition ){
sqlite3TreeViewExprList(pView, pWin->pPartition, nElement>0,"PARTITION-BY");
@@ -31175,7 +31668,7 @@ SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u
sqlite3TreeViewItem(pView, zBuf, (--nElement)>0);
sqlite3TreeViewBound(pView, pWin->eStart, pWin->pStart, 1);
sqlite3TreeViewBound(pView, pWin->eEnd, pWin->pEnd, 0);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
if( pWin->eExclude ){
char zBuf[30];
@@ -31190,11 +31683,11 @@ SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u
zExclude = zBuf;
break;
}
- sqlite3TreeViewPush(pView, 0);
+ sqlite3TreeViewPush(&pView, 0);
sqlite3TreeViewLine(pView, "EXCLUDE %s", zExclude);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
#endif /* SQLITE_OMIT_WINDOWFUNC */
@@ -31203,11 +31696,12 @@ SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u
** Generate a human-readable explanation for a Window Function object
*/
SQLITE_PRIVATE void sqlite3TreeViewWinFunc(TreeView *pView, const Window *pWin, u8 more){
- pView = sqlite3TreeViewPush(pView, more);
+ if( pWin==0 ) return;
+ sqlite3TreeViewPush(&pView, more);
sqlite3TreeViewLine(pView, "WINFUNC %s(%d)",
- pWin->pFunc->zName, pWin->pFunc->nArg);
+ pWin->pWFunc->zName, pWin->pWFunc->nArg);
sqlite3TreeViewWindow(pView, pWin, 0);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
#endif /* SQLITE_OMIT_WINDOWFUNC */
@@ -31218,10 +31712,10 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
const char *zBinOp = 0; /* Binary operator */
const char *zUniOp = 0; /* Unary operator */
char zFlgs[200];
- pView = sqlite3TreeViewPush(pView, moreToFollow);
+ sqlite3TreeViewPush(&pView, moreToFollow);
if( pExpr==0 ){
sqlite3TreeViewLine(pView, "nil");
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
return;
}
if( pExpr->flags || pExpr->affExpr || pExpr->vvaFlags ){
@@ -31229,8 +31723,11 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
sqlite3StrAccumInit(&x, 0, zFlgs, sizeof(zFlgs), 0);
sqlite3_str_appendf(&x, " fg.af=%x.%c",
pExpr->flags, pExpr->affExpr ? pExpr->affExpr : 'n');
- if( ExprHasProperty(pExpr, EP_FromJoin) ){
- sqlite3_str_appendf(&x, " iRJT=%d", pExpr->w.iRightJoinTable);
+ if( ExprHasProperty(pExpr, EP_OuterON) ){
+ sqlite3_str_appendf(&x, " outer.iJoin=%d", pExpr->w.iJoin);
+ }
+ if( ExprHasProperty(pExpr, EP_InnerON) ){
+ sqlite3_str_appendf(&x, " inner.iJoin=%d", pExpr->w.iJoin);
}
if( ExprHasProperty(pExpr, EP_FromDDL) ){
sqlite3_str_appendf(&x, " DDL");
@@ -31454,7 +31951,17 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
break;
}
case TK_IN: {
- sqlite3TreeViewLine(pView, "IN flags=0x%x", pExpr->flags);
+ sqlite3_str *pStr = sqlite3_str_new(0);
+ char *z;
+ sqlite3_str_appendf(pStr, "IN flags=0x%x", pExpr->flags);
+ if( pExpr->iTable ) sqlite3_str_appendf(pStr, " iTable=%d",pExpr->iTable);
+ if( ExprHasProperty(pExpr, EP_Subrtn) ){
+ sqlite3_str_appendf(pStr, " subrtn(%d,%d)",
+ pExpr->y.sub.regReturn, pExpr->y.sub.iAddr);
+ }
+ z = sqlite3_str_finish(pStr);
+ sqlite3TreeViewLine(pView, z);
+ sqlite3_free(z);
sqlite3TreeViewExpr(pView, pExpr->pLeft, 1);
if( ExprUseXSelect(pExpr) ){
sqlite3TreeViewSelect(pView, pExpr->x.pSelect, 0);
@@ -31578,7 +32085,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
sqlite3TreeViewLine(pView, "%s%s", zUniOp, zFlgs);
sqlite3TreeViewExpr(pView, pExpr->pLeft, 0);
}
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
@@ -31600,13 +32107,25 @@ SQLITE_PRIVATE void sqlite3TreeViewBareExprList(
int j = pList->a[i].u.x.iOrderByCol;
char *zName = pList->a[i].zEName;
    int moreToFollow = i<pList->nExpr - 1;
- if( pList->a[i].eEName!=ENAME_NAME ) zName = 0;
if( j || zName ){
- sqlite3TreeViewPush(pView, moreToFollow);
+ sqlite3TreeViewPush(&pView, moreToFollow);
moreToFollow = 0;
sqlite3TreeViewLine(pView, 0);
if( zName ){
- fprintf(stdout, "AS %s ", zName);
+ switch( pList->a[i].fg.eEName ){
+ default:
+ fprintf(stdout, "AS %s ", zName);
+ break;
+ case ENAME_TAB:
+ fprintf(stdout, "TABLE-ALIAS-NAME(\"%s\") ", zName);
+ if( pList->a[i].fg.bUsed ) fprintf(stdout, "(used) ");
+ if( pList->a[i].fg.bUsingTerm ) fprintf(stdout, "(USING-term) ");
+ if( pList->a[i].fg.bNoExpand ) fprintf(stdout, "(NoExpand) ");
+ break;
+ case ENAME_SPAN:
+ fprintf(stdout, "SPAN(\"%s\") ", zName);
+ break;
+ }
}
if( j ){
fprintf(stdout, "iOrderByCol=%d", j);
@@ -31616,7 +32135,7 @@ SQLITE_PRIVATE void sqlite3TreeViewBareExprList(
}
sqlite3TreeViewExpr(pView, pList->a[i].pExpr, moreToFollow);
if( j || zName ){
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
}
}
}
@@ -31627,10 +32146,377 @@ SQLITE_PRIVATE void sqlite3TreeViewExprList(
u8 moreToFollow,
const char *zLabel
){
- pView = sqlite3TreeViewPush(pView, moreToFollow);
+ sqlite3TreeViewPush(&pView, moreToFollow);
sqlite3TreeViewBareExprList(pView, pList, zLabel);
- sqlite3TreeViewPop(pView);
+ sqlite3TreeViewPop(&pView);
+}
+
+/*
+** Generate a human-readable explanation of an id-list.
+*/
+SQLITE_PRIVATE void sqlite3TreeViewBareIdList(
+ TreeView *pView,
+ const IdList *pList,
+ const char *zLabel
+){
+ if( zLabel==0 || zLabel[0]==0 ) zLabel = "LIST";
+ if( pList==0 ){
+ sqlite3TreeViewLine(pView, "%s (empty)", zLabel);
+ }else{
+ int i;
+ sqlite3TreeViewLine(pView, "%s", zLabel);
+    for(i=0; i<pList->nId; i++){
+ char *zName = pList->a[i].zName;
+      int moreToFollow = i<pList->nId - 1;
+ if( zName==0 ) zName = "(null)";
+ sqlite3TreeViewPush(&pView, moreToFollow);
+ sqlite3TreeViewLine(pView, 0);
+ if( pList->eU4==EU4_NONE ){
+ fprintf(stdout, "%s\n", zName);
+ }else if( pList->eU4==EU4_IDX ){
+ fprintf(stdout, "%s (%d)\n", zName, pList->a[i].u4.idx);
+ }else{
+ assert( pList->eU4==EU4_EXPR );
+ if( pList->a[i].u4.pExpr==0 ){
+ fprintf(stdout, "%s (pExpr=NULL)\n", zName);
+ }else{
+ fprintf(stdout, "%s\n", zName);
+          sqlite3TreeViewPush(&pView, i<pList->nId-1);
+ sqlite3TreeViewExpr(pView, pList->a[i].u4.pExpr, 0);
+ sqlite3TreeViewPop(&pView);
+ }
+ }
+ sqlite3TreeViewPop(&pView);
+ }
+ }
+}
+SQLITE_PRIVATE void sqlite3TreeViewIdList(
+ TreeView *pView,
+ const IdList *pList,
+ u8 moreToFollow,
+ const char *zLabel
+){
+ sqlite3TreeViewPush(&pView, moreToFollow);
+ sqlite3TreeViewBareIdList(pView, pList, zLabel);
+ sqlite3TreeViewPop(&pView);
+}
+
+/*
+** Generate a human-readable explanation of a list of Upsert objects
+*/
+SQLITE_PRIVATE void sqlite3TreeViewUpsert(
+ TreeView *pView,
+ const Upsert *pUpsert,
+ u8 moreToFollow
+){
+ if( pUpsert==0 ) return;
+ sqlite3TreeViewPush(&pView, moreToFollow);
+ while( pUpsert ){
+ int n;
+ sqlite3TreeViewPush(&pView, pUpsert->pNextUpsert!=0 || moreToFollow);
+ sqlite3TreeViewLine(pView, "ON CONFLICT DO %s",
+ pUpsert->isDoUpdate ? "UPDATE" : "NOTHING");
+ n = (pUpsert->pUpsertSet!=0) + (pUpsert->pUpsertWhere!=0);
+ sqlite3TreeViewExprList(pView, pUpsert->pUpsertTarget, (n--)>0, "TARGET");
+ sqlite3TreeViewExprList(pView, pUpsert->pUpsertSet, (n--)>0, "SET");
+ if( pUpsert->pUpsertWhere ){
+ sqlite3TreeViewItem(pView, "WHERE", (n--)>0);
+ sqlite3TreeViewExpr(pView, pUpsert->pUpsertWhere, 0);
+ sqlite3TreeViewPop(&pView);
+ }
+ sqlite3TreeViewPop(&pView);
+ pUpsert = pUpsert->pNextUpsert;
+ }
+ sqlite3TreeViewPop(&pView);
+}
+
+#if TREETRACE_ENABLED
+/*
+** Generate a human-readable diagram of the data structures that go
+** into generating a DELETE statement.
+*/
+SQLITE_PRIVATE void sqlite3TreeViewDelete(
+ const With *pWith,
+ const SrcList *pTabList,
+ const Expr *pWhere,
+ const ExprList *pOrderBy,
+ const Expr *pLimit,
+ const Trigger *pTrigger
+){
+ int n = 0;
+ TreeView *pView = 0;
+ sqlite3TreeViewPush(&pView, 0);
+ sqlite3TreeViewLine(pView, "DELETE");
+ if( pWith ) n++;
+ if( pTabList ) n++;
+ if( pWhere ) n++;
+ if( pOrderBy ) n++;
+ if( pLimit ) n++;
+ if( pTrigger ) n++;
+ if( pWith ){
+ sqlite3TreeViewPush(&pView, (--n)>0);
+ sqlite3TreeViewWith(pView, pWith, 0);
+ sqlite3TreeViewPop(&pView);
+ }
+ if( pTabList ){
+ sqlite3TreeViewPush(&pView, (--n)>0);
+ sqlite3TreeViewLine(pView, "FROM");
+ sqlite3TreeViewSrcList(pView, pTabList);
+ sqlite3TreeViewPop(&pView);
+ }
+ if( pWhere ){
+ sqlite3TreeViewPush(&pView, (--n)>0);
+ sqlite3TreeViewLine(pView, "WHERE");
+ sqlite3TreeViewExpr(pView, pWhere, 0);
+ sqlite3TreeViewPop(&pView);
+ }
+ if( pOrderBy ){
+ sqlite3TreeViewExprList(pView, pOrderBy, (--n)>0, "ORDER-BY");
+ }
+ if( pLimit ){
+ sqlite3TreeViewPush(&pView, (--n)>0);
+ sqlite3TreeViewLine(pView, "LIMIT");
+ sqlite3TreeViewExpr(pView, pLimit, 0);
+ sqlite3TreeViewPop(&pView);
+ }
+ if( pTrigger ){
+ sqlite3TreeViewTrigger(pView, pTrigger, (--n)>0, 1);
+ }
+ sqlite3TreeViewPop(&pView);
}
+#endif /* TREETRACE_ENABLED */
+
+#if TREETRACE_ENABLED
+/*
+** Generate a human-readable diagram of the data structures that go
+** into generating an INSERT statement.
+*/
+SQLITE_PRIVATE void sqlite3TreeViewInsert(
+ const With *pWith,
+ const SrcList *pTabList,
+ const IdList *pColumnList,
+ const Select *pSelect,
+ const ExprList *pExprList,
+ int onError,
+ const Upsert *pUpsert,
+ const Trigger *pTrigger
+){
+ TreeView *pView = 0;
+ int n = 0;
+ const char *zLabel = "INSERT";
+ switch( onError ){
+ case OE_Replace: zLabel = "REPLACE"; break;
+ case OE_Ignore: zLabel = "INSERT OR IGNORE"; break;
+ case OE_Rollback: zLabel = "INSERT OR ROLLBACK"; break;
+ case OE_Abort: zLabel = "INSERT OR ABORT"; break;
+ case OE_Fail: zLabel = "INSERT OR FAIL"; break;
+ }
+ sqlite3TreeViewPush(&pView, 0);
+ sqlite3TreeViewLine(pView, zLabel);
+ if( pWith ) n++;
+ if( pTabList ) n++;
+ if( pColumnList ) n++;
+ if( pSelect ) n++;
+ if( pExprList ) n++;
+ if( pUpsert ) n++;
+ if( pTrigger ) n++;
+ if( pWith ){
+ sqlite3TreeViewPush(&pView, (--n)>0);
+ sqlite3TreeViewWith(pView, pWith, 0);
+ sqlite3TreeViewPop(&pView);
+ }
+ if( pTabList ){
+ sqlite3TreeViewPush(&pView, (--n)>0);
+ sqlite3TreeViewLine(pView, "INTO");
+ sqlite3TreeViewSrcList(pView, pTabList);
+ sqlite3TreeViewPop(&pView);
+ }
+ if( pColumnList ){
+ sqlite3TreeViewIdList(pView, pColumnList, (--n)>0, "COLUMNS");
+ }
+ if( pSelect ){
+ sqlite3TreeViewPush(&pView, (--n)>0);
+ sqlite3TreeViewLine(pView, "DATA-SOURCE");
+ sqlite3TreeViewSelect(pView, pSelect, 0);
+ sqlite3TreeViewPop(&pView);
+ }
+ if( pExprList ){
+ sqlite3TreeViewExprList(pView, pExprList, (--n)>0, "VALUES");
+ }
+ if( pUpsert ){
+ sqlite3TreeViewPush(&pView, (--n)>0);
+ sqlite3TreeViewLine(pView, "UPSERT");
+ sqlite3TreeViewUpsert(pView, pUpsert, 0);
+ sqlite3TreeViewPop(&pView);
+ }
+ if( pTrigger ){
+ sqlite3TreeViewTrigger(pView, pTrigger, (--n)>0, 1);
+ }
+ sqlite3TreeViewPop(&pView);
+}
+#endif /* TREETRACE_ENABLED */
+
+#if TREETRACE_ENABLED
+/*
+** Generate a human-readable diagram of the data structures that go
+** into generating an UPDATE statement.
+*/
+SQLITE_PRIVATE void sqlite3TreeViewUpdate(
+ const With *pWith,
+ const SrcList *pTabList,
+ const ExprList *pChanges,
+ const Expr *pWhere,
+ int onError,
+ const ExprList *pOrderBy,
+ const Expr *pLimit,
+ const Upsert *pUpsert,
+ const Trigger *pTrigger
+){
+ int n = 0;
+ TreeView *pView = 0;
+ const char *zLabel = "UPDATE";
+ switch( onError ){
+ case OE_Replace: zLabel = "UPDATE OR REPLACE"; break;
+ case OE_Ignore: zLabel = "UPDATE OR IGNORE"; break;
+ case OE_Rollback: zLabel = "UPDATE OR ROLLBACK"; break;
+ case OE_Abort: zLabel = "UPDATE OR ABORT"; break;
+ case OE_Fail: zLabel = "UPDATE OR FAIL"; break;
+ }
+ sqlite3TreeViewPush(&pView, 0);
+ sqlite3TreeViewLine(pView, zLabel);
+ if( pWith ) n++;
+ if( pTabList ) n++;
+ if( pChanges ) n++;
+ if( pWhere ) n++;
+ if( pOrderBy ) n++;
+ if( pLimit ) n++;
+ if( pUpsert ) n++;
+ if( pTrigger ) n++;
+ if( pWith ){
+ sqlite3TreeViewPush(&pView, (--n)>0);
+ sqlite3TreeViewWith(pView, pWith, 0);
+ sqlite3TreeViewPop(&pView);
+ }
+ if( pTabList ){
+ sqlite3TreeViewPush(&pView, (--n)>0);
+ sqlite3TreeViewLine(pView, "FROM");
+ sqlite3TreeViewSrcList(pView, pTabList);
+ sqlite3TreeViewPop(&pView);
+ }
+ if( pChanges ){
+ sqlite3TreeViewExprList(pView, pChanges, (--n)>0, "SET");
+ }
+ if( pWhere ){
+ sqlite3TreeViewPush(&pView, (--n)>0);
+ sqlite3TreeViewLine(pView, "WHERE");
+ sqlite3TreeViewExpr(pView, pWhere, 0);
+ sqlite3TreeViewPop(&pView);
+ }
+ if( pOrderBy ){
+ sqlite3TreeViewExprList(pView, pOrderBy, (--n)>0, "ORDER-BY");
+ }
+ if( pLimit ){
+ sqlite3TreeViewPush(&pView, (--n)>0);
+ sqlite3TreeViewLine(pView, "LIMIT");
+ sqlite3TreeViewExpr(pView, pLimit, 0);
+ sqlite3TreeViewPop(&pView);
+ }
+ if( pUpsert ){
+ sqlite3TreeViewPush(&pView, (--n)>0);
+ sqlite3TreeViewLine(pView, "UPSERT");
+ sqlite3TreeViewUpsert(pView, pUpsert, 0);
+ sqlite3TreeViewPop(&pView);
+ }
+ if( pTrigger ){
+ sqlite3TreeViewTrigger(pView, pTrigger, (--n)>0, 1);
+ }
+ sqlite3TreeViewPop(&pView);
+}
+#endif /* TREETRACE_ENABLED */
+
+#ifndef SQLITE_OMIT_TRIGGER
+/*
+** Show a human-readable graph of a TriggerStep
+*/
+SQLITE_PRIVATE void sqlite3TreeViewTriggerStep(
+ TreeView *pView,
+ const TriggerStep *pStep,
+ u8 moreToFollow,
+ u8 showFullList
+){
+ int cnt = 0;
+ if( pStep==0 ) return;
+ sqlite3TreeViewPush(&pView,
+ moreToFollow || (showFullList && pStep->pNext!=0));
+ do{
+ if( cnt++ && pStep->pNext==0 ){
+ sqlite3TreeViewPop(&pView);
+ sqlite3TreeViewPush(&pView, 0);
+ }
+ sqlite3TreeViewLine(pView, "%s", pStep->zSpan ? pStep->zSpan : "RETURNING");
+ }while( showFullList && (pStep = pStep->pNext)!=0 );
+ sqlite3TreeViewPop(&pView);
+}
+
+/*
+** Show a human-readable graph of a Trigger
+*/
+SQLITE_PRIVATE void sqlite3TreeViewTrigger(
+ TreeView *pView,
+ const Trigger *pTrigger,
+ u8 moreToFollow,
+ u8 showFullList
+){
+ int cnt = 0;
+ if( pTrigger==0 ) return;
+ sqlite3TreeViewPush(&pView,
+ moreToFollow || (showFullList && pTrigger->pNext!=0));
+ do{
+ if( cnt++ && pTrigger->pNext==0 ){
+ sqlite3TreeViewPop(&pView);
+ sqlite3TreeViewPush(&pView, 0);
+ }
+ sqlite3TreeViewLine(pView, "TRIGGER %s", pTrigger->zName);
+ sqlite3TreeViewPush(&pView, 0);
+ sqlite3TreeViewTriggerStep(pView, pTrigger->step_list, 0, 1);
+ sqlite3TreeViewPop(&pView);
+ }while( showFullList && (pTrigger = pTrigger->pNext)!=0 );
+ sqlite3TreeViewPop(&pView);
+}
+#endif /* SQLITE_OMIT_TRIGGER */
+
+
+/*
+** These simplified versions of the tree-view routines omit unnecessary
+** parameters. These variants are intended to be used from a symbolic
+** debugger, such as "gdb", during interactive debugging sessions.
+**
+** These routines are given external linkage so that they will always be
+** accessible to the debugger, and to avoid warnings about unused
+** functions. But these routines only exist in debugging builds, so they
+** do not contaminate the interface.
+*/
+SQLITE_PRIVATE void sqlite3ShowExpr(const Expr *p){ sqlite3TreeViewExpr(0,p,0); }
+SQLITE_PRIVATE void sqlite3ShowExprList(const ExprList *p){ sqlite3TreeViewExprList(0,p,0,0);}
+SQLITE_PRIVATE void sqlite3ShowIdList(const IdList *p){ sqlite3TreeViewIdList(0,p,0,0); }
+SQLITE_PRIVATE void sqlite3ShowSrcList(const SrcList *p){ sqlite3TreeViewSrcList(0,p); }
+SQLITE_PRIVATE void sqlite3ShowSelect(const Select *p){ sqlite3TreeViewSelect(0,p,0); }
+SQLITE_PRIVATE void sqlite3ShowWith(const With *p){ sqlite3TreeViewWith(0,p,0); }
+SQLITE_PRIVATE void sqlite3ShowUpsert(const Upsert *p){ sqlite3TreeViewUpsert(0,p,0); }
+#ifndef SQLITE_OMIT_TRIGGER
+SQLITE_PRIVATE void sqlite3ShowTriggerStep(const TriggerStep *p){
+ sqlite3TreeViewTriggerStep(0,p,0,0);
+}
+SQLITE_PRIVATE void sqlite3ShowTriggerStepList(const TriggerStep *p){
+ sqlite3TreeViewTriggerStep(0,p,0,1);
+}
+SQLITE_PRIVATE void sqlite3ShowTrigger(const Trigger *p){ sqlite3TreeViewTrigger(0,p,0,0); }
+SQLITE_PRIVATE void sqlite3ShowTriggerList(const Trigger *p){ sqlite3TreeViewTrigger(0,p,0,1);}
+#endif
+#ifndef SQLITE_OMIT_WINDOWFUNC
+SQLITE_PRIVATE void sqlite3ShowWindow(const Window *p){ sqlite3TreeViewWindow(0,p,0); }
+SQLITE_PRIVATE void sqlite3ShowWinFunc(const Window *p){ sqlite3TreeViewWinFunc(0,p,0); }
+#endif
#endif /* SQLITE_DEBUG */
@@ -31660,16 +32546,41 @@ SQLITE_PRIVATE void sqlite3TreeViewExprList(
** This structure is the current state of the generator.
*/
static SQLITE_WSD struct sqlite3PrngType {
- unsigned char isInit; /* True if initialized */
- unsigned char i, j; /* State variables */
- unsigned char s[256]; /* State variables */
+ u32 s[16]; /* 64 bytes of chacha20 state */
+ u8 out[64]; /* Output bytes */
+ u8 n; /* Output bytes remaining */
} sqlite3Prng;
+
+/* The RFC-7539 ChaCha20 block function
+*/
+#define ROTL(a,b) (((a) << (b)) | ((a) >> (32 - (b))))
+#define QR(a, b, c, d) ( \
+ a += b, d ^= a, d = ROTL(d,16), \
+ c += d, b ^= c, b = ROTL(b,12), \
+ a += b, d ^= a, d = ROTL(d, 8), \
+ c += d, b ^= c, b = ROTL(b, 7))
+static void chacha_block(u32 *out, const u32 *in){
+ int i;
+ u32 x[16];
+ memcpy(x, in, 64);
+ for(i=0; i<10; i++){
+ QR(x[0], x[4], x[ 8], x[12]);
+ QR(x[1], x[5], x[ 9], x[13]);
+ QR(x[2], x[6], x[10], x[14]);
+ QR(x[3], x[7], x[11], x[15]);
+ QR(x[0], x[5], x[10], x[15]);
+ QR(x[1], x[6], x[11], x[12]);
+ QR(x[2], x[7], x[ 8], x[13]);
+ QR(x[3], x[4], x[ 9], x[14]);
+ }
+ for(i=0; i<16; i++) out[i] = x[i]+in[i];
+}
+
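+/*
+** How the generator below uses chacha_block(): s[0..3] hold the four
+** RFC-7539 constants, the remaining words hold seed material obtained
+** from the VFS, and s[12] serves as the 32-bit block counter.  Each
+** 64-byte block of output is produced by
+**
+**      wsdPrng.s[12]++;                             (advance the counter)
+**      chacha_block((u32*)wsdPrng.out, wsdPrng.s);  (64 fresh bytes)
+**
+** and sqlite3_randomness() then hands those bytes out a few at a time.
+*/
+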
/*
** Return N random bytes.
*/
SQLITE_API void sqlite3_randomness(int N, void *pBuf){
- unsigned char t;
unsigned char *zBuf = pBuf;
/* The "wsdPrng" macro will resolve to the pseudo-random number generator
@@ -31699,53 +32610,46 @@ SQLITE_API void sqlite3_randomness(int N, void *pBuf){
sqlite3_mutex_enter(mutex);
if( N<=0 || pBuf==0 ){
- wsdPrng.isInit = 0;
+ wsdPrng.s[0] = 0;
sqlite3_mutex_leave(mutex);
return;
}
/* Initialize the state of the random number generator once,
- ** the first time this routine is called. The seed value does
- ** not need to contain a lot of randomness since we are not
- ** trying to do secure encryption or anything like that...
- **
- ** Nothing in this file or anywhere else in SQLite does any kind of
- ** encryption. The RC4 algorithm is being used as a PRNG (pseudo-random
- ** number generator) not as an encryption device.
+ ** the first time this routine is called.
*/
- if( !wsdPrng.isInit ){
+ if( wsdPrng.s[0]==0 ){
sqlite3_vfs *pVfs = sqlite3_vfs_find(0);
- int i;
- char k[256];
- wsdPrng.j = 0;
- wsdPrng.i = 0;
+ static const u32 chacha20_init[] = {
+ 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574
+ };
+ memcpy(&wsdPrng.s[0], chacha20_init, 16);
if( NEVER(pVfs==0) ){
- memset(k, 0, sizeof(k));
+ memset(&wsdPrng.s[4], 0, 44);
}else{
- sqlite3OsRandomness(pVfs, 256, k);
- }
- for(i=0; i<256; i++){
- wsdPrng.s[i] = (u8)i;
- }
- for(i=0; i<256; i++){
- wsdPrng.j += wsdPrng.s[i] + k[i];
- t = wsdPrng.s[wsdPrng.j];
- wsdPrng.s[wsdPrng.j] = wsdPrng.s[i];
- wsdPrng.s[i] = t;
+ sqlite3OsRandomness(pVfs, 44, (char*)&wsdPrng.s[4]);
}
- wsdPrng.isInit = 1;
+ wsdPrng.s[15] = wsdPrng.s[12];
+ wsdPrng.s[12] = 0;
+ wsdPrng.n = 0;
}
assert( N>0 );
- do{
- wsdPrng.i++;
- t = wsdPrng.s[wsdPrng.i];
- wsdPrng.j += t;
- wsdPrng.s[wsdPrng.i] = wsdPrng.s[wsdPrng.j];
- wsdPrng.s[wsdPrng.j] = t;
- t += wsdPrng.s[wsdPrng.i];
- *(zBuf++) = wsdPrng.s[t];
- }while( --N );
+ while( 1 /* exit by break */ ){
+ if( N<=wsdPrng.n ){
+ memcpy(zBuf, &wsdPrng.out[wsdPrng.n-N], N);
+ wsdPrng.n -= N;
+ break;
+ }
+ if( wsdPrng.n>0 ){
+ memcpy(zBuf, wsdPrng.out, wsdPrng.n);
+ N -= wsdPrng.n;
+ zBuf += wsdPrng.n;
+ }
+ wsdPrng.s[12]++;
+ chacha_block((u32*)wsdPrng.out, wsdPrng.s);
+ wsdPrng.n = 64;
+ }
sqlite3_mutex_leave(mutex);
}
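+
+/*
+** Minimal usage sketch for the interface above:
+**
+**      unsigned char nonce[16];
+**      sqlite3_randomness(sizeof(nonce), nonce);   -- fill 16 random bytes
+**      sqlite3_randomness(0, 0);                   -- force a reseed on the
+**                                                     next call
+*/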
@@ -32785,7 +33689,7 @@ SQLITE_PRIVATE void sqlite3ErrorMsg(Parse *pParse, const char *zFormat, ...){
va_list ap;
sqlite3 *db = pParse->db;
assert( db!=0 );
- assert( db->pParse==pParse );
+ assert( db->pParse==pParse || db->pParse->pToplevel==pParse );
db->errByteOffset = -2;
va_start(ap, zFormat);
zMsg = sqlite3VMPrintf(db, zFormat, ap);
@@ -34598,53 +35502,53 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
/* 0 */ "Savepoint" OpHelp(""),
/* 1 */ "AutoCommit" OpHelp(""),
/* 2 */ "Transaction" OpHelp(""),
- /* 3 */ "SorterNext" OpHelp(""),
- /* 4 */ "Prev" OpHelp(""),
- /* 5 */ "Next" OpHelp(""),
- /* 6 */ "Checkpoint" OpHelp(""),
- /* 7 */ "JournalMode" OpHelp(""),
- /* 8 */ "Vacuum" OpHelp(""),
- /* 9 */ "VFilter" OpHelp("iplan=r[P3] zplan='P4'"),
- /* 10 */ "VUpdate" OpHelp("data=r[P3@P2]"),
- /* 11 */ "Goto" OpHelp(""),
- /* 12 */ "Gosub" OpHelp(""),
- /* 13 */ "InitCoroutine" OpHelp(""),
- /* 14 */ "Yield" OpHelp(""),
- /* 15 */ "MustBeInt" OpHelp(""),
- /* 16 */ "Jump" OpHelp(""),
- /* 17 */ "Once" OpHelp(""),
- /* 18 */ "If" OpHelp(""),
+ /* 3 */ "Checkpoint" OpHelp(""),
+ /* 4 */ "JournalMode" OpHelp(""),
+ /* 5 */ "Vacuum" OpHelp(""),
+ /* 6 */ "VFilter" OpHelp("iplan=r[P3] zplan='P4'"),
+ /* 7 */ "VUpdate" OpHelp("data=r[P3@P2]"),
+ /* 8 */ "Init" OpHelp("Start at P2"),
+ /* 9 */ "Goto" OpHelp(""),
+ /* 10 */ "Gosub" OpHelp(""),
+ /* 11 */ "InitCoroutine" OpHelp(""),
+ /* 12 */ "Yield" OpHelp(""),
+ /* 13 */ "MustBeInt" OpHelp(""),
+ /* 14 */ "Jump" OpHelp(""),
+ /* 15 */ "Once" OpHelp(""),
+ /* 16 */ "If" OpHelp(""),
+ /* 17 */ "IfNot" OpHelp(""),
+ /* 18 */ "IsType" OpHelp("if typeof(P1.P3) in P5 goto P2"),
/* 19 */ "Not" OpHelp("r[P2]= !r[P1]"),
- /* 20 */ "IfNot" OpHelp(""),
- /* 21 */ "IsNullOrType" OpHelp("if typeof(r[P1]) IN (P3,5) goto P2"),
- /* 22 */ "IfNullRow" OpHelp("if P1.nullRow then r[P3]=NULL, goto P2"),
- /* 23 */ "SeekLT" OpHelp("key=r[P3@P4]"),
- /* 24 */ "SeekLE" OpHelp("key=r[P3@P4]"),
- /* 25 */ "SeekGE" OpHelp("key=r[P3@P4]"),
- /* 26 */ "SeekGT" OpHelp("key=r[P3@P4]"),
- /* 27 */ "IfNotOpen" OpHelp("if( !csr[P1] ) goto P2"),
- /* 28 */ "IfNoHope" OpHelp("key=r[P3@P4]"),
- /* 29 */ "NoConflict" OpHelp("key=r[P3@P4]"),
- /* 30 */ "NotFound" OpHelp("key=r[P3@P4]"),
- /* 31 */ "Found" OpHelp("key=r[P3@P4]"),
- /* 32 */ "SeekRowid" OpHelp("intkey=r[P3]"),
- /* 33 */ "NotExists" OpHelp("intkey=r[P3]"),
- /* 34 */ "Last" OpHelp(""),
- /* 35 */ "IfSmaller" OpHelp(""),
- /* 36 */ "SorterSort" OpHelp(""),
- /* 37 */ "Sort" OpHelp(""),
- /* 38 */ "Rewind" OpHelp(""),
- /* 39 */ "IdxLE" OpHelp("key=r[P3@P4]"),
- /* 40 */ "IdxGT" OpHelp("key=r[P3@P4]"),
- /* 41 */ "IdxLT" OpHelp("key=r[P3@P4]"),
- /* 42 */ "IdxGE" OpHelp("key=r[P3@P4]"),
+ /* 20 */ "IfNullRow" OpHelp("if P1.nullRow then r[P3]=NULL, goto P2"),
+ /* 21 */ "SeekLT" OpHelp("key=r[P3@P4]"),
+ /* 22 */ "SeekLE" OpHelp("key=r[P3@P4]"),
+ /* 23 */ "SeekGE" OpHelp("key=r[P3@P4]"),
+ /* 24 */ "SeekGT" OpHelp("key=r[P3@P4]"),
+ /* 25 */ "IfNotOpen" OpHelp("if( !csr[P1] ) goto P2"),
+ /* 26 */ "IfNoHope" OpHelp("key=r[P3@P4]"),
+ /* 27 */ "NoConflict" OpHelp("key=r[P3@P4]"),
+ /* 28 */ "NotFound" OpHelp("key=r[P3@P4]"),
+ /* 29 */ "Found" OpHelp("key=r[P3@P4]"),
+ /* 30 */ "SeekRowid" OpHelp("intkey=r[P3]"),
+ /* 31 */ "NotExists" OpHelp("intkey=r[P3]"),
+ /* 32 */ "Last" OpHelp(""),
+ /* 33 */ "IfSmaller" OpHelp(""),
+ /* 34 */ "SorterSort" OpHelp(""),
+ /* 35 */ "Sort" OpHelp(""),
+ /* 36 */ "Rewind" OpHelp(""),
+ /* 37 */ "SorterNext" OpHelp(""),
+ /* 38 */ "Prev" OpHelp(""),
+ /* 39 */ "Next" OpHelp(""),
+ /* 40 */ "IdxLE" OpHelp("key=r[P3@P4]"),
+ /* 41 */ "IdxGT" OpHelp("key=r[P3@P4]"),
+ /* 42 */ "IdxLT" OpHelp("key=r[P3@P4]"),
/* 43 */ "Or" OpHelp("r[P3]=(r[P1] || r[P2])"),
/* 44 */ "And" OpHelp("r[P3]=(r[P1] && r[P2])"),
- /* 45 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"),
- /* 46 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"),
- /* 47 */ "Program" OpHelp(""),
- /* 48 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"),
- /* 49 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"),
+ /* 45 */ "IdxGE" OpHelp("key=r[P3@P4]"),
+ /* 46 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"),
+ /* 47 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"),
+ /* 48 */ "Program" OpHelp(""),
+ /* 49 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"),
/* 50 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"),
/* 51 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"),
/* 52 */ "Ne" OpHelp("IF r[P3]!=r[P1]"),
@@ -34654,12 +35558,12 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
/* 56 */ "Lt" OpHelp("IF r[P3]=r[P1]"),
/* 58 */ "ElseEq" OpHelp(""),
- /* 59 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]--, goto P2"),
- /* 60 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"),
- /* 61 */ "IncrVacuum" OpHelp(""),
- /* 62 */ "VNext" OpHelp(""),
- /* 63 */ "Filter" OpHelp("if key(P3@P4) not in filter(P1) goto P2"),
- /* 64 */ "Init" OpHelp("Start at P2"),
+ /* 59 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"),
+ /* 60 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]--, goto P2"),
+ /* 61 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"),
+ /* 62 */ "IncrVacuum" OpHelp(""),
+ /* 63 */ "VNext" OpHelp(""),
+ /* 64 */ "Filter" OpHelp("if key(P3@P4) not in filter(P1) goto P2"),
/* 65 */ "PureFunc" OpHelp("r[P3]=func(r[P2@NP])"),
/* 66 */ "Function" OpHelp("r[P3]=func(r[P2@NP])"),
/* 67 */ "Return" OpHelp(""),
@@ -34669,34 +35573,34 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
/* 71 */ "Integer" OpHelp("r[P2]=P1"),
/* 72 */ "Int64" OpHelp("r[P2]=P4"),
/* 73 */ "String" OpHelp("r[P2]='P4' (len=P1)"),
- /* 74 */ "Null" OpHelp("r[P2..P3]=NULL"),
- /* 75 */ "SoftNull" OpHelp("r[P1]=NULL"),
- /* 76 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"),
- /* 77 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"),
- /* 78 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"),
- /* 79 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"),
- /* 80 */ "SCopy" OpHelp("r[P2]=r[P1]"),
- /* 81 */ "IntCopy" OpHelp("r[P2]=r[P1]"),
- /* 82 */ "FkCheck" OpHelp(""),
- /* 83 */ "ResultRow" OpHelp("output=r[P1@P2]"),
- /* 84 */ "CollSeq" OpHelp(""),
- /* 85 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"),
- /* 86 */ "RealAffinity" OpHelp(""),
- /* 87 */ "Cast" OpHelp("affinity(r[P1])"),
- /* 88 */ "Permutation" OpHelp(""),
- /* 89 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"),
- /* 90 */ "IsTrue" OpHelp("r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4"),
- /* 91 */ "ZeroOrNull" OpHelp("r[P2] = 0 OR NULL"),
- /* 92 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"),
- /* 93 */ "Column" OpHelp("r[P3]=PX"),
- /* 94 */ "TypeCheck" OpHelp("typecheck(r[P1@P2])"),
- /* 95 */ "Affinity" OpHelp("affinity(r[P1@P2])"),
- /* 96 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"),
- /* 97 */ "Count" OpHelp("r[P2]=count()"),
- /* 98 */ "ReadCookie" OpHelp(""),
- /* 99 */ "SetCookie" OpHelp(""),
- /* 100 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"),
- /* 101 */ "OpenRead" OpHelp("root=P2 iDb=P3"),
+ /* 74 */ "BeginSubrtn" OpHelp("r[P2]=NULL"),
+ /* 75 */ "Null" OpHelp("r[P2..P3]=NULL"),
+ /* 76 */ "SoftNull" OpHelp("r[P1]=NULL"),
+ /* 77 */ "Blob" OpHelp("r[P2]=P4 (len=P1)"),
+ /* 78 */ "Variable" OpHelp("r[P2]=parameter(P1,P4)"),
+ /* 79 */ "Move" OpHelp("r[P2@P3]=r[P1@P3]"),
+ /* 80 */ "Copy" OpHelp("r[P2@P3+1]=r[P1@P3+1]"),
+ /* 81 */ "SCopy" OpHelp("r[P2]=r[P1]"),
+ /* 82 */ "IntCopy" OpHelp("r[P2]=r[P1]"),
+ /* 83 */ "FkCheck" OpHelp(""),
+ /* 84 */ "ResultRow" OpHelp("output=r[P1@P2]"),
+ /* 85 */ "CollSeq" OpHelp(""),
+ /* 86 */ "AddImm" OpHelp("r[P1]=r[P1]+P2"),
+ /* 87 */ "RealAffinity" OpHelp(""),
+ /* 88 */ "Cast" OpHelp("affinity(r[P1])"),
+ /* 89 */ "Permutation" OpHelp(""),
+ /* 90 */ "Compare" OpHelp("r[P1@P3] <-> r[P2@P3]"),
+ /* 91 */ "IsTrue" OpHelp("r[P2] = coalesce(r[P1]==TRUE,P3) ^ P4"),
+ /* 92 */ "ZeroOrNull" OpHelp("r[P2] = 0 OR NULL"),
+ /* 93 */ "Offset" OpHelp("r[P3] = sqlite_offset(P1)"),
+ /* 94 */ "Column" OpHelp("r[P3]=PX cursor P1 column P2"),
+ /* 95 */ "TypeCheck" OpHelp("typecheck(r[P1@P2])"),
+ /* 96 */ "Affinity" OpHelp("affinity(r[P1@P2])"),
+ /* 97 */ "MakeRecord" OpHelp("r[P3]=mkrec(r[P1@P2])"),
+ /* 98 */ "Count" OpHelp("r[P2]=count()"),
+ /* 99 */ "ReadCookie" OpHelp(""),
+ /* 100 */ "SetCookie" OpHelp(""),
+ /* 101 */ "ReopenIdx" OpHelp("root=P2 iDb=P3"),
/* 102 */ "BitAnd" OpHelp("r[P3]=r[P1]&r[P2]"),
/* 103 */ "BitOr" OpHelp("r[P3]=r[P1]|r[P2]"),
/* 104 */ "ShiftLeft" OpHelp("r[P3]=r[P2]<0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"),
- /* 160 */ "AggInverse" OpHelp("accum=r[P3] inverse(r[P2@P5])"),
- /* 161 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"),
- /* 162 */ "AggStep1" OpHelp("accum=r[P3] step(r[P2@P5])"),
- /* 163 */ "AggValue" OpHelp("r[P3]=value N=P2"),
- /* 164 */ "AggFinal" OpHelp("accum=r[P1] N=P2"),
- /* 165 */ "Expire" OpHelp(""),
- /* 166 */ "CursorLock" OpHelp(""),
- /* 167 */ "CursorUnlock" OpHelp(""),
- /* 168 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"),
- /* 169 */ "VBegin" OpHelp(""),
- /* 170 */ "VCreate" OpHelp(""),
- /* 171 */ "VDestroy" OpHelp(""),
- /* 172 */ "VOpen" OpHelp(""),
- /* 173 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"),
- /* 174 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
- /* 175 */ "VRename" OpHelp(""),
- /* 176 */ "Pagecount" OpHelp(""),
- /* 177 */ "MaxPgcnt" OpHelp(""),
- /* 178 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"),
- /* 179 */ "Trace" OpHelp(""),
- /* 180 */ "CursorHint" OpHelp(""),
- /* 181 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"),
- /* 182 */ "Noop" OpHelp(""),
- /* 183 */ "Explain" OpHelp(""),
- /* 184 */ "Abortable" OpHelp(""),
+ /* 154 */ "DropTrigger" OpHelp(""),
+ /* 155 */ "IntegrityCk" OpHelp(""),
+ /* 156 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"),
+ /* 157 */ "Param" OpHelp(""),
+ /* 158 */ "FkCounter" OpHelp("fkctr[P1]+=P2"),
+ /* 159 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"),
+ /* 160 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"),
+ /* 161 */ "AggInverse" OpHelp("accum=r[P3] inverse(r[P2@P5])"),
+ /* 162 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"),
+ /* 163 */ "AggStep1" OpHelp("accum=r[P3] step(r[P2@P5])"),
+ /* 164 */ "AggValue" OpHelp("r[P3]=value N=P2"),
+ /* 165 */ "AggFinal" OpHelp("accum=r[P1] N=P2"),
+ /* 166 */ "Expire" OpHelp(""),
+ /* 167 */ "CursorLock" OpHelp(""),
+ /* 168 */ "CursorUnlock" OpHelp(""),
+ /* 169 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"),
+ /* 170 */ "VBegin" OpHelp(""),
+ /* 171 */ "VCreate" OpHelp(""),
+ /* 172 */ "VDestroy" OpHelp(""),
+ /* 173 */ "VOpen" OpHelp(""),
+ /* 174 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"),
+ /* 175 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
+ /* 176 */ "VRename" OpHelp(""),
+ /* 177 */ "Pagecount" OpHelp(""),
+ /* 178 */ "MaxPgcnt" OpHelp(""),
+ /* 179 */ "ClrSubtype" OpHelp("r[P1].subtype = 0"),
+ /* 180 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"),
+ /* 181 */ "Trace" OpHelp(""),
+ /* 182 */ "CursorHint" OpHelp(""),
+ /* 183 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"),
+ /* 184 */ "Noop" OpHelp(""),
+ /* 185 */ "Explain" OpHelp(""),
+ /* 186 */ "Abortable" OpHelp(""),
};
return azName[i];
}
#endif
/************** End of opcodes.c *********************************************/
+/************** Begin file os_kv.c *******************************************/
+/*
+** 2022-09-06
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains an experimental VFS layer that operates on a
+** Key/Value storage engine where both keys and values must be pure
+** text.
+*/
+/* #include */
+#if SQLITE_OS_KV || (SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL))
+
+/*****************************************************************************
+** Debugging logic
+*/
+
+/* SQLITE_KV_TRACE() is used for tracing calls to kvstorage routines. */
+#if 0
+#define SQLITE_KV_TRACE(X) printf X
+#else
+#define SQLITE_KV_TRACE(X)
+#endif
+
+/* SQLITE_KV_LOG() is used for tracing calls to the VFS interface */
+#if 0
+#define SQLITE_KV_LOG(X) printf X
+#else
+#define SQLITE_KV_LOG(X)
+#endif
+
+
+/*
+** Forward declaration of objects used by this VFS implementation
+*/
+typedef struct KVVfsFile KVVfsFile;
+
+/* A single open file. There are only two files represented by this
+** VFS - the database and the rollback journal.
+*/
+struct KVVfsFile {
+ sqlite3_file base; /* IO methods */
+ const char *zClass; /* Storage class */
+ int isJournal; /* True if this is a journal file */
+ unsigned int nJrnl; /* Space allocated for aJrnl[] */
+ char *aJrnl; /* Journal content */
+ int szPage; /* Last known page size */
+ sqlite3_int64 szDb; /* Database file size. -1 means unknown */
+};
+
+/*
+** Methods for KVVfsFile
+*/
+static int kvvfsClose(sqlite3_file*);
+static int kvvfsReadDb(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst);
+static int kvvfsReadJrnl(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst);
+static int kvvfsWriteDb(sqlite3_file*,const void*,int iAmt, sqlite3_int64);
+static int kvvfsWriteJrnl(sqlite3_file*,const void*,int iAmt, sqlite3_int64);
+static int kvvfsTruncateDb(sqlite3_file*, sqlite3_int64 size);
+static int kvvfsTruncateJrnl(sqlite3_file*, sqlite3_int64 size);
+static int kvvfsSyncDb(sqlite3_file*, int flags);
+static int kvvfsSyncJrnl(sqlite3_file*, int flags);
+static int kvvfsFileSizeDb(sqlite3_file*, sqlite3_int64 *pSize);
+static int kvvfsFileSizeJrnl(sqlite3_file*, sqlite3_int64 *pSize);
+static int kvvfsLock(sqlite3_file*, int);
+static int kvvfsUnlock(sqlite3_file*, int);
+static int kvvfsCheckReservedLock(sqlite3_file*, int *pResOut);
+static int kvvfsFileControlDb(sqlite3_file*, int op, void *pArg);
+static int kvvfsFileControlJrnl(sqlite3_file*, int op, void *pArg);
+static int kvvfsSectorSize(sqlite3_file*);
+static int kvvfsDeviceCharacteristics(sqlite3_file*);
+
+/*
+** Methods for sqlite3_vfs
+*/
+static int kvvfsOpen(sqlite3_vfs*, const char *, sqlite3_file*, int , int *);
+static int kvvfsDelete(sqlite3_vfs*, const char *zName, int syncDir);
+static int kvvfsAccess(sqlite3_vfs*, const char *zName, int flags, int *);
+static int kvvfsFullPathname(sqlite3_vfs*, const char *zName, int, char *zOut);
+static void *kvvfsDlOpen(sqlite3_vfs*, const char *zFilename);
+static int kvvfsRandomness(sqlite3_vfs*, int nByte, char *zOut);
+static int kvvfsSleep(sqlite3_vfs*, int microseconds);
+static int kvvfsCurrentTime(sqlite3_vfs*, double*);
+static int kvvfsCurrentTimeInt64(sqlite3_vfs*, sqlite3_int64*);
+
+static sqlite3_vfs sqlite3OsKvvfsObject = {
+ 1, /* iVersion */
+ sizeof(KVVfsFile), /* szOsFile */
+ 1024, /* mxPathname */
+ 0, /* pNext */
+ "kvvfs", /* zName */
+ 0, /* pAppData */
+ kvvfsOpen, /* xOpen */
+ kvvfsDelete, /* xDelete */
+ kvvfsAccess, /* xAccess */
+ kvvfsFullPathname, /* xFullPathname */
+ kvvfsDlOpen, /* xDlOpen */
+ 0, /* xDlError */
+ 0, /* xDlSym */
+ 0, /* xDlClose */
+ kvvfsRandomness, /* xRandomness */
+ kvvfsSleep, /* xSleep */
+ kvvfsCurrentTime, /* xCurrentTime */
+ 0, /* xGetLastError */
+ kvvfsCurrentTimeInt64 /* xCurrentTimeInt64 */
+};
+
+/* Methods for sqlite3_file objects referencing a database file
+*/
+static sqlite3_io_methods kvvfs_db_io_methods = {
+ 1, /* iVersion */
+ kvvfsClose, /* xClose */
+ kvvfsReadDb, /* xRead */
+ kvvfsWriteDb, /* xWrite */
+ kvvfsTruncateDb, /* xTruncate */
+ kvvfsSyncDb, /* xSync */
+ kvvfsFileSizeDb, /* xFileSize */
+ kvvfsLock, /* xLock */
+ kvvfsUnlock, /* xUnlock */
+ kvvfsCheckReservedLock, /* xCheckReservedLock */
+ kvvfsFileControlDb, /* xFileControl */
+ kvvfsSectorSize, /* xSectorSize */
+ kvvfsDeviceCharacteristics, /* xDeviceCharacteristics */
+ 0, /* xShmMap */
+ 0, /* xShmLock */
+ 0, /* xShmBarrier */
+ 0, /* xShmUnmap */
+ 0, /* xFetch */
+ 0 /* xUnfetch */
+};
+
+/* Methods for sqlite3_file objects referencing a rollback journal
+*/
+static sqlite3_io_methods kvvfs_jrnl_io_methods = {
+ 1, /* iVersion */
+ kvvfsClose, /* xClose */
+ kvvfsReadJrnl, /* xRead */
+ kvvfsWriteJrnl, /* xWrite */
+ kvvfsTruncateJrnl, /* xTruncate */
+ kvvfsSyncJrnl, /* xSync */
+ kvvfsFileSizeJrnl, /* xFileSize */
+ kvvfsLock, /* xLock */
+ kvvfsUnlock, /* xUnlock */
+ kvvfsCheckReservedLock, /* xCheckReservedLock */
+ kvvfsFileControlJrnl, /* xFileControl */
+ kvvfsSectorSize, /* xSectorSize */
+ kvvfsDeviceCharacteristics, /* xDeviceCharacteristics */
+ 0, /* xShmMap */
+ 0, /* xShmLock */
+ 0, /* xShmBarrier */
+ 0, /* xShmUnmap */
+ 0, /* xFetch */
+ 0 /* xUnfetch */
+};
+
+/****** Storage subsystem **************************************************/
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+/* Forward declarations for the low-level storage engine
+*/
+static int kvstorageWrite(const char*, const char *zKey, const char *zData);
+static int kvstorageDelete(const char*, const char *zKey);
+static int kvstorageRead(const char*, const char *zKey, char *zBuf, int nBuf);
+#define KVSTORAGE_KEY_SZ 32
+
+/* Expand the key name with an appropriate prefix and put the result
+** in zKeyOut[]. The zKeyOut[] buffer is assumed to hold at least
+** KVSTORAGE_KEY_SZ bytes.
+*/
+static void kvstorageMakeKey(
+ const char *zClass,
+ const char *zKeyIn,
+ char *zKeyOut
+){
+ sqlite3_snprintf(KVSTORAGE_KEY_SZ, zKeyOut, "kvvfs-%s-%s", zClass, zKeyIn);
+}
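+
+/* For example (illustrative): kvstorageMakeKey("local", "jrnl", zKeyOut)
+** produces the key "kvvfs-local-jrnl", and page 2 of a "session" database
+** is stored under "kvvfs-session-2".
+*/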
+
+/* Write content into a key. zClass is the particular namespace of the
+** underlying key/value store to use - either "local" or "session".
+**
+** Both zKey and zData are zero-terminated pure text strings.
+**
+** Return the number of errors.
+*/
+static int kvstorageWrite(
+ const char *zClass,
+ const char *zKey,
+ const char *zData
+){
+ FILE *fd;
+ char zXKey[KVSTORAGE_KEY_SZ];
+ kvstorageMakeKey(zClass, zKey, zXKey);
+ fd = fopen(zXKey, "wb");
+ if( fd ){
+ SQLITE_KV_TRACE(("KVVFS-WRITE %-15s (%d) %.50s%s\n", zXKey,
+ (int)strlen(zData), zData,
+ strlen(zData)>50 ? "..." : ""));
+ fputs(zData, fd);
+ fclose(fd);
+ return 0;
+ }else{
+ return 1;
+ }
+}
+
+/* Delete a key (with its corresponding data) from the key/value
+** namespace given by zClass. If the key does not previously exist,
+** this routine is a no-op.
+*/
+static int kvstorageDelete(const char *zClass, const char *zKey){
+ char zXKey[KVSTORAGE_KEY_SZ];
+ kvstorageMakeKey(zClass, zKey, zXKey);
+ unlink(zXKey);
+ SQLITE_KV_TRACE(("KVVFS-DELETE %-15s\n", zXKey));
+ return 0;
+}
+
+/* Read the value associated with a zKey from the key/value namespace given
+** by zClass and put the text data associated with that key in the first
+** nBuf bytes of zBuf[]. The value might be truncated if zBuf is not large
+** enough to hold it all. The value put into zBuf must always be zero
+** terminated, even if it gets truncated because nBuf is not large enough.
+**
+** Return the total number of bytes in the data, without truncation, and
+** not counting the final zero terminator. Return -1 if the key does
+** not exist.
+**
+** If nBuf<=0 then this routine simply returns the size of the data without
+** actually reading it.
+*/
+static int kvstorageRead(
+ const char *zClass,
+ const char *zKey,
+ char *zBuf,
+ int nBuf
+){
+ FILE *fd;
+ struct stat buf;
+ char zXKey[KVSTORAGE_KEY_SZ];
+ kvstorageMakeKey(zClass, zKey, zXKey);
+ if( access(zXKey, R_OK)!=0
+ || stat(zXKey, &buf)!=0
+ || !S_ISREG(buf.st_mode)
+ ){
+ SQLITE_KV_TRACE(("KVVFS-READ %-15s (-1)\n", zXKey));
+ return -1;
+ }
+ if( nBuf<=0 ){
+ return (int)buf.st_size;
+ }else if( nBuf==1 ){
+ zBuf[0] = 0;
+ SQLITE_KV_TRACE(("KVVFS-READ %-15s (%d)\n", zXKey,
+ (int)buf.st_size));
+ return (int)buf.st_size;
+ }
+ if( nBuf > buf.st_size + 1 ){
+ nBuf = buf.st_size + 1;
+ }
+ fd = fopen(zXKey, "rb");
+ if( fd==0 ){
+ SQLITE_KV_TRACE(("KVVFS-READ %-15s (-1)\n", zXKey));
+ return -1;
+ }else{
+ sqlite3_int64 n = fread(zBuf, 1, nBuf-1, fd);
+ fclose(fd);
+ zBuf[n] = 0;
+ SQLITE_KV_TRACE(("KVVFS-READ %-15s (%lld) %.50s%s\n", zXKey,
+ n, zBuf, n>50 ? "..." : ""));
+ return (int)n;
+ }
+}
+
+/*
+** An internal level of indirection which enables us to replace the
+** kvvfs i/o methods with JavaScript implementations in WASM builds.
+** Maintenance reminder: if this struct changes in any way, the JSON
+** rendering of its structure must be updated in
+** sqlite3_wasm_enum_json(). There are no binary compatibility
+** concerns, so it does not need an iVersion member. This file is
+** necessarily always compiled together with sqlite3_wasm_enum_json(),
+** and JS code dynamically creates the mapping of members based on
+** that JSON description.
+*/
+typedef struct sqlite3_kvvfs_methods sqlite3_kvvfs_methods;
+struct sqlite3_kvvfs_methods {
+ int (*xRead)(const char *zClass, const char *zKey, char *zBuf, int nBuf);
+ int (*xWrite)(const char *zClass, const char *zKey, const char *zData);
+ int (*xDelete)(const char *zClass, const char *zKey);
+ const int nKeySize;
+};
+
+/*
+** This object holds the kvvfs I/O methods which may be swapped out
+** for JavaScript-side implementations in WASM builds. In such builds
+** it cannot be const, but in native builds it should be so that
+** the compiler can hopefully optimize this level of indirection out.
+** That said, kvvfs is intended primarily for use in WASM builds.
+**
+** Note that this is not explicitly flagged as static because the
+** amalgamation build will tag it with SQLITE_PRIVATE.
+*/
+#ifndef SQLITE_WASM
+const
+#endif
+SQLITE_PRIVATE sqlite3_kvvfs_methods sqlite3KvvfsMethods = {
+kvstorageRead,
+kvstorageWrite,
+kvstorageDelete,
+KVSTORAGE_KEY_SZ
+};
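+
+/*
+** Sketch of the indirection described above: a WASM build may overwrite
+** these three pointers with JavaScript-backed routines, e.g.
+**
+**      sqlite3KvvfsMethods.xWrite = jsBackedWrite;  (jsBackedWrite is a
+**                                                    hypothetical name)
+**
+** while native builds always use the kvstorage*() routines defined here.
+*/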
+
+/****** Utility subroutines ************************************************/
+
+/*
+** Encode binary into the text encoding used to persist on disk.
+** The output text is stored in aOut[], which must be at least
+** 2*nData+1 bytes in length.
+**
+** Return the actual length of the encoded text, not counting the
+** zero terminator at the end.
+**
+** Encoding format
+** ---------------
+**
+** * Non-zero bytes are encoded as upper-case hexadecimal
+**
+** * A sequence of one or more zero-bytes that are not at the
+** beginning of the buffer are encoded as a little-endian
+** base-26 number using a..z. "a" means 0. "b" means 1,
+** "z" means 25. "ab" means 26. "ac" means 52. And so forth.
+**
+** * Because there is no overlap between the encoding characters
+** of hexadecimal and base-26 numbers, it is always clear where
+** one stops and the next begins.
+*/
+static int kvvfsEncode(const char *aData, int nData, char *aOut){
+ int i, j;
+ const unsigned char *a = (const unsigned char*)aData;
+  for(i=j=0; i<nData; i++){
+    unsigned char c = a[i];
+    if( c!=0 ){
+      aOut[j++] = "0123456789ABCDEF"[c>>4];
+ aOut[j++] = "0123456789ABCDEF"[c&0xf];
+ }else{
+ /* A sequence of 1 or more zeros is stored as a little-endian
+ ** base-26 number using a..z as the digits. So one zero is "b".
+ ** Two zeros is "c". 25 zeros is "z", 26 zeros is "ab", 27 is "bb",
+ ** and so forth.
+ */
+ int k;
+      for(k=1; i+k<nData && a[i+k]==0; k++){}
+      i += k-1;
+      while( k>0 ){
+ aOut[j++] = 'a'+(k%26);
+ k /= 26;
+ }
+ }
+ }
+ aOut[j] = 0;
+ return j;
+}
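+
+/* Example of the encoding (illustrative): the four input bytes
+** 0x01 0x00 0x00 0xFF become the text "01cFF" -- "01" for the leading
+** non-zero byte, "c" for the run of two zeros (k==2 in little-endian
+** base-26), and "FF" for the final byte.
+*/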
+
+static const signed char kvvfsHexValue[256] = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1,
+ -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+};
+
+/*
+** Decode the text encoding back to binary. The binary content is
+** written into pOut, which must be at least nOut bytes in length.
+**
+** The return value is the number of bytes actually written into aOut[].
+*/
+static int kvvfsDecode(const char *a, char *aOut, int nOut){
+ int i, j;
+ int c;
+ const unsigned char *aIn = (const unsigned char*)a;
+ i = 0;
+ j = 0;
+ while( 1 ){
+ c = kvvfsHexValue[aIn[i]];
+ if( c<0 ){
+ int n = 0;
+ int mult = 1;
+ c = aIn[i];
+ if( c==0 ) break;
+ while( c>='a' && c<='z' ){
+ n += (c - 'a')*mult;
+ mult *= 26;
+ c = aIn[++i];
+ }
+ if( j+n>nOut ) return -1;
+ memset(&aOut[j], 0, n);
+ j += n;
+ c = aIn[i];
+ if( c==0 ) break;
+ }else{
+ aOut[j] = c<<4;
+ c = kvvfsHexValue[aIn[++i]];
+ if( c<0 ) break;
+ aOut[j++] += c;
+ i++;
+ }
+ }
+ return j;
+}
+
+/*
+** Decode a complete journal file. Allocate space in pFile->aJrnl
+** and store the decoding there. Or leave pFile->aJrnl set to NULL
+** if an error is encountered.
+**
+** The first few characters of the text encoding will be a little-endian
+** base-26 number (digits a..z) that is the total number of bytes
+** in the decoded journal file image. This base-26 number is followed
+** by a single space, then the encoding of the journal. The space
+** separator is required to act as a terminator for the base-26 number.
+*/
+static void kvvfsDecodeJournal(
+ KVVfsFile *pFile, /* Store decoding in pFile->aJrnl */
+ const char *zTxt, /* Text encoding. Zero-terminated */
+ int nTxt /* Bytes in zTxt, excluding zero terminator */
+){
+ unsigned int n = 0;
+ int c, i, mult;
+ i = 0;
+ mult = 1;
+ while( (c = zTxt[i++])>='a' && c<='z' ){
+ n += (zTxt[i] - 'a')*mult;
+ mult *= 26;
+ }
+ sqlite3_free(pFile->aJrnl);
+ pFile->aJrnl = sqlite3_malloc64( n );
+ if( pFile->aJrnl==0 ){
+ pFile->nJrnl = 0;
+ return;
+ }
+ pFile->nJrnl = n;
+ n = kvvfsDecode(zTxt+i, pFile->aJrnl, pFile->nJrnl);
+  if( n<pFile->nJrnl ){
+ sqlite3_free(pFile->aJrnl);
+ pFile->aJrnl = 0;
+ pFile->nJrnl = 0;
+ }
+}
+
+/*
+** Read or write the "sz" element, containing the database file size.
+*/
+static sqlite3_int64 kvvfsReadFileSize(KVVfsFile *pFile){
+ char zData[50];
+ zData[0] = 0;
+ sqlite3KvvfsMethods.xRead(pFile->zClass, "sz", zData, sizeof(zData)-1);
+ return strtoll(zData, 0, 0);
+}
+static int kvvfsWriteFileSize(KVVfsFile *pFile, sqlite3_int64 sz){
+ char zData[50];
+ sqlite3_snprintf(sizeof(zData), zData, "%lld", sz);
+ return sqlite3KvvfsMethods.xWrite(pFile->zClass, "sz", zData);
+}
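+
+/*
+** Example of the resulting key layout (illustrative): a "local" database
+** with two 4096-byte pages is persisted as the keys "kvvfs-local-1" and
+** "kvvfs-local-2", holding the encoded pages, plus "kvvfs-local-sz",
+** holding the text "8192".
+*/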
+
+/****** sqlite3_io_methods methods ******************************************/
+
+/*
+** Close a kvvfs-file.
+*/
+static int kvvfsClose(sqlite3_file *pProtoFile){
+ KVVfsFile *pFile = (KVVfsFile *)pProtoFile;
+
+ SQLITE_KV_LOG(("xClose %s %s\n", pFile->zClass,
+ pFile->isJournal ? "journal" : "db"));
+ sqlite3_free(pFile->aJrnl);
+ return SQLITE_OK;
+}
+
+/*
+** Read from the -journal file.
+*/
+static int kvvfsReadJrnl(
+ sqlite3_file *pProtoFile,
+ void *zBuf,
+ int iAmt,
+ sqlite_int64 iOfst
+){
+ KVVfsFile *pFile = (KVVfsFile*)pProtoFile;
+ assert( pFile->isJournal );
+ SQLITE_KV_LOG(("xRead('%s-journal',%d,%lld)\n", pFile->zClass, iAmt, iOfst));
+ if( pFile->aJrnl==0 ){
+ int szTxt = kvstorageRead(pFile->zClass, "jrnl", 0, 0);
+ char *aTxt;
+ if( szTxt<=4 ){
+ return SQLITE_IOERR;
+ }
+ aTxt = sqlite3_malloc64( szTxt+1 );
+ if( aTxt==0 ) return SQLITE_NOMEM;
+ kvstorageRead(pFile->zClass, "jrnl", aTxt, szTxt+1);
+ kvvfsDecodeJournal(pFile, aTxt, szTxt);
+ sqlite3_free(aTxt);
+ if( pFile->aJrnl==0 ) return SQLITE_IOERR;
+ }
+ if( iOfst+iAmt>pFile->nJrnl ){
+ return SQLITE_IOERR_SHORT_READ;
+ }
+ memcpy(zBuf, pFile->aJrnl+iOfst, iAmt);
+ return SQLITE_OK;
+}
+
+/*
+** Read from the database file.
+*/
+static int kvvfsReadDb(
+ sqlite3_file *pProtoFile,
+ void *zBuf,
+ int iAmt,
+ sqlite_int64 iOfst
+){
+ KVVfsFile *pFile = (KVVfsFile*)pProtoFile;
+ unsigned int pgno;
+ int got, n;
+ char zKey[30];
+ char aData[133073];
+ assert( iOfst>=0 );
+ assert( iAmt>=0 );
+ SQLITE_KV_LOG(("xRead('%s-db',%d,%lld)\n", pFile->zClass, iAmt, iOfst));
+ if( iOfst+iAmt>=512 ){
+ if( (iOfst % iAmt)!=0 ){
+ return SQLITE_IOERR_READ;
+ }
+ if( (iAmt & (iAmt-1))!=0 || iAmt<512 || iAmt>65536 ){
+ return SQLITE_IOERR_READ;
+ }
+ pFile->szPage = iAmt;
+ pgno = 1 + iOfst/iAmt;
+ }else{
+ pgno = 1;
+ }
+ sqlite3_snprintf(sizeof(zKey), zKey, "%u", pgno);
+ got = sqlite3KvvfsMethods.xRead(pFile->zClass, zKey, aData, sizeof(aData)-1);
+ if( got<0 ){
+ n = 0;
+ }else{
+ aData[got] = 0;
+ if( iOfst+iAmt<512 ){
+ int k = iOfst+iAmt;
+ aData[k*2] = 0;
+ n = kvvfsDecode(aData, &aData[2000], sizeof(aData)-2000);
+ if( n>=iOfst+iAmt ){
+ memcpy(zBuf, &aData[2000+iOfst], iAmt);
+ n = iAmt;
+ }else{
+ n = 0;
+ }
+ }else{
+ n = kvvfsDecode(aData, zBuf, iAmt);
+ }
+ }
+  if( n<iAmt ){
+    memset(zBuf+n, 0, iAmt-n);
+    return SQLITE_IOERR_SHORT_READ;
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Write into the -journal file.
+*/
+static int kvvfsWriteJrnl(
+  sqlite3_file *pProtoFile,
+  const void *zBuf,
+  int iAmt,
+  sqlite_int64 iOfst
+){
+  KVVfsFile *pFile = (KVVfsFile*)pProtoFile;
+  sqlite3_int64 iEnd = iOfst+iAmt;
+  SQLITE_KV_LOG(("xWrite('%s-journal',%d,%lld)\n", pFile->zClass, iAmt, iOfst));
+ if( iEnd>=0x10000000 ) return SQLITE_FULL;
+  if( pFile->aJrnl==0 || pFile->nJrnl<iEnd ){
+    char *aNew = sqlite3_realloc(pFile->aJrnl, iEnd);
+ if( aNew==0 ){
+ return SQLITE_IOERR_NOMEM;
+ }
+ pFile->aJrnl = aNew;
+    if( pFile->nJrnl<iOfst ){
+      memset(pFile->aJrnl+pFile->nJrnl, 0, iOfst-pFile->nJrnl);
+ }
+ pFile->nJrnl = iEnd;
+ }
+ memcpy(pFile->aJrnl+iOfst, zBuf, iAmt);
+ return SQLITE_OK;
+}
+
+/*
+** Write into the database file.
+*/
+static int kvvfsWriteDb(
+ sqlite3_file *pProtoFile,
+ const void *zBuf,
+ int iAmt,
+ sqlite_int64 iOfst
+){
+ KVVfsFile *pFile = (KVVfsFile*)pProtoFile;
+ unsigned int pgno;
+ char zKey[30];
+ char aData[131073];
+ SQLITE_KV_LOG(("xWrite('%s-db',%d,%lld)\n", pFile->zClass, iAmt, iOfst));
+ assert( iAmt>=512 && iAmt<=65536 );
+ assert( (iAmt & (iAmt-1))==0 );
+ assert( pFile->szPage<0 || pFile->szPage==iAmt );
+ pFile->szPage = iAmt;
+ pgno = 1 + iOfst/iAmt;
+ sqlite3_snprintf(sizeof(zKey), zKey, "%u", pgno);
+ kvvfsEncode(zBuf, iAmt, aData);
+ if( sqlite3KvvfsMethods.xWrite(pFile->zClass, zKey, aData) ){
+ return SQLITE_IOERR;
+ }
+ if( iOfst+iAmt > pFile->szDb ){
+ pFile->szDb = iOfst + iAmt;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Truncate a kvvfs-file.
+*/
+static int kvvfsTruncateJrnl(sqlite3_file *pProtoFile, sqlite_int64 size){
+ KVVfsFile *pFile = (KVVfsFile *)pProtoFile;
+ SQLITE_KV_LOG(("xTruncate('%s-journal',%lld)\n", pFile->zClass, size));
+ assert( size==0 );
+ sqlite3KvvfsMethods.xDelete(pFile->zClass, "jrnl");
+ sqlite3_free(pFile->aJrnl);
+ pFile->aJrnl = 0;
+ pFile->nJrnl = 0;
+ return SQLITE_OK;
+}
+static int kvvfsTruncateDb(sqlite3_file *pProtoFile, sqlite_int64 size){
+ KVVfsFile *pFile = (KVVfsFile *)pProtoFile;
+ if( pFile->szDb>size
+ && pFile->szPage>0
+ && (size % pFile->szPage)==0
+ ){
+ char zKey[50];
+ unsigned int pgno, pgnoMax;
+ SQLITE_KV_LOG(("xTruncate('%s-db',%lld)\n", pFile->zClass, size));
+ pgno = 1 + size/pFile->szPage;
+ pgnoMax = 2 + pFile->szDb/pFile->szPage;
+ while( pgno<=pgnoMax ){
+ sqlite3_snprintf(sizeof(zKey), zKey, "%u", pgno);
+ sqlite3KvvfsMethods.xDelete(pFile->zClass, zKey);
+ pgno++;
+ }
+ pFile->szDb = size;
+ return kvvfsWriteFileSize(pFile, size) ? SQLITE_IOERR : SQLITE_OK;
+ }
+ return SQLITE_IOERR;
+}
+
+/*
+** Sync a kvvfs-file.
+*/
+static int kvvfsSyncJrnl(sqlite3_file *pProtoFile, int flags){
+ int i, n;
+ KVVfsFile *pFile = (KVVfsFile *)pProtoFile;
+ char *zOut;
+ SQLITE_KV_LOG(("xSync('%s-journal')\n", pFile->zClass));
+ if( pFile->nJrnl<=0 ){
+ return kvvfsTruncateJrnl(pProtoFile, 0);
+ }
+ zOut = sqlite3_malloc64( pFile->nJrnl*2 + 50 );
+ if( zOut==0 ){
+ return SQLITE_IOERR_NOMEM;
+ }
+ n = pFile->nJrnl;
+ i = 0;
+ do{
+ zOut[i++] = 'a' + (n%26);
+ n /= 26;
+ }while( n>0 );
+ zOut[i++] = ' ';
+ kvvfsEncode(pFile->aJrnl, pFile->nJrnl, &zOut[i]);
+ i = sqlite3KvvfsMethods.xWrite(pFile->zClass, "jrnl", zOut);
+ sqlite3_free(zOut);
+ return i ? SQLITE_IOERR : SQLITE_OK;
+}
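+
+/*
+** Example of the journal text written above (illustrative): a 2-byte
+** journal holding 0x07 0x00 is stored under the "jrnl" key as the text
+** "c 07b" -- "c" is the length 2 in little-endian base-26, followed by a
+** space and then the kvvfsEncode() text of the journal image.
+*/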
+static int kvvfsSyncDb(sqlite3_file *pProtoFile, int flags){
+ return SQLITE_OK;
+}
+
+/*
+** Return the current file-size of a kvvfs-file.
+*/
+static int kvvfsFileSizeJrnl(sqlite3_file *pProtoFile, sqlite_int64 *pSize){
+ KVVfsFile *pFile = (KVVfsFile *)pProtoFile;
+ SQLITE_KV_LOG(("xFileSize('%s-journal')\n", pFile->zClass));
+ *pSize = pFile->nJrnl;
+ return SQLITE_OK;
+}
+static int kvvfsFileSizeDb(sqlite3_file *pProtoFile, sqlite_int64 *pSize){
+ KVVfsFile *pFile = (KVVfsFile *)pProtoFile;
+ SQLITE_KV_LOG(("xFileSize('%s-db')\n", pFile->zClass));
+ if( pFile->szDb>=0 ){
+ *pSize = pFile->szDb;
+ }else{
+ *pSize = kvvfsReadFileSize(pFile);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Lock a kvvfs-file.
+*/
+static int kvvfsLock(sqlite3_file *pProtoFile, int eLock){
+ KVVfsFile *pFile = (KVVfsFile *)pProtoFile;
+ assert( !pFile->isJournal );
+ SQLITE_KV_LOG(("xLock(%s,%d)\n", pFile->zClass, eLock));
+
+ if( eLock!=SQLITE_LOCK_NONE ){
+ pFile->szDb = kvvfsReadFileSize(pFile);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Unlock a kvvfs-file.
+*/
+static int kvvfsUnlock(sqlite3_file *pProtoFile, int eLock){
+ KVVfsFile *pFile = (KVVfsFile *)pProtoFile;
+ assert( !pFile->isJournal );
+ SQLITE_KV_LOG(("xUnlock(%s,%d)\n", pFile->zClass, eLock));
+ if( eLock==SQLITE_LOCK_NONE ){
+ pFile->szDb = -1;
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Check if another file-handle holds a RESERVED lock on a kvvfs-file.
+*/
+static int kvvfsCheckReservedLock(sqlite3_file *pProtoFile, int *pResOut){
+ SQLITE_KV_LOG(("xCheckReservedLock\n"));
+ *pResOut = 0;
+ return SQLITE_OK;
+}
+
+/*
+** File control method. For custom operations on a kvvfs-file.
+*/
+static int kvvfsFileControlJrnl(sqlite3_file *pProtoFile, int op, void *pArg){
+ SQLITE_KV_LOG(("xFileControl(%d) on journal\n", op));
+ return SQLITE_NOTFOUND;
+}
+static int kvvfsFileControlDb(sqlite3_file *pProtoFile, int op, void *pArg){
+ SQLITE_KV_LOG(("xFileControl(%d) on database\n", op));
+ if( op==SQLITE_FCNTL_SYNC ){
+ KVVfsFile *pFile = (KVVfsFile *)pProtoFile;
+ int rc = SQLITE_OK;
+ SQLITE_KV_LOG(("xSync('%s-db')\n", pFile->zClass));
+ if( pFile->szDb>0 && 0!=kvvfsWriteFileSize(pFile, pFile->szDb) ){
+ rc = SQLITE_IOERR;
+ }
+ return rc;
+ }
+ return SQLITE_NOTFOUND;
+}
+
+/*
+** Return the sector-size in bytes for a kvvfs-file.
+*/
+static int kvvfsSectorSize(sqlite3_file *pFile){
+ return 512;
+}
+
+/*
+** Return the device characteristic flags supported by a kvvfs-file.
+*/
+static int kvvfsDeviceCharacteristics(sqlite3_file *pProtoFile){
+ return 0;
+}
+
+/****** sqlite3_vfs methods *************************************************/
+
+/*
+** Open a kvvfs file handle.
+*/
+static int kvvfsOpen(
+ sqlite3_vfs *pProtoVfs,
+ const char *zName,
+ sqlite3_file *pProtoFile,
+ int flags,
+ int *pOutFlags
+){
+ KVVfsFile *pFile = (KVVfsFile*)pProtoFile;
+ if( zName==0 ) zName = "";
+ SQLITE_KV_LOG(("xOpen(\"%s\")\n", zName));
+ if( strcmp(zName, "local")==0
+ || strcmp(zName, "session")==0
+ ){
+ pFile->isJournal = 0;
+ pFile->base.pMethods = &kvvfs_db_io_methods;
+ }else
+ if( strcmp(zName, "local-journal")==0
+ || strcmp(zName, "session-journal")==0
+ ){
+ pFile->isJournal = 1;
+ pFile->base.pMethods = &kvvfs_jrnl_io_methods;
+ }else{
+ return SQLITE_CANTOPEN;
+ }
+ if( zName[0]=='s' ){
+ pFile->zClass = "session";
+ }else{
+ pFile->zClass = "local";
+ }
+ pFile->aJrnl = 0;
+ pFile->nJrnl = 0;
+ pFile->szPage = -1;
+ pFile->szDb = -1;
+ return SQLITE_OK;
+}
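kvvfsOpen() accepts only the four fixed names shown, so an application reaches this VFS by opening one of them explicitly. A sketch of such a call, assuming the VFS is registered under the name "kvvfs" (the registration appears later in this file) and noting that the journal names are only used internally:

#include <sqlite3.h>
#include <stdio.h>

/* Open the "local" key-value backed database through the kvvfs VFS. */
static int demoOpenKvDb(sqlite3 **ppDb){
  int rc = sqlite3_open_v2("local", ppDb,
                           SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE,
                           "kvvfs");
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "cannot open kvvfs database: %s\n", sqlite3_errmsg(*ppDb));
  }
  return rc;
}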
+
+/*
+** Delete the file located at zPath. If the dirSync argument is true,
+** ensure the file-system modifications are synced to disk before
+** returning.
+*/
+static int kvvfsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){
+ if( strcmp(zPath, "local-journal")==0 ){
+ sqlite3KvvfsMethods.xDelete("local", "jrnl");
+ }else
+ if( strcmp(zPath, "session-journal")==0 ){
+ sqlite3KvvfsMethods.xDelete("session", "jrnl");
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Test for access permissions. Return true if the requested permission
+** is available, or false otherwise.
+*/
+static int kvvfsAccess(
+ sqlite3_vfs *pProtoVfs,
+ const char *zPath,
+ int flags,
+ int *pResOut
+){
+ SQLITE_KV_LOG(("xAccess(\"%s\")\n", zPath));
+ if( strcmp(zPath, "local-journal")==0 ){
+ *pResOut = sqlite3KvvfsMethods.xRead("local", "jrnl", 0, 0)>0;
+ }else
+ if( strcmp(zPath, "session-journal")==0 ){
+ *pResOut = sqlite3KvvfsMethods.xRead("session", "jrnl", 0, 0)>0;
+ }else
+ if( strcmp(zPath, "local")==0 ){
+ *pResOut = sqlite3KvvfsMethods.xRead("local", "sz", 0, 0)>0;
+ }else
+ if( strcmp(zPath, "session")==0 ){
+ *pResOut = sqlite3KvvfsMethods.xRead("session", "sz", 0, 0)>0;
+ }else
+ {
+ *pResOut = 0;
+ }
+ SQLITE_KV_LOG(("xAccess returns %d\n",*pResOut));
+ return SQLITE_OK;
+}
+
+/*
+** Populate buffer zOut with the full canonical pathname corresponding
+** to the pathname in zPath. zOut is guaranteed to point to a buffer
+** of at least (INST_MAX_PATHNAME+1) bytes.
+*/
+static int kvvfsFullPathname(
+ sqlite3_vfs *pVfs,
+ const char *zPath,
+ int nOut,
+ char *zOut
+){
+ size_t nPath;
+#ifdef SQLITE_OS_KV_ALWAYS_LOCAL
+ zPath = "local";
+#endif
+ nPath = strlen(zPath);
+ SQLITE_KV_LOG(("xFullPathname(\"%s\")\n", zPath));
+  if( nOut<nPath+1 ) nPath = nOut - 1;
+  memcpy(zOut, zPath, nPath);
+  zOut[nPath] = 0;
+  return SQLITE_OK;
+}
+
+/*
+** Return the current time (in milliseconds since the Julian epoch)
+** in *pTimeOut.
+*/
+static int kvvfsCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *pTimeOut){
+ static const sqlite3_int64 unixEpoch = 24405875*(sqlite3_int64)8640000;
+ struct timeval sNow;
+ (void)gettimeofday(&sNow, 0); /* Cannot fail given valid arguments */
+ *pTimeOut = unixEpoch + 1000*(sqlite3_int64)sNow.tv_sec + sNow.tv_usec/1000;
+ return SQLITE_OK;
+}
+#endif /* SQLITE_OS_KV || SQLITE_OS_UNIX */
+
+#if SQLITE_OS_KV
+/*
+** This routine is called to initialize the KV-vfs as the default VFS.
+*/
+SQLITE_API int sqlite3_os_init(void){
+ return sqlite3_vfs_register(&sqlite3OsKvvfsObject, 1);
+}
+SQLITE_API int sqlite3_os_end(void){
+ return SQLITE_OK;
+}
+#endif /* SQLITE_OS_KV */
+
+#if SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL)
+SQLITE_PRIVATE int sqlite3KvvfsInit(void){
+ return sqlite3_vfs_register(&sqlite3OsKvvfsObject, 0);
+}
+#endif
+
+/************** End of os_kv.c ***********************************************/
/************** Begin file os_unix.c *****************************************/
/*
** 2004 May 22
@@ -34876,13 +36757,13 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
/*
** standard include files.
*/
-#include <sys/types.h>
-#include <sys/stat.h>
+#include <sys/types.h> /* amalgamator: keep */
+#include <sys/stat.h> /* amalgamator: keep */
#include <fcntl.h>
#include <sys/file.h>
-#include <sys/param.h>
+#include <sys/param.h> /* amalgamator: keep */
/* #include */
-#include <unistd.h>
+#include <unistd.h> /* amalgamator: keep */
#include <time.h>
#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
# include <sys/mman.h>
@@ -35475,6 +37356,9 @@ static int robust_open(const char *z, int f, mode_t m){
break;
}
if( fd>=SQLITE_MINIMUM_FILE_DESCRIPTOR ) break;
+ if( (f & (O_EXCL|O_CREAT))==(O_EXCL|O_CREAT) ){
+ (void)osUnlink(z);
+ }
osClose(fd);
sqlite3_log(SQLITE_WARNING,
"attempt to open \"%s\" as file descriptor %d", z, fd);
@@ -40644,6 +42528,7 @@ static const char *unixTempFileDir(void){
static int unixGetTempname(int nBuf, char *zBuf){
const char *zDir;
int iLimit = 0;
+ int rc = SQLITE_OK;
/* It's odd to simulate an io-error here, but really this is just
** using the io-error infrastructure to test that SQLite handles this
@@ -40652,18 +42537,26 @@ static int unixGetTempname(int nBuf, char *zBuf){
zBuf[0] = 0;
SimulateIOError( return SQLITE_IOERR );
+ sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
zDir = unixTempFileDir();
- if( zDir==0 ) return SQLITE_IOERR_GETTEMPPATH;
- do{
- u64 r;
- sqlite3_randomness(sizeof(r), &r);
- assert( nBuf>2 );
- zBuf[nBuf-2] = 0;
- sqlite3_snprintf(nBuf, zBuf, "%s/"SQLITE_TEMP_FILE_PREFIX"%llx%c",
- zDir, r, 0);
- if( zBuf[nBuf-2]!=0 || (iLimit++)>10 ) return SQLITE_ERROR;
- }while( osAccess(zBuf,0)==0 );
- return SQLITE_OK;
+ if( zDir==0 ){
+ rc = SQLITE_IOERR_GETTEMPPATH;
+ }else{
+ do{
+ u64 r;
+ sqlite3_randomness(sizeof(r), &r);
+ assert( nBuf>2 );
+ zBuf[nBuf-2] = 0;
+ sqlite3_snprintf(nBuf, zBuf, "%s/"SQLITE_TEMP_FILE_PREFIX"%llx%c",
+ zDir, r, 0);
+ if( zBuf[nBuf-2]!=0 || (iLimit++)>10 ){
+ rc = SQLITE_ERROR;
+ break;
+ }
+ }while( osAccess(zBuf,0)==0 );
+ }
+ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
+ return rc;
}
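The candidate names produced above are simply the chosen directory, SQLITE_TEMP_FILE_PREFIX, and a random 64-bit value in hexadecimal. A rough stand-alone sketch of that shape, assuming the prefix has its usual value "etilqs_" and substituting rand() for sqlite3_randomness():

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Build one candidate temp-file name of the same shape as unixGetTempname():
** "<dir>/etilqs_<16 hex digits>".  Purely illustrative. */
static void demoTempName(char *zBuf, size_t nBuf, const char *zDir){
  uint64_t r = ((uint64_t)rand()<<32) | (uint64_t)rand();
  snprintf(zBuf, nBuf, "%s/etilqs_%016llx", zDir, (unsigned long long)r);
}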
#if SQLITE_ENABLE_LOCKING_STYLE && defined(__APPLE__)
@@ -40806,20 +42699,23 @@ static int findCreateFileMode(
**
** where NN is a decimal number. The NN naming schemes are
** used by the test_multiplex.c module.
+ **
+ ** In normal operation, the journal file name will always contain
+ ** a '-' character. However in 8+3 filename mode, or if a corrupt
+ ** rollback journal specifies a super-journal with a goofy name, then
+ ** the '-' might be missing or the '-' might be the first character in
+ ** the filename. In that case, just return SQLITE_OK with *pMode==0.
*/
nDb = sqlite3Strlen30(zPath) - 1;
- while( zPath[nDb]!='-' ){
- /* In normal operation, the journal file name will always contain
- ** a '-' character. However in 8+3 filename mode, or if a corrupt
- ** rollback journal specifies a super-journal with a goofy name, then
- ** the '-' might be missing. */
- if( nDb==0 || zPath[nDb]=='.' ) return SQLITE_OK;
+ while( nDb>0 && zPath[nDb]!='.' ){
+ if( zPath[nDb]=='-' ){
+ memcpy(zDb, zPath, nDb);
+ zDb[nDb] = '\0';
+ rc = getFileMode(zDb, pMode, pUid, pGid);
+ break;
+ }
nDb--;
}
- memcpy(zDb, zPath, nDb);
- zDb[nDb] = '\0';
-
- rc = getFileMode(zDb, pMode, pUid, pGid);
}else if( flags & SQLITE_OPEN_DELETEONCLOSE ){
*pMode = 0600;
}else if( flags & SQLITE_OPEN_URI ){
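A small worked example of the name-trimming logic in the rewritten loop above (a sketch; getFileMode() itself is not reproduced): "test.db-journal" yields the database name "test.db", while a goofy name such as "-journal" finds no usable '-' and leaves the caller with *pMode==0.

#include <string.h>
#include <stddef.h>

/* Mirror the backwards scan above: stop at '.', split at the first '-'.
** Returns 1 and fills zDb[] when a database name can be recovered. */
static int demoJournalToDbName(const char *zPath, char *zDb, size_t nDb){
  int n = (int)strlen(zPath) - 1;
  while( n>0 && zPath[n]!='.' ){
    if( zPath[n]=='-' ){
      if( (size_t)n>=nDb ) return 0;
      memcpy(zDb, zPath, (size_t)n);
      zDb[n] = '\0';
      return 1;
    }
    n--;
  }
  return 0;
}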
@@ -41209,86 +43105,99 @@ static int unixAccess(
}
/*
-** If the last component of the pathname in z[0]..z[j-1] is something
-** other than ".." then back it out and return true. If the last
-** component is empty or if it is ".." then return false.
+** A pathname under construction
*/
-static int unixBackupDir(const char *z, int *pJ){
- int j = *pJ;
- int i;
- if( j<=0 ) return 0;
- for(i=j-1; i>0 && z[i-1]!='/'; i--){}
- if( i==0 ) return 0;
- if( z[i]=='.' && i==j-2 && z[i+1]=='.' ) return 0;
- *pJ = i-1;
- return 1;
-}
+typedef struct DbPath DbPath;
+struct DbPath {
+ int rc; /* Non-zero following any error */
+ int nSymlink; /* Number of symlinks resolved */
+ char *zOut; /* Write the pathname here */
+ int nOut; /* Bytes of space available to zOut[] */
+ int nUsed; /* Bytes of zOut[] currently being used */
+};
+
+/* Forward reference */
+static void appendAllPathElements(DbPath*,const char*);
/*
-** Convert a relative pathname into a full pathname. Also
-** simplify the pathname as follows:
-**
-** Remove all instances of /./
-** Remove all isntances of /X/../ for any X
+** Append a single path element to the DbPath under construction
*/
-static int mkFullPathname(
- const char *zPath, /* Input path */
- char *zOut, /* Output buffer */
- int nOut /* Allocated size of buffer zOut */
+static void appendOnePathElement(
+ DbPath *pPath, /* Path under construction, to which to append zName */
+ const char *zName, /* Name to append to pPath. Not zero-terminated */
+ int nName /* Number of significant bytes in zName */
){
- int nPath = sqlite3Strlen30(zPath);
- int iOff = 0;
- int i, j;
- if( zPath[0]!='/' ){
- if( osGetcwd(zOut, nOut-2)==0 ){
- return unixLogError(SQLITE_CANTOPEN_BKPT, "getcwd", zPath);
+ assert( nName>0 );
+ assert( zName!=0 );
+ if( zName[0]=='.' ){
+ if( nName==1 ) return;
+ if( zName[1]=='.' && nName==2 ){
+ if( pPath->nUsed<=1 ){
+ pPath->rc = SQLITE_ERROR;
+ return;
+ }
+ assert( pPath->zOut[0]=='/' );
+ while( pPath->zOut[--pPath->nUsed]!='/' ){}
+ return;
}
- iOff = sqlite3Strlen30(zOut);
- zOut[iOff++] = '/';
}
- if( (iOff+nPath+1)>nOut ){
- /* SQLite assumes that xFullPathname() nul-terminates the output buffer
- ** even if it returns an error. */
- zOut[iOff] = '\0';
- return SQLITE_CANTOPEN_BKPT;
- }
- sqlite3_snprintf(nOut-iOff, &zOut[iOff], "%s", zPath);
-
- /* Remove duplicate '/' characters. Except, two // at the beginning
- ** of a pathname is allowed since this is important on windows. */
- for(i=j=1; zOut[i]; i++){
- zOut[j++] = zOut[i];
- while( zOut[i]=='/' && zOut[i+1]=='/' ) i++;
+ if( pPath->nUsed + nName + 2 >= pPath->nOut ){
+ pPath->rc = SQLITE_ERROR;
+ return;
}
- zOut[j] = 0;
-
- assert( zOut[0]=='/' );
- for(i=j=0; zOut[i]; i++){
- if( zOut[i]=='/' ){
- /* Skip over internal "/." directory components */
- if( zOut[i+1]=='.' && zOut[i+2]=='/' ){
- i += 1;
- continue;
+ pPath->zOut[pPath->nUsed++] = '/';
+ memcpy(&pPath->zOut[pPath->nUsed], zName, nName);
+ pPath->nUsed += nName;
+#if defined(HAVE_READLINK) && defined(HAVE_LSTAT)
+ if( pPath->rc==SQLITE_OK ){
+ const char *zIn;
+ struct stat buf;
+ pPath->zOut[pPath->nUsed] = 0;
+ zIn = pPath->zOut;
+ if( osLstat(zIn, &buf)!=0 ){
+ if( errno!=ENOENT ){
+ pPath->rc = unixLogError(SQLITE_CANTOPEN_BKPT, "lstat", zIn);
}
-
- /* If this is a "/.." directory component then back out the
- ** previous term of the directory if it is something other than "..".
- */
- if( zOut[i+1]=='.'
- && zOut[i+2]=='.'
- && zOut[i+3]=='/'
- && unixBackupDir(zOut, &j)
- ){
- i += 2;
- continue;
+ }else if( S_ISLNK(buf.st_mode) ){
+ ssize_t got;
+ char zLnk[SQLITE_MAX_PATHLEN+2];
+ if( pPath->nSymlink++ > SQLITE_MAX_SYMLINK ){
+ pPath->rc = SQLITE_CANTOPEN_BKPT;
+ return;
+ }
+ got = osReadlink(zIn, zLnk, sizeof(zLnk)-2);
+ if( got<=0 || got>=(ssize_t)sizeof(zLnk)-2 ){
+ pPath->rc = unixLogError(SQLITE_CANTOPEN_BKPT, "readlink", zIn);
+ return;
}
+ zLnk[got] = 0;
+ if( zLnk[0]=='/' ){
+ pPath->nUsed = 0;
+ }else{
+ pPath->nUsed -= nName + 1;
+ }
+ appendAllPathElements(pPath, zLnk);
}
- if( ALWAYS(j>=0) ) zOut[j] = zOut[i];
- j++;
}
- if( NEVER(j==0) ) zOut[j++] = '/';
- zOut[j] = 0;
- return SQLITE_OK;
+#endif
+}
+
+/*
+** Append all path elements in zPath to the DbPath under construction.
+*/
+static void appendAllPathElements(
+ DbPath *pPath, /* Path under construction, to which to append zName */
+ const char *zPath /* Path to append to pPath. Is zero-terminated */
+){
+ int i = 0;
+ int j = 0;
+ do{
+ while( zPath[i] && zPath[i]!='/' ){ i++; }
+ if( i>j ){
+ appendOnePathElement(pPath, &zPath[j], i-j);
+ }
+ j = i+1;
+ }while( zPath[i++] );
}
/*
@@ -41306,86 +43215,27 @@ static int unixFullPathname(
int nOut, /* Size of output buffer in bytes */
char *zOut /* Output buffer */
){
-#if !defined(HAVE_READLINK) || !defined(HAVE_LSTAT)
- return mkFullPathname(zPath, zOut, nOut);
-#else
- int rc = SQLITE_OK;
- int nByte;
- int nLink = 0; /* Number of symbolic links followed so far */
- const char *zIn = zPath; /* Input path for each iteration of loop */
- char *zDel = 0;
-
- assert( pVfs->mxPathname==MAX_PATHNAME );
+ DbPath path;
UNUSED_PARAMETER(pVfs);
-
- /* It's odd to simulate an io-error here, but really this is just
- ** using the io-error infrastructure to test that SQLite handles this
- ** function failing. This function could fail if, for example, the
- ** current working directory has been unlinked.
- */
- SimulateIOError( return SQLITE_ERROR );
-
- do {
-
- /* Call stat() on path zIn. Set bLink to true if the path is a symbolic
- ** link, or false otherwise. */
- int bLink = 0;
- struct stat buf;
- if( osLstat(zIn, &buf)!=0 ){
- if( errno!=ENOENT ){
- rc = unixLogError(SQLITE_CANTOPEN_BKPT, "lstat", zIn);
- }
- }else{
- bLink = S_ISLNK(buf.st_mode);
- }
-
- if( bLink ){
- nLink++;
- if( zDel==0 ){
- zDel = sqlite3_malloc(nOut);
- if( zDel==0 ) rc = SQLITE_NOMEM_BKPT;
- }else if( nLink>=SQLITE_MAX_SYMLINKS ){
- rc = SQLITE_CANTOPEN_BKPT;
- }
-
- if( rc==SQLITE_OK ){
- nByte = osReadlink(zIn, zDel, nOut-1);
- if( nByte<0 ){
- rc = unixLogError(SQLITE_CANTOPEN_BKPT, "readlink", zIn);
- }else{
- if( zDel[0]!='/' ){
- int n;
- for(n = sqlite3Strlen30(zIn); n>0 && zIn[n-1]!='/'; n--);
- if( nByte+n+1>nOut ){
- rc = SQLITE_CANTOPEN_BKPT;
- }else{
- memmove(&zDel[n], zDel, nByte+1);
- memcpy(zDel, zIn, n);
- nByte += n;
- }
- }
- zDel[nByte] = '\0';
- }
- }
-
- zIn = zDel;
- }
-
- assert( rc!=SQLITE_OK || zIn!=zOut || zIn[0]=='/' );
- if( rc==SQLITE_OK && zIn!=zOut ){
- rc = mkFullPathname(zIn, zOut, nOut);
+ path.rc = 0;
+ path.nUsed = 0;
+ path.nSymlink = 0;
+ path.nOut = nOut;
+ path.zOut = zOut;
+ if( zPath[0]!='/' ){
+ char zPwd[SQLITE_MAX_PATHLEN+2];
+ if( osGetcwd(zPwd, sizeof(zPwd)-2)==0 ){
+ return unixLogError(SQLITE_CANTOPEN_BKPT, "getcwd", zPath);
}
- if( bLink==0 ) break;
- zIn = zOut;
- }while( rc==SQLITE_OK );
-
- sqlite3_free(zDel);
- if( rc==SQLITE_OK && nLink ) rc = SQLITE_OK_SYMLINK;
- return rc;
-#endif /* HAVE_READLINK && HAVE_LSTAT */
+ appendAllPathElements(&path, zPwd);
+ }
+ appendAllPathElements(&path, zPath);
+ zOut[path.nUsed] = 0;
+ if( path.rc || path.nUsed<2 ) return SQLITE_CANTOPEN_BKPT;
+ if( path.nSymlink ) return SQLITE_OK_SYMLINK;
+ return SQLITE_OK;
}
-
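A usage sketch for the rewritten xFullPathname, going through the public VFS object rather than calling the static function directly (the 512-byte buffer is an arbitrary choice here; a production caller would size it from pVfs->mxPathname+1):

#include <sqlite3.h>
#include <stdio.h>

/* Canonicalize a possibly-relative path through the default VFS.  On unix
** this exercises the DbPath-based code above; the result may be
** SQLITE_OK_SYMLINK when symbolic links were resolved along the way. */
static int demoFullPathname(const char *zRel){
  sqlite3_vfs *pVfs = sqlite3_vfs_find(0);   /* default registered VFS */
  char zFull[512];
  int rc = pVfs->xFullPathname(pVfs, zRel, (int)sizeof(zFull), zFull);
  printf("rc=%d  %s => %s\n", rc, zRel, zFull);
  return rc;
}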
#ifndef SQLITE_OMIT_LOAD_EXTENSION
/*
** Interfaces for opening a shared library, finding entry points
@@ -42881,8 +44731,16 @@ SQLITE_API int sqlite3_os_init(void){
/* Register all VFSes defined in the aVfs[] array */
for(i=0; i<(sizeof(aVfs)/sizeof(sqlite3_vfs)); i++){
+#ifdef SQLITE_DEFAULT_UNIX_VFS
+ sqlite3_vfs_register(&aVfs[i],
+ 0==strcmp(aVfs[i].zName,SQLITE_DEFAULT_UNIX_VFS));
+#else
sqlite3_vfs_register(&aVfs[i], i==0);
+#endif
}
+#ifdef SQLITE_OS_KV_OPTIONAL
+ sqlite3KvvfsInit();
+#endif
unixBigLock = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1);
#ifndef SQLITE_OMIT_WAL
@@ -44845,10 +46703,12 @@ SQLITE_API int sqlite3_win32_set_directory8(
const char *zValue /* New value for directory being set or reset */
){
char **ppDirectory = 0;
+ int rc;
#ifndef SQLITE_OMIT_AUTOINIT
- int rc = sqlite3_initialize();
+ rc = sqlite3_initialize();
if( rc ) return rc;
#endif
+ sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
if( type==SQLITE_WIN32_DATA_DIRECTORY_TYPE ){
ppDirectory = &sqlite3_data_directory;
}else if( type==SQLITE_WIN32_TEMP_DIRECTORY_TYPE ){
@@ -44863,14 +46723,19 @@ SQLITE_API int sqlite3_win32_set_directory8(
if( zValue && zValue[0] ){
zCopy = sqlite3_mprintf("%s", zValue);
if ( zCopy==0 ){
- return SQLITE_NOMEM_BKPT;
+ rc = SQLITE_NOMEM_BKPT;
+ goto set_directory8_done;
}
}
sqlite3_free(*ppDirectory);
*ppDirectory = zCopy;
- return SQLITE_OK;
+ rc = SQLITE_OK;
+ }else{
+ rc = SQLITE_ERROR;
}
- return SQLITE_ERROR;
+set_directory8_done:
+ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
+ return rc;
}
/*
@@ -47644,6 +49509,19 @@ static int winMakeEndInDirSep(int nBuf, char *zBuf){
return 0;
}
+/*
+** If sqlite3_temp_directory is defined, enter the STATIC_TEMPDIR mutex and
+** return true; the caller must release the mutex once it has finished
+** using sqlite3_temp_directory.
+**
+** If sqlite3_temp_directory is NULL (undefined), release the mutex and
+** return false.
+*/
+static int winTempDirDefined(void){
+ sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
+ if( sqlite3_temp_directory!=0 ) return 1;
+ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
+ return 0;
+}
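The helper above leaves the caller holding the STATIC_TEMPDIR mutex whenever it returns true, so every true branch must eventually release it. A sketch of that calling pattern, using the same internal helpers the surrounding code uses (this mirrors what winGetTempname() does just below):

/* Illustrative only: copy sqlite3_temp_directory while holding the mutex
** acquired by winTempDirDefined(), then release it. */
static int demoCopyTempDirectory(char *zBuf, int nBuf){
  int bDefined = 0;
  if( winTempDirDefined() ){
    sqlite3_snprintf(nBuf, zBuf, "%s", sqlite3_temp_directory);
    sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
    bDefined = 1;
  }else{
    zBuf[0] = 0;
  }
  return bDefined;
}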
+
/*
** Create a temporary file name and store the resulting pointer into pzBuf.
** The pointer returned in pzBuf must be freed via sqlite3_free().
@@ -47680,20 +49558,23 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){
*/
nDir = nMax - (nPre + 15);
assert( nDir>0 );
- if( sqlite3_temp_directory ){
+ if( winTempDirDefined() ){
int nDirLen = sqlite3Strlen30(sqlite3_temp_directory);
if( nDirLen>0 ){
if( !winIsDirSep(sqlite3_temp_directory[nDirLen-1]) ){
nDirLen++;
}
if( nDirLen>nDir ){
+ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
sqlite3_free(zBuf);
OSTRACE(("TEMP-FILENAME rc=SQLITE_ERROR\n"));
return winLogError(SQLITE_ERROR, 0, "winGetTempname1", 0);
}
sqlite3_snprintf(nMax, zBuf, "%s", sqlite3_temp_directory);
}
+ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
}
+
#if defined(__CYGWIN__)
else{
static const char *azDirs[] = {
@@ -48482,7 +50363,7 @@ static BOOL winIsVerbatimPathname(
** pathname into zOut[]. zOut[] will be at least pVfs->mxPathname
** bytes in size.
*/
-static int winFullPathname(
+static int winFullPathnameNoMutex(
sqlite3_vfs *pVfs, /* Pointer to vfs object */
const char *zRelative, /* Possibly relative input path */
int nFull, /* Size of output buffer in bytes */
@@ -48661,6 +50542,20 @@ static int winFullPathname(
}
#endif
}
+static int winFullPathname(
+ sqlite3_vfs *pVfs, /* Pointer to vfs object */
+ const char *zRelative, /* Possibly relative input path */
+ int nFull, /* Size of output buffer in bytes */
+ char *zFull /* Output buffer */
+){
+ int rc;
+ MUTEX_LOGIC( sqlite3_mutex *pMutex; )
+ MUTEX_LOGIC( pMutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR); )
+ sqlite3_mutex_enter(pMutex);
+ rc = winFullPathnameNoMutex(pVfs, zRelative, nFull, zFull);
+ sqlite3_mutex_leave(pMutex);
+ return rc;
+}
#ifndef SQLITE_OMIT_LOAD_EXTENSION
/*
@@ -49197,6 +51092,7 @@ static int memdbTruncate(sqlite3_file*, sqlite3_int64 size);
static int memdbSync(sqlite3_file*, int flags);
static int memdbFileSize(sqlite3_file*, sqlite3_int64 *pSize);
static int memdbLock(sqlite3_file*, int);
+static int memdbUnlock(sqlite3_file*, int);
/* static int memdbCheckReservedLock(sqlite3_file*, int *pResOut);// not used */
static int memdbFileControl(sqlite3_file*, int op, void *pArg);
/* static int memdbSectorSize(sqlite3_file*); // not used */
@@ -49255,7 +51151,7 @@ static const sqlite3_io_methods memdb_io_methods = {
memdbSync, /* xSync */
memdbFileSize, /* xFileSize */
memdbLock, /* xLock */
- memdbLock, /* xUnlock - same as xLock in this case */
+ memdbUnlock, /* xUnlock */
0, /* memdbCheckReservedLock, */ /* xCheckReservedLock */
memdbFileControl, /* xFileControl */
0, /* memdbSectorSize,*/ /* xSectorSize */
@@ -49456,39 +51352,81 @@ static int memdbLock(sqlite3_file *pFile, int eLock){
MemFile *pThis = (MemFile*)pFile;
MemStore *p = pThis->pStore;
int rc = SQLITE_OK;
- if( eLock==pThis->eLock ) return SQLITE_OK;
+ if( eLock<=pThis->eLock ) return SQLITE_OK;
memdbEnter(p);
- if( eLock>SQLITE_LOCK_SHARED ){
- if( p->mFlags & SQLITE_DESERIALIZE_READONLY ){
- rc = SQLITE_READONLY;
- }else if( pThis->eLock<=SQLITE_LOCK_SHARED ){
- if( p->nWrLock ){
- rc = SQLITE_BUSY;
- }else{
- p->nWrLock = 1;
+
+ assert( p->nWrLock==0 || p->nWrLock==1 );
+ assert( pThis->eLock<=SQLITE_LOCK_SHARED || p->nWrLock==1 );
+ assert( pThis->eLock==SQLITE_LOCK_NONE || p->nRdLock>=1 );
+
+ if( eLock>SQLITE_LOCK_SHARED && (p->mFlags & SQLITE_DESERIALIZE_READONLY) ){
+ rc = SQLITE_READONLY;
+ }else{
+ switch( eLock ){
+ case SQLITE_LOCK_SHARED: {
+ assert( pThis->eLock==SQLITE_LOCK_NONE );
+ if( p->nWrLock>0 ){
+ rc = SQLITE_BUSY;
+ }else{
+ p->nRdLock++;
+ }
+ break;
+ };
+
+ case SQLITE_LOCK_RESERVED:
+ case SQLITE_LOCK_PENDING: {
+ assert( pThis->eLock>=SQLITE_LOCK_SHARED );
+ if( ALWAYS(pThis->eLock==SQLITE_LOCK_SHARED) ){
+ if( p->nWrLock>0 ){
+ rc = SQLITE_BUSY;
+ }else{
+ p->nWrLock = 1;
+ }
+ }
+ break;
+ }
+
+ default: {
+ assert( eLock==SQLITE_LOCK_EXCLUSIVE );
+ assert( pThis->eLock>=SQLITE_LOCK_SHARED );
+ if( p->nRdLock>1 ){
+ rc = SQLITE_BUSY;
+ }else if( pThis->eLock==SQLITE_LOCK_SHARED ){
+ p->nWrLock = 1;
+ }
+ break;
}
}
- }else if( eLock==SQLITE_LOCK_SHARED ){
- if( pThis->eLock > SQLITE_LOCK_SHARED ){
- assert( p->nWrLock==1 );
- p->nWrLock = 0;
- }else if( p->nWrLock ){
- rc = SQLITE_BUSY;
- }else{
- p->nRdLock++;
+ }
+ if( rc==SQLITE_OK ) pThis->eLock = eLock;
+ memdbLeave(p);
+ return rc;
+}
+
+/*
+** Unlock a memdb-file.
+*/
+static int memdbUnlock(sqlite3_file *pFile, int eLock){
+ MemFile *pThis = (MemFile*)pFile;
+ MemStore *p = pThis->pStore;
+ if( eLock>=pThis->eLock ) return SQLITE_OK;
+ memdbEnter(p);
+
+ assert( eLock==SQLITE_LOCK_SHARED || eLock==SQLITE_LOCK_NONE );
+ if( eLock==SQLITE_LOCK_SHARED ){
+ if( ALWAYS(pThis->eLock>SQLITE_LOCK_SHARED) ){
+ p->nWrLock--;
}
}else{
- assert( eLock==SQLITE_LOCK_NONE );
if( pThis->eLock>SQLITE_LOCK_SHARED ){
- assert( p->nWrLock==1 );
- p->nWrLock = 0;
+ p->nWrLock--;
}
- assert( p->nRdLock>0 );
p->nRdLock--;
}
- if( rc==SQLITE_OK ) pThis->eLock = eLock;
+
+ pThis->eLock = eLock;
memdbLeave(p);
- return rc;
+ return SQLITE_OK;
}
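These lock and unlock methods come into play when more than one connection attaches to the same named in-memory database through the "memdb" VFS. A sketch of setting that up (the URI form with vfs=memdb and a name beginning with '/' is the documented way to share such a database; "/shared1" is an arbitrary name):

#include <sqlite3.h>

/* Open two handles on the same shared in-memory database; both then pass
** through memdbLock()/memdbUnlock() above for read and write locks. */
static int demoSharedMemdb(sqlite3 **ppDb1, sqlite3 **ppDb2){
  const int f = SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_URI;
  int rc = sqlite3_open_v2("file:/shared1?vfs=memdb", ppDb1, f, 0);
  if( rc==SQLITE_OK ){
    rc = sqlite3_open_v2("file:/shared1?vfs=memdb", ppDb2, f, 0);
  }
  return rc;
}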
#if 0
@@ -49598,7 +51536,7 @@ static int memdbOpen(
memset(pFile, 0, sizeof(*pFile));
szName = sqlite3Strlen30(zName);
- if( szName>1 && zName[0]=='/' ){
+ if( szName>1 && (zName[0]=='/' || zName[0]=='\\') ){
int i;
#ifndef SQLITE_MUTEX_OMIT
sqlite3_mutex *pVfsMutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1);
@@ -49945,6 +51883,13 @@ end_deserialize:
return rc;
}
+/*
+** Return true if the VFS is the memvfs.
+*/
+//SQLITE_PRIVATE int sqlite3IsMemdb(const sqlite3_vfs *pVfs){
+// return pVfs==&memdb_vfs;
+//}
+
/*
** This routine is called when the extension is loaded.
** Register the new VFS.
@@ -50449,12 +52394,20 @@ struct PCache {
int sqlite3PcacheTrace = 2; /* 0: off 1: simple 2: cache dumps */
int sqlite3PcacheMxDump = 9999; /* Max cache entries for pcacheDump() */
# define pcacheTrace(X) if(sqlite3PcacheTrace){sqlite3DebugPrintf X;}
- void pcacheDump(PCache *pCache){
- int N;
- int i, j;
- sqlite3_pcache_page *pLower;
+ static void pcachePageTrace(int i, sqlite3_pcache_page *pLower){
PgHdr *pPg;
unsigned char *a;
+ int j;
+ pPg = (PgHdr*)pLower->pExtra;
+ printf("%3d: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags);
+ a = (unsigned char *)pLower->pBuf;
+ for(j=0; j<12; j++) printf("%02x", a[j]);
+ printf(" ptr %p\n", pPg);
+ }
+ static void pcacheDump(PCache *pCache){
+ int N;
+ int i;
+ sqlite3_pcache_page *pLower;
if( sqlite3PcacheTrace<2 ) return;
if( pCache->pCache==0 ) return;
@@ -50463,21 +52416,32 @@ struct PCache {
for(i=1; i<=N; i++){
pLower = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, i, 0);
if( pLower==0 ) continue;
- pPg = (PgHdr*)pLower->pExtra;
- printf("%3d: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags);
- a = (unsigned char *)pLower->pBuf;
- for(j=0; j<12; j++) printf("%02x", a[j]);
- printf("\n");
- if( pPg->pPage==0 ){
+ pcachePageTrace(i, pLower);
+ if( ((PgHdr*)pLower)->pPage==0 ){
sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, pLower, 0);
}
}
}
- #else
+#else
# define pcacheTrace(X)
+# define pcachePageTrace(PGNO, X)
# define pcacheDump(X)
#endif
+/*
+** Return 1 if pPg is on the dirty list for pCache. Return 0 if not.
+** This routine runs inside of assert() statements only.
+*/
+#ifdef SQLITE_DEBUG
+static int pageOnDirtyList(PCache *pCache, PgHdr *pPg){
+ PgHdr *p;
+ for(p=pCache->pDirty; p; p=p->pDirtyNext){
+ if( p==pPg ) return 1;
+ }
+ return 0;
+}
+#endif
+
/*
** Check invariants on a PgHdr entry. Return true if everything is OK.
** Return false if any invariant is violated.
@@ -50496,8 +52460,13 @@ SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr *pPg){
assert( pCache!=0 ); /* Every page has an associated PCache */
if( pPg->flags & PGHDR_CLEAN ){
assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */
- assert( pCache->pDirty!=pPg ); /* CLEAN pages not on dirty list */
- assert( pCache->pDirtyTail!=pPg );
+ assert( !pageOnDirtyList(pCache, pPg) );/* CLEAN pages not on dirty list */
+ }else{
+ assert( (pPg->flags & PGHDR_DIRTY)!=0 );/* If not CLEAN must be DIRTY */
+ assert( pPg->pDirtyNext==0 || pPg->pDirtyNext->pDirtyPrev==pPg );
+ assert( pPg->pDirtyPrev==0 || pPg->pDirtyPrev->pDirtyNext==pPg );
+ assert( pPg->pDirtyPrev!=0 || pCache->pDirty==pPg );
+ assert( pageOnDirtyList(pCache, pPg) );
}
/* WRITEABLE pages must also be DIRTY */
if( pPg->flags & PGHDR_WRITEABLE ){
@@ -50771,8 +52740,9 @@ SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch(
assert( createFlag==0 || pCache->eCreate==eCreate );
assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) );
pRes = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate);
- pcacheTrace(("%p.FETCH %d%s (result: %p)\n",pCache,pgno,
+ pcacheTrace(("%p.FETCH %d%s (result: %p) ",pCache,pgno,
createFlag?" create":"",pRes));
+ pcachePageTrace(pgno, pRes);
return pRes;
}
@@ -50900,6 +52870,7 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){
pcacheUnpin(p);
}else{
pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
+ assert( sqlite3PcachePageSanity(p) );
}
}
}
@@ -50934,8 +52905,7 @@ SQLITE_PRIVATE void sqlite3PcacheDrop(PgHdr *p){
** make it so.
*/
SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){
- assert( p->nRef>0 || p->pCache->bPurgeable==0 );
- testcase( p->nRef==0 );
+ assert( p->nRef>0 );
assert( sqlite3PcachePageSanity(p) );
if( p->flags & (PGHDR_CLEAN|PGHDR_DONT_WRITE) ){ /*OPTIMIZATION-IF-FALSE*/
p->flags &= ~PGHDR_DONT_WRITE;
@@ -50944,6 +52914,7 @@ SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){
pcacheTrace(("%p.DIRTY %d\n",p->pCache,p->pgno));
assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY );
pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD);
+ assert( sqlite3PcachePageSanity(p) );
}
assert( sqlite3PcachePageSanity(p) );
}
@@ -51006,14 +52977,24 @@ SQLITE_PRIVATE void sqlite3PcacheClearSyncFlags(PCache *pCache){
*/
SQLITE_PRIVATE void sqlite3PcacheMove(PgHdr *p, Pgno newPgno){
PCache *pCache = p->pCache;
+ sqlite3_pcache_page *pOther;
assert( p->nRef>0 );
assert( newPgno>0 );
assert( sqlite3PcachePageSanity(p) );
pcacheTrace(("%p.MOVE %d -> %d\n",pCache,p->pgno,newPgno));
+ pOther = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, newPgno, 0);
+ if( pOther ){
+ PgHdr *pXPage = (PgHdr*)pOther->pExtra;
+ assert( pXPage->nRef==0 );
+ pXPage->nRef++;
+ pCache->nRefSum++;
+ sqlite3PcacheDrop(pXPage);
+ }
sqlite3GlobalConfig.pcache2.xRekey(pCache->pCache, p->pPage, p->pgno,newPgno);
p->pgno = newPgno;
if( (p->flags&PGHDR_DIRTY) && (p->flags&PGHDR_NEED_SYNC) ){
pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT);
+ assert( sqlite3PcachePageSanity(p) );
}
}
@@ -51311,12 +53292,13 @@ SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHd
** size can vary according to architecture, compile-time options, and
** SQLite library version number.
**
-** If SQLITE_PCACHE_SEPARATE_HEADER is defined, then the extension is obtained
-** using a separate memory allocation from the database page content. This
-** seeks to overcome the "clownshoe" problem (also called "internal
-** fragmentation" in academic literature) of allocating a few bytes more
-** than a power of two with the memory allocator rounding up to the next
-** power of two, and leaving the rounded-up space unused.
+** Historical note: It used to be that if the SQLITE_PCACHE_SEPARATE_HEADER
+** was defined, then the page content would be held in a separate memory
+** allocation from the PgHdr1. This was intended to avoid clownshoe memory
+** allocations. However, the btree layer needs a small (16-byte) overrun
+** area after the page content buffer. The header serves as that overrun
+** area. Therefore SQLITE_PCACHE_SEPARATE_HEADER was discontinued to avoid
+** any possibility of a memory error.
**
** This module tracks pointers to PgHdr1 objects. Only pcache.c communicates
** with this module. Information is passed back and forth as PgHdr1 pointers.
@@ -51361,30 +53343,40 @@ typedef struct PGroup PGroup;
/*
** Each cache entry is represented by an instance of the following
-** structure. Unless SQLITE_PCACHE_SEPARATE_HEADER is defined, a buffer of
-** PgHdr1.pCache->szPage bytes is allocated directly before this structure
-** in memory.
+** structure. A buffer of PgHdr1.pCache->szPage bytes is allocated
+** directly before this structure and is used to cache the page content.
**
-** Note: Variables isBulkLocal and isAnchor were once type "u8". That works,
+** When reading a corrupt database file, it is possible that SQLite might
+** read a few bytes (no more than 16 bytes) past the end of the page buffer.
+** It will only read past the end of the page buffer, never write. This
+** object is positioned immediately after the page buffer to serve as an
+** overrun area, so that overreads are harmless.
+**
+** Variables isBulkLocal and isAnchor were once type "u8". That works,
** but causes a 2-byte gap in the structure for most architectures (since
** pointers must be either 4 or 8-byte aligned). As this structure is located
** in memory directly after the associated page data, if the database is
** corrupt, code at the b-tree layer may overread the page buffer and
** read part of this structure before the corruption is detected. This
** can cause a valgrind error if the uninitialized gap is accessed. Using u16
-** ensures there is no such gap, and therefore no bytes of unitialized memory
-** in the structure.
+** ensures there is no such gap, and therefore no bytes of uninitialized
+** memory in the structure.
+**
+** The pLruNext and pLruPrev pointers form a double-linked circular list
+** of all pages that are unpinned. The PGroup.lru element (which should be
+** the only element on the list with PgHdr1.isAnchor set to 1) forms the
+** beginning and the end of the list.
*/
struct PgHdr1 {
- sqlite3_pcache_page page; /* Base class. Must be first. pBuf & pExtra */
- unsigned int iKey; /* Key value (page number) */
- u16 isBulkLocal; /* This page from bulk local storage */
- u16 isAnchor; /* This is the PGroup.lru element */
- PgHdr1 *pNext; /* Next in hash table chain */
- PCache1 *pCache; /* Cache that currently owns this page */
- PgHdr1 *pLruNext; /* Next in LRU list of unpinned pages */
- PgHdr1 *pLruPrev; /* Previous in LRU list of unpinned pages */
- /* NB: pLruPrev is only valid if pLruNext!=0 */
+ sqlite3_pcache_page page; /* Base class. Must be first. pBuf & pExtra */
+ unsigned int iKey; /* Key value (page number) */
+ u16 isBulkLocal; /* This page from bulk local storage */
+ u16 isAnchor; /* This is the PGroup.lru element */
+ PgHdr1 *pNext; /* Next in hash table chain */
+ PCache1 *pCache; /* Cache that currently owns this page */
+ PgHdr1 *pLruNext; /* Next in circular LRU list of unpinned pages */
+ PgHdr1 *pLruPrev; /* Previous in LRU list of unpinned pages */
+ /* NB: pLruPrev is only valid if pLruNext!=0 */
};
/*
@@ -51710,25 +53702,13 @@ static PgHdr1 *pcache1AllocPage(PCache1 *pCache, int benignMalloc){
pcache1LeaveMutex(pCache->pGroup);
#endif
if( benignMalloc ){ sqlite3BeginBenignMalloc(); }
-#ifdef SQLITE_PCACHE_SEPARATE_HEADER
- pPg = pcache1Alloc(pCache->szPage);
- p = sqlite3Malloc(sizeof(PgHdr1) + pCache->szExtra);
- if( !pPg || !p ){
- pcache1Free(pPg);
- sqlite3_free(p);
- pPg = 0;
- }
-#else
pPg = pcache1Alloc(pCache->szAlloc);
-#endif
if( benignMalloc ){ sqlite3EndBenignMalloc(); }
#ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT
pcache1EnterMutex(pCache->pGroup);
#endif
if( pPg==0 ) return 0;
-#ifndef SQLITE_PCACHE_SEPARATE_HEADER
p = (PgHdr1 *)&((u8 *)pPg)[pCache->szPage];
-#endif
p->page.pBuf = pPg;
p->page.pExtra = &p[1];
p->isBulkLocal = 0;
@@ -51752,9 +53732,6 @@ static void pcache1FreePage(PgHdr1 *p){
pCache->pFree = p;
}else{
pcache1Free(p->page.pBuf);
-#ifdef SQLITE_PCACHE_SEPARATE_HEADER
- sqlite3_free(p);
-#endif
}
(*pCache->pnPurgeable)--;
}
@@ -52395,23 +54372,26 @@ static void pcache1Rekey(
PCache1 *pCache = (PCache1 *)p;
PgHdr1 *pPage = (PgHdr1 *)pPg;
PgHdr1 **pp;
- unsigned int h;
+ unsigned int hOld, hNew;
assert( pPage->iKey==iOld );
assert( pPage->pCache==pCache );
+ assert( iOld!=iNew ); /* The page number really is changing */
pcache1EnterMutex(pCache->pGroup);
- h = iOld%pCache->nHash;
- pp = &pCache->apHash[h];
+ assert( pcache1FetchNoMutex(p, iOld, 0)==pPage ); /* pPg really is iOld */
+ hOld = iOld%pCache->nHash;
+ pp = &pCache->apHash[hOld];
while( (*pp)!=pPage ){
pp = &(*pp)->pNext;
}
*pp = pPage->pNext;
- h = iNew%pCache->nHash;
+ assert( pcache1FetchNoMutex(p, iNew, 0)==0 ); /* iNew not in cache */
+ hNew = iNew%pCache->nHash;
pPage->iKey = iNew;
- pPage->pNext = pCache->apHash[h];
- pCache->apHash[h] = pPage;
+ pPage->pNext = pCache->apHash[hNew];
+ pCache->apHash[hNew] = pPage;
if( iNew>pCache->iMaxKey ){
pCache->iMaxKey = iNew;
}
@@ -52518,9 +54498,6 @@ SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int nReq){
&& p->isAnchor==0
){
nFree += pcache1MemSize(p->page.pBuf);
-#ifdef SQLITE_PCACHE_SEPARATE_HEADER
- nFree += sqlite3MemSize(p);
-#endif
assert( PAGE_IS_UNPINNED(p) );
pcache1PinPage(p);
pcache1RemoveFromHash(p, 1);
@@ -53904,6 +55881,7 @@ struct Pager {
u32 vfsFlags; /* Flags for sqlite3_vfs.xOpen() */
u32 sectorSize; /* Assumed sector size during rollback */
Pgno mxPgno; /* Maximum allowed size of the database */
+ Pgno lckPgno; /* Page number for the locking page */
i64 pageSize; /* Number of bytes in a page */
i64 journalSizeLimit; /* Size limit for persistent journal files */
char *zFilename; /* Name of the database file */
@@ -54890,7 +56868,7 @@ static int readJournalHdr(
** journal file descriptor is advanced to the next sector boundary before
** anything is written. The format is:
**
-** + 4 bytes: PAGER_MJ_PGNO.
+** + 4 bytes: PAGER_SJ_PGNO.
** + N bytes: super-journal filename in utf-8.
** + 4 bytes: N (length of super-journal name in bytes, no nul-terminator).
** + 4 bytes: super-journal name checksum.
@@ -54938,7 +56916,7 @@ static int writeSuperJournal(Pager *pPager, const char *zSuper){
/* Write the super-journal data to the end of the journal file. If
** an error occurs, return the error code to the caller.
*/
- if( (0 != (rc = write32bits(pPager->jfd, iHdrOff, PAGER_MJ_PGNO(pPager))))
+ if( (0 != (rc = write32bits(pPager->jfd, iHdrOff, PAGER_SJ_PGNO(pPager))))
|| (0 != (rc = sqlite3OsWrite(pPager->jfd, zSuper, nSuper, iHdrOff+4)))
|| (0 != (rc = write32bits(pPager->jfd, iHdrOff+4+nSuper, nSuper)))
|| (0 != (rc = write32bits(pPager->jfd, iHdrOff+4+nSuper+4, cksum)))
@@ -55448,7 +57426,7 @@ static u32 pager_cksum(Pager *pPager, const u8 *aData){
** corrupted, SQLITE_DONE is returned. Data is considered corrupted in
** two circumstances:
**
-** * If the record page-number is illegal (0 or PAGER_MJ_PGNO), or
+** * If the record page-number is illegal (0 or PAGER_SJ_PGNO), or
** * If the record is being rolled back from the main journal file
** and the checksum field does not match the record content.
**
@@ -55508,7 +57486,7 @@ static int pager_playback_one_page(
** it could cause invalid data to be written into the journal. We need to
** detect this invalid data (with high probability) and ignore it.
*/
- if( pgno==0 || pgno==PAGER_MJ_PGNO(pPager) ){
+ if( pgno==0 || pgno==PAGER_SJ_PGNO(pPager) ){
assert( !isSavepnt );
return SQLITE_DONE;
}
@@ -55845,6 +57823,7 @@ static int pager_truncate(Pager *pPager, Pgno nPage){
memset(pTmp, 0, szPage);
testcase( (newSize-szPage) == currentSize );
testcase( (newSize-szPage) > currentSize );
+ sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_SIZE_HINT, &newSize);
rc = sqlite3OsWrite(pPager->fd, pTmp, szPage, newSize-szPage);
}
if( rc==SQLITE_OK ){
@@ -56966,6 +58945,7 @@ SQLITE_PRIVATE int sqlite3PagerSetPagesize(Pager *pPager, u32 *pPageSize, int nR
pPager->pTmpSpace = pNew;
pPager->dbSize = (Pgno)((nByte+pageSize-1)/pageSize);
pPager->pageSize = pageSize;
+ pPager->lckPgno = (Pgno)(PENDING_BYTE/pageSize) + 1;
}else{
sqlite3PageFree(pNew);
}
@@ -58735,7 +60715,7 @@ static int getPageNormal(
if( pPg->pPager && !noContent ){
/* In this case the pcache already contains an initialized copy of
** the page. Return without further ado. */
- assert( pgno!=PAGER_MJ_PGNO(pPager) );
+ assert( pgno!=PAGER_SJ_PGNO(pPager) );
pPager->aStat[PAGER_STAT_HIT]++;
return SQLITE_OK;
@@ -58746,7 +60726,7 @@ static int getPageNormal(
** (*) obsolete. Was: maximum page number is 2^31
** (2) Never try to fetch the locking page
*/
- if( pgno==PAGER_MJ_PGNO(pPager) ){
+ if( pgno==PAGER_SJ_PGNO(pPager) ){
rc = SQLITE_CORRUPT_BKPT;
goto pager_acquire_err;
}
@@ -59006,6 +60986,7 @@ static int pager_open_journal(Pager *pPager){
if( pPager->tempFile ){
flags |= (SQLITE_OPEN_DELETEONCLOSE|SQLITE_OPEN_TEMP_JOURNAL);
+ flags |= SQLITE_OPEN_EXCLUSIVE;
nSpill = sqlite3Config.nStmtSpill;
}else{
flags |= SQLITE_OPEN_MAIN_JOURNAL;
@@ -59041,6 +61022,7 @@ static int pager_open_journal(Pager *pPager){
if( rc!=SQLITE_OK ){
sqlite3BitvecDestroy(pPager->pInJournal);
pPager->pInJournal = 0;
+ pPager->journalOff = 0;
}else{
assert( pPager->eState==PAGER_WRITER_LOCKED );
pPager->eState = PAGER_WRITER_CACHEMOD;
@@ -59145,7 +61127,7 @@ static SQLITE_NOINLINE int pagerAddPageToRollbackJournal(PgHdr *pPg){
/* We should never write to the journal file the page that
** contains the database locks. The following assert verifies
** that we do not. */
- assert( pPg->pgno!=PAGER_MJ_PGNO(pPager) );
+ assert( pPg->pgno!=PAGER_SJ_PGNO(pPager) );
assert( pPager->journalHdr<=pPager->journalOff );
pData2 = pPg->pData;
@@ -59324,7 +61306,7 @@ static SQLITE_NOINLINE int pagerWriteLargeSector(PgHdr *pPg){
Pgno pg = pg1+ii;
PgHdr *pPage;
if( pg==pPg->pgno || !sqlite3BitvecTest(pPager->pInJournal, pg) ){
- if( pg!=PAGER_MJ_PGNO(pPager) ){
+ if( pg!=PAGER_SJ_PGNO(pPager) ){
rc = sqlite3PagerGet(pPager, pg, &pPage, 0);
if( rc==SQLITE_OK ){
rc = pager_write(pPage);
@@ -59802,7 +61784,7 @@ SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne(
** last page is never written out to disk, leaving the database file
** undersized. Fix this now if it is the case. */
if( pPager->dbSize>pPager->dbFileSize ){
- Pgno nNew = pPager->dbSize - (pPager->dbSize==PAGER_MJ_PGNO(pPager));
+ Pgno nNew = pPager->dbSize - (pPager->dbSize==PAGER_SJ_PGNO(pPager));
assert( pPager->eState==PAGER_WRITER_DBMOD );
rc = pager_truncate(pPager, nNew);
if( rc!=SQLITE_OK ) goto commit_phase_one_exit;
@@ -65431,7 +67413,7 @@ struct MemPage {
u8 *aData; /* Pointer to disk image of the page data */
u8 *aDataEnd; /* One byte past the end of the entire page - not just
** the usable space, the entire page. Used to prevent
- ** corruption-induced of buffer overflow. */
+ ** corruption-induced buffer overflow. */
u8 *aCellIdx; /* The cell index area */
u8 *aDataOfst; /* Same as aData for leaves. aData+4 for interior */
DbPage *pDbPage; /* Pager page handle */
@@ -65736,7 +67718,7 @@ struct BtCursor {
/*
** The database page the PENDING_BYTE occupies. This page is never used.
*/
-# define PENDING_BYTE_PAGE(pBt) PAGER_MJ_PGNO(pBt)
+#define PENDING_BYTE_PAGE(pBt) ((Pgno)((PENDING_BYTE/((pBt)->pageSize))+1))
/*
** These macros define the location of the pointer-map entry for a
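As a worked example of the PENDING_BYTE_PAGE() macro above, assuming the default PENDING_BYTE offset of 0x40000000: with 4096-byte pages the locking page is page 262145, the page that carries the byte-range locks and is never used for data.

#include <assert.h>

/* Sketch: compute the locking-page number the same way the macro does,
** for the default 1 GiB PENDING_BYTE and a 4 KiB page size. */
static void demoLockingPage(void){
  const long long pendingByte = 0x40000000;   /* default PENDING_BYTE */
  const long long pageSize = 4096;
  assert( pendingByte/pageSize + 1 == 262145 );
}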
@@ -66104,6 +68086,7 @@ SQLITE_PRIVATE int sqlite3BtreeHoldsAllMutexes(sqlite3 *db){
SQLITE_PRIVATE int sqlite3SchemaMutexHeld(sqlite3 *db, int iDb, Schema *pSchema){
Btree *p;
assert( db!=0 );
+ if( db->pVfs==0 && db->nDb==0 ) return 1;
if( pSchema ) iDb = sqlite3SchemaToIndex(db, pSchema);
assert( iDb>=0 && iDb<db->nDb );
if( !sqlite3_mutex_held(db->mutex) ) return 0;
@@ -66377,7 +68360,7 @@ static int hasSharedCacheTableLock(
int bSeen = 0;
for(p=sqliteHashFirst(&pSchema->idxHash); p; p=sqliteHashNext(p)){
Index *pIdx = (Index *)sqliteHashData(p);
- if( pIdx->tnum==(int)iRoot ){
+ if( pIdx->tnum==iRoot ){
if( bSeen ){
/* Two or more indexes share the same root page. There must
** be imposter tables. So just return true. The assert is not
@@ -66970,7 +68953,7 @@ SQLITE_PRIVATE void sqlite3BtreeClearCursor(BtCursor *pCur){
/*
** In this version of BtreeMoveto, pKey is a packed index record
** such as is generated by the OP_MakeRecord opcode. Unpack the
-** record and then call BtreeMovetoUnpacked() to do the work.
+** record and then call sqlite3BtreeIndexMoveto() to do the work.
*/
static int btreeMoveto(
BtCursor *pCur, /* Cursor open on the btree to be searched */
@@ -67490,6 +69473,7 @@ static void btreeParseCell(
** the space used by the cell pointer.
**
** cellSizePtrNoPayload() => table internal nodes
+** cellSizePtrTableLeaf() => table leaf nodes
** cellSizePtr() => all index nodes & table leaf nodes
*/
static u16 cellSizePtr(MemPage *pPage, u8 *pCell){
@@ -67515,13 +69499,6 @@ static u16 cellSizePtr(MemPage *pPage, u8 *pCell){
}while( *(pIter)>=0x80 && pIter<pEnd );
}
pIter++;
if( pPage->intKey ){
- /* pIter now points at the 64-bit integer key value, a variable length
- ** integer. The following block moves pIter to point at the first byte
- ** past the end of the key value. */
- pEnd = &pIter[9];
- while( (*pIter++)&0x80 && pIter<pEnd );
- }
testcase( nSize==pPage->maxLocal );
testcase( nSize==(u32)pPage->maxLocal+1 );
if( nSize<=pPage->maxLocal ){
@@ -67561,6 +69538,58 @@ static u16 cellSizePtrNoPayload(MemPage *pPage, u8 *pCell){
assert( debuginfo.nSize==(u16)(pIter - pCell) || CORRUPT_DB );
return (u16)(pIter - pCell);
}
+static u16 cellSizePtrTableLeaf(MemPage *pPage, u8 *pCell){
+ u8 *pIter = pCell; /* For looping over bytes of pCell */
+ u8 *pEnd; /* End mark for a varint */
+ u32 nSize; /* Size value to return */
+
+#ifdef SQLITE_DEBUG
+ /* The value returned by this function should always be the same as
+ ** the (CellInfo.nSize) value found by doing a full parse of the
+ ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of
+ ** this function verifies that this invariant is not violated. */
+ CellInfo debuginfo;
+ pPage->xParseCell(pPage, pCell, &debuginfo);
+#endif
+
+ nSize = *pIter;
+ if( nSize>=0x80 ){
+ pEnd = &pIter[8];
+ nSize &= 0x7f;
+ do{
+ nSize = (nSize<<7) | (*++pIter & 0x7f);
+  }while( *(pIter)>=0x80 && pIter<pEnd );
+  }
+  pIter++;
+  /* pIter now points at the 64-bit integer key value, a variable length
+  ** integer. The following block moves pIter to point at the first byte
+  ** past the end of the key value. */
+  pEnd = &pIter[9];
+  while( (*pIter++)&0x80 && pIter<pEnd );
+  testcase( nSize==pPage->maxLocal );
+ testcase( nSize==(u32)pPage->maxLocal+1 );
+ if( nSize<=pPage->maxLocal ){
+ nSize += (u32)(pIter - pCell);
+ if( nSize<4 ) nSize = 4;
+ }else{
+ int minLocal = pPage->minLocal;
+ nSize = minLocal + (nSize - minLocal) % (pPage->pBt->usableSize - 4);
+ testcase( nSize==pPage->maxLocal );
+ testcase( nSize==(u32)pPage->maxLocal+1 );
+ if( nSize>pPage->maxLocal ){
+ nSize = minLocal;
+ }
+ nSize += 4 + (u16)(pIter - pCell);
+ }
+ assert( nSize==debuginfo.nSize || CORRUPT_DB );
+ return (u16)nSize;
+}
#ifdef SQLITE_DEBUG
@@ -67574,7 +69603,7 @@ static u16 cellSize(MemPage *pPage, int iCell){
#ifndef SQLITE_OMIT_AUTOVACUUM
/*
** The cell pCell is currently part of page pSrc but will ultimately be part
-** of pPage. (pSrc and pPager are often the same.) If pCell contains a
+** of pPage. (pSrc and pPage are often the same.) If pCell contains a
** pointer to an overflow page, insert an entry into the pointer-map for
** the overflow page that will be valid after pCell has been moved to pPage.
*/
@@ -67630,8 +69659,7 @@ static int defragmentPage(MemPage *pPage, int nMaxFrag){
assert( pPage->pBt->usableSize <= SQLITE_MAX_PAGE_SIZE );
assert( pPage->nOverflow==0 );
assert( sqlite3_mutex_held(pPage->pBt->mutex) );
- temp = 0;
- src = data = pPage->aData;
+ data = pPage->aData;
hdr = pPage->hdrOffset;
cellOffset = pPage->cellOffset;
nCell = pPage->nCell;
@@ -67665,7 +69693,7 @@ static int defragmentPage(MemPage *pPage, int nMaxFrag){
if( iFree2+sz2 > usableSize ) return SQLITE_CORRUPT_PAGE(pPage);
memmove(&data[iFree+sz+sz2], &data[iFree+sz], iFree2-(iFree+sz));
sz += sz2;
- }else if( NEVER(iFree+sz>usableSize) ){
+ }else if( iFree+sz>usableSize ){
return SQLITE_CORRUPT_PAGE(pPage);
}
@@ -67685,39 +69713,38 @@ static int defragmentPage(MemPage *pPage, int nMaxFrag){
cbrk = usableSize;
iCellLast = usableSize - 4;
iCellStart = get2byte(&data[hdr+5]);
- for(i=0; i<nCell; i++){
-   u8 *pAddr;     /* The i-th cell pointer */
-   pAddr = &data[cellOffset + i*2];
-   pc = get2byte(pAddr);
-   testcase( pc==iCellFirst );
-   testcase( pc==iCellLast );
-   /* These conditions have already been verified in btreeInitPage()
-   ** if PRAGMA cell_size_check=ON.
-   */
-   if( pc<iCellStart || pc>iCellLast ){
- return SQLITE_CORRUPT_PAGE(pPage);
- }
- assert( pc>=iCellStart && pc<=iCellLast );
- size = pPage->xCellSize(pPage, &src[pc]);
- cbrk -= size;
-   if( cbrk<iCellStart || pc+size>usableSize ){
- return SQLITE_CORRUPT_PAGE(pPage);
- }
- assert( cbrk+size<=usableSize && cbrk>=iCellStart );
- testcase( cbrk+size==usableSize );
- testcase( pc+size==usableSize );
- put2byte(pAddr, cbrk);
- if( temp==0 ){
- if( cbrk==pc ) continue;
- temp = sqlite3PagerTempSpace(pPage->pBt->pPager);
- memcpy(&temp[iCellStart], &data[iCellStart], usableSize - iCellStart);
- src = temp;
+ if( nCell>0 ){
+ temp = sqlite3PagerTempSpace(pPage->pBt->pPager);
+ memcpy(&temp[iCellStart], &data[iCellStart], usableSize - iCellStart);
+ src = temp;
+   for(i=0; i<nCell; i++){
+     u8 *pAddr;     /* The i-th cell pointer */
+     pAddr = &data[cellOffset + i*2];
+     pc = get2byte(pAddr);
+     testcase( pc==iCellFirst );
+     testcase( pc==iCellLast );
+     /* These conditions have already been verified in btreeInitPage()
+     ** if PRAGMA cell_size_check=ON.
+     */
+     if( pc<iCellStart || pc>iCellLast ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ assert( pc>=iCellStart && pc<=iCellLast );
+ size = pPage->xCellSize(pPage, &src[pc]);
+ cbrk -= size;
+     if( cbrk<iCellStart || pc+size>usableSize ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ assert( cbrk+size<=usableSize && cbrk>=iCellStart );
+ testcase( cbrk+size==usableSize );
+ testcase( pc+size==usableSize );
+ put2byte(pAddr, cbrk);
+ memcpy(&data[cbrk], &src[pc], size);
}
- memcpy(&data[cbrk], &src[pc], size);
}
data[hdr+7] = 0;
- defragment_out:
+defragment_out:
assert( pPage->nFree>=0 );
if( data[hdr+7]+cbrk-iCellFirst!=pPage->nFree ){
return SQLITE_CORRUPT_PAGE(pPage);
@@ -67749,7 +69776,8 @@ static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){
const int hdr = pPg->hdrOffset; /* Offset to page header */
u8 * const aData = pPg->aData; /* Page data */
int iAddr = hdr + 1; /* Address of ptr to pc */
- int pc = get2byte(&aData[iAddr]); /* Address of a free slot */
+ u8 *pTmp = &aData[iAddr]; /* Temporary ptr into aData[] */
+ int pc = get2byte(pTmp); /* Address of a free slot */
int x; /* Excess size of the slot */
int maxPC = pPg->pBt->usableSize - nByte; /* Max address for a usable slot */
int size; /* Size of the free slot */
@@ -67759,7 +69787,8 @@ static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){
/* EVIDENCE-OF: R-22710-53328 The third and fourth bytes of each
** freeblock form a big-endian integer which is the size of the freeblock
** in bytes, including the 4-byte header. */
- size = get2byte(&aData[pc+2]);
+ pTmp = &aData[pc+2];
+ size = get2byte(pTmp);
if( (x = size - nByte)>=0 ){
testcase( x==4 );
testcase( x==3 );
@@ -67772,7 +69801,6 @@ static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){
** fragmented bytes within the page. */
memcpy(&aData[iAddr], &aData[pc], 2);
aData[hdr+7] += (u8)x;
- testcase( pc+x>maxPC );
return &aData[pc];
}else if( x+pc > maxPC ){
/* This slot extends off the end of the usable part of the page */
@@ -67786,10 +69814,11 @@ static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){
return &aData[pc + x];
}
iAddr = pc;
- pc = get2byte(&aData[pc]);
- if( pc<=iAddr+size ){
+ pTmp = &aData[pc];
+ pc = get2byte(pTmp);
+ if( pc<=iAddr ){
if( pc ){
- /* The next slot in the chain is not past the end of the current slot */
+ /* The next slot in the chain comes before the current slot */
*pRc = SQLITE_CORRUPT_PAGE(pPg);
}
return 0;
@@ -67820,6 +69849,7 @@ static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){
u8 * const data = pPage->aData; /* Local cache of pPage->aData */
int top; /* First byte of cell content area */
int rc = SQLITE_OK; /* Integer return code */
+ u8 *pTmp; /* Temp ptr into data[] */
int gap; /* First byte of gap between cell pointers and cell content */
assert( sqlite3PagerIswriteable(pPage->pDbPage) );
@@ -67838,7 +69868,8 @@ static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){
** then the cell content offset of an empty page wants to be 65536.
** However, that integer is too large to be stored in a 2-byte unsigned
** integer, so a value of 0 is used in its place. */
- top = get2byte(&data[hdr+5]);
+ pTmp = &data[hdr+5];
+ top = get2byte(pTmp);
assert( top<=(int)pPage->pBt->usableSize ); /* by btreeComputeFreeSpace() */
if( gap>top ){
if( top==0 && pPage->pBt->usableSize==65536 ){
@@ -67920,6 +69951,7 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){
u16 x; /* Offset to cell content area */
u32 iEnd = iStart + iSize; /* First byte past the iStart buffer */
unsigned char *data = pPage->aData; /* Page content */
+ u8 *pTmp; /* Temporary ptr into data[] */
assert( pPage->pBt!=0 );
assert( sqlite3PagerIswriteable(pPage->pDbPage) );
@@ -67938,7 +69970,7 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){
iFreeBlk = 0; /* Shortcut for the case when the freelist is empty */
}else{
while( (iFreeBlk = get2byte(&data[iPtr]))data[hdr+7] ) return SQLITE_CORRUPT_PAGE(pPage);
data[hdr+7] -= nFrag;
}
- x = get2byte(&data[hdr+5]);
+ pTmp = &data[hdr+5];
+ x = get2byte(pTmp);
if( iStart<=x ){
/* The new freeblock is at the beginning of the cell content area,
** so just extend the cell content area rather than create another
@@ -68026,7 +70059,6 @@ static int decodeFlags(MemPage *pPage, int flagByte){
pPage->leaf = (u8)(flagByte>>3); assert( PTF_LEAF == 1<<3 );
flagByte &= ~PTF_LEAF;
pPage->childPtrSize = 4-4*pPage->leaf;
- pPage->xCellSize = cellSizePtr;
pBt = pPage->pBt;
if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){
/* EVIDENCE-OF: R-07291-35328 A value of 5 (0x05) means the page is an
@@ -68038,6 +70070,7 @@ static int decodeFlags(MemPage *pPage, int flagByte){
pPage->intKey = 1;
if( pPage->leaf ){
pPage->intKeyLeaf = 1;
+ pPage->xCellSize = cellSizePtrTableLeaf;
pPage->xParseCell = btreeParseCellPtr;
}else{
pPage->intKeyLeaf = 0;
@@ -68055,12 +70088,17 @@ static int decodeFlags(MemPage *pPage, int flagByte){
assert( (PTF_ZERODATA|PTF_LEAF)==10 );
pPage->intKey = 0;
pPage->intKeyLeaf = 0;
+ pPage->xCellSize = cellSizePtr;
pPage->xParseCell = btreeParseCellPtrIndex;
pPage->maxLocal = pBt->maxLocal;
pPage->minLocal = pBt->minLocal;
}else{
/* EVIDENCE-OF: R-47608-56469 Any other value for the b-tree page type is
** an error. */
+ pPage->intKey = 0;
+ pPage->intKeyLeaf = 0;
+ pPage->xCellSize = cellSizePtr;
+ pPage->xParseCell = btreeParseCellPtrIndex;
return SQLITE_CORRUPT_PAGE(pPage);
}
pPage->max1bytePayload = pBt->max1bytePayload;
@@ -68414,7 +70452,7 @@ getAndInitPage_error1:
pCur->pPage = pCur->apPage[pCur->iPage];
}
testcase( pgno==0 );
- assert( pgno!=0 || rc==SQLITE_CORRUPT );
+ assert( pgno!=0 || rc!=SQLITE_OK );
return rc;
}
@@ -69850,6 +71888,9 @@ static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){
}
}
}else{
+ if( pCell+4 > pPage->aData+pPage->pBt->usableSize ){
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
if( get4byte(pCell)==iFrom ){
put4byte(pCell, iTo);
break;
@@ -70036,12 +72077,17 @@ static int incrVacuumStep(BtShared *pBt, Pgno nFin, Pgno iLastPg, int bCommit){
}
do {
MemPage *pFreePg;
+ Pgno dbSize = btreePagecount(pBt);
rc = allocateBtreePage(pBt, &pFreePg, &iFreePg, iNear, eMode);
if( rc!=SQLITE_OK ){
releasePage(pLastPg);
return rc;
}
releasePage(pFreePg);
+ if( iFreePg>dbSize ){
+ releasePage(pLastPg);
+ return SQLITE_CORRUPT_BKPT;
+ }
}while( bCommit && iFreePg>nFin );
assert( iFreePg<iLastPg );
-  BtShared *pBt = pCur->pBt;
-
assert( cursorOwnsBtShared(pCur) );
assert( pCur->eState==CURSOR_VALID );
assert( pCur->iPage<BTCURSOR_MAX_DEPTH );
pCur->apPage[pCur->iPage] = pCur->pPage;
pCur->ix = 0;
pCur->iPage++;
- return getAndInitPage(pBt, newPgno, &pCur->pPage, pCur, pCur->curPagerFlags);
+ return getAndInitPage(pCur->pBt, newPgno, &pCur->pPage, pCur,
+ pCur->curPagerFlags);
}
#ifdef SQLITE_DEBUG
@@ -71472,7 +73517,7 @@ static int moveToRoot(BtCursor *pCur){
}
sqlite3BtreeClearCursor(pCur);
}
- rc = getAndInitPage(pCur->pBtree->pBt, pCur->pgnoRoot, &pCur->pPage,
+ rc = getAndInitPage(pCur->pBt, pCur->pgnoRoot, &pCur->pPage,
0, pCur->curPagerFlags);
if( rc!=SQLITE_OK ){
pCur->eState = CURSOR_INVALID;
@@ -71803,6 +73848,69 @@ moveto_table_finish:
return rc;
}
+/*
+** Compare the "idx"-th cell on the page the cursor pCur is currently
+** pointing to against pIdxKey using xRecordCompare. Return negative or
+** zero if the cell is less than or equal to pIdxKey. Return positive
+** if unknown.
+**
+** Return value negative: Cell at pCur[idx] less than pIdxKey
+**
+** Return value is zero: Cell at pCur[idx] equals pIdxKey
+**
+** Return value positive: Nothing is known about the relationship
+** of the cell at pCur[idx] and pIdxKey.
+**
+** This routine is part of an optimization. It is always safe to return
+** a positive value as that will cause the optimization to be skipped.
+*/
+static int indexCellCompare(
+ BtCursor *pCur,
+ int idx,
+ UnpackedRecord *pIdxKey,
+ RecordCompare xRecordCompare
+){
+ MemPage *pPage = pCur->pPage;
+ int c;
+ int nCell; /* Size of the pCell cell in bytes */
+ u8 *pCell = findCellPastPtr(pPage, idx);
+
+ nCell = pCell[0];
+ if( nCell<=pPage->max1bytePayload ){
+ /* This branch runs if the record-size field of the cell is a
+ ** single byte varint and the record fits entirely on the main
+ ** b-tree page. */
+ testcase( pCell+nCell+1==pPage->aDataEnd );
+ c = xRecordCompare(nCell, (void*)&pCell[1], pIdxKey);
+ }else if( !(pCell[1] & 0x80)
+ && (nCell = ((nCell&0x7f)<<7) + pCell[1])<=pPage->maxLocal
+ ){
+ /* The record-size field is a 2 byte varint and the record
+ ** fits entirely on the main b-tree page. */
+ testcase( pCell+nCell+2==pPage->aDataEnd );
+ c = xRecordCompare(nCell, (void*)&pCell[2], pIdxKey);
+ }else{
+ /* If the record extends into overflow pages, do not attempt
+ ** the optimization. */
+ c = 99;
+ }
+ return c;
+}
+
+/*
+** Return true (non-zero) if pCur is currently pointing to the last
+** page of a table.
+*/
+static int cursorOnLastPage(BtCursor *pCur){
+ int i;
+ assert( pCur->eState==CURSOR_VALID );
+  for(i=0; i<pCur->iPage; i++){
+ MemPage *pPage = pCur->apPage[i];
+    if( pCur->aiIdx[i]<pPage->nCell ) return 0;
+ }
+ return 1;
+}
+
/* Move the cursor so that it points to an entry in an index table
** near the key pIdxKey. Return a success code.
**
@@ -71853,6 +73961,43 @@ SQLITE_PRIVATE int sqlite3BtreeIndexMoveto(
|| pIdxKey->default_rc==-1
);
+
+ /* Check to see if we can skip a lot of work. Two cases:
+ **
+ ** (1) If the cursor is already pointing to the very last cell
+ ** in the table and the pIdxKey search key is greater than or
+ ** equal to that last cell, then no movement is required.
+ **
+ ** (2) If the cursor is on the last page of the table and the first
+ ** cell on that last page is less than or equal to the pIdxKey
+ ** search key, then we can start the search on the current page
+ ** without needing to go back to root.
+ */
+ if( pCur->eState==CURSOR_VALID
+ && pCur->pPage->leaf
+ && cursorOnLastPage(pCur)
+ ){
+ int c;
+ if( pCur->ix==pCur->pPage->nCell-1
+ && (c = indexCellCompare(pCur, pCur->ix, pIdxKey, xRecordCompare))<=0
+ && pIdxKey->errCode==SQLITE_OK
+ ){
+ *pRes = c;
+ return SQLITE_OK; /* Cursor already pointing at the correct spot */
+ }
+ if( pCur->iPage>0
+ && indexCellCompare(pCur, 0, pIdxKey, xRecordCompare)<=0
+ && pIdxKey->errCode==SQLITE_OK
+ ){
+ pCur->curFlags &= ~BTCF_ValidOvfl;
+ if( !pCur->pPage->isInit ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ goto bypass_moveto_root; /* Start search on the current page */
+ }
+ pIdxKey->errCode = SQLITE_OK;
+ }
+
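  /* A typical beneficiary of the two shortcuts above (an illustrative
  ** scenario only): inserting keys into an index in ascending order.
  ** Each new key compares greater than or equal to the last cell on the
  ** last page, so case (1) or (2) applies and the descent from the root
  ** page is skipped. */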
rc = moveToRoot(pCur);
if( rc ){
if( rc==SQLITE_EMPTY ){
@@ -71862,12 +74007,14 @@ SQLITE_PRIVATE int sqlite3BtreeIndexMoveto(
}
return rc;
}
+
+bypass_moveto_root:
assert( pCur->pPage );
assert( pCur->pPage->isInit );
assert( pCur->eState==CURSOR_VALID );
assert( pCur->pPage->nCell > 0 );
- assert( pCur->iPage==0 || pCur->apPage[0]->intKey==pCur->curIntKey );
- assert( pCur->curIntKey || pIdxKey );
+ assert( pCur->curIntKey==0 );
+ assert( pIdxKey!=0 );
for(;;){
int lwr, upr, idx, c;
Pgno chldPg;
@@ -71881,7 +74028,7 @@ SQLITE_PRIVATE int sqlite3BtreeIndexMoveto(
** be the right kind (index or table) of b-tree page. Otherwise
** a moveToChild() or moveToRoot() call would have detected corruption. */
assert( pPage->nCell>0 );
- assert( pPage->intKey==(pIdxKey==0) );
+ assert( pPage->intKey==0 );
lwr = 0;
upr = pPage->nCell-1;
idx = upr>>1; /* idx = (lwr+upr)/2; */
@@ -72078,14 +74225,7 @@ static SQLITE_NOINLINE int btreeNext(BtCursor *pCur){
pPage = pCur->pPage;
idx = ++pCur->ix;
- if( !pPage->isInit || sqlite3FaultSim(412) ){
- /* The only known way for this to happen is for there to be a
- ** recursive SQL function that does a DELETE operation as part of a
- ** SELECT which deletes content out from under an active cursor
- ** in a corrupt database file where the table being DELETE-ed from
- ** has pages in common with the table being queried. See TH3
- ** module cov1/btree78.test testcase 220 (2018-06-08) for an
- ** example. */
+ if( NEVER(!pPage->isInit) || sqlite3FaultSim(412) ){
return SQLITE_CORRUPT_BKPT;
}
@@ -72261,8 +74401,8 @@ static int allocateBtreePage(
assert( eMode==BTALLOC_ANY || (nearby>0 && IfNotOmitAV(pBt->autoVacuum)) );
pPage1 = pBt->pPage1;
mxPage = btreePagecount(pBt);
- /* EVIDENCE-OF: R-05119-02637 The 4-byte big-endian integer at offset 36
- ** stores stores the total number of pages on the freelist. */
+ /* EVIDENCE-OF: R-21003-45125 The 4-byte big-endian integer at offset 36
+ ** stores the total number of pages on the freelist. */
n = get4byte(&pPage1->aData[36]);
testcase( n==mxPage-1 );
if( n>=mxPage ){
@@ -73011,12 +75151,6 @@ static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){
assert( pPage->pBt->usableSize > (u32)(ptr-data) );
pc = get2byte(ptr);
hdr = pPage->hdrOffset;
-#if 0 /* Not required. Omit for efficiency */
- if( pc<hdr+pPage->nCell*2 ){
- *pRC = SQLITE_CORRUPT_BKPT;
- return;
- }
-#endif
testcase( pc==(u32)get2byte(&data[hdr+5]) );
testcase( pc+sz==pPage->pBt->usableSize );
if( pc+sz > pPage->pBt->usableSize ){
@@ -73900,8 +76034,6 @@ static int balance_nonroot(
Pgno pgno; /* Temp var to store a page number in */
u8 abDone[NB+2]; /* True after i'th new page is populated */
Pgno aPgno[NB+2]; /* Page numbers of new pages before shuffling */
- Pgno aPgOrder[NB+2]; /* Copy of aPgno[] used for sorting pages */
- u16 aPgFlags[NB+2]; /* flags field of new pages before shuffling */
CellArray b; /* Parsed information on cells being balanced */
memset(abDone, 0, sizeof(abDone));
@@ -74325,42 +76457,39 @@ static int balance_nonroot(
** of the table is closer to a linear scan through the file. That in turn
** helps the operating system to deliver pages from the disk more rapidly.
**
- ** An O(n^2) insertion sort algorithm is used, but since n is never more
- ** than (NB+2) (a small constant), that should not be a problem.
+ ** An O(N*N) sort algorithm is used, but since N is never more than NB+2
+ ** (5), that is not a performance concern.
**
** When NB==3, this one optimization makes the database about 25% faster
** for large insertions and deletions.
*/
for(i=0; i<nNew; i++){
- aPgOrder[i] = aPgno[i] = apNew[i]->pgno;
- aPgFlags[i] = apNew[i]->pDbPage->flags;
- for(j=0; j<i; j++){
+ aPgno[i] = apNew[i]->pgno;
+ assert( apNew[i]->pDbPage->flags & PGHDR_WRITEABLE );
+ assert( apNew[i]->pDbPage->flags & PGHDR_DIRTY );
}
- for(i=0; i<nNew; i++){
+ if( apNew[j]->pgno < apNew[iB]->pgno ) iB = j;
}
- pgno = aPgOrder[iBest];
- aPgOrder[iBest] = 0xffffffff;
- if( iBest!=i ){
- if( iBest>i ){
- sqlite3PagerRekey(apNew[iBest]->pDbPage, pBt->nPage+iBest+1, 0);
- }
- sqlite3PagerRekey(apNew[i]->pDbPage, pgno, aPgFlags[iBest]);
- apNew[i]->pgno = pgno;
+
+ /* If apNew[i] has a page number that is bigger than any of the
+ ** subsequent apNew[i] entries, then swap apNew[i] with the subsequent
+ ** entry that has the smallest page number (which we know to be
+ ** entry apNew[iB]).
+ */
+ if( iB!=i ){
+ Pgno pgnoA = apNew[i]->pgno;
+ Pgno pgnoB = apNew[iB]->pgno;
+ Pgno pgnoTemp = (PENDING_BYTE/pBt->pageSize)+1;
+ u16 fgA = apNew[i]->pDbPage->flags;
+ u16 fgB = apNew[iB]->pDbPage->flags;
+ sqlite3PagerRekey(apNew[i]->pDbPage, pgnoTemp, fgB);
+ sqlite3PagerRekey(apNew[iB]->pDbPage, pgnoA, fgA);
+ sqlite3PagerRekey(apNew[i]->pDbPage, pgnoB, fgB);
+ apNew[i]->pgno = pgnoB;
+ apNew[iB]->pgno = pgnoA;
}
}
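/* Worked example (illustrative only): suppose nNew==3 and the freshly
** allocated sibling pages received page numbers 45, 12 and 33.  The
** selection sort above leaves apNew[0..2] holding pages 12, 33 and 45.
** Each swap parks one page on pgnoTemp, the page number of the unused
** locking page, so the two pager entries being exchanged never carry
** the same page number at the same time. */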
@@ -74748,7 +76877,6 @@ static int anotherValidCursor(BtCursor *pCur){
*/
static int balance(BtCursor *pCur){
int rc = SQLITE_OK;
- const int nMin = pCur->pBt->usableSize * 2 / 3;
u8 aBalanceQuickSpace[13];
u8 *pFree = 0;
@@ -74760,7 +76888,11 @@ static int balance(BtCursor *pCur){
MemPage *pPage = pCur->pPage;
if( NEVER(pPage->nFree<0) && btreeComputeFreeSpace(pPage) ) break;
- if( pPage->nOverflow==0 && pPage->nFree<=nMin ){
+ if( pPage->nOverflow==0 && pPage->nFree*3<=(int)pCur->pBt->usableSize*2 ){
+ /* No rebalance required as long as:
+ ** (1) There are no overflow cells
+ ** (2) The amount of free space on the page is less than 2/3rds of
+ ** the total usable space on the page. */
break;
}else if( (iPage = pCur->iPage)==0 ){
if( pPage->nOverflow && (rc = anotherValidCursor(pCur))==SQLITE_OK ){
@@ -74783,6 +76915,11 @@ static int balance(BtCursor *pCur){
}else{
break;
}
+ }else if( sqlite3PagerPageRefcount(pPage->pDbPage)>1 ){
+ /* The page being written is not a root page, and there is currently
+ ** more than one reference to it. This only happens if the page is one
+ ** of its own ancestor pages. Corruption. */
+ rc = SQLITE_CORRUPT_BKPT;
}else{
MemPage * const pParent = pCur->apPage[iPage-1];
int const iIdx = pCur->aiIdx[iPage-1];
@@ -74980,7 +77117,7 @@ static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){
** pX.pData,nData,nZero fields must be zero.
**
** If the seekResult parameter is non-zero, then a successful call to
-** MovetoUnpacked() to seek cursor pCur to (pKey,nKey) has already
+** sqlite3BtreeIndexMoveto() to seek cursor pCur to (pKey,nKey) has already
** been performed. In other words, if seekResult!=0 then the cursor
** is currently pointing to a cell that will be adjacent to the cell
** to be inserted. If seekResult<0 then pCur points to a cell that is
@@ -74998,7 +77135,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
BtCursor *pCur, /* Insert data into the table of this cursor */
const BtreePayload *pX, /* Content of the row to be inserted */
int flags, /* True if this is likely an append */
- int seekResult /* Result of prior MovetoUnpacked() call */
+ int seekResult /* Result of prior IndexMoveto() call */
){
int rc;
int loc = seekResult; /* -1: before desired location +1: after */
@@ -75037,7 +77174,12 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
}
}
+ /* Ensure that the cursor is not in the CURSOR_FAULT state and that it
+ ** points to a valid cell.
+ */
if( pCur->eState>=CURSOR_REQUIRESEEK ){
+ testcase( pCur->eState==CURSOR_REQUIRESEEK );
+ testcase( pCur->eState==CURSOR_FAULT );
rc = moveToRoot(pCur);
if( rc && rc!=SQLITE_EMPTY ) return rc;
}
@@ -75149,7 +77291,8 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
assert( pPage->intKey || pX->nKey>=0 || (flags & BTREE_PREFORMAT) );
assert( pPage->leaf || !pPage->intKey );
if( pPage->nFree<0 ){
- if( pCur->eState>CURSOR_INVALID ){
+ if( NEVER(pCur->eState>CURSOR_INVALID) ){
+ /* ^^^^^--- due to the moveToRoot() call above */
rc = SQLITE_CORRUPT_BKPT;
}else{
rc = btreeComputeFreeSpace(pPage);
@@ -75160,7 +77303,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
TRACE(("INSERT: table=%d nkey=%lld ndata=%d page=%d %s\n",
pCur->pgnoRoot, pX->nKey, pX->nData, pPage->pgno,
loc==0 ? "overwrite" : "new entry"));
- assert( pPage->isInit );
+ assert( pPage->isInit || CORRUPT_DB );
newCell = pBt->pTmpSpace;
assert( newCell!=0 );
if( flags & BTREE_PREFORMAT ){
@@ -75311,7 +77454,11 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64
u32 nRem; /* Bytes of data still to copy */
getCellInfo(pSrc);
- aOut += putVarint32(aOut, pSrc->info.nPayload);
+ if( pSrc->info.nPayload<0x80 ){
+ *(aOut++) = pSrc->info.nPayload;
+ }else{
+ aOut += sqlite3PutVarint(aOut, pSrc->info.nPayload);
+ }
if( pDest->pKeyInfo==0 ) aOut += putVarint(aOut, iKey);
nIn = pSrc->info.nLocal;
aIn = pSrc->info.pPayload;
@@ -75471,7 +77618,8 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){
bPreserve = (flags & BTREE_SAVEPOSITION)!=0;
if( bPreserve ){
if( !pPage->leaf
- || (pPage->nFree+cellSizePtr(pPage,pCell)+2)>(int)(pBt->usableSize*2/3)
+ || (pPage->nFree+pPage->xCellSize(pPage,pCell)+2) >
+ (int)(pBt->usableSize*2/3)
|| pPage->nCell==1 /* See dbfuzz001.test for a test case */
){
/* A b-tree rebalance will be required after deleting this entry.
@@ -75567,7 +77715,15 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){
** been corrected, so be it. Otherwise, after balancing the leaf node,
** walk the cursor up the tree to the internal node and balance it as
** well. */
- rc = balance(pCur);
+ assert( pCur->pPage->nOverflow==0 );
+ assert( pCur->pPage->nFree>=0 );
+ if( pCur->pPage->nFree*3<=(int)pCur->pBt->usableSize*2 ){
+ /* Optimization: If the free space is less than 2/3rds of the page,
+ ** then balance() will always be a no-op. No need to invoke it. */
+ rc = SQLITE_OK;
+ }else{
+ rc = balance(pCur);
+ }
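  /* Worked numbers (illustrative): with a 4096-byte usable page size the
  ** test above skips balance() whenever nFree*3 <= 8192, that is whenever
  ** no more than 2730 bytes of the page are free. */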
if( rc==SQLITE_OK && pCur->iPage>iCellDepth ){
releasePageNotNull(pCur->pPage);
pCur->iPage--;
@@ -77062,6 +79218,17 @@ SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *p){
*/
SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void){ return ROUND8(sizeof(MemPage)); }
+/*
+** If no transaction is active and the database is not a temp-db, clear
+** the in-memory pager cache.
+*/
+SQLITE_PRIVATE void sqlite3BtreeClearCache(Btree *p){
+ BtShared *pBt = p->pBt;
+ if( pBt->inTransaction==TRANS_NONE ){
+ sqlite3PagerClearCache(pBt->pPager);
+ }
+}
+
#if !defined(SQLITE_OMIT_SHARED_CACHE)
/*
** Return true if the Btree passed as the only argument is sharable.
@@ -78315,9 +80482,10 @@ SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem *pMem, FuncDef *pFunc){
Mem t;
assert( pFunc!=0 );
assert( pMem!=0 );
+ assert( pMem->db!=0 );
assert( pFunc->xFinalize!=0 );
assert( (pMem->flags & MEM_Null)!=0 || pFunc==pMem->u.pDef );
- assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
+ assert( sqlite3_mutex_held(pMem->db->mutex) );
memset(&ctx, 0, sizeof(ctx));
memset(&t, 0, sizeof(t));
t.flags = MEM_Null;
@@ -78325,6 +80493,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem *pMem, FuncDef *pFunc){
ctx.pOut = &t;
ctx.pMem = pMem;
ctx.pFunc = pFunc;
+ ctx.enc = ENC(t.db);
pFunc->xFinalize(&ctx); /* IMP: R-24505-23230 */
assert( (pMem->flags & MEM_Dyn)==0 );
if( pMem->szMalloc>0 ) sqlite3DbFreeNN(pMem->db, pMem->zMalloc);
@@ -78346,12 +80515,14 @@ SQLITE_PRIVATE int sqlite3VdbeMemAggValue(Mem *pAccum, Mem *pOut, FuncDef *pFunc
assert( pFunc!=0 );
assert( pFunc->xValue!=0 );
assert( (pAccum->flags & MEM_Null)!=0 || pFunc==pAccum->u.pDef );
- assert( pAccum->db==0 || sqlite3_mutex_held(pAccum->db->mutex) );
+ assert( pAccum->db!=0 );
+ assert( sqlite3_mutex_held(pAccum->db->mutex) );
memset(&ctx, 0, sizeof(ctx));
sqlite3VdbeMemSetNull(pOut);
ctx.pOut = pOut;
ctx.pMem = pAccum;
ctx.pFunc = pFunc;
+ ctx.enc = ENC(pAccum->db);
pFunc->xValue(&ctx);
return ctx.isError;
}
@@ -78417,6 +80588,14 @@ SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p){
}
}
+/* Like sqlite3VdbeMemRelease() but faster for cases where we
+** know in advance that the Mem is not MEM_Dyn or MEM_Agg.
+*/
+SQLITE_PRIVATE void sqlite3VdbeMemReleaseMalloc(Mem *p){
+ assert( !VdbeMemDynamic(p) );
+ if( p->szMalloc ) vdbeMemClear(p);
+}
+
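/* Usage sketch (illustrative): callers that know the Mem holds at most a
** plain string or blob allocation take this cheaper path.  For example,
** the local record buffer in sqlite3VdbeIdxRowid() further below is
** released with sqlite3VdbeMemReleaseMalloc(&m). */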
/*
** Convert a 64-bit IEEE double into a 64-bit signed integer.
** If the double is out of range of a 64-bit signed integer then
@@ -78595,6 +80774,16 @@ SQLITE_PRIVATE int sqlite3RealSameAsInt(double r1, sqlite3_int64 i){
&& i >= -2251799813685248LL && i < 2251799813685248LL);
}
+/* Convert a floating point value to its closest integer. Do so in
+** a way that avoids 'outside the range of representable values' warnings
+** from UBSAN.
+*/
+SQLITE_PRIVATE i64 sqlite3RealToI64(double r){
+ if( r<=(double)SMALLEST_INT64 ) return SMALLEST_INT64;
+ if( r>=(double)LARGEST_INT64) return LARGEST_INT64;
+ return (i64)r;
+}
+
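/* Examples (illustrative): sqlite3RealToI64(2.9) returns 2 (the cast
** truncates toward zero), sqlite3RealToI64(9.3e18) returns LARGEST_INT64
** and sqlite3RealToI64(-9.3e18) returns SMALLEST_INT64.  The clamping
** happens before the cast, which is what keeps UBSAN quiet. */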
/*
** Convert pMem so that it has type MEM_Real or MEM_Int.
** Invalidate any prior representations.
@@ -78616,7 +80805,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem *pMem){
assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
rc = sqlite3AtoF(pMem->z, &pMem->u.r, pMem->n, pMem->enc);
if( ((rc==0 || rc==1) && sqlite3Atoi64(pMem->z, &ix, pMem->n, pMem->enc)<=1)
- || sqlite3RealSameAsInt(pMem->u.r, (ix = (i64)pMem->u.r))
+ || sqlite3RealSameAsInt(pMem->u.r, (ix = sqlite3RealToI64(pMem->u.r)))
){
pMem->u.i = ix;
MemSetTypeFlag(pMem, MEM_Int);
@@ -78668,6 +80857,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemCast(Mem *pMem, u8 aff, u8 encoding){
sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding);
assert( pMem->flags & MEM_Str || pMem->db->mallocFailed );
pMem->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal|MEM_Blob|MEM_Zero);
+ if( encoding!=SQLITE_UTF8 ) pMem->n &= ~1;
return sqlite3VdbeChangeEncoding(pMem, encoding);
}
}
@@ -78961,6 +81151,13 @@ SQLITE_PRIVATE void sqlite3VdbeMemMove(Mem *pTo, Mem *pFrom){
** stored without allocating memory, then it is. If a memory allocation
** is required to store the string, then value of pMem is unchanged. In
** either case, SQLITE_TOOBIG is returned.
+**
+** The "enc" parameter is the text encoding for the string, or zero
+** to store a blob.
+**
+** If n is negative, then the string consists of all bytes up to but
+** excluding the first zero character. The n parameter must be
+** non-negative for blobs.
*/
SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
Mem *pMem, /* Memory cell to set to string value */
@@ -78971,11 +81168,12 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
){
i64 nByte = n; /* New value for pMem->n */
int iLimit; /* Maximum allowed string or blob size */
- u16 flags = 0; /* New value for pMem->flags */
+ u16 flags; /* New value for pMem->flags */
assert( pMem!=0 );
assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
assert( !sqlite3VdbeMemIsRowSet(pMem) );
+ assert( enc!=0 || n>=0 );
/* If z is a NULL pointer, set pMem to contain an SQL NULL. */
if( !z ){
@@ -78988,7 +81186,6 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
}else{
iLimit = SQLITE_MAX_LENGTH;
}
- flags = (enc==0?MEM_Blob:MEM_Str);
if( nByte<0 ){
assert( enc!=0 );
if( enc==SQLITE_UTF8 ){
@@ -78996,7 +81193,23 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
}else{
for(nByte=0; nByte<=iLimit && (z[nByte] | z[nByte+1]); nByte+=2){}
}
- flags |= MEM_Term;
+ flags= MEM_Str|MEM_Term;
+ }else if( enc==0 ){
+ flags = MEM_Blob;
+ enc = SQLITE_UTF8;
+ }else{
+ flags = MEM_Str;
+ }
+ if( nByte>iLimit ){
+ if( xDel && xDel!=SQLITE_TRANSIENT ){
+ if( xDel==SQLITE_DYNAMIC ){
+ sqlite3DbFree(pMem->db, (void*)z);
+ }else{
+ xDel((void*)z);
+ }
+ }
+ sqlite3VdbeMemSetNull(pMem);
+ return sqlite3ErrorToParser(pMem->db, SQLITE_TOOBIG);
}
/* The following block sets the new values of Mem.z and Mem.xDel. It
@@ -79008,9 +81221,6 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
if( flags&MEM_Term ){
nAlloc += (enc==SQLITE_UTF8?1:2);
}
- if( nByte>iLimit ){
- return sqlite3ErrorToParser(pMem->db, SQLITE_TOOBIG);
- }
testcase( nAlloc==0 );
testcase( nAlloc==31 );
testcase( nAlloc==32 );
@@ -79032,16 +81242,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
pMem->n = (int)(nByte & 0x7fffffff);
pMem->flags = flags;
- if( enc ){
- pMem->enc = enc;
-#ifdef SQLITE_ENABLE_SESSION
- }else if( pMem->db==0 ){
- pMem->enc = SQLITE_UTF8;
-#endif
- }else{
- assert( pMem->db!=0 );
- pMem->enc = ENC(pMem->db);
- }
+ pMem->enc = enc;
#ifndef SQLITE_OMIT_UTF16
if( enc>SQLITE_UTF8 && sqlite3VdbeMemHandleBom(pMem) ){
@@ -79049,9 +81250,6 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
}
#endif
- if( nByte>iLimit ){
- return sqlite3ErrorToParser(pMem->db, SQLITE_TOOBIG);
- }
return SQLITE_OK;
}
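/* Usage sketch (illustrative; it assumes the parameter order
** (pMem, z, n, enc, xDel) of the public sqlite3VdbeMemSetStr() interface):
**
**    sqlite3VdbeMemSetStr(pMem, "hello", -1, SQLITE_UTF8, SQLITE_STATIC);
**    sqlite3VdbeMemSetStr(pMem, aBlob, nBlob, 0, SQLITE_TRANSIENT);
**
** The first call stores NUL-terminated text; the second stores a blob,
** for which the length (here nBlob) must be non-negative, as enforced by
** the assert() near the top of the routine. */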
@@ -79329,10 +81527,12 @@ static int valueFromFunction(
goto value_from_function_out;
}
- assert( pCtx->pParse->rc==SQLITE_OK );
+ testcase( pCtx->pParse->rc==SQLITE_ERROR );
+ testcase( pCtx->pParse->rc==SQLITE_OK );
memset(&ctx, 0, sizeof(ctx));
ctx.pOut = pVal;
ctx.pFunc = pFunc;
+ ctx.enc = ENC(db);
pFunc->xSFunc(&ctx, nVal, apVal);
if( ctx.isError ){
rc = ctx.isError;
@@ -79408,8 +81608,8 @@ static int valueFromExpr(
rc = valueFromExpr(db, pExpr->pLeft, enc, aff, ppVal, pCtx);
testcase( rc!=SQLITE_OK );
if( *ppVal ){
- sqlite3VdbeMemCast(*ppVal, aff, SQLITE_UTF8);
- sqlite3ValueApplyAffinity(*ppVal, affinity, SQLITE_UTF8);
+ sqlite3VdbeMemCast(*ppVal, aff, enc);
+ sqlite3ValueApplyAffinity(*ppVal, affinity, enc);
}
return rc;
}
@@ -79793,6 +81993,9 @@ SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value *pVal, u8 enc){
if( (p->flags & MEM_Str)!=0 && pVal->enc==enc ){
return p->n;
}
+ if( (p->flags & MEM_Str)!=0 && enc!=SQLITE_UTF8 && pVal->enc!=SQLITE_UTF8 ){
+ return p->n;
+ }
if( (p->flags & MEM_Blob)!=0 ){
if( p->flags & MEM_Zero ){
return p->n + p->u.nZero;
@@ -79838,12 +82041,12 @@ SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse *pParse){
memset(&p->aOp, 0, sizeof(Vdbe)-offsetof(Vdbe,aOp));
p->db = db;
if( db->pVdbe ){
- db->pVdbe->pPrev = p;
+ db->pVdbe->ppVPrev = &p->pVNext;
}
- p->pNext = db->pVdbe;
- p->pPrev = 0;
+ p->pVNext = db->pVdbe;
+ p->ppVPrev = &db->pVdbe;
db->pVdbe = p;
- p->iVdbeMagic = VDBE_MAGIC_INIT;
+ assert( p->eVdbeState==VDBE_INIT_STATE );
p->pParse = pParse;
pParse->pVdbe = p;
assert( pParse->aLabel==0 );
@@ -79923,21 +82126,28 @@ SQLITE_PRIVATE int sqlite3VdbeUsesDoubleQuotedString(
#endif
/*
-** Swap all content between two VDBE structures.
+** Swap byte-code between two VDBE structures.
+**
+** This happens after pB was previously run and returned
+** SQLITE_SCHEMA. The statement was then reprepared in pA.
+** This routine transfers the new bytecode in pA over to pB
+** so that pB can be run again. The old pB byte code is
+** moved back to pA so that it will be cleaned up when pA is
+** finalized.
*/
SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe *pA, Vdbe *pB){
- Vdbe tmp, *pTmp;
+ Vdbe tmp, *pTmp, **ppTmp;
char *zTmp;
assert( pA->db==pB->db );
tmp = *pA;
*pA = *pB;
*pB = tmp;
- pTmp = pA->pNext;
- pA->pNext = pB->pNext;
- pB->pNext = pTmp;
- pTmp = pA->pPrev;
- pA->pPrev = pB->pPrev;
- pB->pPrev = pTmp;
+ pTmp = pA->pVNext;
+ pA->pVNext = pB->pVNext;
+ pB->pVNext = pTmp;
+ ppTmp = pA->ppVPrev;
+ pA->ppVPrev = pB->ppVPrev;
+ pB->ppVPrev = ppTmp;
zTmp = pA->zSql;
pA->zSql = pB->zSql;
pB->zSql = zTmp;
@@ -79988,7 +82198,7 @@ static int growOpArray(Vdbe *v, int nOp){
return SQLITE_NOMEM;
}
- assert( nOp<=(1024/sizeof(Op)) );
+ assert( nOp<=(int)(1024/sizeof(Op)) );
assert( nNew>=(v->nOpAlloc+nOp) );
pNew = sqlite3DbRealloc(p->db, v->aOp, nNew*sizeof(Op));
if( pNew ){
@@ -80044,7 +82254,7 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
VdbeOp *pOp;
i = p->nOp;
- assert( p->iVdbeMagic==VDBE_MAGIC_INIT );
+ assert( p->eVdbeState==VDBE_INIT_STATE );
assert( op>=0 && op<0xff );
if( p->nOpAlloc<=i ){
return growOp3(p, op, p1, p2, p3);
@@ -80189,6 +82399,7 @@ SQLITE_PRIVATE int sqlite3VdbeAddFunctionCall(
addr = sqlite3VdbeAddOp4(v, eCallCtx ? OP_PureFunc : OP_Function,
p1, p2, p3, (char*)pCtx, P4_FUNCCTX);
sqlite3VdbeChangeP5(v, eCallCtx & NC_SelfRef);
+ sqlite3MayAbort(pParse);
return addr;
}
@@ -80257,7 +82468,7 @@ SQLITE_PRIVATE void sqlite3VdbeExplain(Parse *pParse, u8 bPush, const char *zFmt
iThis = v->nOp;
sqlite3VdbeAddOp4(v, OP_Explain, iThis, pParse->addrExplain, 0,
zMsg, P4_DYNAMIC);
- sqlite3ExplainBreakpoint(bPush?"PUSH":"", sqlite3VdbeGetOp(v,-1)->p4.z);
+ sqlite3ExplainBreakpoint(bPush?"PUSH":"", sqlite3VdbeGetLastOp(v)->p4.z);
if( bPush){
pParse->addrExplain = iThis;
}
@@ -80376,7 +82587,7 @@ static SQLITE_NOINLINE void resizeResolveLabel(Parse *p, Vdbe *v, int j){
SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe *v, int x){
Parse *p = v->pParse;
int j = ADDR(x);
- assert( v->iVdbeMagic==VDBE_MAGIC_INIT );
+ assert( v->eVdbeState==VDBE_INIT_STATE );
assert( j<-p->nLabel );
assert( j>=0 );
#ifdef SQLITE_DEBUG
@@ -80396,14 +82607,20 @@ SQLITE_PRIVATE void sqlite3VdbeResolveLabel(Vdbe *v, int x){
** Mark the VDBE as one that can only be run one time.
*/
SQLITE_PRIVATE void sqlite3VdbeRunOnlyOnce(Vdbe *p){
- p->runOnlyOnce = 1;
+ sqlite3VdbeAddOp2(p, OP_Expire, 1, 1);
}
/*
-** Mark the VDBE as one that can only be run multiple times.
+** Mark the VDBE as one that can be run multiple times.
*/
SQLITE_PRIVATE void sqlite3VdbeReusable(Vdbe *p){
- p->runOnlyOnce = 0;
+ int i;
+ for(i=1; ALWAYS(i<p->nOp); i++){
+ if( ALWAYS(p->aOp[i].opcode==OP_Expire) ){
+ p->aOp[1].opcode = OP_Noop;
+ break;
+ }
+ }
}
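/* The two routines above now work as a pair: sqlite3VdbeRunOnlyOnce()
** appends an "OP_Expire 1 1" instruction instead of setting a flag, and
** sqlite3VdbeReusable() cancels it by rewriting that opcode to OP_Noop.
** The ALWAYS() terms capture the expectation that the OP_Expire sits at
** aOp[1], immediately after the initial OP_Init. */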
#ifdef SQLITE_DEBUG /* sqlite3AssertMayAbort() logic */
@@ -80507,6 +82724,8 @@ SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *v, int mayAbort){
int hasInitCoroutine = 0;
Op *pOp;
VdbeOpIter sIter;
+
+ if( v==0 ) return 0;
memset(&sIter, 0, sizeof(sIter));
sIter.v = v;
@@ -80516,6 +82735,7 @@ SQLITE_PRIVATE int sqlite3VdbeAssertMayAbort(Vdbe *v, int mayAbort){
|| opcode==OP_VDestroy
|| opcode==OP_VCreate
|| opcode==OP_ParseSchema
+ || opcode==OP_Function || opcode==OP_PureFunc
|| ((opcode==OP_Halt || opcode==OP_HaltIfNull)
&& ((pOp->p1)!=SQLITE_OK && pOp->p2==OE_Abort))
){
@@ -80590,7 +82810,7 @@ SQLITE_PRIVATE void sqlite3VdbeAssertAbortable(Vdbe *p){
** (3) Update the Vdbe.readOnly and Vdbe.bIsReader flags to accurately
** indicate what the prepared statement actually does.
**
-** (4) Initialize the p4.xAdvance pointer on opcodes that use it.
+** (4) (discontinued)
**
** (5) Reclaim the memory allocated for storing labels.
**
@@ -80606,8 +82826,8 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){
p->readOnly = 1;
p->bIsReader = 0;
pOp = &p->aOp[p->nOp-1];
- while(1){
-
+ assert( p->aOp[0].opcode==OP_Init );
+ while( 1 /* Loop terminates when it reaches the OP_Init opcode */ ){
/* Only JUMP opcodes and the short list of special opcodes in the switch
** below need to be considered. The mkopcodeh.tcl generator script groups
** all these opcodes together near the front of the opcode list. Skip
@@ -80636,24 +82856,9 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){
p->bIsReader = 1;
break;
}
- case OP_Next:
- case OP_SorterNext: {
- pOp->p4.xAdvance = sqlite3BtreeNext;
- pOp->p4type = P4_ADVANCE;
- /* The code generator never codes any of these opcodes as a jump
- ** to a label. They are always coded as a jump backwards to a
- ** known address */
- assert( pOp->p2>=0 );
- break;
- }
- case OP_Prev: {
- pOp->p4.xAdvance = sqlite3BtreePrevious;
- pOp->p4type = P4_ADVANCE;
- /* The code generator never codes any of these opcodes as a jump
- ** to a label. They are always coded as a jump backwards to a
- ** known address */
+ case OP_Init: {
assert( pOp->p2>=0 );
- break;
+ goto resolve_p2_values_loop_exit;
}
#ifndef SQLITE_OMIT_VIRTUALTABLE
case OP_VUpdate: {
@@ -80687,21 +82892,108 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){
** have non-negative values for P2. */
assert( (sqlite3OpcodeProperty[pOp->opcode]&OPFLG_JUMP)==0 || pOp->p2>=0);
}
- if( pOp==p->aOp ) break;
+ assert( pOp>p->aOp );
pOp--;
}
- sqlite3DbFree(p->db, pParse->aLabel);
- pParse->aLabel = 0;
+resolve_p2_values_loop_exit:
+ if( aLabel ){
+ sqlite3DbNNFreeNN(p->db, pParse->aLabel);
+ pParse->aLabel = 0;
+ }
pParse->nLabel = 0;
*pMaxFuncArgs = nMaxArgs;
assert( p->bIsReader!=0 || DbMaskAllZero(p->btreeMask) );
}
+#ifdef SQLITE_DEBUG
+/*
+** Check to see if a subroutine contains a jump to a location outside of
+** the subroutine. If a jump outside the subroutine is detected, add code
+** that will cause the program to halt with an error message.
+**
+** The subroutine consists of opcodes between iFirst and iLast. Jumps to
+** locations within the subroutine are acceptable. iRetReg is a register
+** that contains the return address. Jumps to outside the range of iFirst
+** through iLast are also acceptable as long as the jump destination is
+** an OP_Return to iRetReg.
+**
+** A jump to an unresolved label means that the jump destination will be
+** beyond the current address. That is normally a jump to an early
+** termination and is considered acceptable.
+**
+** This routine only runs during debug builds. The purpose is (of course)
+** to detect invalid escapes out of a subroutine. The OP_Halt opcode
+** is generated rather than an assert() or other error, so that ".eqp full"
+** will still work to show the original bytecode, to aid in debugging.
+*/
+SQLITE_PRIVATE void sqlite3VdbeNoJumpsOutsideSubrtn(
+ Vdbe *v, /* The byte-code program under construction */
+ int iFirst, /* First opcode of the subroutine */
+ int iLast, /* Last opcode of the subroutine */
+ int iRetReg /* Subroutine return address register */
+){
+ VdbeOp *pOp;
+ Parse *pParse;
+ int i;
+ sqlite3_str *pErr = 0;
+ assert( v!=0 );
+ pParse = v->pParse;
+ assert( pParse!=0 );
+ if( pParse->nErr ) return;
+ assert( iLast>=iFirst );
+ assert( iLast<v->nOp );
+ pOp = &v->aOp[iFirst];
+ for(i=iFirst; i<=iLast; i++, pOp++){
+ if( (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP)!=0 ){
+ int iDest = pOp->p2; /* Jump destination */
+ if( iDest==0 ) continue;
+ if( pOp->opcode==OP_Gosub ) continue;
+ if( iDest<0 ){
+ int j = ADDR(iDest);
+ assert( j>=0 );
+ if( j>=-pParse->nLabel || pParse->aLabel[j]<0 ){
+ continue;
+ }
+ iDest = pParse->aLabel[j];
+ }
+ if( iDest<iFirst || iDest>iLast ){
+ int j = iDest;
+ for(; j<v->nOp; j++){
+ VdbeOp *pX = &v->aOp[j];
+ if( pX->opcode==OP_Return ){
+ if( pX->p1==iRetReg ) break;
+ continue;
+ }
+ if( pX->opcode==OP_Noop ) continue;
+ if( pX->opcode==OP_Explain ) continue;
+ if( pErr==0 ){
+ pErr = sqlite3_str_new(0);
+ }else{
+ sqlite3_str_appendchar(pErr, 1, '\n');
+ }
+ sqlite3_str_appendf(pErr,
+ "Opcode at %d jumps to %d which is outside the "
+ "subroutine at %d..%d",
+ i, iDest, iFirst, iLast);
+ break;
+ }
+ }
+ }
+ }
+ if( pErr ){
+ char *zErr = sqlite3_str_finish(pErr);
+ sqlite3VdbeAddOp4(v, OP_Halt, SQLITE_INTERNAL, OE_Abort, 0, zErr, 0);
+ sqlite3_free(zErr);
+ sqlite3MayAbort(pParse);
+ }
+}
+#endif /* SQLITE_DEBUG */
+
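/* Illustrative call pattern (a sketch only; iSubStart and regReturn are
** hypothetical locals, and the real call sites live elsewhere in the
** code generator): after coding a subroutine that runs from address
** iSubStart to the current end of the program, with its return address
** kept in register regReturn, a debug build could verify it with
**
**    sqlite3VdbeNoJumpsOutsideSubrtn(v, iSubStart,
**                                    sqlite3VdbeCurrentAddr(v)-1, regReturn);
**
** Any stray jump then halts with SQLITE_INTERNAL at run-time instead of
** silently running unrelated byte-code. */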
/*
** Return the address of the next instruction to be inserted.
*/
SQLITE_PRIVATE int sqlite3VdbeCurrentAddr(Vdbe *p){
- assert( p->iVdbeMagic==VDBE_MAGIC_INIT );
+ assert( p->eVdbeState==VDBE_INIT_STATE );
return p->nOp;
}
@@ -80786,7 +83078,7 @@ SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList(
int i;
VdbeOp *pOut, *pFirst;
assert( nOp>0 );
- assert( p->iVdbeMagic==VDBE_MAGIC_INIT );
+ assert( p->eVdbeState==VDBE_INIT_STATE );
if( p->nOp + nOp > p->nOpAlloc && growOpArray(p, nOp) ){
return 0;
}
@@ -80854,15 +83146,19 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatus(
** for a specific instruction.
*/
SQLITE_PRIVATE void sqlite3VdbeChangeOpcode(Vdbe *p, int addr, u8 iNewOpcode){
+ assert( addr>=0 );
sqlite3VdbeGetOp(p,addr)->opcode = iNewOpcode;
}
SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe *p, int addr, int val){
+ assert( addr>=0 );
sqlite3VdbeGetOp(p,addr)->p1 = val;
}
SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe *p, int addr, int val){
+ assert( addr>=0 || p->db->mallocFailed );
sqlite3VdbeGetOp(p,addr)->p2 = val;
}
SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe *p, int addr, int val){
+ assert( addr>=0 );
sqlite3VdbeGetOp(p,addr)->p3 = val;
}
SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u16 p5){
@@ -80870,6 +83166,18 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u16 p5){
if( p->nOp>0 ) p->aOp[p->nOp-1].p5 = p5;
}
+/*
+** If the previous opcode is an OP_Column that delivers results
+** into register iDest, then add the OPFLAG_TYPEOFARG flag to that
+** opcode.
+*/
+SQLITE_PRIVATE void sqlite3VdbeTypeofColumn(Vdbe *p, int iDest){
+ VdbeOp *pOp = sqlite3VdbeGetLastOp(p);
+ if( pOp->p3==iDest && pOp->opcode==OP_Column ){
+ pOp->p5 |= OPFLAG_TYPEOFARG;
+ }
+}
+
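/* Usage sketch (illustrative): when the code generator has just emitted
** the OP_Column that loads a column into register iReg for an expression
** such as typeof(x), calling sqlite3VdbeTypeofColumn(v, iReg) tags that
** OP_Column with OPFLAG_TYPEOFARG so the opcode can avoid loading the
** full column value when only its datatype is needed. */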
/*
** Change the P2 operand of instruction addr so that it points to
** the address of the next instruction to be coded.
@@ -80898,7 +83206,7 @@ SQLITE_PRIVATE void sqlite3VdbeJumpHereOrPopInst(Vdbe *p, int addr){
|| p->aOp[addr].opcode==OP_FkIfZero );
assert( p->aOp[addr].p4type==0 );
#ifdef SQLITE_VDBE_COVERAGE
- sqlite3VdbeGetOp(p,-1)->iSrcLine = 0; /* Erase VdbeCoverage() macros */
+ sqlite3VdbeGetLastOp(p)->iSrcLine = 0; /* Erase VdbeCoverage() macros */
#endif
p->nOp--;
}else{
@@ -80912,8 +83220,9 @@ SQLITE_PRIVATE void sqlite3VdbeJumpHereOrPopInst(Vdbe *p, int addr){
** the FuncDef is not ephemeral, then do nothing.
*/
static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef){
+ assert( db!=0 );
if( (pDef->funcFlags & SQLITE_FUNC_EPHEM)!=0 ){
- sqlite3DbFreeNN(db, pDef);
+ sqlite3DbNNFreeNN(db, pDef);
}
}
@@ -80922,11 +83231,12 @@ static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef){
*/
static SQLITE_NOINLINE void freeP4Mem(sqlite3 *db, Mem *p){
if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc);
- sqlite3DbFreeNN(db, p);
+ sqlite3DbNNFreeNN(db, p);
}
static SQLITE_NOINLINE void freeP4FuncCtx(sqlite3 *db, sqlite3_context *p){
+ assert( db!=0 );
freeEphemeralFunction(db, p->pFunc);
- sqlite3DbFreeNN(db, p);
+ sqlite3DbNNFreeNN(db, p);
}
static void freeP4(sqlite3 *db, int p4type, void *p4){
assert( db );
@@ -80938,9 +83248,8 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){
case P4_REAL:
case P4_INT64:
case P4_DYNAMIC:
- case P4_DYNBLOB:
case P4_INTARRAY: {
- sqlite3DbFree(db, p4);
+ if( p4 ) sqlite3DbNNFreeNN(db, p4);
break;
}
case P4_KEYINFO: {
@@ -80978,15 +83287,19 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){
** nOp entries.
*/
static void vdbeFreeOpArray(sqlite3 *db, Op *aOp, int nOp){
+ assert( nOp>=0 );
+ assert( db!=0 );
if( aOp ){
- Op *pOp;
- for(pOp=&aOp[nOp-1]; pOp>=aOp; pOp--){
+ Op *pOp = &aOp[nOp-1];
+ while(1){ /* Exit via break */
if( pOp->p4type <= P4_FREE_IF_LE ) freeP4(db, pOp->p4type, pOp->p4.p);
#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
sqlite3DbFree(db, pOp->zComment);
#endif
+ if( pOp==aOp ) break;
+ pOp--;
}
- sqlite3DbFreeNN(db, aOp);
+ sqlite3DbNNFreeNN(db, aOp);
}
}
@@ -81046,7 +83359,7 @@ SQLITE_PRIVATE void sqlite3VdbeReleaseRegisters(
u32 mask, /* Mask of registers to NOT release */
int bUndefine /* If true, mark registers as undefined */
){
- if( N==0 ) return;
+ if( N==0 || OptimizationDisabled(pParse->db, SQLITE_ReleaseReg) ) return;
assert( pParse->pVdbe );
assert( iFirst>=1 );
assert( iFirst+N-1<=pParse->nMem );
@@ -81110,7 +83423,7 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe *p, int addr, const char *zP4, int
sqlite3 *db;
assert( p!=0 );
db = p->db;
- assert( p->iVdbeMagic==VDBE_MAGIC_INIT );
+ assert( p->eVdbeState==VDBE_INIT_STATE );
assert( p->aOp!=0 || db->mallocFailed );
if( db->mallocFailed ){
if( n!=P4_VTAB ) freeP4(db, n, (void*)*(char**)&zP4);
@@ -81155,7 +83468,7 @@ SQLITE_PRIVATE void sqlite3VdbeAppendP4(Vdbe *p, void *pP4, int n){
if( p->db->mallocFailed ){
freeP4(p->db, n, pP4);
}else{
- assert( pP4!=0 );
+ assert( pP4!=0 || n==P4_DYNAMIC );
assert( p->nOp>0 );
pOp = &p->aOp[p->nOp-1];
assert( pOp->p4type==P4_NOTUSED );
@@ -81217,13 +83530,13 @@ SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe *p, const char *zFormat, ...){
** Set the value of the iSrcLine field for the previously coded instruction.
*/
SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe *v, int iLine){
- sqlite3VdbeGetOp(v,-1)->iSrcLine = iLine;
+ sqlite3VdbeGetLastOp(v)->iSrcLine = iLine;
}
#endif /* SQLITE_VDBE_COVERAGE */
/*
-** Return the opcode for a given address. If the address is -1, then
-** return the most recently inserted opcode.
+** Return the opcode for a given address. The address must be non-negative.
+** See sqlite3VdbeGetLastOp() to get the most recently added opcode.
**
** If a memory allocation error has occurred prior to the calling of this
** routine, then a pointer to a dummy VdbeOp will be returned. That opcode
@@ -81238,10 +83551,7 @@ SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){
/* C89 specifies that the constant "dummy" will be initialized to all
** zeros, which is correct. MSVC generates a warning, nevertheless. */
static VdbeOp dummy; /* Ignore the MSVC warning about no initializer */
- assert( p->iVdbeMagic==VDBE_MAGIC_INIT );
- if( addr<0 ){
- addr = p->nOp - 1;
- }
+ assert( p->eVdbeState==VDBE_INIT_STATE );
assert( (addr>=0 && addr<p->nOp) || p->db->mallocFailed );
if( p->db->mallocFailed ){
return (VdbeOp*)&dummy;
@@ -81250,6 +83560,12 @@ SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){
}
}
+/* Return the most recently added opcode
+*/
+SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetLastOp(Vdbe *p){
+ return sqlite3VdbeGetOp(p, p->nOp - 1);
+}
+
#if defined(SQLITE_ENABLE_EXPLAIN_COMMENTS)
/*
** Return an integer value for one of the parameters to the opcode pOp
@@ -81305,8 +83621,11 @@ SQLITE_PRIVATE char *sqlite3VdbeDisplayComment(
if( c=='4' ){
sqlite3_str_appendall(&x, zP4);
}else if( c=='X' ){
- sqlite3_str_appendall(&x, pOp->zComment);
- seenCom = 1;
+ if( pOp->zComment && pOp->zComment[0] ){
+ sqlite3_str_appendall(&x, pOp->zComment);
+ seenCom = 1;
+ break;
+ }
}else{
int v1 = translateP(c, pOp);
int v2;
@@ -81535,10 +83854,6 @@ SQLITE_PRIVATE char *sqlite3VdbeDisplayP4(sqlite3 *db, Op *pOp){
zP4 = "program";
break;
}
- case P4_DYNBLOB:
- case P4_ADVANCE: {
- break;
- }
case P4_TABLE: {
zP4 = pOp->p4.pTab->zName;
break;
@@ -81670,21 +83985,40 @@ SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE *pOut, int pc, VdbeOp *pOp){
/*
** Initialize an array of N Mem element.
+**
+** This is a high-runner, so only those fields that really do need to
+** be initialized are set. The Mem structure is organized so that
+** the fields that get initialized are nearby and hopefully on the same
+** cache line.
+**
+** Mem.flags = flags
+** Mem.db = db
+** Mem.szMalloc = 0
+**
+** All other fields of Mem can safely remain uninitialized for now. They
+** will be initialized before use.
*/
static void initMemArray(Mem *p, int N, sqlite3 *db, u16 flags){
- while( (N--)>0 ){
- p->db = db;
- p->flags = flags;
- p->szMalloc = 0;
+ if( N>0 ){
+ do{
+ p->flags = flags;
+ p->db = db;
+ p->szMalloc = 0;
#ifdef SQLITE_DEBUG
- p->pScopyFrom = 0;
+ p->pScopyFrom = 0;
#endif
- p++;
+ p++;
+ }while( (--N)>0 );
}
}
/*
-** Release an array of N Mem elements
+** Release auxiliary memory held in an array of N Mem elements.
+**
+** After this routine returns, all Mem elements in the array will still
+** be valid. Those Mem elements that were not holding auxiliary resources
+** will be unchanged. Mem elements which had something freed will be
+** set to MEM_Undefined.
*/
static void releaseMemArray(Mem *p, int N){
if( p && N ){
@@ -81717,12 +84051,17 @@ static void releaseMemArray(Mem *p, int N){
if( p->flags&(MEM_Agg|MEM_Dyn) ){
testcase( (p->flags & MEM_Dyn)!=0 && p->xDel==sqlite3VdbeFrameMemDel );
sqlite3VdbeMemRelease(p);
+ p->flags = MEM_Undefined;
}else if( p->szMalloc ){
- sqlite3DbFreeNN(db, p->zMalloc);
+ sqlite3DbNNFreeNN(db, p->zMalloc);
p->szMalloc = 0;
+ p->flags = MEM_Undefined;
}
-
- p->flags = MEM_Undefined;
+#ifdef SQLITE_DEBUG
+ else{
+ p->flags = MEM_Undefined;
+ }
+#endif
}while( (++p)<pEnd );
VdbeCursor **apCsr = (VdbeCursor **)&aMem[p->nChildMem];
assert( sqlite3VdbeFrameIsValid(p) );
for(i=0; i<p->nChildCsr; i++){
- sqlite3VdbeFreeCursor(p->v, apCsr[i]);
+ if( apCsr[i] ) sqlite3VdbeFreeCursorNN(p->v, apCsr[i]);
}
releaseMemArray(aMem, p->nChildMem);
sqlite3VdbeDeleteAuxData(p->v->db, &p->pAuxData, -1, 0);
@@ -81920,7 +84259,7 @@ SQLITE_PRIVATE int sqlite3VdbeList(
Op *pOp; /* Current opcode */
assert( p->explain );
- assert( p->iVdbeMagic==VDBE_MAGIC_RUN );
+ assert( p->eVdbeState==VDBE_RUN_STATE );
assert( p->rc==SQLITE_OK || p->rc==SQLITE_BUSY || p->rc==SQLITE_NOMEM );
/* Even though this opcode does not use dynamic strings for
@@ -82075,11 +84414,11 @@ struct ReusableSpace {
static void *allocSpace(
struct ReusableSpace *p, /* Bulk memory available for allocation */
void *pBuf, /* Pointer to a prior allocation */
- sqlite3_int64 nByte /* Bytes of memory needed */
+ sqlite3_int64 nByte /* Bytes of memory needed. */
){
assert( EIGHT_BYTE_ALIGNMENT(p->pSpace) );
if( pBuf==0 ){
- nByte = ROUND8(nByte);
+ nByte = ROUND8P(nByte);
if( nByte <= p->nFree ){
p->nFree -= nByte;
pBuf = &p->pSpace[p->nFree];
@@ -82100,14 +84439,15 @@ SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){
int i;
#endif
assert( p!=0 );
- assert( p->iVdbeMagic==VDBE_MAGIC_INIT || p->iVdbeMagic==VDBE_MAGIC_RESET );
+ assert( p->eVdbeState==VDBE_INIT_STATE
+ || p->eVdbeState==VDBE_READY_STATE
+ || p->eVdbeState==VDBE_HALT_STATE );
/* There should be at least one opcode.
*/
assert( p->nOp>0 );
- /* Set the magic to VDBE_MAGIC_RUN sooner rather than later. */
- p->iVdbeMagic = VDBE_MAGIC_RUN;
+ p->eVdbeState = VDBE_READY_STATE;
#ifdef SQLITE_DEBUG
for(i=0; i<p->nMem; i++){
@@ -82163,7 +84503,7 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
assert( p!=0 );
assert( p->nOp>0 );
assert( pParse!=0 );
- assert( p->iVdbeMagic==VDBE_MAGIC_INIT );
+ assert( p->eVdbeState==VDBE_INIT_STATE );
assert( pParse==p->pParse );
p->pVList = pParse->pVList;
pParse->pVList = 0;
@@ -82186,7 +84526,7 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
** opcode array. This extra memory will be reallocated for other elements
** of the prepared statement.
*/
- n = ROUND8(sizeof(Op)*p->nOp); /* Bytes of opcode memory used */
+ n = ROUND8P(sizeof(Op)*p->nOp); /* Bytes of opcode memory used */
x.pSpace = &((u8*)p->aOp)[n]; /* Unused opcode memory */
assert( EIGHT_BYTE_ALIGNMENT(x.pSpace) );
x.nFree = ROUNDDOWN8(pParse->szOpAlloc - n); /* Bytes of unused memory */
@@ -82274,9 +84614,9 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
** happens to hold.
*/
SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){
- if( pCx==0 ){
- return;
- }
+ if( pCx ) sqlite3VdbeFreeCursorNN(p,pCx);
+}
+SQLITE_PRIVATE void sqlite3VdbeFreeCursorNN(Vdbe *p, VdbeCursor *pCx){
switch( pCx->eCurType ){
case CURTYPE_SORTER: {
sqlite3VdbeSorterClose(p->db, pCx);
@@ -82304,14 +84644,12 @@ SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){
** Close all cursors in the current frame.
*/
static void closeCursorsInFrame(Vdbe *p){
- if( p->apCsr ){
- int i;
- for(i=0; i<p->nCursor; i++){
- VdbeCursor *pC = p->apCsr[i];
- if( pC ){
- sqlite3VdbeFreeCursor(p, pC);
- p->apCsr[i] = 0;
- }
+ int i;
+ for(i=0; i<p->nCursor; i++){
+ VdbeCursor *pC = p->apCsr[i];
+ if( pC ){
+ sqlite3VdbeFreeCursorNN(p, pC);
+ p->apCsr[i] = 0;
}
}
}
@@ -82360,9 +84698,7 @@ static void closeAllCursors(Vdbe *p){
}
assert( p->nFrame==0 );
closeCursorsInFrame(p);
- if( p->aMem ){
- releaseMemArray(p->aMem, p->nMem);
- }
+ releaseMemArray(p->aMem, p->nMem);
while( p->pDelFrame ){
VdbeFrame *pDel = p->pDelFrame;
p->pDelFrame = pDel->pParent;
@@ -82709,7 +85045,7 @@ static void checkActiveVdbeCnt(sqlite3 *db){
if( p->readOnly==0 ) nWrite++;
if( p->bIsReader ) nRead++;
}
- p = p->pNext;
+ p = p->pVNext;
}
assert( cnt==db->nVdbeActive );
assert( nWrite==db->nVdbeWrite );
@@ -82802,7 +85138,8 @@ SQLITE_PRIVATE int sqlite3VdbeCheckFk(Vdbe *p, int deferred){
p->rc = SQLITE_CONSTRAINT_FOREIGNKEY;
p->errorAction = OE_Abort;
sqlite3VdbeError(p, "FOREIGN KEY constraint failed");
- return SQLITE_ERROR;
+ if( (p->prepFlags & SQLITE_PREPARE_SAVESQL)==0 ) return SQLITE_ERROR;
+ return SQLITE_CONSTRAINT_FOREIGNKEY;
}
return SQLITE_OK;
}
@@ -82841,9 +85178,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
** one, or the complete transaction if there is no statement transaction.
*/
- if( p->iVdbeMagic!=VDBE_MAGIC_RUN ){
- return SQLITE_OK;
- }
+ assert( p->eVdbeState==VDBE_RUN_STATE );
if( db->mallocFailed ){
p->rc = SQLITE_NOMEM_BKPT;
}
@@ -82852,7 +85187,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
/* No commit or rollback needed if the program never started or if the
** SQL statement does not read or write a database file. */
- if( p->pc>=0 && p->bIsReader ){
+ if( p->bIsReader ){
int mrc; /* Primary error code from p->rc */
int eStatementOp = 0;
int isSpecialError; /* Set to true if a 'special' error */
@@ -83000,15 +85335,13 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){
}
/* We have successfully halted and closed the VM. Record this fact. */
- if( p->pc>=0 ){
- db->nVdbeActive--;
- if( !p->readOnly ) db->nVdbeWrite--;
- if( p->bIsReader ) db->nVdbeRead--;
- assert( db->nVdbeActive>=db->nVdbeRead );
- assert( db->nVdbeRead>=db->nVdbeWrite );
- assert( db->nVdbeWrite>=0 );
- }
- p->iVdbeMagic = VDBE_MAGIC_HALT;
+ db->nVdbeActive--;
+ if( !p->readOnly ) db->nVdbeWrite--;
+ if( p->bIsReader ) db->nVdbeRead--;
+ assert( db->nVdbeActive>=db->nVdbeRead );
+ assert( db->nVdbeRead>=db->nVdbeWrite );
+ assert( db->nVdbeWrite>=0 );
+ p->eVdbeState = VDBE_HALT_STATE;
checkActiveVdbeCnt(db);
if( db->mallocFailed ){
p->rc = SQLITE_NOMEM_BKPT;
@@ -83090,8 +85423,8 @@ static void vdbeInvokeSqllog(Vdbe *v){
** again.
**
** To look at it another way, this routine resets the state of the
-** virtual machine from VDBE_MAGIC_RUN or VDBE_MAGIC_HALT back to
-** VDBE_MAGIC_INIT.
+** virtual machine from VDBE_RUN_STATE or VDBE_HALT_STATE back to
+** VDBE_READY_STATE.
*/
SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){
#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
@@ -83105,7 +85438,7 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){
** error, then it might not have been halted properly. So halt
** it now.
*/
- sqlite3VdbeHalt(p);
+ if( p->eVdbeState==VDBE_RUN_STATE ) sqlite3VdbeHalt(p);
/* If the VDBE has been run even partially, then transfer the error code
** and error message from the VDBE into the main database structure. But
@@ -83119,13 +85452,6 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){
}else{
db->errCode = p->rc;
}
- if( p->runOnlyOnce ) p->expired = 1;
- }else if( p->rc && p->expired ){
- /* The expired flag was set on the VDBE before the first call
- ** to sqlite3_step(). For consistency (since sqlite3_step() was
- ** called), set the database error in this case as well.
- */
- sqlite3ErrorWithMsg(db, p->rc, p->zErrMsg ? "%s" : 0, p->zErrMsg);
}
/* Reset register contents and reclaim error message memory.
@@ -83182,7 +85508,6 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){
}
}
#endif
- p->iVdbeMagic = VDBE_MAGIC_RESET;
return p->rc & db->errMask;
}
@@ -83192,7 +85517,10 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){
*/
SQLITE_PRIVATE int sqlite3VdbeFinalize(Vdbe *p){
int rc = SQLITE_OK;
- if( p->iVdbeMagic==VDBE_MAGIC_RUN || p->iVdbeMagic==VDBE_MAGIC_HALT ){
+ assert( VDBE_RUN_STATE>VDBE_READY_STATE );
+ assert( VDBE_HALT_STATE>VDBE_READY_STATE );
+ assert( VDBE_INIT_STATE<VDBE_READY_STATE );
+ if( p->eVdbeState>=VDBE_READY_STATE ){
rc = sqlite3VdbeReset(p);
assert( (rc & p->db->errMask)==rc );
}
@@ -83244,23 +85572,26 @@ SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(sqlite3 *db, AuxData **pp, int iOp,
** VdbeDelete() also unlinks the Vdbe from the list of VMs associated with
** the database connection and frees the object itself.
*/
-SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){
+static void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){
SubProgram *pSub, *pNext;
+ assert( db!=0 );
assert( p->db==0 || p->db==db );
- releaseMemArray(p->aColName, p->nResColumn*COLNAME_N);
+ if( p->aColName ){
+ releaseMemArray(p->aColName, p->nResColumn*COLNAME_N);
+ sqlite3DbNNFreeNN(db, p->aColName);
+ }
for(pSub=p->pProgram; pSub; pSub=pNext){
pNext = pSub->pNext;
vdbeFreeOpArray(db, pSub->aOp, pSub->nOp);
sqlite3DbFree(db, pSub);
}
- if( p->iVdbeMagic!=VDBE_MAGIC_INIT ){
+ if( p->eVdbeState!=VDBE_INIT_STATE ){
releaseMemArray(p->aVar, p->nVar);
- sqlite3DbFree(db, p->pVList);
- sqlite3DbFree(db, p->pFree);
+ if( p->pVList ) sqlite3DbNNFreeNN(db, p->pVList);
+ if( p->pFree ) sqlite3DbNNFreeNN(db, p->pFree);
}
vdbeFreeOpArray(db, p->aOp, p->nOp);
- sqlite3DbFree(db, p->aColName);
- sqlite3DbFree(db, p->zSql);
+ if( p->zSql ) sqlite3DbNNFreeNN(db, p->zSql);
#ifdef SQLITE_ENABLE_NORMALIZE
sqlite3DbFree(db, p->zNormSql);
{
@@ -83290,20 +85621,17 @@ SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe *p){
assert( p!=0 );
db = p->db;
+ assert( db!=0 );
assert( sqlite3_mutex_held(db->mutex) );
sqlite3VdbeClearObject(db, p);
- if( p->pPrev ){
- p->pPrev->pNext = p->pNext;
- }else{
- assert( db->pVdbe==p );
- db->pVdbe = p->pNext;
- }
- if( p->pNext ){
- p->pNext->pPrev = p->pPrev;
+ if( db->pnBytesFreed==0 ){
+ assert( p->ppVPrev!=0 );
+ *p->ppVPrev = p->pVNext;
+ if( p->pVNext ){
+ p->pVNext->ppVPrev = p->ppVPrev;
+ }
}
- p->iVdbeMagic = VDBE_MAGIC_DEAD;
- p->db = 0;
- sqlite3DbFreeNN(db, p);
+ sqlite3DbNNFreeNN(db, p);
}
/*
@@ -83337,7 +85665,7 @@ SQLITE_PRIVATE int SQLITE_NOINLINE sqlite3VdbeFinishMoveto(VdbeCursor *p){
** is supposed to be pointing. If the row was deleted out from under the
** cursor, set the cursor to point to a NULL row.
*/
-static int SQLITE_NOINLINE handleMovedCursor(VdbeCursor *p){
+SQLITE_PRIVATE int SQLITE_NOINLINE sqlite3VdbeHandleMovedCursor(VdbeCursor *p){
int isDifferentRow, rc;
assert( p->eCurType==CURTYPE_BTREE );
assert( p->uc.pCursor!=0 );
@@ -83353,41 +85681,9 @@ static int SQLITE_NOINLINE handleMovedCursor(VdbeCursor *p){
** if need be. Return any I/O error from the restore operation.
*/
SQLITE_PRIVATE int sqlite3VdbeCursorRestore(VdbeCursor *p){
- assert( p->eCurType==CURTYPE_BTREE );
- if( sqlite3BtreeCursorHasMoved(p->uc.pCursor) ){
- return handleMovedCursor(p);
- }
- return SQLITE_OK;
-}
-
-/*
-** Make sure the cursor p is ready to read or write the row to which it
-** was last positioned. Return an error code if an OOM fault or I/O error
-** prevents us from positioning the cursor to its correct position.
-**
-** If a MoveTo operation is pending on the given cursor, then do that
-** MoveTo now. If no move is pending, check to see if the row has been
-** deleted out from under the cursor and if it has, mark the row as
-** a NULL row.
-**
-** If the cursor is already pointing to the correct row and that row has
-** not been deleted out from under the cursor, then this routine is a no-op.
-*/
-SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor **pp, u32 *piCol){
- VdbeCursor *p = *pp;
- assert( p->eCurType==CURTYPE_BTREE || p->eCurType==CURTYPE_PSEUDO );
- if( p->deferredMoveto ){
- u32 iMap;
- assert( !p->isEphemeral );
- if( p->ub.aAltMap && (iMap = p->ub.aAltMap[1+*piCol])>0 && !p->nullRow ){
- *pp = p->pAltCursor;
- *piCol = iMap - 1;
- return SQLITE_OK;
- }
- return sqlite3VdbeFinishMoveto(p);
- }
+ assert( p->eCurType==CURTYPE_BTREE || IsNullCursor(p) );
if( sqlite3BtreeCursorHasMoved(p->uc.pCursor) ){
- return handleMovedCursor(p);
+ return sqlite3VdbeHandleMovedCursor(p);
}
return SQLITE_OK;
}
@@ -83398,7 +85694,7 @@ SQLITE_PRIVATE int sqlite3VdbeCursorMoveto(VdbeCursor **pp, u32 *piCol){
** sqlite3VdbeSerialType()
** sqlite3VdbeSerialTypeLen()
** sqlite3VdbeSerialLen()
-** sqlite3VdbeSerialPut()
+** sqlite3VdbeSerialPut() <--- in-lined into OP_MakeRecord as of 2022-04-02
** sqlite3VdbeSerialGet()
**
** encapsulate the code that serializes values for storage in SQLite
@@ -83510,7 +85806,7 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialType(Mem *pMem, int file_format, u32 *pLen){
/*
** The sizes for serial types less than 128
*/
-static const u8 sqlite3SmallTypeSizes[] = {
+SQLITE_PRIVATE const u8 sqlite3SmallTypeSizes[128] = {
/* 0 1 2 3 4 5 6 7 8 9 */
/* 0 */ 0, 1, 2, 3, 4, 6, 8, 8, 0, 0,
/* 10 */ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
@@ -83579,7 +85875,7 @@ SQLITE_PRIVATE u8 sqlite3VdbeOneByteSerialTypeLen(u8 serial_type){
** so we trust him.
*/
#ifdef SQLITE_MIXED_ENDIAN_64BIT_FLOAT
-static u64 floatSwap(u64 in){
+SQLITE_PRIVATE u64 sqlite3FloatSwap(u64 in){
union {
u64 r;
u32 i[2];
@@ -83592,59 +85888,8 @@ static u64 floatSwap(u64 in){
u.i[1] = t;
return u.r;
}
-# define swapMixedEndianFloat(X) X = floatSwap(X)
-#else
-# define swapMixedEndianFloat(X)
-#endif
-
-/*
-** Write the serialized data blob for the value stored in pMem into
-** buf. It is assumed that the caller has allocated sufficient space.
-** Return the number of bytes written.
-**
-** nBuf is the amount of space left in buf[]. The caller is responsible
-** for allocating enough space to buf[] to hold the entire field, exclusive
-** of the pMem->u.nZero bytes for a MEM_Zero value.
-**
-** Return the number of bytes actually written into buf[]. The number
-** of bytes in the zero-filled tail is included in the return value only
-** if those bytes were zeroed in buf[].
-*/
-SQLITE_PRIVATE u32 sqlite3VdbeSerialPut(u8 *buf, Mem *pMem, u32 serial_type){
- u32 len;
-
- /* Integer and Real */
- if( serial_type<=7 && serial_type>0 ){
- u64 v;
- u32 i;
- if( serial_type==7 ){
- assert( sizeof(v)==sizeof(pMem->u.r) );
- memcpy(&v, &pMem->u.r, sizeof(v));
- swapMixedEndianFloat(v);
- }else{
- v = pMem->u.i;
- }
- len = i = sqlite3SmallTypeSizes[serial_type];
- assert( i>0 );
- do{
- buf[--i] = (u8)(v&0xFF);
- v >>= 8;
- }while( i );
- return len;
- }
-
- /* String or blob */
- if( serial_type>=12 ){
- assert( pMem->n + ((pMem->flags & MEM_Zero)?pMem->u.nZero:0)
- == (int)sqlite3VdbeSerialTypeLen(serial_type) );
- len = pMem->n;
- if( len>0 ) memcpy(buf, pMem->z, len);
- return len;
- }
+#endif /* SQLITE_MIXED_ENDIAN_64BIT_FLOAT */
- /* NULL or constants 0 or 1 */
- return 0;
-}
/* Input "x" is a sequence of unsigned characters that represent a
** big-endian integer. Return the equivalent native integer
@@ -83810,10 +86055,10 @@ SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(
){
UnpackedRecord *p; /* Unpacked record to return */
int nByte; /* Number of bytes required for *p */
- nByte = ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*(pKeyInfo->nKeyField+1);
+ nByte = ROUND8P(sizeof(UnpackedRecord)) + sizeof(Mem)*(pKeyInfo->nKeyField+1);
p = (UnpackedRecord *)sqlite3DbMallocRaw(pKeyInfo->db, nByte);
if( !p ) return 0;
- p->aMem = (Mem*)&((char*)p)[ROUND8(sizeof(UnpackedRecord))];
+ p->aMem = (Mem*)&((char*)p)[ROUND8P(sizeof(UnpackedRecord))];
assert( pKeyInfo->aSortFlags!=0 );
p->pKeyInfo = pKeyInfo;
p->nField = pKeyInfo->nKeyField + 1;
@@ -84049,8 +86294,8 @@ static int vdbeCompareMemString(
}else{
rc = pColl->xCmp(pColl->pUser, c1.n, v1, c2.n, v2);
}
- sqlite3VdbeMemRelease(&c1);
- sqlite3VdbeMemRelease(&c2);
+ sqlite3VdbeMemReleaseMalloc(&c1);
+ sqlite3VdbeMemReleaseMalloc(&c2);
return rc;
}
}
@@ -84311,14 +86556,22 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
** two elements in the keys are equal. Fix the various stack variables so
** that this routine begins comparing at the second field. */
if( bSkip ){
- u32 s1;
- idx1 = 1 + getVarint32(&aKey1[1], s1);
+ u32 s1 = aKey1[1];
+ if( s1<0x80 ){
+ idx1 = 2;
+ }else{
+ idx1 = 1 + sqlite3GetVarint32(&aKey1[1], &s1);
+ }
szHdr1 = aKey1[0];
d1 = szHdr1 + sqlite3VdbeSerialTypeLen(s1);
i = 1;
pRhs++;
}else{
- idx1 = getVarint32(aKey1, szHdr1);
+ if( (szHdr1 = aKey1[0])<0x80 ){
+ idx1 = 1;
+ }else{
+ idx1 = sqlite3GetVarint32(aKey1, &szHdr1);
+ }
d1 = szHdr1;
i = 0;
}
@@ -84333,7 +86586,7 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
assert( pPKey2->pKeyInfo->aSortFlags!=0 );
assert( pPKey2->pKeyInfo->nKeyField>0 );
assert( idx1<=szHdr1 || CORRUPT_DB );
- do{
+ while( 1 /*exit-by-break*/ ){
u32 serial_type;
/* RHS is an integer */
@@ -84343,7 +86596,7 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
serial_type = aKey1[idx1];
testcase( serial_type==12 );
if( serial_type>=10 ){
- rc = +1;
+ rc = serial_type==10 ? -1 : +1;
}else if( serial_type==0 ){
rc = -1;
}else if( serial_type==7 ){
@@ -84368,7 +86621,7 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
** numbers). Types 10 and 11 are currently "reserved for future
** use", so it doesn't really matter what the results of comparing
** them to numeric values are. */
- rc = +1;
+ rc = serial_type==10 ? -1 : +1;
}else if( serial_type==0 ){
rc = -1;
}else{
@@ -84449,7 +86702,7 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
/* RHS is null */
else{
serial_type = aKey1[idx1];
- rc = (serial_type!=0);
+ rc = (serial_type!=0 && serial_type!=10);
}
if( rc!=0 ){
@@ -84471,8 +86724,13 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(
if( i==pPKey2->nField ) break;
pRhs++;
d1 += sqlite3VdbeSerialTypeLen(serial_type);
+ if( d1>(unsigned)nKey1 ) break;
idx1 += sqlite3VarintLen(serial_type);
- }while( idx1<(unsigned)szHdr1 && d1<=(unsigned)nKey1 );
+ if( idx1>=(unsigned)szHdr1 ){
+ pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
+ return 0; /* Corrupt index */
+ }
+ }
/* No memory allocation is ever used on mem1. Prove this using
** the following assert(). If the assert() fails, it indicates a
@@ -84574,7 +86832,8 @@ static int vdbeRecordCompareInt(
return sqlite3VdbeRecordCompare(nKey1, pKey1, pPKey2);
}
- v = pPKey2->aMem[0].u.i;
+ assert( pPKey2->u.i == pPKey2->aMem[0].u.i );
+ v = pPKey2->u.i;
if( v>lhs ){
res = pPKey2->r1;
}else if( v<lhs ){
assert( pPKey2->aMem[0].flags & MEM_Str );
+ assert( pPKey2->aMem[0].n == pPKey2->n );
+ assert( pPKey2->aMem[0].z == pPKey2->u.z );
vdbeAssertFieldCountWithinLimits(nKey1, pKey1, pPKey2->pKeyInfo);
- serial_type = (u8)(aKey1[1]);
- if( serial_type >= 0x80 ){
- sqlite3GetVarint32(&aKey1[1], (u32*)&serial_type);
- }
+ serial_type = (signed char)(aKey1[1]);
+
+vrcs_restart:
if( serial_type<12 ){
+ if( serial_type<0 ){
+ sqlite3GetVarint32(&aKey1[1], (u32*)&serial_type);
+ if( serial_type>=12 ) goto vrcs_restart;
+ assert( CORRUPT_DB );
+ }
res = pPKey2->r1; /* (pKey1/nKey1) is a number or a null */
}else if( !(serial_type & 0x01) ){
res = pPKey2->r2; /* (pKey1/nKey1) is a blob */
@@ -84628,15 +86893,15 @@ static int vdbeRecordCompareString(
pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT;
return 0; /* Corruption */
}
- nCmp = MIN( pPKey2->aMem[0].n, nStr );
- res = memcmp(&aKey1[szHdr], pPKey2->aMem[0].z, nCmp);
+ nCmp = MIN( pPKey2->n, nStr );
+ res = memcmp(&aKey1[szHdr], pPKey2->u.z, nCmp);
if( res>0 ){
res = pPKey2->r2;
}else if( res<0 ){
res = pPKey2->r1;
}else{
- res = nStr - pPKey2->aMem[0].n;
+ res = nStr - pPKey2->n;
if( res==0 ){
if( pPKey2->nField>1 ){
res = sqlite3VdbeRecordCompareWithSkip(nKey1, pKey1, pPKey2, 1);
@@ -84691,6 +86956,7 @@ SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord *p){
p->r2 = 1;
}
if( (flags & MEM_Int) ){
+ p->u.i = p->aMem[0].u.i;
return vdbeRecordCompareInt;
}
testcase( flags & MEM_Real );
@@ -84700,6 +86966,8 @@ SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord *p){
&& p->pKeyInfo->aColl[0]==0
){
assert( flags & MEM_Str );
+ p->u.z = p->aMem[0].z;
+ p->n = p->aMem[0].n;
return vdbeRecordCompareString;
}
}
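/* Illustrative consequence (a sketch, not a description of any specific
** query plan): when the left-most field of the probe key is an integer,
** sqlite3VdbeFindCompare() can hand back vdbeRecordCompareInt, and the
** p->u.i value cached above lets that hot comparator read the key
** without touching p->aMem[] at all. */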
@@ -84772,14 +87040,14 @@ SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3 *db, BtCursor *pCur, i64 *rowid){
/* Fetch the integer off the end of the index record */
sqlite3VdbeSerialGet((u8*)&m.z[m.n-lenRowid], typeRowid, &v);
*rowid = v.u.i;
- sqlite3VdbeMemRelease(&m);
+ sqlite3VdbeMemReleaseMalloc(&m);
return SQLITE_OK;
/* Jump here if database corruption is detected after m has been
** allocated. Free the m object and return SQLITE_CORRUPT. */
idx_rowid_corruption:
testcase( m.szMalloc!=0 );
- sqlite3VdbeMemRelease(&m);
+ sqlite3VdbeMemReleaseMalloc(&m);
return SQLITE_CORRUPT_BKPT;
}
@@ -84821,7 +87089,7 @@ SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(
return rc;
}
*res = sqlite3VdbeRecordCompareWithSkip(m.n, m.z, pUnpacked, 0);
- sqlite3VdbeMemRelease(&m);
+ sqlite3VdbeMemReleaseMalloc(&m);
return SQLITE_OK;
}
@@ -84863,7 +87131,7 @@ SQLITE_PRIVATE void sqlite3VdbeCountChanges(Vdbe *v){
*/
SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3 *db, int iCode){
Vdbe *p;
- for(p = db->pVdbe; p; p=p->pNext){
+ for(p = db->pVdbe; p; p=p->pVNext){
p->expired = iCode+1;
}
}
@@ -84984,13 +87252,14 @@ SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe *p, sqlite3_vtab *pVtab){
** the vdbeUnpackRecord() function found in vdbeapi.c.
*/
static void vdbeFreeUnpacked(sqlite3 *db, int nField, UnpackedRecord *p){
+ assert( db!=0 );
if( p ){
int i;
for(i=0; i<nField; i++){
  Mem *pMem = &p->aMem[i];
- if( pMem->zMalloc ) sqlite3VdbeMemRelease(pMem);
+ if( pMem->zMalloc ) sqlite3VdbeMemReleaseMalloc(pMem);
}
- sqlite3DbFreeNN(db, p);
+ sqlite3DbNNFreeNN(db, p);
}
}
#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
@@ -85061,7 +87330,7 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(
for(i=0; i<pCsr->nField; i++){
sqlite3VdbeMemRelease(&preupdate.aNew[i]);
}
- sqlite3DbFreeNN(db, preupdate.aNew);
+ sqlite3DbNNFreeNN(db, preupdate.aNew);
}
}
#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
@@ -85178,7 +87447,9 @@ SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt){
if( vdbeSafety(v) ) return SQLITE_MISUSE_BKPT;
sqlite3_mutex_enter(db->mutex);
checkProfileCallback(db, v);
- rc = sqlite3VdbeFinalize(v);
+ assert( v->eVdbeState>=VDBE_READY_STATE );
+ rc = sqlite3VdbeReset(v);
+ sqlite3VdbeDelete(v);
rc = sqlite3ApiExit(db, rc);
sqlite3LeaveMutexAndCloseZombie(db);
}
@@ -85386,6 +87657,9 @@ SQLITE_API int sqlite3_value_type(sqlite3_value* pVal){
#endif
return aType[pVal->flags&MEM_AffMask];
}
+SQLITE_API int sqlite3_value_encoding(sqlite3_value *pVal){
+ return pVal->enc;
+}
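/* A brief usage note (illustrative; these are the standard SQLite text
** encoding constants): sqlite3_value_encoding(pVal) reports the encoding
** in which the value is currently held -- SQLITE_UTF8, SQLITE_UTF16LE or
** SQLITE_UTF16BE -- which may differ from the encoding the application
** originally supplied if a conversion has already occurred.
*/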
/* Return true if a parameter to xUpdate represents an unchanged column */
SQLITE_API int sqlite3_value_nochange(sqlite3_value *pVal){
@@ -85415,6 +87689,9 @@ SQLITE_API sqlite3_value *sqlite3_value_dup(const sqlite3_value *pOrig){
sqlite3ValueFree(pNew);
pNew = 0;
}
+ }else if( pNew->flags & MEM_Null ){
+ /* Do not duplicate pointer values */
+ pNew->flags &= ~(MEM_Term|MEM_Subtype);
}
return pNew;
}
@@ -85445,7 +87722,8 @@ static void setResultStrOrError(
u8 enc, /* Encoding of z. 0 for BLOBs */
void (*xDel)(void*) /* Destructor function */
){
- int rc = sqlite3VdbeMemSetStr(pCtx->pOut, z, n, enc, xDel);
+ Mem *pOut = pCtx->pOut;
+ int rc = sqlite3VdbeMemSetStr(pOut, z, n, enc, xDel);
if( rc ){
if( rc==SQLITE_TOOBIG ){
sqlite3_result_error_toobig(pCtx);
@@ -85455,6 +87733,11 @@ static void setResultStrOrError(
assert( rc==SQLITE_NOMEM );
sqlite3_result_error_nomem(pCtx);
}
+ return;
+ }
+ sqlite3VdbeChangeEncoding(pOut, pCtx->enc);
+ if( sqlite3VdbeMemTooBig(pOut) ){
+ sqlite3_result_error_toobig(pCtx);
}
}
static int invokeValueDestructor(
@@ -85598,17 +87881,22 @@ SQLITE_API void sqlite3_result_text16le(
}
#endif /* SQLITE_OMIT_UTF16 */
SQLITE_API void sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){
+ Mem *pOut = pCtx->pOut;
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- sqlite3VdbeMemCopy(pCtx->pOut, pValue);
+ sqlite3VdbeMemCopy(pOut, pValue);
+ sqlite3VdbeChangeEncoding(pOut, pCtx->enc);
+ if( sqlite3VdbeMemTooBig(pOut) ){
+ sqlite3_result_error_toobig(pCtx);
+ }
}
SQLITE_API void sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){
- assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- sqlite3VdbeMemSetZeroBlob(pCtx->pOut, n);
+ sqlite3_result_zeroblob64(pCtx, n>0 ? n : 0);
}
SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){
Mem *pOut = pCtx->pOut;
assert( sqlite3_mutex_held(pOut->db->mutex) );
if( n>(u64)pOut->db->aLimit[SQLITE_LIMIT_LENGTH] ){
+ sqlite3_result_error_toobig(pCtx);
return SQLITE_TOOBIG;
}
#ifndef SQLITE_OMIT_INCRBLOB
@@ -85624,8 +87912,8 @@ SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){
if( pCtx->pVdbe ) pCtx->pVdbe->rcApp = errCode;
#endif
if( pCtx->pOut->flags & MEM_Null ){
- sqlite3VdbeMemSetStr(pCtx->pOut, sqlite3ErrStr(errCode), -1,
- SQLITE_UTF8, SQLITE_STATIC);
+ setResultStrOrError(pCtx, sqlite3ErrStr(errCode), -1, SQLITE_UTF8,
+ SQLITE_STATIC);
}
}
@@ -85699,80 +87987,83 @@ static int sqlite3Step(Vdbe *p){
int rc;
assert(p);
- if( p->iVdbeMagic!=VDBE_MAGIC_RUN ){
- /* We used to require that sqlite3_reset() be called before retrying
- ** sqlite3_step() after any error or after SQLITE_DONE. But beginning
- ** with version 3.7.0, we changed this so that sqlite3_reset() would
- ** be called automatically instead of throwing the SQLITE_MISUSE error.
- ** This "automatic-reset" change is not technically an incompatibility,
- ** since any application that receives an SQLITE_MISUSE is broken by
- ** definition.
- **
- ** Nevertheless, some published applications that were originally written
- ** for version 3.6.23 or earlier do in fact depend on SQLITE_MISUSE
- ** returns, and those were broken by the automatic-reset change. As a
- ** a work-around, the SQLITE_OMIT_AUTORESET compile-time restores the
- ** legacy behavior of returning SQLITE_MISUSE for cases where the
- ** previous sqlite3_step() returned something other than a SQLITE_LOCKED
- ** or SQLITE_BUSY error.
- */
-#ifdef SQLITE_OMIT_AUTORESET
- if( (rc = p->rc&0xff)==SQLITE_BUSY || rc==SQLITE_LOCKED ){
- sqlite3_reset((sqlite3_stmt*)p);
- }else{
- return SQLITE_MISUSE_BKPT;
- }
-#else
- sqlite3_reset((sqlite3_stmt*)p);
-#endif
- }
-
- /* Check that malloc() has not failed. If it has, return early. */
db = p->db;
- if( db->mallocFailed ){
- p->rc = SQLITE_NOMEM;
- return SQLITE_NOMEM_BKPT;
- }
+ if( p->eVdbeState!=VDBE_RUN_STATE ){
+ restart_step:
+ if( p->eVdbeState==VDBE_READY_STATE ){
+ if( p->expired ){
+ p->rc = SQLITE_SCHEMA;
+ rc = SQLITE_ERROR;
+ if( (p->prepFlags & SQLITE_PREPARE_SAVESQL)!=0 ){
+ /* If this statement was prepared using saved SQL and an
+ ** error has occurred, then return the error code in p->rc to the
+ ** caller. Set the error code in the database handle to the same
+ ** value.
+ */
+ rc = sqlite3VdbeTransferError(p);
+ }
+ goto end_of_step;
+ }
- if( p->pc<0 && p->expired ){
- p->rc = SQLITE_SCHEMA;
- rc = SQLITE_ERROR;
- if( (p->prepFlags & SQLITE_PREPARE_SAVESQL)!=0 ){
- /* If this statement was prepared using saved SQL and an
- ** error has occurred, then return the error code in p->rc to the
- ** caller. Set the error code in the database handle to the same value.
+ /* If there are no other statements currently running, then
+ ** reset the interrupt flag. This prevents a call to sqlite3_interrupt
+ ** from interrupting a statement that has not yet started.
*/
- rc = sqlite3VdbeTransferError(p);
- }
- goto end_of_step;
- }
- if( p->pc<0 ){
- /* If there are no other statements currently running, then
- ** reset the interrupt flag. This prevents a call to sqlite3_interrupt
- ** from interrupting a statement that has not yet started.
- */
- if( db->nVdbeActive==0 ){
- AtomicStore(&db->u1.isInterrupted, 0);
- }
+ if( db->nVdbeActive==0 ){
+ AtomicStore(&db->u1.isInterrupted, 0);
+ }
- assert( db->nVdbeWrite>0 || db->autoCommit==0
- || (db->nDeferredCons==0 && db->nDeferredImmCons==0)
- );
+ assert( db->nVdbeWrite>0 || db->autoCommit==0
+ || (db->nDeferredCons==0 && db->nDeferredImmCons==0)
+ );
#ifndef SQLITE_OMIT_TRACE
- if( (db->mTrace & (SQLITE_TRACE_PROFILE|SQLITE_TRACE_XPROFILE))!=0
- && !db->init.busy && p->zSql ){
- sqlite3OsCurrentTimeInt64(db->pVfs, &p->startTime);
- }else{
- assert( p->startTime==0 );
- }
+ if( (db->mTrace & (SQLITE_TRACE_PROFILE|SQLITE_TRACE_XPROFILE))!=0
+ && !db->init.busy && p->zSql ){
+ sqlite3OsCurrentTimeInt64(db->pVfs, &p->startTime);
+ }else{
+ assert( p->startTime==0 );
+ }
#endif
- db->nVdbeActive++;
- if( p->readOnly==0 ) db->nVdbeWrite++;
- if( p->bIsReader ) db->nVdbeRead++;
- p->pc = 0;
+ db->nVdbeActive++;
+ if( p->readOnly==0 ) db->nVdbeWrite++;
+ if( p->bIsReader ) db->nVdbeRead++;
+ p->pc = 0;
+ p->eVdbeState = VDBE_RUN_STATE;
+ }else
+
+ if( ALWAYS(p->eVdbeState==VDBE_HALT_STATE) ){
+ /* We used to require that sqlite3_reset() be called before retrying
+ ** sqlite3_step() after any error or after SQLITE_DONE. But beginning
+ ** with version 3.7.0, we changed this so that sqlite3_reset() would
+ ** be called automatically instead of throwing the SQLITE_MISUSE error.
+ ** This "automatic-reset" change is not technically an incompatibility,
+ ** since any application that receives an SQLITE_MISUSE is broken by
+ ** definition.
+ **
+ ** Nevertheless, some published applications that were originally written
+ ** for version 3.6.23 or earlier do in fact depend on SQLITE_MISUSE
+ ** returns, and those were broken by the automatic-reset change. As a
+ ** a work-around, the SQLITE_OMIT_AUTORESET compile-time restores the
+ ** legacy behavior of returning SQLITE_MISUSE for cases where the
+ ** previous sqlite3_step() returned something other than a SQLITE_LOCKED
+ ** or SQLITE_BUSY error.
+ */
+#ifdef SQLITE_OMIT_AUTORESET
+ if( (rc = p->rc&0xff)==SQLITE_BUSY || rc==SQLITE_LOCKED ){
+ sqlite3_reset((sqlite3_stmt*)p);
+ }else{
+ return SQLITE_MISUSE_BKPT;
+ }
+#else
+ sqlite3_reset((sqlite3_stmt*)p);
+#endif
+ assert( p->eVdbeState==VDBE_READY_STATE );
+ goto restart_step;
+ }
}
+
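/* Summary of the control flow above, as visible in this function: a
** statement in VDBE_READY_STATE is moved into VDBE_RUN_STATE the first
** time it is stepped; a statement found in VDBE_HALT_STATE is first put
** through sqlite3_reset() (unless SQLITE_OMIT_AUTORESET applies) and then
** re-enters the VDBE_READY_STATE path via the restart_step label.
*/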
#ifdef SQLITE_DEBUG
p->rcApp = SQLITE_OK;
#endif
@@ -85787,7 +88078,12 @@ static int sqlite3Step(Vdbe *p){
db->nVdbeExec--;
}
- if( rc!=SQLITE_ROW ){
+ if( rc==SQLITE_ROW ){
+ assert( p->rc==SQLITE_OK );
+ assert( db->mallocFailed==0 );
+ db->errCode = SQLITE_ROW;
+ return SQLITE_ROW;
+ }else{
#ifndef SQLITE_OMIT_TRACE
/* If the statement completed successfully, invoke the profile callback */
checkProfileCallback(db, p);
@@ -85839,7 +88135,6 @@ SQLITE_API int sqlite3_step(sqlite3_stmt *pStmt){
}
db = v->db;
sqlite3_mutex_enter(db->mutex);
- v->doingRerun = 0;
while( (rc = sqlite3Step(v))==SQLITE_SCHEMA
&& cnt++ < SQLITE_MAX_SCHEMA_RETRY ){
int savedPc = v->pc;
@@ -85865,7 +88160,13 @@ SQLITE_API int sqlite3_step(sqlite3_stmt *pStmt){
break;
}
sqlite3_reset(pStmt);
- if( savedPc>=0 ) v->doingRerun = 1;
+ if( savedPc>=0 ){
+ /* Setting minWriteFileFormat to 254 is a signal to the OP_Init and
+ ** OP_Trace opcodes to *not* perform SQLITE_TRACE_STMT because it has
+ ** already been done once on a prior invocation that failed due to
+ ** SQLITE_SCHEMA. tag-20220401a */
+ v->minWriteFileFormat = 254;
+ }
assert( v->expired==0 );
}
sqlite3_mutex_leave(db->mutex);
@@ -86174,15 +88475,15 @@ static const Mem *columnNullValue(void){
#endif
= {
/* .u = */ {0},
+ /* .z = */ (char*)0,
+ /* .n = */ (int)0,
/* .flags = */ (u16)MEM_Null,
/* .enc = */ (u8)0,
/* .eSubtype = */ (u8)0,
- /* .n = */ (int)0,
- /* .z = */ (char*)0,
- /* .zMalloc = */ (char*)0,
+ /* .db = */ (sqlite3*)0,
/* .szMalloc = */ (int)0,
/* .uTemp = */ (u32)0,
- /* .db = */ (sqlite3*)0,
+ /* .zMalloc = */ (char*)0,
/* .xDel = */ (void(*)(void*))0,
#ifdef SQLITE_DEBUG
/* .pScopyFrom = */ (Mem*)0,
@@ -86473,25 +88774,24 @@ SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){
** The error code stored in database p->db is overwritten with the return
** value in any case.
*/
-static int vdbeUnbind(Vdbe *p, int i){
+static int vdbeUnbind(Vdbe *p, unsigned int i){
Mem *pVar;
if( vdbeSafetyNotNull(p) ){
return SQLITE_MISUSE_BKPT;
}
sqlite3_mutex_enter(p->db->mutex);
- if( p->iVdbeMagic!=VDBE_MAGIC_RUN || p->pc>=0 ){
+ if( p->eVdbeState!=VDBE_READY_STATE ){
sqlite3Error(p->db, SQLITE_MISUSE);
sqlite3_mutex_leave(p->db->mutex);
sqlite3_log(SQLITE_MISUSE,
"bind on a busy prepared statement: [%s]", p->zSql);
return SQLITE_MISUSE_BKPT;
}
- if( i<1 || i>p->nVar ){
+ if( i>=(unsigned int)p->nVar ){
sqlite3Error(p->db, SQLITE_RANGE);
sqlite3_mutex_leave(p->db->mutex);
return SQLITE_RANGE;
}
- i--;
pVar = &p->aVar[i];
sqlite3VdbeMemRelease(pVar);
pVar->flags = MEM_Null;
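/* The index convention here: vdbeUnbind() takes a zero-based parameter
** index as an unsigned value.  Callers such as sqlite3_bind_int64() pass
** (u32)(i-1), so an application-supplied index of 0 or a negative value
** wraps around to a large unsigned number and is rejected by the
** i>=p->nVar test with SQLITE_RANGE.
*/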
@@ -86528,7 +88828,7 @@ static int bindText(
Mem *pVar;
int rc;
- rc = vdbeUnbind(p, i);
+ rc = vdbeUnbind(p, (u32)(i-1));
if( rc==SQLITE_OK ){
if( zData!=0 ){
pVar = &p->aVar[i-1];
@@ -86577,7 +88877,7 @@ SQLITE_API int sqlite3_bind_blob64(
SQLITE_API int sqlite3_bind_double(sqlite3_stmt *pStmt, int i, double rValue){
int rc;
Vdbe *p = (Vdbe *)pStmt;
- rc = vdbeUnbind(p, i);
+ rc = vdbeUnbind(p, (u32)(i-1));
if( rc==SQLITE_OK ){
sqlite3VdbeMemSetDouble(&p->aVar[i-1], rValue);
sqlite3_mutex_leave(p->db->mutex);
@@ -86590,7 +88890,7 @@ SQLITE_API int sqlite3_bind_int(sqlite3_stmt *p, int i, int iValue){
SQLITE_API int sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValue){
int rc;
Vdbe *p = (Vdbe *)pStmt;
- rc = vdbeUnbind(p, i);
+ rc = vdbeUnbind(p, (u32)(i-1));
if( rc==SQLITE_OK ){
sqlite3VdbeMemSetInt64(&p->aVar[i-1], iValue);
sqlite3_mutex_leave(p->db->mutex);
@@ -86600,7 +88900,7 @@ SQLITE_API int sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValu
SQLITE_API int sqlite3_bind_null(sqlite3_stmt *pStmt, int i){
int rc;
Vdbe *p = (Vdbe*)pStmt;
- rc = vdbeUnbind(p, i);
+ rc = vdbeUnbind(p, (u32)(i-1));
if( rc==SQLITE_OK ){
sqlite3_mutex_leave(p->db->mutex);
}
@@ -86615,7 +88915,7 @@ SQLITE_API int sqlite3_bind_pointer(
){
int rc;
Vdbe *p = (Vdbe*)pStmt;
- rc = vdbeUnbind(p, i);
+ rc = vdbeUnbind(p, (u32)(i-1));
if( rc==SQLITE_OK ){
sqlite3VdbeMemSetPointer(&p->aVar[i-1], pPtr, zPTtype, xDestructor);
sqlite3_mutex_leave(p->db->mutex);
@@ -86693,7 +88993,7 @@ SQLITE_API int sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_valu
SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){
int rc;
Vdbe *p = (Vdbe *)pStmt;
- rc = vdbeUnbind(p, i);
+ rc = vdbeUnbind(p, (u32)(i-1));
if( rc==SQLITE_OK ){
#ifndef SQLITE_OMIT_INCRBLOB
sqlite3VdbeMemSetZeroBlob(&p->aVar[i-1], n);
@@ -86832,7 +89132,7 @@ SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt){
*/
SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt *pStmt){
Vdbe *v = (Vdbe*)pStmt;
- return v!=0 && v->iVdbeMagic==VDBE_MAGIC_RUN && v->pc>=0;
+ return v!=0 && v->eVdbeState==VDBE_RUN_STATE;
}
/*
@@ -86853,7 +89153,7 @@ SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt){
if( pStmt==0 ){
pNext = (sqlite3_stmt*)pDb->pVdbe;
}else{
- pNext = (sqlite3_stmt*)((Vdbe*)pStmt)->pNext;
+ pNext = (sqlite3_stmt*)((Vdbe*)pStmt)->pVNext;
}
sqlite3_mutex_leave(pDb->mutex);
return pNext;
@@ -86878,9 +89178,11 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, int resetFlag){
sqlite3_mutex_enter(db->mutex);
v = 0;
db->pnBytesFreed = (int*)&v;
- sqlite3VdbeClearObject(db, pVdbe);
- sqlite3DbFree(db, pVdbe);
+ assert( db->lookaside.pEnd==db->lookaside.pTrueEnd );
+ db->lookaside.pEnd = db->lookaside.pStart;
+ sqlite3VdbeDelete(pVdbe);
db->pnBytesFreed = 0;
+ db->lookaside.pEnd = db->lookaside.pTrueEnd;
sqlite3_mutex_leave(db->mutex);
}else{
v = pVdbe->aCounter[op];
@@ -87672,12 +89974,12 @@ static VdbeCursor *allocateCursor(
int nByte;
VdbeCursor *pCx = 0;
nByte =
- ROUND8(sizeof(VdbeCursor)) + 2*sizeof(u32)*nField +
+ ROUND8P(sizeof(VdbeCursor)) + 2*sizeof(u32)*nField +
(eCurType==CURTYPE_BTREE?sqlite3BtreeCursorSize():0);
  assert( iCur>=0 && iCur<p->nCursor );
if( p->apCsr[iCur] ){ /*OPTIMIZATION-IF-FALSE*/
- sqlite3VdbeFreeCursor(p, p->apCsr[iCur]);
+ sqlite3VdbeFreeCursorNN(p, p->apCsr[iCur]);
p->apCsr[iCur] = 0;
}
@@ -87707,7 +90009,7 @@ static VdbeCursor *allocateCursor(
pCx->aOffset = &pCx->aType[nField];
if( eCurType==CURTYPE_BTREE ){
pCx->uc.pCursor = (BtCursor*)
- &pMem->z[ROUND8(sizeof(VdbeCursor))+2*sizeof(u32)*nField];
+ &pMem->z[ROUND8P(sizeof(VdbeCursor))+2*sizeof(u32)*nField];
sqlite3BtreeCursorZero(pCx->uc.pCursor);
}
return pCx;
@@ -87720,7 +90022,8 @@ static VdbeCursor *allocateCursor(
** return false.
*/
static int alsoAnInt(Mem *pRec, double rValue, i64 *piValue){
- i64 iValue = (double)rValue;
+ i64 iValue;
+ iValue = sqlite3RealToI64(rValue);
if( sqlite3RealSameAsInt(rValue,iValue) ){
*piValue = iValue;
return 1;
@@ -87882,17 +90185,18 @@ static u16 SQLITE_NOINLINE computeNumericType(Mem *pMem){
** But it does set pMem->u.r and pMem->u.i appropriately.
*/
static u16 numericType(Mem *pMem){
- if( pMem->flags & (MEM_Int|MEM_Real|MEM_IntReal) ){
+ assert( (pMem->flags & MEM_Null)==0
+ || pMem->db==0 || pMem->db->mallocFailed );
+ if( pMem->flags & (MEM_Int|MEM_Real|MEM_IntReal|MEM_Null) ){
testcase( pMem->flags & MEM_Int );
testcase( pMem->flags & MEM_Real );
testcase( pMem->flags & MEM_IntReal );
- return pMem->flags & (MEM_Int|MEM_Real|MEM_IntReal);
- }
- if( pMem->flags & (MEM_Str|MEM_Blob) ){
- testcase( pMem->flags & MEM_Str );
- testcase( pMem->flags & MEM_Blob );
- return computeNumericType(pMem);
+ return pMem->flags & (MEM_Int|MEM_Real|MEM_IntReal|MEM_Null);
}
+ assert( pMem->flags & (MEM_Str|MEM_Blob) );
+ testcase( pMem->flags & MEM_Str );
+ testcase( pMem->flags & MEM_Blob );
+ return computeNumericType(pMem);
return 0;
}
@@ -88146,7 +90450,7 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
#endif
/*** INSERT STACK UNION HERE ***/
- assert( p->iVdbeMagic==VDBE_MAGIC_RUN ); /* sqlite3_step() verifies this */
+ assert( p->eVdbeState==VDBE_RUN_STATE ); /* sqlite3_step() verifies this */
sqlite3VdbeEnter(p);
#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
if( db->xProgress ){
@@ -88389,26 +90693,39 @@ case OP_Gosub: { /* jump */
pIn1->flags = MEM_Int;
pIn1->u.i = (int)(pOp-aOp);
REGISTER_TRACE(pOp->p1, pIn1);
-
- /* Most jump operations do a goto to this spot in order to update
- ** the pOp pointer. */
-jump_to_p2:
- assert( pOp->p2>0 ); /* There are never any jumps to instruction 0 */
-  assert( pOp->p2<p->nOp );      /* Jumps must be in range */
- pOp = &aOp[pOp->p2 - 1];
- break;
+ goto jump_to_p2_and_check_for_interrupt;
}
-/* Opcode: Return P1 * * * *
+/* Opcode: Return P1 P2 P3 * *
+**
+** Jump to the address stored in register P1. If P1 is a return address
+** register, then this accomplishes a return from a subroutine.
+**
+** If P3 is 1, then the jump is only taken if register P1 holds an integer
+** value; otherwise execution falls through to the next opcode, and the
+** OP_Return becomes a no-op. If P3 is 0, then register P1 must hold an
+** integer or else an assert() is raised. P3 should be set to 1 when
+** this opcode is used in combination with OP_BeginSubrtn, and set to 0
+** otherwise.
+**
+** The value in register P1 is unchanged by this opcode.
**
-** Jump to the next instruction after the address in register P1. After
-** the jump, register P1 becomes undefined.
+** P2 is not used by the byte-code engine. However, if P2 is positive
+** and also less than the current address, then the "EXPLAIN" output
+** formatter in the CLI will indent all opcodes from the P2 opcode up
+** to but not including the current Return. P2 should be the first opcode
+** in the subroutine from which this opcode is returning. Thus the P2
+** value is a byte-code indentation hint. See tag-20220407a in
+** wherecode.c and shell.c.
*/
case OP_Return: { /* in1 */
pIn1 = &aMem[pOp->p1];
- assert( pIn1->flags==MEM_Int );
- pOp = &aOp[pIn1->u.i];
- pIn1->flags = MEM_Undefined;
+ if( pIn1->flags & MEM_Int ){
+ if( pOp->p3 ){ VdbeBranchTaken(1, 2); }
+ pOp = &aOp[pIn1->u.i];
+ }else if( ALWAYS(pOp->p3) ){
+ VdbeBranchTaken(0, 2);
+ }
break;
}
@@ -88431,7 +90748,14 @@ case OP_InitCoroutine: { /* jump */
assert( !VdbeMemDynamic(pOut) );
pOut->u.i = pOp->p3 - 1;
pOut->flags = MEM_Int;
- if( pOp->p2 ) goto jump_to_p2;
+ if( pOp->p2==0 ) break;
+
+ /* Most jump operations do a goto to this spot in order to update
+ ** the pOp pointer. */
+jump_to_p2:
+ assert( pOp->p2>0 ); /* There are never any jumps to instruction 0 */
+  assert( pOp->p2<p->nOp );      /* Jumps must be in range */
+ pOp = &aOp[pOp->p2 - 1];
break;
}
@@ -88533,11 +90857,10 @@ case OP_Halt: {
VdbeFrame *pFrame;
int pcx;
- pcx = (int)(pOp - aOp);
#ifdef SQLITE_DEBUG
if( pOp->p2==OE_Abort ){ sqlite3VdbeAssertAbortable(p); }
#endif
- if( pOp->p1==SQLITE_OK && p->pFrame ){
+ if( p->pFrame && pOp->p1==SQLITE_OK ){
/* Halt the sub-program. Return control to the parent frame. */
pFrame = p->pFrame;
p->pFrame = pFrame->pParent;
@@ -88559,7 +90882,6 @@ case OP_Halt: {
}
p->rc = pOp->p1;
p->errorAction = (u8)pOp->p2;
- p->pc = pcx;
assert( pOp->p5<=4 );
if( p->rc ){
if( pOp->p5 ){
@@ -88576,6 +90898,7 @@ case OP_Halt: {
}else{
sqlite3VdbeError(p, "%s", pOp->p4.z);
}
+ pcx = (int)(pOp - aOp);
sqlite3_log(pOp->p1, "abort at %d in [%s]: %s", pcx, p->zSql, p->zErrMsg);
}
rc = sqlite3VdbeHalt(p);
@@ -88701,6 +91024,28 @@ case OP_String: { /* out2 */
break;
}
+/* Opcode: BeginSubrtn * P2 * * *
+** Synopsis: r[P2]=NULL
+**
+** Mark the beginning of a subroutine that can be entered in-line
+** or that can be called using OP_Gosub. The subroutine should
+** be terminated by an OP_Return instruction that has a P1 operand that
+** is the same as the P2 operand to this opcode and that has P3 set to 1.
+** If the subroutine is entered in-line, then the OP_Return will simply
+** fall through. But if the subroutine is entered using OP_Gosub, then
+** the OP_Return will jump back to the first instruction after the OP_Gosub.
+**
+** This routine works by loading a NULL into the P2 register. When the
+** return address register contains a NULL, the OP_Return instruction is
+** a no-op that simply falls through to the next instruction (assuming that
+** the OP_Return opcode has a P3 value of 1). Thus if the subroutine is
+** entered in-line, then the OP_Return will cause in-line execution to
+** continue. But if the subroutine is entered via OP_Gosub, then the
+** OP_Return will cause a return to the address following the OP_Gosub.
+**
+** This opcode is identical to OP_Null. It has a different name
+** only to make the byte code easier to read and verify.
+*/
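/* For illustration only -- a hand-written byte-code sketch of the two entry
** paths described above (addresses and register numbers are hypothetical,
** not generated from any real query):
**
**    10  BeginSubrtn  _   5   _    r[5]=NULL
**    11  ...                       subroutine body, entered in-line here
**    12  Return       5   11  1    r[5] is NULL, so execution falls through
**    ...
**    20  Gosub        5   11       r[5]=20, jump to address 11
**    21  ...                       the Return at 12 now resumes here
*/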
/* Opcode: Null P1 P2 P3 * *
** Synopsis: r[P2..P3]=NULL
**
@@ -88713,6 +91058,7 @@ case OP_String: { /* out2 */
** NULL values will not compare equal even if SQLITE_NULLEQ is set on
** OP_Ne or OP_Eq.
*/
+case OP_BeginSubrtn:
case OP_Null: { /* out2 */
int cnt;
u16 nullFlag;
@@ -88843,11 +91189,16 @@ case OP_Move: {
break;
}
-/* Opcode: Copy P1 P2 P3 * *
+/* Opcode: Copy P1 P2 P3 * P5
** Synopsis: r[P2@P3+1]=r[P1@P3+1]
**
** Make a copy of registers P1..P1+P3 into registers P2..P2+P3.
**
+** If the 0x0002 bit of P5 is set then also clear the MEM_Subtype flag in the
+** destination. The 0x0001 bit of P5 indicates that this Copy opcode cannot
+** be merged. The 0x0001 bit is used by the query planner and does not
+** come into play during query execution.
+**
** This instruction makes a deep copy of the value. A duplicate
** is made of any string or blob constant. See also OP_SCopy.
*/
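/* A hypothetical example of the P5 bits described above: an instruction
** "Copy 1 4 2 . . 02" would copy r[1..3] into r[4..6] and, because the
** 0x0002 bit is set, clear the MEM_Subtype flag on the copies (subtypes
** are used, for example, by the JSON SQL functions).
*/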
@@ -88862,6 +91213,9 @@ case OP_Copy: {
memAboutToChange(p, pOut);
sqlite3VdbeMemShallowCopy(pOut, pIn1, MEM_Ephem);
Deephemeralize(pOut);
+ if( (pOut->flags & MEM_Subtype)!=0 && (pOp->p5 & 0x0002)!=0 ){
+ pOut->flags &= ~MEM_Subtype;
+ }
#ifdef SQLITE_DEBUG
pOut->pScopyFrom = 0;
#endif
@@ -88942,45 +91296,32 @@ case OP_FkCheck: {
** the result row.
*/
case OP_ResultRow: {
- Mem *pMem;
- int i;
assert( p->nResColumn==pOp->p2 );
assert( pOp->p1>0 || CORRUPT_DB );
assert( pOp->p1+pOp->p2<=(p->nMem+1 - p->nCursor)+1 );
- /* Invalidate all ephemeral cursor row caches */
p->cacheCtr = (p->cacheCtr + 2)|1;
-
- /* Make sure the results of the current row are \000 terminated
- ** and have an assigned type. The results are de-ephemeralized as
- ** a side effect.
- */
- pMem = p->pResultSet = &aMem[pOp->p1];
-  for(i=0; i<pOp->p2; i++){
- assert( memIsValid(&pMem[i]) );
- Deephemeralize(&pMem[i]);
- assert( (pMem[i].flags & MEM_Ephem)==0
- || (pMem[i].flags & (MEM_Str|MEM_Blob))==0 );
- sqlite3VdbeMemNulTerminate(&pMem[i]);
- REGISTER_TRACE(pOp->p1+i, &pMem[i]);
+ p->pResultSet = &aMem[pOp->p1];
#ifdef SQLITE_DEBUG
- /* The registers in the result will not be used again when the
- ** prepared statement restarts. This is because sqlite3_column()
- ** APIs might have caused type conversions of made other changes to
- ** the register values. Therefore, we can go ahead and break any
- ** OP_SCopy dependencies. */
- pMem[i].pScopyFrom = 0;
-#endif
+ {
+ Mem *pMem = p->pResultSet;
+ int i;
+    for(i=0; i<pOp->p2; i++){
+ assert( memIsValid(&pMem[i]) );
+ REGISTER_TRACE(pOp->p1+i, &pMem[i]);
+ /* The registers in the result will not be used again when the
+ ** prepared statement restarts. This is because sqlite3_column()
+ ** APIs might have caused type conversions or made other changes to
+ ** the register values. Therefore, we can go ahead and break any
+ ** OP_SCopy dependencies. */
+ pMem[i].pScopyFrom = 0;
+ }
}
+#endif
if( db->mallocFailed ) goto no_mem;
-
if( db->mTrace & SQLITE_TRACE_ROW ){
db->trace.xV2(SQLITE_TRACE_ROW, db->pTraceArg, p, 0);
}
-
-
- /* Return SQLITE_ROW
- */
p->pc = (int)(pOp - aOp) + 1;
rc = SQLITE_ROW;
goto vdbe_return;
@@ -89035,7 +91376,7 @@ case OP_Concat: { /* same as TK_CONCAT, in1, in2, out3 */
if( nByte>db->aLimit[SQLITE_LIMIT_LENGTH] ){
goto too_big;
}
- if( sqlite3VdbeMemGrow(pOut, (int)nByte+3, pOut==pIn2) ){
+ if( sqlite3VdbeMemGrow(pOut, (int)nByte+2, pOut==pIn2) ){
goto no_mem;
}
MemSetTypeFlag(pOut, MEM_Str);
@@ -89047,9 +91388,9 @@ case OP_Concat: { /* same as TK_CONCAT, in1, in2, out3 */
memcpy(&pOut->z[pIn2->n], pIn1->z, pIn1->n);
assert( (pIn1->flags & MEM_Dyn) == (flags1 & MEM_Dyn) );
pIn1->flags = flags1;
+ if( encoding>SQLITE_UTF8 ) nByte &= ~1;
pOut->z[nByte]=0;
pOut->z[nByte+1] = 0;
- pOut->z[nByte+2] = 0;
pOut->flags |= MEM_Term;
pOut->n = (int)nByte;
pOut->enc = encoding;
@@ -89100,7 +91441,6 @@ case OP_Subtract: /* same as TK_MINUS, in1, in2, out3 */
case OP_Multiply: /* same as TK_STAR, in1, in2, out3 */
case OP_Divide: /* same as TK_SLASH, in1, in2, out3 */
case OP_Remainder: { /* same as TK_REM, in1, in2, out3 */
- u16 flags; /* Combined MEM_* flags from both inputs */
u16 type1; /* Numeric type of left operand */
u16 type2; /* Numeric type of right operand */
i64 iA; /* Integer value of left operand */
@@ -89109,12 +91449,12 @@ case OP_Remainder: { /* same as TK_REM, in1, in2, out3 */
double rB; /* Real value of right operand */
pIn1 = &aMem[pOp->p1];
- type1 = numericType(pIn1);
+ type1 = pIn1->flags;
pIn2 = &aMem[pOp->p2];
- type2 = numericType(pIn2);
+ type2 = pIn2->flags;
pOut = &aMem[pOp->p3];
- flags = pIn1->flags | pIn2->flags;
if( (type1 & type2 & MEM_Int)!=0 ){
+int_math:
iA = pIn1->u.i;
iB = pIn2->u.i;
switch( pOp->opcode ){
@@ -89136,9 +91476,12 @@ case OP_Remainder: { /* same as TK_REM, in1, in2, out3 */
}
pOut->u.i = iB;
MemSetTypeFlag(pOut, MEM_Int);
- }else if( (flags & MEM_Null)!=0 ){
+ }else if( ((type1 | type2) & MEM_Null)!=0 ){
goto arithmetic_result_is_null;
}else{
+ type1 = numericType(pIn1);
+ type2 = numericType(pIn2);
+ if( (type1 & type2 & MEM_Int)!=0 ) goto int_math;
fp_math:
rA = sqlite3VdbeRealValue(pIn1);
rB = sqlite3VdbeRealValue(pIn2);
@@ -89494,23 +91837,23 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
assert( (pOp->p5 & SQLITE_AFF_MASK)!=SQLITE_AFF_TEXT || CORRUPT_DB );
/* Common case of comparison of two integers */
if( pIn3->u.i > pIn1->u.i ){
- iCompare = +1;
if( sqlite3aGTb[pOp->opcode] ){
VdbeBranchTaken(1, (pOp->p5 & SQLITE_NULLEQ)?2:3);
goto jump_to_p2;
}
+ iCompare = +1;
}else if( pIn3->u.i < pIn1->u.i ){
- iCompare = -1;
if( sqlite3aLTb[pOp->opcode] ){
VdbeBranchTaken(1, (pOp->p5 & SQLITE_NULLEQ)?2:3);
goto jump_to_p2;
}
+ iCompare = -1;
}else{
- iCompare = 0;
if( sqlite3aEQb[pOp->opcode] ){
VdbeBranchTaken(1, (pOp->p5 & SQLITE_NULLEQ)?2:3);
goto jump_to_p2;
}
+ iCompare = 0;
}
VdbeBranchTaken(0, (pOp->p5 & SQLITE_NULLEQ)?2:3);
break;
@@ -89537,11 +91880,11 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
** then the result is always NULL.
** The jump is taken if the SQLITE_JUMPIFNULL bit is set.
*/
- iCompare = 1; /* Operands are not equal */
VdbeBranchTaken(2,3);
if( pOp->p5 & SQLITE_JUMPIFNULL ){
goto jump_to_p2;
}
+ iCompare = 1; /* Operands are not equal */
break;
}
}else{
@@ -89647,9 +91990,8 @@ case OP_ElseEq: { /* same as TK_ESCAPE, jump */
** Set the permutation used by the OP_Compare operator in the next
** instruction. The permutation is stored in the P4 operand.
**
-** The permutation is only valid until the next OP_Compare that has
-** the OPFLAG_PERMUTE bit set in P5. Typically the OP_Permutation should
-** occur immediately prior to the OP_Compare.
+** The permutation is only valid for the next opcode which must be
+** an OP_Compare that has the OPFLAG_PERMUTE bit set in P5.
**
** The first integer in the P4 integer array is the length of the array
** and does not become part of the permutation.
@@ -89681,6 +92023,8 @@ case OP_Permutation: {
** The comparison is a sort comparison, so NULLs compare equal,
** NULLs are less than numbers, numbers are less than strings,
** and strings are less than blobs.
+**
+** This opcode must be immediately followed by an OP_Jump opcode.
*/
case OP_Compare: {
int n;
@@ -89739,6 +92083,7 @@ case OP_Compare: {
break;
}
}
+ assert( pOp[1].opcode==OP_Jump );
break;
}
@@ -89747,8 +92092,11 @@ case OP_Compare: {
** Jump to the instruction at address P1, P2, or P3 depending on whether
** in the most recent OP_Compare instruction the P1 vector was less than
** equal to, or greater than the P2 vector, respectively.
+**
+** This opcode must immediately follow an OP_Compare opcode.
*/
case OP_Jump: { /* jump */
+ assert( pOp>aOp && pOp[-1].opcode==OP_Compare );
if( iCompare<0 ){
VdbeBranchTaken(0,4); pOp = &aOp[pOp->p1 - 1];
}else if( iCompare==0 ){
@@ -89948,19 +92296,90 @@ case OP_IsNull: { /* same as TK_ISNULL, jump, in1 */
break;
}
-/* Opcode: IsNullOrType P1 P2 P3 * *
-** Synopsis: if typeof(r[P1]) IN (P3,5) goto P2
+/* Opcode: IsType P1 P2 P3 P4 P5
+** Synopsis: if typeof(P1.P3) in P5 goto P2
+**
+** Jump to P2 if the type of a column in a btree is one of the types specified
+** by the P5 bitmask.
+**
+** P1 is normally a cursor on a btree for which the row decode cache is
+** valid through at least column P3. In other words, there should have been
+** a prior OP_Column for column P3 or greater. If the cursor is not valid,
+** then this opcode might give spurious results.
+** If the btree row has fewer than P3 columns, then use P4 as the
+** datatype.
+**
+** If P1 is -1, then P3 is a register number and the datatype is taken
+** from the value in that register.
+**
+** P5 is a bitmask of data types. SQLITE_INTEGER is the least significant
+** (0x01) bit. SQLITE_FLOAT is the 0x02 bit. SQLITE_TEXT is 0x04.
+** SQLITE_BLOB is 0x08. SQLITE_NULL is 0x10.
+**
+** Take the jump to address P2 if and only if the datatype of the
+** value determined by P1 and P3 corresponds to one of the bits in the
+** P5 bitmask.
**
-** Jump to P2 if the value in register P1 is NULL or has a datatype P3.
-** P3 is an integer which should be one of SQLITE_INTEGER, SQLITE_FLOAT,
-** SQLITE_BLOB, SQLITE_NULL, or SQLITE_TEXT.
*/
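/* A worked example of the P5 bitmask (using the bit assignments listed
** above): to code a test equivalent to
**     typeof(x) IN ('integer','null')
** P5 could be set to 0x01|0x10 = 0x11, so the jump is taken when the
** value is an INTEGER or a NULL.
*/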
-case OP_IsNullOrType: { /* jump, in1 */
- int doTheJump;
- pIn1 = &aMem[pOp->p1];
- doTheJump = (pIn1->flags & MEM_Null)!=0 || sqlite3_value_type(pIn1)==pOp->p3;
- VdbeBranchTaken( doTheJump, 2);
- if( doTheJump ) goto jump_to_p2;
+case OP_IsType: { /* jump */
+ VdbeCursor *pC;
+ u16 typeMask;
+ u32 serialType;
+
+  assert( pOp->p1>=(-1) && pOp->p1<p->nCursor );
+ assert( pOp->p1>=0 || (pOp->p3>=0 && pOp->p3<=(p->nMem+1 - p->nCursor)) );
+ if( pOp->p1>=0 ){
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pOp->p3>=0 );
+    if( pOp->p3<pC->nHdrParsed ){
+ serialType = pC->aType[pOp->p3];
+ if( serialType>=12 ){
+ if( serialType&1 ){
+ typeMask = 0x04; /* SQLITE_TEXT */
+ }else{
+ typeMask = 0x08; /* SQLITE_BLOB */
+ }
+ }else{
+ static const unsigned char aMask[] = {
+ 0x10, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x2,
+ 0x01, 0x01, 0x10, 0x10
+ };
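/* Reading of the aMask[] table above (per the SQLite record-format serial
** types): type 0 is a NULL (0x10); types 1..6 are integers of various
** widths (0x01); type 7 is an 8-byte IEEE float (0x02); types 8 and 9 are
** the integer constants 0 and 1 (0x01); types 10 and 11 are reserved for
** internal use and are treated here as NULL (0x10).
*/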
+ testcase( serialType==0 );
+ testcase( serialType==1 );
+ testcase( serialType==2 );
+ testcase( serialType==3 );
+ testcase( serialType==4 );
+ testcase( serialType==5 );
+ testcase( serialType==6 );
+ testcase( serialType==7 );
+ testcase( serialType==8 );
+ testcase( serialType==9 );
+ testcase( serialType==10 );
+ testcase( serialType==11 );
+ typeMask = aMask[serialType];
+ }
+ }else{
+ typeMask = 1 << (pOp->p4.i - 1);
+ testcase( typeMask==0x01 );
+ testcase( typeMask==0x02 );
+ testcase( typeMask==0x04 );
+ testcase( typeMask==0x08 );
+ testcase( typeMask==0x10 );
+ }
+ }else{
+ assert( memIsValid(&aMem[pOp->p3]) );
+ typeMask = 1 << (sqlite3_value_type((sqlite3_value*)&aMem[pOp->p3])-1);
+ testcase( typeMask==0x01 );
+ testcase( typeMask==0x02 );
+ testcase( typeMask==0x04 );
+ testcase( typeMask==0x08 );
+ testcase( typeMask==0x10 );
+ }
+ VdbeBranchTaken( (typeMask & pOp->p5)!=0, 2);
+ if( typeMask & pOp->p5 ){
+ goto jump_to_p2;
+ }
break;
}
@@ -90003,11 +92422,14 @@ case OP_NotNull: { /* same as TK_NOTNULL, jump, in1 */
** If it is, then set register P3 to NULL and jump immediately to P2.
** If P1 is not on a NULL row, then fall through without making any
** changes.
+**
+** If P1 is not an open cursor, then this opcode is a no-op.
*/
case OP_IfNullRow: { /* jump */
+ VdbeCursor *pC;
  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- assert( p->apCsr[pOp->p1]!=0 );
- if( p->apCsr[pOp->p1]->nullRow ){
+ pC = p->apCsr[pOp->p1];
+ if( ALWAYS(pC) && pC->nullRow ){
sqlite3VdbeMemSetNull(aMem + pOp->p3);
goto jump_to_p2;
}
@@ -90053,12 +92475,12 @@ case OP_Offset: { /* out3 */
#endif /* SQLITE_ENABLE_OFFSET_SQL_FUNC */
/* Opcode: Column P1 P2 P3 P4 P5
-** Synopsis: r[P3]=PX
+** Synopsis: r[P3]=PX cursor P1 column P2
**
** Interpret the data that cursor P1 points to as a structure built using
** the MakeRecord instruction. (See the MakeRecord opcode for additional
** information about the format of the data.) Extract the P2-th column
-** from this record. If there are less that (P2+1)
+** from this record. If there are less than (P2+1)
** values in the record, extract a NULL.
**
** The value extracted is stored in register P3.
@@ -90067,15 +92489,17 @@ case OP_Offset: { /* out3 */
** if the P4 argument is a P4_MEM use the value of the P4 argument as
** the result.
**
-** If the OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG bits are set on P5 then
-** the result is guaranteed to only be used as the argument of a length()
-** or typeof() function, respectively. The loading of large blobs can be
-** skipped for length() and all content loading can be skipped for typeof().
+** If the OPFLAG_LENGTHARG bit is set in P5 then the result is guaranteed
+** to only be used by the length() function or the equivalent. The content
+** of large blobs is not loaded, thus saving CPU cycles. If the
+** OPFLAG_TYPEOFARG bit is set then the result will only be used by the
+** typeof() function or the IS NULL or IS NOT NULL operators or the
+** equivalent. In this case, all content loading can be omitted.
*/
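/* A concrete illustration of the P5 hints above (the query is hypothetical):
** for a statement such as "SELECT length(b) FROM t1", the OP_Column that
** loads column b can carry OPFLAG_LENGTHARG, so the content of a large blob
** need not be read from disk; only its declared length is required.
*/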
case OP_Column: {
u32 p2; /* column number to retrieve */
VdbeCursor *pC; /* The VDBE cursor */
- BtCursor *pCrsr; /* The BTree cursor */
+ BtCursor *pCrsr; /* The B-Tree cursor corresponding to pC */
u32 *aOffset; /* aOffset[i] is offset to start of data for i-th column */
int len; /* The length of the serialized data for the column */
int i; /* Loop counter */
@@ -90089,21 +92513,14 @@ case OP_Column: {
Mem *pReg; /* PseudoTable input register */
  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) );
pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
p2 = (u32)pOp->p2;
- /* If the cursor cache is stale (meaning it is not currently point at
- ** the correct row) then bring it up-to-date by doing the necessary
- ** B-Tree seek. */
- rc = sqlite3VdbeCursorMoveto(&pC, &p2);
- if( rc ) goto abort_due_to_error;
-
- assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) );
- pDest = &aMem[pOp->p3];
- memAboutToChange(p, pDest);
+op_column_restart:
assert( pC!=0 );
- assert( p2<(u32)pC->nField );
+ assert( p2<(u32)pC->nField
+ || (pC->eCurType==CURTYPE_PSEUDO && pC->seekResult==0) );
aOffset = pC->aOffset;
assert( aOffset==pC->aType+pC->nField );
assert( pC->eCurType!=CURTYPE_VTAB );
@@ -90112,21 +92529,37 @@ case OP_Column: {
if( pC->cacheStatus!=p->cacheCtr ){ /*OPTIMIZATION-IF-FALSE*/
if( pC->nullRow ){
- if( pC->eCurType==CURTYPE_PSEUDO ){
+ if( pC->eCurType==CURTYPE_PSEUDO && pC->seekResult>0 ){
      /* For the special case of a pseudo-cursor, the seekResult field
** identifies the register that holds the record */
- assert( pC->seekResult>0 );
pReg = &aMem[pC->seekResult];
assert( pReg->flags & MEM_Blob );
assert( memIsValid(pReg) );
pC->payloadSize = pC->szRow = pReg->n;
pC->aRow = (u8*)pReg->z;
}else{
+ pDest = &aMem[pOp->p3];
+ memAboutToChange(p, pDest);
sqlite3VdbeMemSetNull(pDest);
goto op_column_out;
}
}else{
pCrsr = pC->uc.pCursor;
+ if( pC->deferredMoveto ){
+ u32 iMap;
+ assert( !pC->isEphemeral );
+ if( pC->ub.aAltMap && (iMap = pC->ub.aAltMap[1+p2])>0 ){
+ pC = pC->pAltCursor;
+ p2 = iMap - 1;
+ goto op_column_restart;
+ }
+ rc = sqlite3VdbeFinishMoveto(pC);
+ if( rc ) goto abort_due_to_error;
+ }else if( sqlite3BtreeCursorHasMoved(pCrsr) ){
+ rc = sqlite3VdbeHandleMovedCursor(pC);
+ if( rc ) goto abort_due_to_error;
+ goto op_column_restart;
+ }
assert( pC->eCurType==CURTYPE_BTREE );
assert( pCrsr );
assert( sqlite3BtreeCursorIsValid(pCrsr) );
@@ -90134,15 +92567,15 @@ case OP_Column: {
pC->aRow = sqlite3BtreePayloadFetch(pCrsr, &pC->szRow);
assert( pC->szRow<=pC->payloadSize );
assert( pC->szRow<=65536 ); /* Maximum page size is 64KiB */
- if( pC->payloadSize > (u32)db->aLimit[SQLITE_LIMIT_LENGTH] ){
- goto too_big;
- }
}
pC->cacheStatus = p->cacheCtr;
- pC->iHdrOffset = getVarint32(pC->aRow, aOffset[0]);
+ if( (aOffset[0] = pC->aRow[0])<0x80 ){
+ pC->iHdrOffset = 1;
+ }else{
+ pC->iHdrOffset = sqlite3GetVarint32(pC->aRow, aOffset);
+ }
pC->nHdrParsed = 0;
-
    if( pC->szRow<pC->payloadSize ){
      /* pC->aRow does not have to hold the entire row, but it does at least
** need to cover the header of the record. If pC->aRow does not contain
@@ -90182,6 +92615,10 @@ case OP_Column: {
testcase( aOffset[0]==0 );
goto op_column_read_header;
}
+ }else if( sqlite3BtreeCursorHasMoved(pC->uc.pCursor) ){
+ rc = sqlite3VdbeHandleMovedCursor(pC);
+ if( rc ) goto abort_due_to_error;
+ goto op_column_restart;
}
/* Make sure at least the first p2+1 entries of the header have been
@@ -90250,6 +92687,8 @@ case OP_Column: {
** columns. So the result will be either the default value or a NULL.
*/
if( pC->nHdrParsed<=p2 ){
+ pDest = &aMem[pOp->p3];
+ memAboutToChange(p, pDest);
if( pOp->p4type==P4_MEM ){
sqlite3VdbeMemShallowCopy(pDest, pOp->p4.pMem, MEM_Static);
}else{
@@ -90267,6 +92706,8 @@ case OP_Column: {
*/
  assert( p2<pC->nHdrParsed );
assert( rc==SQLITE_OK );
+ pDest = &aMem[pOp->p3];
+ memAboutToChange(p, pDest);
assert( sqlite3VdbeCheckMemInvariants(pDest) );
if( VdbeMemDynamic(pDest) ){
sqlite3VdbeMemSetNull(pDest);
@@ -90287,6 +92728,7 @@ case OP_Column: {
pDest->n = len = (t-12)/2;
pDest->enc = encoding;
if( pDest->szMalloc < len+2 ){
+ if( len>db->aLimit[SQLITE_LIMIT_LENGTH] ) goto too_big;
pDest->flags = MEM_Null;
if( sqlite3VdbeMemGrow(pDest, len+2, 0) ) goto no_mem;
}else{
@@ -90319,6 +92761,7 @@ case OP_Column: {
*/
sqlite3VdbeSerialGet((u8*)sqlite3CtypeMap, t, pDest);
}else{
+ if( len>db->aLimit[SQLITE_LIMIT_LENGTH] ) goto too_big;
rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, aOffset[p2], len, pDest);
if( rc!=SQLITE_OK ) goto abort_due_to_error;
sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest);
@@ -90531,7 +92974,6 @@ case OP_MakeRecord: {
Mem *pLast; /* Last field of the record */
int nField; /* Number of fields in the record */
char *zAffinity; /* The affinity string for the record */
- int file_format; /* File format to use for encoding */
u32 len; /* Length of a field */
u8 *zHdr; /* Where to write next byte of the header */
u8 *zPayload; /* Where to write next byte of the payload */
@@ -90560,7 +93002,6 @@ case OP_MakeRecord: {
pData0 = &aMem[nField];
nField = pOp->p2;
pLast = &pData0[nField-1];
- file_format = p->minWriteFileFormat;
/* Identify the output register */
  assert( pOp->p3<pOp->p1 || pOp->p3>=pOp->p1+pOp->p2 );
@@ -90662,7 +93103,7 @@ case OP_MakeRecord: {
testcase( uu==2147483647 ); testcase( uu==2147483648LL );
testcase( uu==140737488355327LL ); testcase( uu==140737488355328LL );
if( uu<=127 ){
- if( (i&1)==i && file_format>=4 ){
+ if( (i&1)==i && p->minWriteFileFormat>=4 ){
pRec->uTemp = 8+(u32)uu;
}else{
nData++;
@@ -90767,18 +93208,60 @@ case OP_MakeRecord: {
zPayload = zHdr + nHdr;
/* Write the record */
- zHdr += putVarint32(zHdr, nHdr);
+ if( nHdr<0x80 ){
+ *(zHdr++) = nHdr;
+ }else{
+ zHdr += sqlite3PutVarint(zHdr,nHdr);
+ }
assert( pData0<=pLast );
pRec = pData0;
- do{
+ while( 1 /*exit-by-break*/ ){
serial_type = pRec->uTemp;
/* EVIDENCE-OF: R-06529-47362 Following the size varint are one or more
- ** additional varints, one per column. */
- zHdr += putVarint32(zHdr, serial_type); /* serial type */
- /* EVIDENCE-OF: R-64536-51728 The values for each column in the record
+ ** additional varints, one per column.
+ ** EVIDENCE-OF: R-64536-51728 The values for each column in the record
** immediately follow the header. */
- zPayload += sqlite3VdbeSerialPut(zPayload, pRec, serial_type); /* content */
- }while( (++pRec)<=pLast );
+ if( serial_type<=7 ){
+ *(zHdr++) = serial_type;
+ if( serial_type==0 ){
+ /* NULL value. No change in zPayload */
+ }else{
+ u64 v;
+ u32 i;
+ if( serial_type==7 ){
+ assert( sizeof(v)==sizeof(pRec->u.r) );
+ memcpy(&v, &pRec->u.r, sizeof(v));
+ swapMixedEndianFloat(v);
+ }else{
+ v = pRec->u.i;
+ }
+ len = i = sqlite3SmallTypeSizes[serial_type];
+ assert( i>0 );
+ while( 1 /*exit-by-break*/ ){
+ zPayload[--i] = (u8)(v&0xFF);
+ if( i==0 ) break;
+ v >>= 8;
+ }
+ zPayload += len;
+ }
+ }else if( serial_type<0x80 ){
+ *(zHdr++) = serial_type;
+ if( serial_type>=14 && pRec->n>0 ){
+ assert( pRec->z!=0 );
+ memcpy(zPayload, pRec->z, pRec->n);
+ zPayload += pRec->n;
+ }
+ }else{
+ zHdr += sqlite3PutVarint(zHdr, serial_type);
+ if( pRec->n ){
+ assert( pRec->z!=0 );
+ memcpy(zPayload, pRec->z, pRec->n);
+ zPayload += pRec->n;
+ }
+ }
+ if( pRec==pLast ) break;
+ pRec++;
+ }
assert( nHdr==(int)(zHdr - (u8*)pOut->z) );
assert( nByte==(int)(zPayload - (u8*)pOut->z) );
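/* A worked example of the record layout written above (column values are
** assumed purely for illustration): a two-column row (17, 'hi') is encoded
** as
**
**    header:  0x03 0x01 0x11     -- header size 3, serial types 1 and 17
**    payload: 0x11 0x68 0x69     -- the integer 17, then 'h','i'
**
** Serial type 1 is a 1-byte signed integer and serial type 17 = 2*2+13 is
** a 2-byte text value, so nHdr==3 and nByte==6 for this record.
*/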
@@ -90997,7 +93480,10 @@ case OP_Savepoint: {
}
}
if( rc ) goto abort_due_to_error;
-
+ if( p->eVdbeState==VDBE_HALT_STATE ){
+ rc = SQLITE_DONE;
+ goto vdbe_return;
+ }
break;
}
@@ -91101,6 +93587,7 @@ case OP_AutoCommit: {
*/
case OP_Transaction: {
Btree *pBt;
+ Db *pDb;
int iMeta = 0;
assert( p->bIsReader );
@@ -91120,7 +93607,8 @@ case OP_Transaction: {
}
goto abort_due_to_error;
}
- pBt = db->aDb[pOp->p1].pBt;
+ pDb = &db->aDb[pOp->p1];
+ pBt = pDb->pBt;
if( pBt ){
rc = sqlite3BtreeBeginTrans(pBt, pOp->p2, &iMeta);
@@ -91161,8 +93649,7 @@ case OP_Transaction: {
assert( pOp->p5==0 || pOp->p4type==P4_INT32 );
if( rc==SQLITE_OK
&& pOp->p5
- && (iMeta!=pOp->p3
- || db->aDb[pOp->p1].pSchema->iGeneration!=pOp->p4.i)
+ && (iMeta!=pOp->p3 || pDb->pSchema->iGeneration!=pOp->p4.i)
){
/*
** IMPLEMENTATION-OF: R-03189-51135 As each SQL statement runs, the schema
@@ -91189,6 +93676,11 @@ case OP_Transaction: {
}
p->expired = 1;
rc = SQLITE_SCHEMA;
+
+ /* Set changeCntOn to 0 to prevent the value returned by sqlite3_changes()
+ ** from being modified in sqlite3VdbeHalt(). If this statement is
+ ** reprepared, changeCntOn will be set again. */
+ p->changeCntOn = 0;
}
if( rc ) goto abort_due_to_error;
break;
@@ -91255,7 +93747,7 @@ case OP_SetCookie: {
rc = sqlite3BtreeUpdateMeta(pDb->pBt, pOp->p2, pOp->p3);
if( pOp->p2==BTREE_SCHEMA_VERSION ){
/* When the schema cookie changes, record the new cookie internally */
- pDb->pSchema->schema_cookie = pOp->p3 - pOp->p5;
+ *(u32*)&pDb->pSchema->schema_cookie = *(u32*)&pOp->p3 - pOp->p5;
db->mDbFlags |= DBFLAG_SchemaChange;
sqlite3FkClearTriggerCache(db, pOp->p1);
}else if( pOp->p2==BTREE_FILE_FORMAT ){
@@ -91488,8 +93980,8 @@ case OP_OpenDup: {
pCx->pgnoRoot = pOrig->pgnoRoot;
pCx->isOrdered = pOrig->isOrdered;
pCx->ub.pBtx = pOrig->ub.pBtx;
- pCx->hasBeenDuped = 1;
- pOrig->hasBeenDuped = 1;
+ pCx->noReuse = 1;
+ pOrig->noReuse = 1;
rc = sqlite3BtreeCursor(pCx->ub.pBtx, pCx->pgnoRoot, BTREE_WRCSR,
pCx->pKeyInfo, pCx->uc.pCursor);
/* The sqlite3BtreeCursor() routine can only fail for the first cursor
@@ -91556,7 +94048,7 @@ case OP_OpenEphemeral: {
aMem[pOp->p3].z = "";
}
pCx = p->apCsr[pOp->p1];
- if( pCx && !pCx->hasBeenDuped && ALWAYS(pOp->p2<=pCx->nField) ){
+ if( pCx && !pCx->noReuse && ALWAYS(pOp->p2<=pCx->nField) ){
/* If the ephermeral table is already open and has no duplicates from
** OP_OpenDup, then erase all existing content so that the table is
** empty again, rather than creating a new table. */
@@ -91941,7 +94433,13 @@ case OP_SeekGT: { /* jump, in3, group */
r.aMem = &aMem[pOp->p3];
#ifdef SQLITE_DEBUG
-  { int i; for(i=0; i<r.nField; i++) assert( memIsValid(&r.aMem[i]) ); }
+  {
+    int i;
+    for(i=0; i<r.nField; i++){
+      assert( memIsValid(&r.aMem[i]) );
+      if( i>0 ) REGISTER_TRACE(pOp->p3+i, &r.aMem[i]);
+    }
+  }
#endif
r.eqSeen = 0;
rc = sqlite3BtreeIndexMoveto(pC->uc.pCursor, &r, &res);
@@ -92004,7 +94502,7 @@ seek_not_found:
}
-/* Opcode: SeekScan P1 P2 * * *
+/* Opcode: SeekScan P1 P2 * * P5
** Synopsis: Scan-ahead up to P1 rows
**
** This opcode is a prefix opcode to OP_SeekGE. In other words, this
@@ -92014,8 +94512,8 @@ seek_not_found:
** This opcode uses the P1 through P4 operands of the subsequent
** OP_SeekGE. In the text that follows, the operands of the subsequent
** OP_SeekGE opcode are denoted as SeekOP.P1 through SeekOP.P4. Only
-** the P1 and P2 operands of this opcode are also used, and are called
-** This.P1 and This.P2.
+** the P1, P2 and P5 operands of this opcode are also used, and are called
+** This.P1, This.P2 and This.P5.
**
** This opcode helps to optimize IN operators on a multi-column index
** where the IN operator is on the later terms of the index by avoiding
@@ -92025,29 +94523,51 @@ seek_not_found:
**
** The SeekGE.P3 and SeekGE.P4 operands identify an unpacked key which
** is the desired entry that we want the cursor SeekGE.P1 to be pointing
-** to. Call this SeekGE.P4/P5 row the "target".
+** to. Call this SeekGE.P3/P4 row the "target".
**
** If the SeekGE.P1 cursor is not currently pointing to a valid row,
** then this opcode is a no-op and control passes through into the OP_SeekGE.
**
** If the SeekGE.P1 cursor is pointing to a valid row, then that row
** might be the target row, or it might be near and slightly before the
-** target row. This opcode attempts to position the cursor on the target
-** row by, perhaps by invoking sqlite3BtreeStep() on the cursor
-** between 0 and This.P1 times.
-**
-** There are three possible outcomes from this opcode:
-**
-** If after This.P1 steps, the cursor is still pointing to a place that
-** is earlier in the btree than the target row, then fall through
-** into the subsquence OP_SeekGE opcode.
-**
-** If the cursor is successfully moved to the target row by 0 or more
-** sqlite3BtreeNext() calls, then jump to This.P2, which will land just
-** past the OP_IdxGT or OP_IdxGE opcode that follows the OP_SeekGE.
-**
-** If the cursor ends up past the target row (indicating the the target
-** row does not exist in the btree) then jump to SeekOP.P2.
+** target row, or it might be after the target row. If the cursor is
+** currently before the target row, then this opcode attempts to position
+** the cursor on or after the target row by invoking sqlite3BtreeStep()
+** on the cursor between 1 and This.P1 times.
+**
+** The This.P5 parameter is a flag that indicates what to do if the
+** cursor ends up pointing at a valid row that is past the target
+** row. If This.P5 is false (0) then a jump is made to SeekGE.P2. If
+** This.P5 is true (non-zero) then a jump is made to This.P2. The P5==0
+** case occurs when there are no inequality constraints to the right of
+** the IN constraint. The jump to SeekGE.P2 ends the loop. The P5!=0 case
+** occurs when there are inequality constraints to the right of the IN
+** operator. In that case, the This.P2 will point either directly to or
+** to setup code prior to the OP_IdxGT or OP_IdxGE opcode that checks for
+** loop termination.
+**
+** Possible outcomes from this opcode:
+**
+** If the cursor is initially not pointing to any valid row, then
+** fall through into the subsequent OP_SeekGE opcode.
+**
+** If the cursor is left pointing to a row that is before the target
+** row, even after making as many as This.P1 calls to
+** sqlite3BtreeNext(), then also fall through into OP_SeekGE.
+**
+** If the cursor is left pointing at the target row, either because it
+** was at the target row to begin with or because one or more
+** sqlite3BtreeNext() calls moved the cursor to the target row,
+** then jump to This.P2.
+**
+** If the cursor started out before the target row and a call to
+** sqlite3BtreeNext() moved the cursor off the end of the index
+** (indicating that the target row definitely does not exist in the
+** btree) then jump to SeekGE.P2, ending the loop.
+**
+** If the cursor ends up on a valid row that is past the target row
+** (indicating that the target row does not exist in the btree) then
+** jump to SeekOP.P2 if This.P5==0 or to This.P2 if This.P5>0.
**
*/
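/* A scenario sketch (the query and index are hypothetical): with an index
** on (a,b) and a WHERE clause "a=?1 AND b IN (...)", successive IN elements
** usually land close to the cursor's current position.  OP_SeekScan lets
** the cursor step forward up to This.P1 times looking for the next target
** before falling back to the full binary-search seek done by OP_SeekGE.
*/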
case OP_SeekScan: {
@@ -92058,14 +94578,25 @@ case OP_SeekScan: {
assert( pOp[1].opcode==OP_SeekGE );
- /* pOp->p2 points to the first instruction past the OP_IdxGT that
- ** follows the OP_SeekGE. */
+ /* If pOp->p5 is clear, then pOp->p2 points to the first instruction past the
+ ** OP_IdxGT that follows the OP_SeekGE. Otherwise, it points to the first
+ ** opcode past the OP_SeekGE itself. */
assert( pOp->p2>=(int)(pOp-aOp)+2 );
- assert( aOp[pOp->p2-1].opcode==OP_IdxGT || aOp[pOp->p2-1].opcode==OP_IdxGE );
- testcase( aOp[pOp->p2-1].opcode==OP_IdxGE );
- assert( pOp[1].p1==aOp[pOp->p2-1].p1 );
- assert( pOp[1].p2==aOp[pOp->p2-1].p2 );
- assert( pOp[1].p3==aOp[pOp->p2-1].p3 );
+#ifdef SQLITE_DEBUG
+ if( pOp->p5==0 ){
+ /* There are no inequality constraints following the IN constraint. */
+ assert( pOp[1].p1==aOp[pOp->p2-1].p1 );
+ assert( pOp[1].p2==aOp[pOp->p2-1].p2 );
+ assert( pOp[1].p3==aOp[pOp->p2-1].p3 );
+ assert( aOp[pOp->p2-1].opcode==OP_IdxGT
+ || aOp[pOp->p2-1].opcode==OP_IdxGE );
+ testcase( aOp[pOp->p2-1].opcode==OP_IdxGE );
+ }else{
+ /* There are inequality constraints. */
+ assert( pOp->p2==(int)(pOp-aOp)+2 );
+ assert( aOp[pOp->p2-1].opcode==OP_SeekGE );
+ }
+#endif
assert( pOp->p1>0 );
pC = p->apCsr[pOp[1].p1];
@@ -92099,8 +94630,9 @@ case OP_SeekScan: {
while(1){
rc = sqlite3VdbeIdxKeyCompare(db, pC, &r, &res);
if( rc ) goto abort_due_to_error;
- if( res>0 ){
+ if( res>0 && pOp->p5==0 ){
seekscan_search_fail:
+ /* Jump to SeekGE.P2, ending the loop */
#ifdef SQLITE_DEBUG
if( db->flags&SQLITE_VdbeTrace ){
printf("... %d steps and then skip\n", pOp->p1 - nStep);
@@ -92110,7 +94642,8 @@ case OP_SeekScan: {
pOp++;
goto jump_to_p2;
}
- if( res==0 ){
+ if( res>=0 ){
+ /* Jump to This.P2, bypassing the OP_SeekGE opcode */
#ifdef SQLITE_DEBUG
if( db->flags&SQLITE_VdbeTrace ){
printf("... %d steps and then success\n", pOp->p1 - nStep);
@@ -92186,12 +94719,16 @@ case OP_SeekHit: {
/* Opcode: IfNotOpen P1 P2 * * *
** Synopsis: if( !csr[P1] ) goto P2
**
-** If cursor P1 is not open, jump to instruction P2. Otherwise, fall through.
+** If cursor P1 is not open or if P1 is set to a NULL row using the
+** OP_NullRow opcode, then jump to instruction P2. Otherwise, fall through.
*/
case OP_IfNotOpen: { /* jump */
+ VdbeCursor *pCur;
+
  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
- VdbeBranchTaken(p->apCsr[pOp->p1]==0, 2);
- if( !p->apCsr[pOp->p1] ){
+ pCur = p->apCsr[pOp->p1];
+ VdbeBranchTaken(pCur==0 || pCur->nullRow, 2);
+ if( pCur==0 || pCur->nullRow ){
goto jump_to_p2_and_check_for_interrupt;
}
break;
@@ -92305,11 +94842,8 @@ case OP_NoConflict: /* jump, in3 */
case OP_NotFound: /* jump, in3 */
case OP_Found: { /* jump, in3 */
int alreadyExists;
- int takeJump;
int ii;
VdbeCursor *pC;
- int res;
- UnpackedRecord *pFree;
UnpackedRecord *pIdxKey;
UnpackedRecord r;
@@ -92324,14 +94858,15 @@ case OP_Found: { /* jump, in3 */
#ifdef SQLITE_DEBUG
pC->seekOp = pOp->opcode;
#endif
- pIn3 = &aMem[pOp->p3];
+ r.aMem = &aMem[pOp->p3];
assert( pC->eCurType==CURTYPE_BTREE );
assert( pC->uc.pCursor!=0 );
assert( pC->isTable==0 );
- if( pOp->p4.i>0 ){
+ r.nField = (u16)pOp->p4.i;
+ if( r.nField>0 ){
+ /* Key values in an array of registers */
r.pKeyInfo = pC->pKeyInfo;
- r.nField = (u16)pOp->p4.i;
- r.aMem = pIn3;
+ r.default_rc = 0;
#ifdef SQLITE_DEBUG
    for(ii=0; ii<r.nField; ii++){
      assert( memIsValid(&r.aMem[ii]) );
      assert( (r.aMem[ii].flags & MEM_Zero)==0 || r.aMem[ii].n==0 );
      if( ii ) REGISTER_TRACE(pOp->p3+ii, &r.aMem[ii]);
}
#endif
- pIdxKey = &r;
- pFree = 0;
+ rc = sqlite3BtreeIndexMoveto(pC->uc.pCursor, &r, &pC->seekResult);
}else{
- assert( pIn3->flags & MEM_Blob );
- rc = ExpandBlob(pIn3);
+ /* Composite key generated by OP_MakeRecord */
+ assert( r.aMem->flags & MEM_Blob );
+ assert( pOp->opcode!=OP_NoConflict );
+ rc = ExpandBlob(r.aMem);
assert( rc==SQLITE_OK || rc==SQLITE_NOMEM );
if( rc ) goto no_mem;
- pFree = pIdxKey = sqlite3VdbeAllocUnpackedRecord(pC->pKeyInfo);
+ pIdxKey = sqlite3VdbeAllocUnpackedRecord(pC->pKeyInfo);
if( pIdxKey==0 ) goto no_mem;
- sqlite3VdbeRecordUnpack(pC->pKeyInfo, pIn3->n, pIn3->z, pIdxKey);
+ sqlite3VdbeRecordUnpack(pC->pKeyInfo, r.aMem->n, r.aMem->z, pIdxKey);
+ pIdxKey->default_rc = 0;
+ rc = sqlite3BtreeIndexMoveto(pC->uc.pCursor, pIdxKey, &pC->seekResult);
+ sqlite3DbFreeNN(db, pIdxKey);
}
- pIdxKey->default_rc = 0;
- takeJump = 0;
- if( pOp->opcode==OP_NoConflict ){
- /* For the OP_NoConflict opcode, take the jump if any of the
- ** input fields are NULL, since any key with a NULL will not
- ** conflict */
-      for(ii=0; ii<pIdxKey->nField; ii++){
- if( pIdxKey->aMem[ii].flags & MEM_Null ){
- takeJump = 1;
- break;
- }
- }
- }
- rc = sqlite3BtreeIndexMoveto(pC->uc.pCursor, pIdxKey, &res);
- if( pFree ) sqlite3DbFreeNN(db, pFree);
if( rc!=SQLITE_OK ){
goto abort_due_to_error;
}
- pC->seekResult = res;
- alreadyExists = (res==0);
+ alreadyExists = (pC->seekResult==0);
pC->nullRow = 1-alreadyExists;
pC->deferredMoveto = 0;
pC->cacheStatus = CACHE_STALE;
@@ -92377,9 +94900,25 @@ case OP_Found: { /* jump, in3 */
VdbeBranchTaken(alreadyExists!=0,2);
if( alreadyExists ) goto jump_to_p2;
}else{
- VdbeBranchTaken(takeJump||alreadyExists==0,2);
- if( takeJump || !alreadyExists ) goto jump_to_p2;
- if( pOp->opcode==OP_IfNoHope ) pC->seekHit = pOp->p4.i;
+ if( !alreadyExists ){
+ VdbeBranchTaken(1,2);
+ goto jump_to_p2;
+ }
+ if( pOp->opcode==OP_NoConflict ){
+ /* For the OP_NoConflict opcode, take the jump if any of the
+ ** input fields are NULL, since any key with a NULL will not
+ ** conflict */
+      for(ii=0; ii<r.nField; ii++){
+        if( r.aMem[ii].flags & MEM_Null ){
+          VdbeBranchTaken(1,2);
+          goto jump_to_p2;
+        }
+      }
+    }
+    VdbeBranchTaken(0,2);
+    if( pOp->opcode==OP_IfNoHope ){
+ pC->seekHit = pOp->p4.i;
+ }
}
break;
}
@@ -93070,7 +95609,7 @@ case OP_RowData: {
}
/* Opcode: Rowid P1 P2 * * *
-** Synopsis: r[P2]=rowid
+** Synopsis: r[P2]=PX rowid of P1
**
** Store in register P2 an integer which is the key of the table entry that
** P1 is currently pointing to.
@@ -93126,16 +95665,24 @@ case OP_Rowid: { /* out2 */
** that occur while the cursor is on the null row will always
** write a NULL.
**
-** Or, if P1 is a Pseudo-Cursor (a cursor opened using OP_OpenPseudo)
-** just reset the cache for that cursor. This causes the row of
-** content held by the pseudo-cursor to be reparsed.
+** If cursor P1 is not previously opened, open it now to a special
+** pseudo-cursor that always returns NULL for every column.
*/
case OP_NullRow: {
VdbeCursor *pC;
  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
pC = p->apCsr[pOp->p1];
- assert( pC!=0 );
+ if( pC==0 ){
+ /* If the cursor is not already open, create a special kind of
+ ** pseudo-cursor that always gives null rows. */
+ pC = allocateCursor(p, pOp->p1, 1, CURTYPE_PSEUDO);
+ if( pC==0 ) goto no_mem;
+ pC->seekResult = 0;
+ pC->isTable = 1;
+ pC->noReuse = 1;
+ pC->uc.pCursor = sqlite3BtreeFakeValidCursor();
+ }
pC->nullRow = 1;
pC->cacheStatus = CACHE_STALE;
if( pC->eCurType==CURTYPE_BTREE ){
@@ -93308,7 +95855,7 @@ case OP_Rewind: { /* jump */
break;
}
-/* Opcode: Next P1 P2 P3 P4 P5
+/* Opcode: Next P1 P2 P3 * P5
**
** Advance cursor P1 so that it points to the next key/data pair in its
** table or index. If there are no more key/value pairs then fall through
@@ -93327,15 +95874,12 @@ case OP_Rewind: { /* jump */
** omitted if that index had been unique. P3 is usually 0. P3 is
** always either 0 or 1.
**
-** P4 is always of type P4_ADVANCE. The function pointer points to
-** sqlite3BtreeNext().
-**
** If P5 is positive and the jump is taken, then event counter
** number P5-1 in the prepared statement is incremented.
**
** See also: Prev
*/
-/* Opcode: Prev P1 P2 P3 P4 P5
+/* Opcode: Prev P1 P2 P3 * P5
**
** Back up cursor P1 so that it points to the previous key/data pair in its
** table or index. If there is no previous key/value pairs then fall through
@@ -93355,9 +95899,6 @@ case OP_Rewind: { /* jump */
** omitted if that index had been unique. P3 is usually 0. P3 is
** always either 0 or 1.
**
-** P4 is always of type P4_ADVANCE. The function pointer points to
-** sqlite3BtreePrevious().
-**
** If P5 is positive and the jump is taken, then event counter
** number P5-1 in the prepared statement is incremented.
*/
@@ -93375,30 +95916,37 @@ case OP_SorterNext: { /* jump */
assert( isSorter(pC) );
rc = sqlite3VdbeSorterNext(db, pC);
goto next_tail;
+
case OP_Prev: /* jump */
-case OP_Next: /* jump */
  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
-  assert( pOp->p5<ArraySize(p->aCounter) );
+ assert( pOp->p5==0
+ || pOp->p5==SQLITE_STMTSTATUS_FULLSCAN_STEP
+ || pOp->p5==SQLITE_STMTSTATUS_AUTOINDEX);
pC = p->apCsr[pOp->p1];
assert( pC!=0 );
assert( pC->deferredMoveto==0 );
assert( pC->eCurType==CURTYPE_BTREE );
- assert( pOp->opcode!=OP_Next || pOp->p4.xAdvance==sqlite3BtreeNext );
- assert( pOp->opcode!=OP_Prev || pOp->p4.xAdvance==sqlite3BtreePrevious );
+ assert( pC->seekOp==OP_SeekLT || pC->seekOp==OP_SeekLE
+ || pC->seekOp==OP_Last || pC->seekOp==OP_IfNoHope
+ || pC->seekOp==OP_NullRow);
+ rc = sqlite3BtreePrevious(pC->uc.pCursor, pOp->p3);
+ goto next_tail;
- /* The Next opcode is only used after SeekGT, SeekGE, Rewind, and Found.
- ** The Prev opcode is only used after SeekLT, SeekLE, and Last. */
- assert( pOp->opcode!=OP_Next
- || pC->seekOp==OP_SeekGT || pC->seekOp==OP_SeekGE
+case OP_Next: /* jump */
+  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
+ assert( pOp->p5==0
+ || pOp->p5==SQLITE_STMTSTATUS_FULLSCAN_STEP
+ || pOp->p5==SQLITE_STMTSTATUS_AUTOINDEX);
+ pC = p->apCsr[pOp->p1];
+ assert( pC!=0 );
+ assert( pC->deferredMoveto==0 );
+ assert( pC->eCurType==CURTYPE_BTREE );
+ assert( pC->seekOp==OP_SeekGT || pC->seekOp==OP_SeekGE
|| pC->seekOp==OP_Rewind || pC->seekOp==OP_Found
|| pC->seekOp==OP_NullRow|| pC->seekOp==OP_SeekRowid
|| pC->seekOp==OP_IfNoHope);
- assert( pOp->opcode!=OP_Prev
- || pC->seekOp==OP_SeekLT || pC->seekOp==OP_SeekLE
- || pC->seekOp==OP_Last || pC->seekOp==OP_IfNoHope
- || pC->seekOp==OP_NullRow);
+ rc = sqlite3BtreeNext(pC->uc.pCursor, pOp->p3);
- rc = pOp->p4.xAdvance(pC->uc.pCursor, pOp->p3);
next_tail:
pC->cacheStatus = CACHE_STALE;
VdbeBranchTaken(rc==SQLITE_OK,2);
@@ -93585,9 +96133,9 @@ case OP_IdxRowid: { /* out2 */
  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
pC = p->apCsr[pOp->p1];
assert( pC!=0 );
- assert( pC->eCurType==CURTYPE_BTREE );
+ assert( pC->eCurType==CURTYPE_BTREE || IsNullCursor(pC) );
assert( pC->uc.pCursor!=0 );
- assert( pC->isTable==0 );
+ assert( pC->isTable==0 || IsNullCursor(pC) );
assert( pC->deferredMoveto==0 );
assert( !pC->nullRow || pOp->opcode==OP_IdxRowid );
@@ -93595,10 +96143,10 @@ case OP_IdxRowid: { /* out2 */
** of sqlite3VdbeCursorRestore() and sqlite3VdbeIdxRowid(). */
rc = sqlite3VdbeCursorRestore(pC);
- /* sqlite3VbeCursorRestore() can only fail if the record has been deleted
- ** out from under the cursor. That will never happens for an IdxRowid
- ** or Seek opcode */
- if( NEVER(rc!=SQLITE_OK) ) goto abort_due_to_error;
+ /* sqlite3VdbeCursorRestore() may fail if the cursor has been disturbed
+ ** since it was last positioned and an error (e.g. OOM or an IO error)
+ ** occurs while trying to reposition it. */
+ if( rc!=SQLITE_OK ) goto abort_due_to_error;
if( !pC->nullRow ){
rowid = 0; /* Not needed. Only used to silence a warning. */
@@ -93616,6 +96164,7 @@ case OP_IdxRowid: { /* out2 */
pTabCur->nullRow = 0;
pTabCur->movetoTarget = rowid;
pTabCur->deferredMoveto = 1;
+ pTabCur->cacheStatus = CACHE_STALE;
assert( pOp->p4type==P4_INTARRAY || pOp->p4.ai==0 );
assert( !pTabCur->isEphemeral );
pTabCur->ub.aAltMap = pOp->p4.ai;
@@ -93750,7 +96299,7 @@ case OP_IdxGE: { /* jump */
rc = sqlite3VdbeMemFromBtreeZeroOffset(pCur, (u32)nCellKey, &m);
if( rc ) goto abort_due_to_error;
res = sqlite3VdbeRecordCompareWithSkip(m.n, m.z, &r, 0);
- sqlite3VdbeMemRelease(&m);
+ sqlite3VdbeMemReleaseMalloc(&m);
}
/* End of inlined sqlite3VdbeIdxKeyCompare() */
@@ -94499,7 +97048,7 @@ case OP_IfPos: { /* jump, in1 */
** Synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)
**
** This opcode performs a commonly used computation associated with
-** LIMIT and OFFSET process. r[P1] holds the limit counter. r[P3]
+** LIMIT and OFFSET processing. r[P1] holds the limit counter. r[P3]
** holds the offset counter. The opcode computes the combined value
** of the LIMIT and OFFSET and stores that value in r[P2]. The r[P2]
** value computed is the total number of rows that will need to be
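/*
** Editorial sketch (not part of the original sqlite3.c) of the computation
** described above, written out with hypothetical variables standing in for
** the registers r[P1], r[P2], and r[P3]:
**
**      if( rP1>0 ){
**        rP2 = rP1 + (rP3>0 ? rP3 : 0);
**      }else{
**        rP2 = -1;
**      }
**
** In words: the combined value is LIMIT plus max(0,OFFSET), or -1 meaning
** "no limit" when the limit counter is not positive.
*/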
@@ -94631,6 +97180,7 @@ case OP_AggStep: {
pCtx->pVdbe = p;
pCtx->skipFlag = 0;
pCtx->isError = 0;
+ pCtx->enc = encoding;
pCtx->argc = n;
pOp->p4type = P4_FUNCCTX;
pOp->p4.pCtx = pCtx;
@@ -94760,9 +97310,6 @@ case OP_AggFinal: {
}
sqlite3VdbeChangeEncoding(pMem, encoding);
UPDATE_MAX_BLOBSIZE(pMem);
- if( sqlite3VdbeMemTooBig(pMem) ){
- goto too_big;
- }
break;
}
@@ -95270,7 +97817,6 @@ case OP_VColumn: {
VdbeCursor *pCur = p->apCsr[pOp->p1];
assert( pCur!=0 );
- assert( pCur->eCurType==CURTYPE_VTAB );
assert( pOp->p3>0 && pOp->p3<=(p->nMem+1 - p->nCursor) );
pDest = &aMem[pOp->p3];
memAboutToChange(p, pDest);
@@ -95278,11 +97824,13 @@ case OP_VColumn: {
sqlite3VdbeMemSetNull(pDest);
break;
}
+ assert( pCur->eCurType==CURTYPE_VTAB );
pVtab = pCur->uc.pVCur->pVtab;
pModule = pVtab->pModule;
assert( pModule->xColumn );
memset(&sContext, 0, sizeof(sContext));
sContext.pOut = pDest;
+ sContext.enc = encoding;
assert( pOp->p5==OPFLAG_NOCHNG || pOp->p5==0 );
if( pOp->p5 & OPFLAG_NOCHNG ){
sqlite3VdbeMemSetNull(pDest);
@@ -95301,9 +97849,6 @@ case OP_VColumn: {
REGISTER_TRACE(pOp->p3, pDest);
UPDATE_MAX_BLOBSIZE(pDest);
- if( sqlite3VdbeMemTooBig(pDest) ){
- goto too_big;
- }
if( rc ) goto abort_due_to_error;
break;
}
@@ -95570,6 +98115,7 @@ case OP_Function: { /* group */
if( pCtx->pOut != pOut ){
pCtx->pVdbe = p;
pCtx->pOut = pOut;
+ pCtx->enc = encoding;
for(i=pCtx->argc-1; i>=0; i--) pCtx->argv[i] = &aMem[pOp->p2+i];
}
assert( pCtx->pVdbe==p );
@@ -95596,17 +98142,27 @@ case OP_Function: { /* group */
if( rc ) goto abort_due_to_error;
}
- /* Copy the result of the function into register P3 */
- if( pOut->flags & (MEM_Str|MEM_Blob) ){
- sqlite3VdbeChangeEncoding(pOut, encoding);
- if( sqlite3VdbeMemTooBig(pOut) ) goto too_big;
- }
+ assert( (pOut->flags&MEM_Str)==0
+ || pOut->enc==encoding
+ || db->mallocFailed );
+ assert( !sqlite3VdbeMemTooBig(pOut) );
REGISTER_TRACE(pOp->p3, pOut);
UPDATE_MAX_BLOBSIZE(pOut);
break;
}
+/* Opcode: ClrSubtype P1 * * * *
+** Synopsis: r[P1].subtype = 0
+**
+** Clear the subtype from register P1.
+*/
+case OP_ClrSubtype: { /* in1 */
+ pIn1 = &aMem[pOp->p1];
+ pIn1->flags &= ~MEM_Subtype;
+ break;
+}
+
/* Opcode: FilterAdd P1 * P3 P4 *
** Synopsis: filter(P1) += key(P3@P4)
**
@@ -95726,7 +98282,7 @@ case OP_Init: { /* jump */
#ifndef SQLITE_OMIT_TRACE
if( (db->mTrace & (SQLITE_TRACE_STMT|SQLITE_TRACE_LEGACY))!=0
- && !p->doingRerun
+ && p->minWriteFileFormat!=254 /* tag-20220401a */
&& (zTrace = (pOp->p4.z ? pOp->p4.z : p->zSql))!=0
){
#ifndef SQLITE_OMIT_DEPRECATED
@@ -95955,7 +98511,7 @@ abort_due_to_error:
testcase( sqlite3GlobalConfig.xLog!=0 );
sqlite3_log(rc, "statement aborts at %d: [%s] %s",
(int)(pOp - aOp), p->zSql, p->zErrMsg);
- sqlite3VdbeHalt(p);
+ if( p->eVdbeState==VDBE_RUN_STATE ) sqlite3VdbeHalt(p);
if( rc==SQLITE_IOERR_NOMEM ) sqlite3OomFault(db);
if( rc==SQLITE_CORRUPT && db->autoCommit==0 ){
db->flags |= SQLITE_CorruptRdOnly;
@@ -100090,6 +102646,8 @@ SQLITE_PRIVATE int sqlite3JournalOpen(
){
MemJournal *p = (MemJournal*)pJfd;
+ assert( zName || nSpill<0 || (flags & SQLITE_OPEN_EXCLUSIVE) );
+
/* Zero the file-handle object. If nSpill was passed zero, initialize
** it using the sqlite3OsOpen() function of the underlying VFS. In this
** case none of the code in this module is executed as a result of calls
@@ -100517,53 +103075,24 @@ static void resolveAlias(
sqlite3ExprDelete(db, pDup);
pDup = 0;
}else{
+ Expr temp;
incrAggFunctionDepth(pDup, nSubquery);
if( pExpr->op==TK_COLLATE ){
assert( !ExprHasProperty(pExpr, EP_IntValue) );
pDup = sqlite3ExprAddCollateString(pParse, pDup, pExpr->u.zToken);
}
-
- /* Before calling sqlite3ExprDelete(), set the EP_Static flag. This
- ** prevents ExprDelete() from deleting the Expr structure itself,
- ** allowing it to be repopulated by the memcpy() on the following line.
- ** The pExpr->u.zToken might point into memory that will be freed by the
- ** sqlite3DbFree(db, pDup) on the last line of this block, so be sure to
- ** make a copy of the token before doing the sqlite3DbFree().
- */
- ExprSetProperty(pExpr, EP_Static);
- sqlite3ExprDelete(db, pExpr);
- memcpy(pExpr, pDup, sizeof(*pExpr));
- if( !ExprHasProperty(pExpr, EP_IntValue) && pExpr->u.zToken!=0 ){
- assert( (pExpr->flags & (EP_Reduced|EP_TokenOnly))==0 );
- pExpr->u.zToken = sqlite3DbStrDup(db, pExpr->u.zToken);
- pExpr->flags |= EP_MemToken;
- }
+ memcpy(&temp, pDup, sizeof(Expr));
+ memcpy(pDup, pExpr, sizeof(Expr));
+ memcpy(pExpr, &temp, sizeof(Expr));
if( ExprHasProperty(pExpr, EP_WinFunc) ){
if( ALWAYS(pExpr->y.pWin!=0) ){
pExpr->y.pWin->pOwner = pExpr;
}
}
- sqlite3DbFree(db, pDup);
+ sqlite3ExprDeferredDelete(pParse, pDup);
}
}
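/*
** Editorial illustration (not part of the original sqlite3.c): the three
** memcpy() calls above are a plain structure swap.  The alias expression
** pExpr takes over the content of the duplicate pDup, while pDup ends up
** holding the old pExpr content and is released later through
** sqlite3ExprDeferredDelete().  The swap is equivalent to this
** hypothetical helper:
*/
#if 0   /* illustrative only */
static void exprSwapContents(Expr *pA, Expr *pB){
  Expr temp;
  memcpy(&temp, pA, sizeof(Expr));   /* save A */
  memcpy(pA, pB, sizeof(Expr));      /* A takes B's content */
  memcpy(pB, &temp, sizeof(Expr));   /* B takes A's old content */
}
#endif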
-
-/*
-** Return TRUE if the name zCol occurs anywhere in the USING clause.
-**
-** Return FALSE if the USING clause is NULL or if it does not contain
-** zCol.
-*/
-static int nameInUsingClause(IdList *pUsing, const char *zCol){
- if( pUsing ){
- int k;
- for(k=0; k<pUsing->nId; k++){
- if( sqlite3StrICmp(pUsing->a[k].zName, zCol)==0 ) return 1;
- }
- }
- return 0;
-}
-
/*
** Subqueries store the original database, table and column names for their
** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN".
@@ -100579,7 +103108,7 @@ SQLITE_PRIVATE int sqlite3MatchEName(
){
int n;
const char *zSpan;
- if( pItem->eEName!=ENAME_TAB ) return 0;
+ if( pItem->fg.eEName!=ENAME_TAB ) return 0;
zSpan = pItem->zEName;
for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){}
if( zDb && (sqlite3StrNICmp(zSpan, zDb, n)!=0 || zDb[n]!=0) ){
@@ -100640,6 +103169,29 @@ SQLITE_PRIVATE Bitmask sqlite3ExprColUsed(Expr *pExpr){
}
}
+/*
+** Create a new expression term for the column specified by pMatch and
+** iColumn. Append this new expression term to the FULL JOIN Match set
+** in *ppList. Create a new *ppList if this is the first term in the
+** set.
+*/
+static void extendFJMatch(
+ Parse *pParse, /* Parsing context */
+ ExprList **ppList, /* ExprList to extend */
+ SrcItem *pMatch, /* Source table containing the column */
+ i16 iColumn /* The column number */
+){
+ Expr *pNew = sqlite3ExprAlloc(pParse->db, TK_COLUMN, 0, 0);
+ if( pNew ){
+ pNew->iTable = pMatch->iCursor;
+ pNew->iColumn = iColumn;
+ pNew->y.pTab = pMatch->pTab;
+ assert( (pMatch->fg.jointype & (JT_LEFT|JT_LTORJ))!=0 );
+ ExprSetProperty(pNew, EP_CanBeNull);
+ *ppList = sqlite3ExprListAppend(pParse, *ppList, pNew);
+ }
+}
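/*
** Editorial example (not part of the original sqlite3.c), assuming the
** usual FULL JOIN USING name-resolution semantics:
**
**      SELECT y FROM t1 FULL JOIN t2 USING(y);
**
** The name "y" legitimately matches a column in both t1 and t2.  The
** terms gathered in the pFJMatch list by extendFJMatch() are later folded
** into the equivalent of coalesce(t1.y, t2.y), so that a row supplied by
** only one side of the join still yields a value for y.
*/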
+
/*
** Given the name of a column of the form X.Y.Z or Y.Z or just Z, look up
** that name in the set of source tables in pSrcList and make the pExpr
@@ -100685,11 +103237,13 @@ static int lookupName(
NameContext *pTopNC = pNC; /* First namecontext in the list */
Schema *pSchema = 0; /* Schema of the expression */
int eNewExprOp = TK_COLUMN; /* New value for pExpr->op on success */
- Table *pTab = 0; /* Table hold the row */
+ Table *pTab = 0; /* Table holding the row */
Column *pCol; /* A column of pTab */
+ ExprList *pFJMatch = 0; /* Matches for FULL JOIN .. USING */
assert( pNC ); /* the name context cannot be NULL. */
assert( zCol ); /* The Z in X.Y.Z cannot be NULL */
+ assert( zDb==0 || zTab!=0 );
assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) );
/* Initialize the node to no-match */
@@ -100738,26 +103292,65 @@ static int lookupName(
pTab = pItem->pTab;
assert( pTab!=0 && pTab->zName!=0 );
assert( pTab->nCol>0 || pParse->nErr );
- if( pItem->pSelect && (pItem->pSelect->selFlags & SF_NestedFrom)!=0 ){
+ assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem->pSelect) );
+ if( pItem->fg.isNestedFrom ){
+ /* In this case, pItem is a subquery that has been formed from a
+ ** parenthesized subset of the FROM clause terms. Example:
+ ** .... FROM t1 LEFT JOIN (t2 RIGHT JOIN t3 USING(x)) USING(y) ...
+ ** \_________________________/
+ ** This pItem -------------^
+ */
int hit = 0;
+ assert( pItem->pSelect!=0 );
pEList = pItem->pSelect->pEList;
+ assert( pEList!=0 );
+ assert( pEList->nExpr==pTab->nCol );
      for(j=0; j<pEList->nExpr; j++){
- if( sqlite3MatchEName(&pEList->a[j], zCol, zTab, zDb) ){
- cnt++;
- cntTab = 2;
- pMatch = pItem;
- pExpr->iColumn = j;
- hit = 1;
+ if( !sqlite3MatchEName(&pEList->a[j], zCol, zTab, zDb) ){
+ continue;
}
+ if( cnt>0 ){
+ if( pItem->fg.isUsing==0
+ || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0
+ ){
+ /* Two or more tables have the same column name which is
+ ** not joined by USING. This is an error. Signal as much
+ ** by clearing pFJMatch and letting cnt go above 1. */
+ sqlite3ExprListDelete(db, pFJMatch);
+ pFJMatch = 0;
+ }else
+ if( (pItem->fg.jointype & JT_RIGHT)==0 ){
+ /* An INNER or LEFT JOIN. Use the left-most table */
+ continue;
+ }else
+ if( (pItem->fg.jointype & JT_LEFT)==0 ){
+ /* A RIGHT JOIN. Use the right-most table */
+ cnt = 0;
+ sqlite3ExprListDelete(db, pFJMatch);
+ pFJMatch = 0;
+ }else{
+ /* For a FULL JOIN, we must construct a coalesce() func */
+ extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn);
+ }
+ }
+ cnt++;
+ cntTab = 2;
+ pMatch = pItem;
+ pExpr->iColumn = j;
+ pEList->a[j].fg.bUsed = 1;
+ hit = 1;
+ if( pEList->a[j].fg.bUsingTerm ) break;
}
if( hit || zTab==0 ) continue;
}
- if( zDb ){
- if( pTab->pSchema!=pSchema ) continue;
- if( pSchema==0 && strcmp(zDb,"*")!=0 ) continue;
- }
+ assert( zDb==0 || zTab!=0 );
if( zTab ){
- const char *zTabName = pItem->zAlias ? pItem->zAlias : pTab->zName;
+ const char *zTabName;
+ if( zDb ){
+ if( pTab->pSchema!=pSchema ) continue;
+ if( pSchema==0 && strcmp(zDb,"*")!=0 ) continue;
+ }
+ zTabName = pItem->zAlias ? pItem->zAlias : pTab->zName;
assert( zTabName!=0 );
if( sqlite3StrICmp(zTabName, zTab)!=0 ){
continue;
@@ -100772,18 +103365,37 @@ static int lookupName(
if( pCol->hName==hCol
&& sqlite3StrICmp(pCol->zCnName, zCol)==0
){
- /* If there has been exactly one prior match and this match
- ** is for the right-hand table of a NATURAL JOIN or is in a
- ** USING clause, then skip this match.
- */
- if( cnt==1 ){
- if( pItem->fg.jointype & JT_NATURAL ) continue;
- if( nameInUsingClause(pItem->pUsing, zCol) ) continue;
+ if( cnt>0 ){
+ if( pItem->fg.isUsing==0
+ || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0
+ ){
+ /* Two or more tables have the same column name which is
+ ** not joined by USING. This is an error. Signal as much
+ ** by clearing pFJMatch and letting cnt go above 1. */
+ sqlite3ExprListDelete(db, pFJMatch);
+ pFJMatch = 0;
+ }else
+ if( (pItem->fg.jointype & JT_RIGHT)==0 ){
+ /* An INNER or LEFT JOIN. Use the left-most table */
+ continue;
+ }else
+ if( (pItem->fg.jointype & JT_LEFT)==0 ){
+ /* A RIGHT JOIN. Use the right-most table */
+ cnt = 0;
+ sqlite3ExprListDelete(db, pFJMatch);
+ pFJMatch = 0;
+ }else{
+ /* For a FULL JOIN, we must construct a coalesce() func */
+ extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn);
+ }
}
cnt++;
pMatch = pItem;
/* Substitute the rowid (column -1) for the INTEGER PRIMARY KEY */
pExpr->iColumn = j==pTab->iPKey ? -1 : (i16)j;
+ if( pItem->fg.isNestedFrom ){
+ sqlite3SrcItemColumnUsed(pItem, j);
+ }
break;
}
}
@@ -100796,9 +103408,7 @@ static int lookupName(
pExpr->iTable = pMatch->iCursor;
assert( ExprUseYTab(pExpr) );
pExpr->y.pTab = pMatch->pTab;
- /* RIGHT JOIN not (yet) supported */
- assert( (pMatch->fg.jointype & JT_RIGHT)==0 );
- if( (pMatch->fg.jointype & JT_LEFT)!=0 ){
+ if( (pMatch->fg.jointype & (JT_LEFT|JT_LTORJ))!=0 ){
ExprSetProperty(pExpr, EP_CanBeNull);
}
pSchema = pExpr->y.pTab->pSchema;
@@ -100952,7 +103562,7 @@ static int lookupName(
assert( pEList!=0 );
      for(j=0; j<pEList->nExpr; j++){
char *zAs = pEList->a[j].zEName;
- if( pEList->a[j].eEName==ENAME_NAME
+ if( pEList->a[j].fg.eEName==ENAME_NAME
&& sqlite3_stricmp(zAs, zCol)==0
){
Expr *pOrig;
@@ -101039,11 +103649,37 @@ static int lookupName(
}
/*
- ** cnt==0 means there was not match. cnt>1 means there were two or
- ** more matches. Either way, we have an error.
+ ** cnt==0 means there was no match.
+ ** cnt>1 means there were two or more matches.
+ **
+ ** cnt==0 is always an error. cnt>1 is often an error, but might
+ ** be multiple matches for a NATURAL LEFT JOIN or a LEFT JOIN USING.
*/
+ assert( pFJMatch==0 || cnt>0 );
+ assert( !ExprHasProperty(pExpr, EP_xIsSelect|EP_IntValue) );
if( cnt!=1 ){
const char *zErr;
+ if( pFJMatch ){
+ if( pFJMatch->nExpr==cnt-1 ){
+ if( ExprHasProperty(pExpr,EP_Leaf) ){
+ ExprClearProperty(pExpr,EP_Leaf);
+ }else{
+ sqlite3ExprDelete(db, pExpr->pLeft);
+ pExpr->pLeft = 0;
+ sqlite3ExprDelete(db, pExpr->pRight);
+ pExpr->pRight = 0;
+ }
+ extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn);
+ pExpr->op = TK_FUNCTION;
+ pExpr->u.zToken = "coalesce";
+ pExpr->x.pList = pFJMatch;
+ cnt = 1;
+ goto lookupname_end;
+ }else{
+ sqlite3ExprListDelete(db, pFJMatch);
+ pFJMatch = 0;
+ }
+ }
zErr = cnt==0 ? "no such column" : "ambiguous column name";
if( zDb ){
sqlite3ErrorMsg(pParse, "%s: %s.%s.%s", zErr, zDb, zTab, zCol);
@@ -101056,6 +103692,16 @@ static int lookupName(
pParse->checkSchema = 1;
pTopNC->nNcErr++;
}
+ assert( pFJMatch==0 );
+
+ /* Remove all substructure from pExpr */
+ if( !ExprHasProperty(pExpr,(EP_TokenOnly|EP_Leaf)) ){
+ sqlite3ExprDelete(db, pExpr->pLeft);
+ pExpr->pLeft = 0;
+ sqlite3ExprDelete(db, pExpr->pRight);
+ pExpr->pRight = 0;
+ ExprSetProperty(pExpr, EP_Leaf);
+ }
/* If a column from a table in pSrcList is referenced, then record
** this fact in the pSrcList.a[].colUsed bitmask. Column 0 causes
@@ -101075,16 +103721,7 @@ static int lookupName(
pMatch->colUsed |= sqlite3ExprColUsed(pExpr);
}
- /* Clean up and return
- */
- if( !ExprHasProperty(pExpr,(EP_TokenOnly|EP_Leaf)) ){
- sqlite3ExprDelete(db, pExpr->pLeft);
- pExpr->pLeft = 0;
- sqlite3ExprDelete(db, pExpr->pRight);
- pExpr->pRight = 0;
- }
pExpr->op = eNewExprOp;
- ExprSetProperty(pExpr, EP_Leaf);
lookupname_end:
if( cnt==1 ){
assert( pNC!=0 );
@@ -101269,7 +103906,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
}
sqlite3WalkExpr(pWalker, pExpr->pLeft);
if( 0==sqlite3ExprCanBeNull(pExpr->pLeft) && !IN_RENAME_OBJECT ){
- testcase( ExprHasProperty(pExpr, EP_FromJoin) );
+ testcase( ExprHasProperty(pExpr, EP_OuterON) );
assert( !ExprHasProperty(pExpr, EP_IntValue) );
if( pExpr->op==TK_NOTNULL ){
pExpr->u.zToken = "true";
@@ -101678,7 +104315,7 @@ static int resolveAsName(
assert( !ExprHasProperty(pE, EP_IntValue) );
zCol = pE->u.zToken;
  for(i=0; i<pEList->nExpr; i++){
- if( pEList->a[i].eEName==ENAME_NAME
+ if( pEList->a[i].fg.eEName==ENAME_NAME
&& sqlite3_stricmp(pEList->a[i].zEName, zCol)==0
){
return i+1;
@@ -101799,7 +104436,7 @@ static int resolveCompoundOrderBy(
return 1;
}
  for(i=0; i<pOrderBy->nExpr; i++){
- pOrderBy->a[i].done = 0;
+ pOrderBy->a[i].fg.done = 0;
}
pSelect->pNext = 0;
while( pSelect->pPrior ){
@@ -101814,7 +104451,7 @@ static int resolveCompoundOrderBy(
    for(i=0, pItem=pOrderBy->a; i<pOrderBy->nExpr; i++, pItem++){
int iCol = -1;
Expr *pE, *pDup;
- if( pItem->done ) continue;
+ if( pItem->fg.done ) continue;
pE = sqlite3ExprSkipCollateAndLikely(pItem->pExpr);
if( NEVER(pE==0) ) continue;
if( sqlite3ExprIsInteger(pE, &iCol) ){
@@ -101867,7 +104504,7 @@ static int resolveCompoundOrderBy(
sqlite3ExprDelete(db, pE);
pItem->u.x.iOrderByCol = (u16)iCol;
}
- pItem->done = 1;
+ pItem->fg.done = 1;
}else{
moreToDo = 1;
}
@@ -101875,7 +104512,7 @@ static int resolveCompoundOrderBy(
pSelect = pSelect->pNext;
}
  for(i=0; i<pOrderBy->nExpr; i++){
- if( pOrderBy->a[i].done==0 ){
+ if( pOrderBy->a[i].fg.done==0 ){
sqlite3ErrorMsg(pParse, "%r ORDER BY term does not match any "
"column in the result set", i+1);
return 1;
@@ -102165,8 +104802,8 @@ static int resolveSelectStep(Walker *pWalker, Select *p){
sNC.uNC.pEList = p->pEList;
sNC.ncFlags |= NC_UEList;
if( p->pHaving ){
- if( !pGroupBy ){
- sqlite3ErrorMsg(pParse, "a GROUP BY clause is required before HAVING");
+ if( (p->selFlags & SF_Aggregate)==0 ){
+ sqlite3ErrorMsg(pParse, "HAVING clause on a non-aggregate query");
return WRC_Abort;
}
if( sqlite3ResolveExprNames(&sNC, p->pHaving) ) return WRC_Abort;
@@ -102546,9 +105183,8 @@ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){
if( op==TK_REGISTER ) op = pExpr->op2;
if( op==TK_COLUMN || op==TK_AGG_COLUMN ){
assert( ExprUseYTab(pExpr) );
- if( pExpr->y.pTab ){
- return sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn);
- }
+ assert( pExpr->y.pTab!=0 );
+ return sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn);
}
if( op==TK_SELECT ){
assert( ExprUseXSelect(pExpr) );
@@ -102666,17 +105302,14 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr){
int op = p->op;
if( op==TK_REGISTER ) op = p->op2;
if( op==TK_AGG_COLUMN || op==TK_COLUMN || op==TK_TRIGGER ){
+ int j;
assert( ExprUseYTab(p) );
- if( p->y.pTab!=0 ){
- /* op==TK_REGISTER && p->y.pTab!=0 happens when pExpr was originally
- ** a TK_COLUMN but was previously evaluated and cached in a register */
- int j = p->iColumn;
- if( j>=0 ){
- const char *zColl = sqlite3ColumnColl(&p->y.pTab->aCol[j]);
- pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0);
- }
- break;
+ assert( p->y.pTab!=0 );
+ if( (j = p->iColumn)>=0 ){
+ const char *zColl = sqlite3ColumnColl(&p->y.pTab->aCol[j]);
+ pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0);
}
+ break;
}
if( op==TK_CAST || op==TK_UPLUS ){
p = p->pLeft;
@@ -103261,7 +105894,9 @@ static void heightOfSelect(const Select *pSelect, int *pnHeight){
*/
static void exprSetHeight(Expr *p){
int nHeight = p->pLeft ? p->pLeft->nHeight : 0;
- if( p->pRight && p->pRight->nHeight>nHeight ) nHeight = p->pRight->nHeight;
+ if( NEVER(p->pRight) && p->pRight->nHeight>nHeight ){
+ nHeight = p->pRight->nHeight;
+ }
if( ExprUseXSelect(p) ){
heightOfSelect(p->x.pSelect, &nHeight);
}else if( p->x.pList ){
@@ -103404,15 +106039,26 @@ SQLITE_PRIVATE void sqlite3ExprAttachSubtrees(
sqlite3ExprDelete(db, pLeft);
sqlite3ExprDelete(db, pRight);
}else{
+ assert( ExprUseXList(pRoot) );
+ assert( pRoot->x.pSelect==0 );
if( pRight ){
pRoot->pRight = pRight;
pRoot->flags |= EP_Propagate & pRight->flags;
+#if SQLITE_MAX_EXPR_DEPTH>0
+ pRoot->nHeight = pRight->nHeight+1;
+ }else{
+ pRoot->nHeight = 1;
+#endif
}
if( pLeft ){
pRoot->pLeft = pLeft;
pRoot->flags |= EP_Propagate & pLeft->flags;
+#if SQLITE_MAX_EXPR_DEPTH>0
+ if( pLeft->nHeight>=pRoot->nHeight ){
+ pRoot->nHeight = pLeft->nHeight+1;
+ }
+#endif
}
- exprSetHeight(pRoot);
}
}
@@ -103560,6 +106206,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction(
sqlite3ExprListDelete(db, pList); /* Avoid memory leak when malloc fails */
return 0;
}
+ assert( !ExprHasProperty(pNew, EP_InnerON|EP_OuterON) );
pNew->w.iOfst = (int)(pToken->z - pParse->zTail);
if( pList
&& pList->nExpr > pParse->db->aLimit[SQLITE_LIMIT_FUNCTION_ARG]
@@ -103697,6 +106344,7 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n
*/
static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){
assert( p!=0 );
+ assert( db!=0 );
assert( !ExprUseUValue(p) || p->u.iValue>=0 );
assert( !ExprUseYWin(p) || !ExprUseYSub(p) );
assert( !ExprUseYWin(p) || p->y.pWin!=0 || db->mallocFailed );
@@ -103728,18 +106376,26 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){
#endif
}
}
- if( ExprHasProperty(p, EP_MemToken) ){
- assert( !ExprHasProperty(p, EP_IntValue) );
- sqlite3DbFree(db, p->u.zToken);
- }
if( !ExprHasProperty(p, EP_Static) ){
- sqlite3DbFreeNN(db, p);
+ sqlite3DbNNFreeNN(db, p);
}
}
SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){
if( p ) sqlite3ExprDeleteNN(db, p);
}
+/*
+** Clear both elements of an OnOrUsing object
+*/
+SQLITE_PRIVATE void sqlite3ClearOnOrUsing(sqlite3 *db, OnOrUsing *p){
+ if( p==0 ){
+ /* Nothing to clear */
+ }else if( p->pOn ){
+ sqlite3ExprDeleteNN(db, p->pOn);
+ }else if( p->pUsing ){
+ sqlite3IdListDelete(db, p->pUsing);
+ }
+}
/*
** Arrange to cause pExpr to be deleted when the pParse is deleted.
@@ -103752,8 +106408,9 @@ SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){
** pExpr to the pParse->pConstExpr list with a register number of 0.
*/
SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){
- pParse->pConstExpr =
- sqlite3ExprListAppend(pParse, pParse->pConstExpr, pExpr);
+ sqlite3ParserAddCleanup(pParse,
+ (void(*)(sqlite3*,void*))sqlite3ExprDelete,
+ pExpr);
}
/* Invoke sqlite3RenameExprUnmap() and sqlite3ExprDelete() on the
@@ -103826,8 +106483,7 @@ static int dupedExprStructSize(const Expr *p, int flags){
nSize = EXPR_FULLSIZE;
}else{
assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) );
- assert( !ExprHasProperty(p, EP_FromJoin) );
- assert( !ExprHasProperty(p, EP_MemToken) );
+ assert( !ExprHasProperty(p, EP_OuterON) );
assert( !ExprHasVVAProperty(p, EP_NoReduce) );
if( p->pLeft || p->x.pList ){
nSize = EXPR_REDUCEDSIZE | EP_Reduced;
@@ -103931,7 +106587,7 @@ static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){
}
/* Set the EP_Reduced, EP_TokenOnly, and EP_Static flags appropriately. */
- pNew->flags &= ~(EP_Reduced|EP_TokenOnly|EP_Static|EP_MemToken);
+ pNew->flags &= ~(EP_Reduced|EP_TokenOnly|EP_Static);
pNew->flags |= nStructSize & (EP_Reduced|EP_TokenOnly);
pNew->flags |= staticFlag;
ExprClearVVAProperties(pNew);
@@ -104006,6 +106662,7 @@ SQLITE_PRIVATE With *sqlite3WithDup(sqlite3 *db, With *p){
pRet->a[i].pSelect = sqlite3SelectDup(db, p->a[i].pSelect, 0);
pRet->a[i].pCols = sqlite3ExprListDup(db, p->a[i].pCols, 0);
pRet->a[i].zName = sqlite3DbStrDup(db, p->a[i].zName);
+ pRet->a[i].eM10d = p->a[i].eM10d;
}
}
}
@@ -104106,11 +106763,8 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, const ExprList *p, int
}
}
pItem->zEName = sqlite3DbStrDup(db, pOldItem->zEName);
- pItem->sortFlags = pOldItem->sortFlags;
- pItem->eEName = pOldItem->eEName;
- pItem->done = 0;
- pItem->bNulls = pOldItem->bNulls;
- pItem->bSorterRef = pOldItem->bSorterRef;
+ pItem->fg = pOldItem->fg;
+ pItem->fg.done = 0;
pItem->u = pOldItem->u;
}
return pNew;
@@ -104162,8 +106816,12 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, const SrcList *p, int fla
pTab->nTabRef++;
}
pNewItem->pSelect = sqlite3SelectDup(db, pOldItem->pSelect, flags);
- pNewItem->pOn = sqlite3ExprDup(db, pOldItem->pOn, flags);
- pNewItem->pUsing = sqlite3IdListDup(db, pOldItem->pUsing);
+ if( pOldItem->fg.isUsing ){
+ assert( pNewItem->fg.isUsing );
+ pNewItem->u3.pUsing = sqlite3IdListDup(db, pOldItem->u3.pUsing);
+ }else{
+ pNewItem->u3.pOn = sqlite3ExprDup(db, pOldItem->u3.pOn, flags);
+ }
pNewItem->colUsed = pOldItem->colUsed;
}
return pNew;
@@ -104173,22 +106831,16 @@ SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3 *db, const IdList *p){
int i;
assert( db!=0 );
if( p==0 ) return 0;
- pNew = sqlite3DbMallocRawNN(db, sizeof(*pNew) );
+ assert( p->eU4!=EU4_EXPR );
+ pNew = sqlite3DbMallocRawNN(db, sizeof(*pNew)+(p->nId-1)*sizeof(p->a[0]) );
if( pNew==0 ) return 0;
pNew->nId = p->nId;
- pNew->a = sqlite3DbMallocRawNN(db, p->nId*sizeof(p->a[0]) );
- if( pNew->a==0 ){
- sqlite3DbFreeNN(db, pNew);
- return 0;
- }
- /* Note that because the size of the allocation for p->a[] is not
- ** necessarily a power of two, sqlite3IdListAppend() may not be called
- ** on the duplicate created by this function. */
+ pNew->eU4 = p->eU4;
  for(i=0; i<p->nId; i++){
struct IdList_item *pNewItem = &pNew->a[i];
- struct IdList_item *pOldItem = &p->a[i];
+ const struct IdList_item *pOldItem = &p->a[i];
pNewItem->zName = sqlite3DbStrDup(db, pOldItem->zName);
- pNewItem->idx = pOldItem->idx;
+ pNewItem->u4 = pOldItem->u4;
}
return pNew;
}
@@ -104412,16 +107064,16 @@ SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList *p, int iSortOrder, int
);
pItem = &p->a[p->nExpr-1];
- assert( pItem->bNulls==0 );
+ assert( pItem->fg.bNulls==0 );
if( iSortOrder==SQLITE_SO_UNDEFINED ){
iSortOrder = SQLITE_SO_ASC;
}
- pItem->sortFlags = (u8)iSortOrder;
+ pItem->fg.sortFlags = (u8)iSortOrder;
if( eNulls!=SQLITE_SO_UNDEFINED ){
- pItem->bNulls = 1;
+ pItem->fg.bNulls = 1;
if( iSortOrder!=eNulls ){
- pItem->sortFlags |= KEYINFO_ORDER_BIGNULL;
+ pItem->fg.sortFlags |= KEYINFO_ORDER_BIGNULL;
}
}
}
@@ -104447,7 +107099,7 @@ SQLITE_PRIVATE void sqlite3ExprListSetName(
assert( pList->nExpr>0 );
pItem = &pList->a[pList->nExpr-1];
assert( pItem->zEName==0 );
- assert( pItem->eEName==ENAME_NAME );
+ assert( pItem->fg.eEName==ENAME_NAME );
pItem->zEName = sqlite3DbStrNDup(pParse->db, pName->z, pName->n);
if( dequote ){
/* If dequote==0, then pName->z does not point to part of a DDL
@@ -104482,7 +107134,7 @@ SQLITE_PRIVATE void sqlite3ExprListSetSpan(
assert( pList->nExpr>0 );
if( pItem->zEName==0 ){
pItem->zEName = sqlite3DbSpanDup(db, zStart, zEnd);
- pItem->eEName = ENAME_SPAN;
+ pItem->fg.eEName = ENAME_SPAN;
}
}
}
@@ -104511,12 +107163,13 @@ static SQLITE_NOINLINE void exprListDeleteNN(sqlite3 *db, ExprList *pList){
int i = pList->nExpr;
struct ExprList_item *pItem = pList->a;
assert( pList->nExpr>0 );
+ assert( db!=0 );
do{
sqlite3ExprDelete(db, pItem->pExpr);
- sqlite3DbFree(db, pItem->zEName);
+ if( pItem->zEName ) sqlite3DbNNFreeNN(db, pItem->zEName);
pItem++;
}while( --i>0 );
- sqlite3DbFreeNN(db, pList);
+ sqlite3DbNNFreeNN(db, pList);
}
SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){
if( pList ) exprListDeleteNN(db, pList);
@@ -104654,9 +107307,9 @@ SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr *pExpr){
static int exprNodeIsConstant(Walker *pWalker, Expr *pExpr){
/* If pWalker->eCode is 2 then any term of the expression that comes from
- ** the ON or USING clauses of a left join disqualifies the expression
+ ** the ON or USING clauses of an outer join disqualifies the expression
** from being considered constant. */
- if( pWalker->eCode==2 && ExprHasProperty(pExpr, EP_FromJoin) ){
+ if( pWalker->eCode==2 && ExprHasProperty(pExpr, EP_OuterON) ){
pWalker->eCode = 0;
return WRC_Abort;
}
@@ -104779,7 +107432,7 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){
** Check pExpr to see if it is an invariant constraint on data source pSrc.
** This is an optimization. False negatives will perhaps cause slower
** queries, but false positives will yield incorrect answers. So when in
-** double, return 0.
+** doubt, return 0.
**
** To be an invariant constraint, the following must be true:
**
@@ -104787,24 +107440,28 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){
**
** (2) pExpr cannot use subqueries or non-deterministic functions.
**
-** (*) ** Not applicable to this branch **
+** (3) pSrc cannot be part of the left operand for a RIGHT JOIN.
+** (Is there some way to relax this constraint?)
**
** (4) If pSrc is the right operand of a LEFT JOIN, then...
** (4a) pExpr must come from an ON clause..
-** (4b) and specifically the ON clause associated with the LEFT JOIN.
+ (4b) and specifically the ON clause associated with the LEFT JOIN.
**
** (5) If pSrc is not the right operand of a LEFT JOIN or the left
** operand of a RIGHT JOIN, then pExpr must be from the WHERE
** clause, not an ON clause.
*/
SQLITE_PRIVATE int sqlite3ExprIsTableConstraint(Expr *pExpr, const SrcItem *pSrc){
+ if( pSrc->fg.jointype & JT_LTORJ ){
+ return 0; /* rule (3) */
+ }
if( pSrc->fg.jointype & JT_LEFT ){
- if( !ExprHasProperty(pExpr, EP_FromJoin) ) return 0; /* rule (4a) */
- if( pExpr->w.iRightJoinTable!=pSrc->iCursor ) return 0; /* rule (4b) */
+ if( !ExprHasProperty(pExpr, EP_OuterON) ) return 0; /* rule (4a) */
+ if( pExpr->w.iJoin!=pSrc->iCursor ) return 0; /* rule (4b) */
}else{
- if( ExprHasProperty(pExpr, EP_FromJoin) ) return 0; /* rule (5) */
+ if( ExprHasProperty(pExpr, EP_OuterON) ) return 0; /* rule (5) */
}
- return sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor); /* rules (1), (2) */
+ return sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor); /* rules (1), (2) */
}
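/*
** Editorial example (not part of the original sqlite3.c) of rules (4)
** and (5) above:
**
**      SELECT * FROM t1 LEFT JOIN t2 ON t2.a=5 WHERE t1.b=7;
**
** With pSrc set to t2 (the right operand of the LEFT JOIN), the ON term
** "t2.a=5" carries EP_OuterON for that join and can qualify under rules
** (4a)/(4b).  With pSrc set to t1, the WHERE term "t1.b=7" can qualify
** under rule (5), since it does not come from an ON clause.  Both are
** still subject to rules (1) and (2).
*/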
@@ -105134,7 +107791,7 @@ static int sqlite3InRhsIsConstant(Expr *pIn){
** all members of the RHS set, skipping duplicates.
**
** A cursor is opened on the b-tree object that is the RHS of the IN operator
-** and pX->iTable is set to the index of that cursor.
+** and the *piTab parameter is set to the index of that cursor.
**
** The returned value of this function indicates the b-tree type, as follows:
**
@@ -105154,7 +107811,10 @@ static int sqlite3InRhsIsConstant(Expr *pIn){
** If the RHS of the IN operator is a list or a more complex subquery, then
** an ephemeral table might need to be generated from the RHS and then
** pX->iTable made to point to the ephemeral table instead of an
-** existing table.
+** existing table. In this case, the creation and initialization of the
+** ephemeral table might be put inside of a subroutine, the EP_Subrtn flag
+** will be set on pX and the pX->y.sub fields will be set to show where
+** the subroutine is coded.
**
** The inFlags parameter must contain, at a minimum, one of the bits
** IN_INDEX_MEMBERSHIP or IN_INDEX_LOOP but not both. If inFlags contains
@@ -105215,12 +107875,13 @@ SQLITE_PRIVATE int sqlite3FindInIndex(
){
Select *p; /* SELECT to the right of IN operator */
int eType = 0; /* Type of RHS table. IN_INDEX_* */
- int iTab = pParse->nTab++; /* Cursor of the RHS table */
+ int iTab; /* Cursor of the RHS table */
int mustBeUnique; /* True if RHS must be unique */
Vdbe *v = sqlite3GetVdbe(pParse); /* Virtual machine being coded */
assert( pX->op==TK_IN );
mustBeUnique = (inFlags & IN_INDEX_LOOP)!=0;
+ iTab = pParse->nTab++;
/* If the RHS of this IN(...) operator is a SELECT, and if it matters
** whether or not the SELECT result contains NULL values, check whether
@@ -105386,6 +108047,8 @@ SQLITE_PRIVATE int sqlite3FindInIndex(
&& ExprUseXList(pX)
&& (!sqlite3InRhsIsConstant(pX) || pX->x.pList->nExpr<=2)
){
+ pParse->nTab--; /* Back out the allocation of the unused cursor */
+ iTab = -1; /* Cursor is not allocated */
eType = IN_INDEX_NOOP;
}
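/*
** Editorial note (not part of the original sqlite3.c): IN_INDEX_NOOP
** means that no table or index at all is used for the RHS.  For a short
** list such as
**
**      x IN (?1,?2)
**
** the membership test is coded as a sequence of direct comparisons, which
** is why the cursor number reserved above is handed back.
*/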
@@ -105552,6 +108215,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN(
assert( ExprUseYSub(pExpr) );
sqlite3VdbeAddOp2(v, OP_Gosub, pExpr->y.sub.regReturn,
pExpr->y.sub.iAddr);
+ assert( iTab!=pExpr->iTable );
sqlite3VdbeAddOp2(v, OP_OpenDup, iTab, pExpr->iTable);
sqlite3VdbeJumpHere(v, addrOnce);
return;
@@ -105563,8 +108227,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN(
assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) );
pExpr->y.sub.regReturn = ++pParse->nMem;
pExpr->y.sub.iAddr =
- sqlite3VdbeAddOp2(v, OP_Integer, 0, pExpr->y.sub.regReturn) + 1;
- VdbeComment((v, "return address"));
+ sqlite3VdbeAddOp2(v, OP_BeginSubrtn, 0, pExpr->y.sub.regReturn) + 1;
addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
}
@@ -105666,6 +108329,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN(
** expression we need to rerun this code each time.
*/
if( addrOnce && !sqlite3ExprIsConstant(pE2) ){
+ sqlite3VdbeChangeToNoop(v, addrOnce-1);
sqlite3VdbeChangeToNoop(v, addrOnce);
ExprClearProperty(pExpr, EP_Subrtn);
addrOnce = 0;
@@ -105683,11 +108347,15 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN(
sqlite3VdbeChangeP4(v, addr, (void *)pKeyInfo, P4_KEYINFO);
}
if( addrOnce ){
+ sqlite3VdbeAddOp1(v, OP_NullRow, iTab);
sqlite3VdbeJumpHere(v, addrOnce);
/* Subroutine return */
assert( ExprUseYSub(pExpr) );
- sqlite3VdbeAddOp1(v, OP_Return, pExpr->y.sub.regReturn);
- sqlite3VdbeChangeP1(v, pExpr->y.sub.iAddr-1, sqlite3VdbeCurrentAddr(v)-1);
+ assert( sqlite3VdbeGetOp(v,pExpr->y.sub.iAddr-1)->opcode==OP_BeginSubrtn
+ || pParse->nErr );
+ sqlite3VdbeAddOp3(v, OP_Return, pExpr->y.sub.regReturn,
+ pExpr->y.sub.iAddr, 1);
+ VdbeCoverage(v);
sqlite3ClearTempRegCache(pParse);
}
}
@@ -105741,9 +108409,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){
ExprSetProperty(pExpr, EP_Subrtn);
pExpr->y.sub.regReturn = ++pParse->nMem;
pExpr->y.sub.iAddr =
- sqlite3VdbeAddOp2(v, OP_Integer, 0, pExpr->y.sub.regReturn) + 1;
- VdbeComment((v, "return address"));
-
+ sqlite3VdbeAddOp2(v, OP_BeginSubrtn, 0, pExpr->y.sub.regReturn) + 1;
/* The evaluation of the EXISTS/SELECT must be repeated every time it
** is encountered if any of the following is true:
@@ -105795,7 +108461,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){
pLimit = sqlite3PExpr(pParse, TK_NE,
sqlite3ExprDup(db, pSel->pLimit->pLeft, 0), pLimit);
}
- sqlite3ExprDelete(db, pSel->pLimit->pLeft);
+ sqlite3ExprDeferredDelete(pParse, pSel->pLimit->pLeft);
pSel->pLimit->pLeft = pLimit;
}else{
/* If there is no pre-existing limit add a limit of 1 */
@@ -105816,8 +108482,11 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){
/* Subroutine return */
assert( ExprUseYSub(pExpr) );
- sqlite3VdbeAddOp1(v, OP_Return, pExpr->y.sub.regReturn);
- sqlite3VdbeChangeP1(v, pExpr->y.sub.iAddr-1, sqlite3VdbeCurrentAddr(v)-1);
+ assert( sqlite3VdbeGetOp(v,pExpr->y.sub.iAddr-1)->opcode==OP_BeginSubrtn
+ || pParse->nErr );
+ sqlite3VdbeAddOp3(v, OP_Return, pExpr->y.sub.regReturn,
+ pExpr->y.sub.iAddr, 1);
+ VdbeCoverage(v);
sqlite3ClearTempRegCache(pParse);
return rReg;
}
@@ -106245,12 +108914,10 @@ SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable(
){
Column *pCol;
assert( v!=0 );
- if( pTab==0 ){
- sqlite3VdbeAddOp3(v, OP_Column, iTabCur, iCol, regOut);
- return;
- }
+ assert( pTab!=0 );
if( iCol<0 || iCol==pTab->iPKey ){
sqlite3VdbeAddOp2(v, OP_Rowid, iTabCur, regOut);
+ VdbeComment((v, "%s.rowid", pTab->zName));
}else{
int op;
int x;
@@ -106305,7 +108972,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeGetColumn(
assert( pParse->pVdbe!=0 );
sqlite3ExprCodeGetColumnOfTable(pParse->pVdbe, pTab, iTable, iColumn, iReg);
if( p5 ){
- VdbeOp *pOp = sqlite3VdbeGetOp(pParse->pVdbe,-1);
+ VdbeOp *pOp = sqlite3VdbeGetLastOp(pParse->pVdbe);
if( pOp->opcode==OP_Column ) pOp->p5 = p5;
}
return iReg;
@@ -106374,7 +109041,7 @@ static int exprCodeVector(Parse *pParse, Expr *p, int *piFreeable){
** so that a subsequent copy will not be merged into this one.
*/
static void setDoNotMergeFlagOnCopy(Vdbe *v){
- if( sqlite3VdbeGetOp(v, -1)->opcode==OP_Copy ){
+ if( sqlite3VdbeGetLastOp(v)->opcode==OP_Copy ){
sqlite3VdbeChangeP5(v, 1); /* Tag trailing OP_Copy as not mergable */
}
}
@@ -106421,7 +109088,17 @@ static int exprCodeInlineFunction(
caseExpr.x.pList = pFarg;
return sqlite3ExprCodeTarget(pParse, &caseExpr, target);
}
-
+#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
+ case INLINEFUNC_sqlite_offset: {
+ Expr *pArg = pFarg->a[0].pExpr;
+ if( pArg->op==TK_COLUMN && pArg->iTable>=0 ){
+ sqlite3VdbeAddOp3(v, OP_Offset, pArg->iTable, pArg->iColumn, target);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Null, 0, target);
+ }
+ break;
+ }
+#endif
default: {
/* The UNLIKELY() function is a no-op. The result is the value
** of the first argument.
@@ -106487,6 +109164,53 @@ static int exprCodeInlineFunction(
return target;
}
+/*
+** Check to see if pExpr is one of the indexed expressions on pParse->pIdxExpr.
+** If it is, then resolve the expression by reading from the index and
+** return the register into which the value has been read. If pExpr is
+** not an indexed expression, then return negative.
+*/
+static SQLITE_NOINLINE int sqlite3IndexedExprLookup(
+ Parse *pParse, /* The parsing context */
+ Expr *pExpr, /* The expression to potentially bypass */
+ int target /* Where to store the result of the expression */
+){
+ IndexedExpr *p;
+ Vdbe *v;
+ for(p=pParse->pIdxExpr; p; p=p->pIENext){
+ int iDataCur = p->iDataCur;
+ if( iDataCur<0 ) continue;
+ if( pParse->iSelfTab ){
+ if( p->iDataCur!=pParse->iSelfTab-1 ) continue;
+ iDataCur = -1;
+ }
+ if( sqlite3ExprCompare(0, pExpr, p->pExpr, iDataCur)!=0 ) continue;
+ v = pParse->pVdbe;
+ assert( v!=0 );
+ if( p->bMaybeNullRow ){
+ /* If the index is on a NULL row due to an outer join, then we
+ ** cannot extract the value from the index. The value must be
+ ** computed using the original expression. */
+ int addr = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeAddOp3(v, OP_IfNullRow, p->iIdxCur, addr+3, target);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp3(v, OP_Column, p->iIdxCur, p->iIdxCol, target);
+ VdbeComment((v, "%s expr-column %d", p->zIdxName, p->iIdxCol));
+ sqlite3VdbeGoto(v, 0);
+ p = pParse->pIdxExpr;
+ pParse->pIdxExpr = 0;
+ sqlite3ExprCode(pParse, pExpr, target);
+ pParse->pIdxExpr = p;
+ sqlite3VdbeJumpHere(v, addr+2);
+ }else{
+ sqlite3VdbeAddOp3(v, OP_Column, p->iIdxCur, p->iIdxCol, target);
+ VdbeComment((v, "%s expr-column %d", p->zIdxName, p->iIdxCol));
+ }
+ return target;
+ }
+ return -1; /* Not found */
+}
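/*
** Editorial example (not part of the original sqlite3.c), assuming an
** index on an expression:
**
**      CREATE INDEX t1ab ON t1(a+b);
**      SELECT * FROM t1 WHERE a+b>100;
**
** When the planner uses t1ab, the value of a+b needed by the WHERE clause
** can be read straight out of the index by the OP_Column coded above,
** instead of being recomputed from the table row.
*/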
+
/*
** Generate code into the current Vdbe to evaluate the given
@@ -106515,6 +109239,11 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
expr_code_doover:
if( pExpr==0 ){
op = TK_NULL;
+ }else if( pParse->pIdxExpr!=0
+ && !ExprHasProperty(pExpr, EP_Leaf)
+ && (r1 = sqlite3IndexedExprLookup(pParse, pExpr, target))>=0
+ ){
+ return r1;
}else{
assert( !ExprHasVVAProperty(pExpr,EP_Immutable) );
op = pExpr->op;
@@ -106535,7 +109264,7 @@ expr_code_doover:
pCol->iSorterColumn, target);
if( pCol->iColumn<0 ){
VdbeComment((v,"%s.rowid",pTab->zName));
- }else{
+ }else if( ALWAYS(pTab!=0) ){
VdbeComment((v,"%s.%s",
pTab->zName, pTab->aCol[pCol->iColumn].zCnName));
if( pTab->aCol[pCol->iColumn].affinity==SQLITE_AFF_REAL ){
@@ -106560,11 +109289,8 @@ expr_code_doover:
int aff;
iReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft,target);
assert( ExprUseYTab(pExpr) );
- if( pExpr->y.pTab ){
- aff = sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn);
- }else{
- aff = pExpr->affExpr;
- }
+ assert( pExpr->y.pTab!=0 );
+ aff = sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn);
if( aff>SQLITE_AFF_BLOB ){
static const char zAff[] = "B\000C\000D\000E";
assert( SQLITE_AFF_BLOB=='A' );
@@ -106626,12 +109352,10 @@ expr_code_doover:
}
}
assert( ExprUseYTab(pExpr) );
+ assert( pExpr->y.pTab!=0 );
iReg = sqlite3ExprCodeGetColumn(pParse, pExpr->y.pTab,
pExpr->iColumn, iTab, target,
pExpr->op2);
- if( pExpr->y.pTab==0 && pExpr->affExpr==SQLITE_AFF_REAL ){
- sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg);
- }
return iReg;
}
case TK_INTEGER: {
@@ -106960,20 +109684,8 @@ expr_code_doover:
if( !pColl ) pColl = db->pDfltColl;
sqlite3VdbeAddOp4(v, OP_CollSeq, 0, 0, 0, (char *)pColl, P4_COLLSEQ);
}
-#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
- if( (pDef->funcFlags & SQLITE_FUNC_OFFSET)!=0 && ALWAYS(pFarg!=0) ){
- Expr *pArg = pFarg->a[0].pExpr;
- if( pArg->op==TK_COLUMN ){
- sqlite3VdbeAddOp3(v, OP_Offset, pArg->iTable, pArg->iColumn, target);
- }else{
- sqlite3VdbeAddOp2(v, OP_Null, 0, target);
- }
- }else
-#endif
- {
- sqlite3VdbeAddFunctionCall(pParse, constMask, r1, target, nFarg,
- pDef, pExpr->op2);
- }
+ sqlite3VdbeAddFunctionCall(pParse, constMask, r1, target, nFarg,
+ pDef, pExpr->op2);
if( nFarg ){
if( constMask==0 ){
sqlite3ReleaseTempRange(pParse, r1, nFarg);
@@ -107003,16 +109715,18 @@ expr_code_doover:
}
case TK_SELECT_COLUMN: {
int n;
- if( pExpr->pLeft->iTable==0 ){
- pExpr->pLeft->iTable = sqlite3CodeSubselect(pParse, pExpr->pLeft);
+ Expr *pLeft = pExpr->pLeft;
+ if( pLeft->iTable==0 || pParse->withinRJSubrtn > pLeft->op2 ){
+ pLeft->iTable = sqlite3CodeSubselect(pParse, pLeft);
+ pLeft->op2 = pParse->withinRJSubrtn;
}
- assert( pExpr->pLeft->op==TK_SELECT || pExpr->pLeft->op==TK_ERROR );
- n = sqlite3ExprVectorSize(pExpr->pLeft);
+ assert( pLeft->op==TK_SELECT || pLeft->op==TK_ERROR );
+ n = sqlite3ExprVectorSize(pLeft);
if( pExpr->iTable!=n ){
sqlite3ErrorMsg(pParse, "%d columns assigned %d values",
pExpr->iTable, n);
}
- return pExpr->pLeft->iTable + pExpr->iColumn;
+ return pLeft->iTable + pExpr->iColumn;
}
case TK_IN: {
int destIfFalse = sqlite3VdbeMakeLabel(pParse);
@@ -107043,8 +109757,24 @@ expr_code_doover:
exprCodeBetween(pParse, pExpr, target, 0, 0);
return target;
}
+ case TK_COLLATE: {
+ if( !ExprHasProperty(pExpr, EP_Collate)
+ && ALWAYS(pExpr->pLeft)
+ && pExpr->pLeft->op==TK_FUNCTION
+ ){
+ inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target);
+ if( inReg!=target ){
+ sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target);
+ inReg = target;
+ }
+ sqlite3VdbeAddOp1(v, OP_ClrSubtype, inReg);
+ return inReg;
+ }else{
+ pExpr = pExpr->pLeft;
+ goto expr_code_doover; /* 2018-04-28: Prevent deep recursion. */
+ }
+ }
case TK_SPAN:
- case TK_COLLATE:
case TK_UPLUS: {
pExpr = pExpr->pLeft;
goto expr_code_doover; /* 2018-04-28: Prevent deep recursion. OSSFuzz. */
@@ -107124,6 +109854,21 @@ expr_code_doover:
case TK_IF_NULL_ROW: {
int addrINR;
u8 okConstFactor = pParse->okConstFactor;
+ AggInfo *pAggInfo = pExpr->pAggInfo;
+ if( pAggInfo ){
+ assert( pExpr->iAgg>=0 && pExpr->iAgg<pAggInfo->nColumn );
+ if( !pAggInfo->directMode ){
+ inReg = pAggInfo->aCol[pExpr->iAgg].iMem;
+ break;
+ }
+ if( pExpr->pAggInfo->useSortingIdx ){
+ sqlite3VdbeAddOp3(v, OP_Column, pAggInfo->sortingIdxPTab,
+ pAggInfo->aCol[pExpr->iAgg].iSorterColumn,
+ target);
+ inReg = target;
+ break;
+ }
+ }
addrINR = sqlite3VdbeAddOp1(v, OP_IfNullRow, pExpr->iTable);
/* Temporarily disable factoring of constant expressions, since
** even though expressions may appear to be constant, they are not
@@ -107285,7 +110030,9 @@ SQLITE_PRIVATE int sqlite3ExprCodeRunJustOnce(
struct ExprList_item *pItem;
int i;
for(pItem=p->a, i=p->nExpr; i>0; pItem++, i--){
- if( pItem->reusable && sqlite3ExprCompare(0,pItem->pExpr,pExpr,-1)==0 ){
+ if( pItem->fg.reusable
+ && sqlite3ExprCompare(0,pItem->pExpr,pExpr,-1)==0
+ ){
return pItem->u.iConstExprReg;
}
}
@@ -107308,7 +110055,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeRunJustOnce(
p = sqlite3ExprListAppend(pParse, p, pExpr);
if( p ){
struct ExprList_item *pItem = &p->a[p->nExpr-1];
- pItem->reusable = regDest<0;
+ pItem->fg.reusable = regDest<0;
if( regDest<0 ) regDest = ++pParse->nMem;
pItem->u.iConstExprReg = regDest;
}
@@ -107442,7 +110189,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList(
  for(pItem=pList->a, i=0; i<n; i++, pItem++){
    Expr *pExpr = pItem->pExpr;
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
- if( pItem->bSorterRef ){
+ if( pItem->fg.bSorterRef ){
i--;
n--;
}else
@@ -107463,7 +110210,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList(
if( inReg!=target+i ){
VdbeOp *pOp;
if( copyOp==OP_Copy
- && (pOp=sqlite3VdbeGetOp(v, -1))->opcode==OP_Copy
+ && (pOp=sqlite3VdbeGetLastOp(v))->opcode==OP_Copy
&& pOp->p1+pOp->p3+1==inReg
&& pOp->p2+pOp->p3+1==target+i
&& pOp->p5==0 /* The do-not-merge flag must be clear */
@@ -107536,8 +110283,8 @@ static void exprCodeBetween(
** so that the sqlite3ExprCodeTarget() routine will not attempt to move
** it into the Parse.pConstExpr list. We should use a new bit for this,
** for clarity, but we are out of bits in the Expr.flags field so we
- ** have to reuse the EP_FromJoin bit. Bummer. */
- pDel->flags |= EP_FromJoin;
+ ** have to reuse the EP_OuterON bit. Bummer. */
+ pDel->flags |= EP_OuterON;
sqlite3ExprCodeTarget(pParse, &exprAnd, dest);
}
sqlite3ReleaseTempReg(pParse, regFree1);
@@ -107662,6 +110409,7 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int
assert( TK_ISNULL==OP_IsNull ); testcase( op==TK_ISNULL );
assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL );
r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1);
+ sqlite3VdbeTypeofColumn(v, r1);
sqlite3VdbeAddOp2(v, op, r1, dest);
VdbeCoverageIf(v, op==TK_ISNULL);
VdbeCoverageIf(v, op==TK_NOTNULL);
@@ -107836,6 +110584,7 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int
case TK_ISNULL:
case TK_NOTNULL: {
r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1);
+ sqlite3VdbeTypeofColumn(v, r1);
sqlite3VdbeAddOp2(v, op, r1, dest);
testcase( op==TK_ISNULL ); VdbeCoverageIf(v, op==TK_ISNULL);
testcase( op==TK_NOTNULL ); VdbeCoverageIf(v, op==TK_NOTNULL);
@@ -107989,7 +110738,13 @@ SQLITE_PRIVATE int sqlite3ExprCompare(
if( pB->op==TK_COLLATE && sqlite3ExprCompare(pParse, pA,pB->pLeft,iTab)<2 ){
return 1;
}
- return 2;
+ if( pA->op==TK_AGG_COLUMN && pB->op==TK_COLUMN
+ && pB->iTable<0 && pA->iTable==iTab
+ ){
+ /* fall through */
+ }else{
+ return 2;
+ }
}
assert( !ExprHasProperty(pA, EP_IntValue) );
assert( !ExprHasProperty(pB, EP_IntValue) );
@@ -108067,7 +110822,7 @@ SQLITE_PRIVATE int sqlite3ExprListCompare(const ExprList *pA, const ExprList *pB
int res;
Expr *pExprA = pA->a[i].pExpr;
Expr *pExprB = pB->a[i].pExpr;
- if( pA->a[i].sortFlags!=pB->a[i].sortFlags ) return 1;
+ if( pA->a[i].fg.sortFlags!=pB->a[i].fg.sortFlags ) return 1;
if( (res = sqlite3ExprCompare(0, pExprA, pExprB, iTab)) ) return res;
}
return 0;
@@ -108222,7 +110977,7 @@ SQLITE_PRIVATE int sqlite3ExprImpliesExpr(
static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){
testcase( pExpr->op==TK_AGG_COLUMN );
testcase( pExpr->op==TK_AGG_FUNCTION );
- if( ExprHasProperty(pExpr, EP_FromJoin) ) return WRC_Prune;
+ if( ExprHasProperty(pExpr, EP_OuterON) ) return WRC_Prune;
switch( pExpr->op ){
case TK_ISNOT:
case TK_ISNULL:
@@ -108291,10 +111046,10 @@ static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){
assert( pLeft->op!=TK_COLUMN || ExprUseYTab(pLeft) );
assert( pRight->op!=TK_COLUMN || ExprUseYTab(pRight) );
if( (pLeft->op==TK_COLUMN
- && pLeft->y.pTab!=0
+ && ALWAYS(pLeft->y.pTab!=0)
&& IsVirtual(pLeft->y.pTab))
|| (pRight->op==TK_COLUMN
- && pRight->y.pTab!=0
+ && ALWAYS(pRight->y.pTab!=0)
&& IsVirtual(pRight->y.pTab))
){
return WRC_Prune;
@@ -108319,8 +111074,8 @@ static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){
** False positives are not allowed, however. A false positive may result
** in an incorrect answer.
**
-** Terms of p that are marked with EP_FromJoin (and hence that come from
-** the ON or USING clauses of LEFT JOINS) are excluded from the analysis.
+** Terms of p that are marked with EP_OuterON (and hence that come from
+** the ON or USING clauses of OUTER JOINS) are excluded from the analysis.
**
** This routine is used to check if a LEFT JOIN can be converted into
** an ordinary JOIN. The p argument is the WHERE clause. If the WHERE
@@ -108499,6 +111254,7 @@ static int exprRefToSrcList(Walker *pWalker, Expr *pExpr){
SQLITE_PRIVATE int sqlite3ReferencesSrcList(Parse *pParse, Expr *pExpr, SrcList *pSrcList){
Walker w;
struct RefSrcList x;
+ assert( pParse->db!=0 );
memset(&w, 0, sizeof(w));
memset(&x, 0, sizeof(x));
w.xExprCallback = exprRefToSrcList;
@@ -108515,7 +111271,7 @@ SQLITE_PRIVATE int sqlite3ReferencesSrcList(Parse *pParse, Expr *pExpr, SrcList
sqlite3WalkExpr(&w, pExpr->y.pWin->pFilter);
}
#endif
- sqlite3DbFree(pParse->db, x.aiExclude);
+ if( x.aiExclude ) sqlite3DbNNFreeNN(pParse->db, x.aiExclude);
if( w.eCode & 0x01 ){
return 1;
}else if( w.eCode ){
@@ -108546,8 +111302,8 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){
int iAgg = pExpr->iAgg;
Parse *pParse = pWalker->pParse;
sqlite3 *db = pParse->db;
- assert( pExpr->op==TK_AGG_COLUMN || pExpr->op==TK_AGG_FUNCTION );
- if( pExpr->op==TK_AGG_COLUMN ){
+ if( pExpr->op!=TK_AGG_FUNCTION ){
+ assert( pExpr->op==TK_AGG_COLUMN || pExpr->op==TK_IF_NULL_ROW );
    assert( iAgg>=0 && iAgg<pAggInfo->nColumn );
if( pAggInfo->aCol[iAgg].pCExpr==pExpr ){
pExpr = sqlite3ExprDup(db, pExpr, 0);
@@ -108557,6 +111313,7 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){
}
}
}else{
+ assert( pExpr->op==TK_AGG_FUNCTION );
    assert( iAgg>=0 && iAgg<pAggInfo->nFunc );
if( pAggInfo->aFunc[iAgg].pFExpr==pExpr ){
pExpr = sqlite3ExprDup(db, pExpr, 0);
@@ -108627,10 +111384,12 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
assert( pNC->ncFlags & NC_UAggInfo );
switch( pExpr->op ){
+ case TK_IF_NULL_ROW:
case TK_AGG_COLUMN:
case TK_COLUMN: {
testcase( pExpr->op==TK_AGG_COLUMN );
testcase( pExpr->op==TK_COLUMN );
+ testcase( pExpr->op==TK_IF_NULL_ROW );
/* Check to see if the column is in one of the tables in the FROM
** clause of the aggregate query */
if( ALWAYS(pSrcList!=0) ){
@@ -108648,8 +111407,10 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
int k;
pCol = pAggInfo->aCol;
          for(k=0; k<pAggInfo->nColumn; k++, pCol++){
- if( pCol->iTable==pExpr->iTable &&
- pCol->iColumn==pExpr->iColumn ){
+ if( pCol->iTable==pExpr->iTable
+ && pCol->iColumn==pExpr->iColumn
+ && pExpr->op!=TK_IF_NULL_ROW
+ ){
break;
}
}
@@ -108664,15 +111425,17 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
pCol->iMem = ++pParse->nMem;
pCol->iSorterColumn = -1;
pCol->pCExpr = pExpr;
- if( pAggInfo->pGroupBy ){
+ if( pAggInfo->pGroupBy && pExpr->op!=TK_IF_NULL_ROW ){
int j, n;
ExprList *pGB = pAggInfo->pGroupBy;
struct ExprList_item *pTerm = pGB->a;
n = pGB->nExpr;
            for(j=0; j<n; j++, pTerm++){
              Expr *pE = pTerm->pExpr;
- if( pE->op==TK_COLUMN && pE->iTable==pExpr->iTable &&
- pE->iColumn==pExpr->iColumn ){
+ if( pE->op==TK_COLUMN
+ && pE->iTable==pExpr->iTable
+ && pE->iColumn==pExpr->iColumn
+ ){
pCol->iSorterColumn = j;
break;
}
@@ -108689,7 +111452,9 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
*/
ExprSetVVAProperty(pExpr, EP_NoReduce);
pExpr->pAggInfo = pAggInfo;
- pExpr->op = TK_AGG_COLUMN;
+ if( pExpr->op==TK_COLUMN ){
+ pExpr->op = TK_AGG_COLUMN;
+ }
pExpr->iAgg = (i16)k;
break;
} /* endif pExpr->iTable==pItem->iCursor */
@@ -109734,11 +112499,10 @@ static void unmapColumnIdlistNames(
Parse *pParse,
const IdList *pIdList
){
- if( pIdList ){
- int ii;
- for(ii=0; ii<pIdList->nId; ii++){
- sqlite3RenameTokenRemap(pParse, 0, (const void*)pIdList->a[ii].zName);
- }
+ int ii;
+ assert( pIdList!=0 );
+ for(ii=0; ii<pIdList->nId; ii++){
+ sqlite3RenameTokenRemap(pParse, 0, (const void*)pIdList->a[ii].zName);
}
}
@@ -109757,7 +112521,7 @@ static int renameUnmapSelectCb(Walker *pWalker, Select *p){
if( ALWAYS(p->pEList) ){
ExprList *pList = p->pEList;
    for(i=0; i<pList->nExpr; i++){
- if( pList->a[i].zEName && pList->a[i].eEName==ENAME_NAME ){
+ if( pList->a[i].zEName && pList->a[i].fg.eEName==ENAME_NAME ){
sqlite3RenameTokenRemap(pParse, 0, (void*)pList->a[i].zEName);
}
}
@@ -109766,8 +112530,11 @@ static int renameUnmapSelectCb(Walker *pWalker, Select *p){
SrcList *pSrc = p->pSrc;
    for(i=0; i<pSrc->nSrc; i++){
sqlite3RenameTokenRemap(pParse, 0, (void*)pSrc->a[i].zName);
- sqlite3WalkExpr(pWalker, pSrc->a[i].pOn);
- unmapColumnIdlistNames(pParse, pSrc->a[i].pUsing);
+ if( pSrc->a[i].fg.isUsing==0 ){
+ sqlite3WalkExpr(pWalker, pSrc->a[i].u3.pOn);
+ }else{
+ unmapColumnIdlistNames(pParse, pSrc->a[i].u3.pUsing);
+ }
}
}
@@ -109803,7 +112570,7 @@ SQLITE_PRIVATE void sqlite3RenameExprlistUnmap(Parse *pParse, ExprList *pEList){
sWalker.xExprCallback = renameUnmapExprCb;
sqlite3WalkExprList(&sWalker, pEList);
  for(i=0; i<pEList->nExpr; i++){
- if( ALWAYS(pEList->a[i].eEName==ENAME_NAME) ){
+ if( ALWAYS(pEList->a[i].fg.eEName==ENAME_NAME) ){
sqlite3RenameTokenRemap(pParse, 0, (void*)pEList->a[i].zEName);
}
}
@@ -109961,7 +112728,7 @@ static void renameColumnElistNames(
int i;
  for(i=0; i<pEList->nExpr; i++){
const char *zName = pEList->a[i].zEName;
- if( ALWAYS(pEList->a[i].eEName==ENAME_NAME)
+ if( ALWAYS(pEList->a[i].fg.eEName==ENAME_NAME)
&& ALWAYS(zName!=0)
&& 0==sqlite3_stricmp(zName, zOld)
){
@@ -110191,27 +112958,33 @@ static int renameResolveTrigger(Parse *pParse){
if( rc==SQLITE_OK && pStep->zTarget ){
SrcList *pSrc = sqlite3TriggerStepSrc(pParse, pStep);
if( pSrc ){
- int i;
- for(i=0; i<pSrc->nSrc && rc==SQLITE_OK; i++){
- SrcItem *p = &pSrc->a[i];
- p->iCursor = pParse->nTab++;
- if( p->pSelect ){
- sqlite3SelectPrep(pParse, p->pSelect, 0);
- sqlite3ExpandSubquery(pParse, p);
- assert( i>0 );
- assert( pStep->pFrom->a[i-1].pSelect );
- sqlite3SelectPrep(pParse, pStep->pFrom->a[i-1].pSelect, 0);
- }else{
- p->pTab = sqlite3LocateTableItem(pParse, 0, p);
- if( p->pTab==0 ){
- rc = SQLITE_ERROR;
- }else{
- p->pTab->nTabRef++;
- rc = sqlite3ViewGetColumnNames(pParse, p->pTab);
+ Select *pSel = sqlite3SelectNew(
+ pParse, pStep->pExprList, pSrc, 0, 0, 0, 0, 0, 0
+ );
+ if( pSel==0 ){
+ pStep->pExprList = 0;
+ pSrc = 0;
+ rc = SQLITE_NOMEM;
+ }else{
+ sqlite3SelectPrep(pParse, pSel, 0);
+ rc = pParse->nErr ? SQLITE_ERROR : SQLITE_OK;
+ assert( pStep->pExprList==0 || pStep->pExprList==pSel->pEList );
+ assert( pSrc==pSel->pSrc );
+ if( pStep->pExprList ) pSel->pEList = 0;
+ pSel->pSrc = 0;
+ sqlite3SelectDelete(db, pSel);
+ }
+ if( pStep->pFrom ){
+ int i;
+ for(i=0; i<pStep->pFrom->nSrc && rc==SQLITE_OK; i++){
+ SrcItem *p = &pStep->pFrom->a[i];
+ if( p->pSelect ){
+ sqlite3SelectPrep(pParse, p->pSelect, 0);
}
}
}
- if( rc==SQLITE_OK && db->mallocFailed ){
+
+ if( db->mallocFailed ){
rc = SQLITE_NOMEM;
}
sNC.pSrcList = pSrc;
@@ -110663,6 +113436,15 @@ static void renameTableFunc(
if( pStep->zTarget && 0==sqlite3_stricmp(pStep->zTarget, zOld) ){
renameTokenFind(&sParse, &sCtx, pStep->zTarget);
}
+ if( pStep->pFrom ){
+ int i;
+ for(i=0; i<pStep->pFrom->nSrc; i++){
+ SrcItem *pItem = &pStep->pFrom->a[i];
+ if( 0==sqlite3_stricmp(pItem->zName, zOld) ){
+ renameTokenFind(&sParse, &sCtx, pItem->zName);
+ }
+ }
+ }
}
}
}
@@ -111986,9 +114768,14 @@ static void statGet(
** * "WHERE a=? AND b=?" matches 2 rows.
**
** If D is the count of distinct values and K is the total number of
- ** rows, then each estimate is computed as:
+ ** rows, then each estimate is usually computed as:
**
** I = (K+D-1)/D
+ **
+ ** In other words, I is K/D rounded up to the next whole integer.
+ ** However, if I is between 1.0 and 1.1 (in other words if I is
+ ** close to 1.0 but just a little larger) then do not round up but
+ ** instead keep the I value at 1.0.
*/
sqlite3_str sStat; /* Text of the constructed "stat" line */
int i; /* Loop counter */
@@ -111999,6 +114786,7 @@ static void statGet(
    for(i=0; i<p->nKeyCol; i++){
u64 nDistinct = p->current.anDLt[i] + 1;
u64 iVal = (p->nRow + nDistinct - 1) / nDistinct;
+ if( iVal==2 && p->nRow*10 <= nDistinct*11 ) iVal = 1;
sqlite3_str_appendf(&sStat, " %llu", iVal);
assert( p->current.anEq[i] );
}
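/*
** Editorial worked example (not part of the original sqlite3.c) of the
** estimate described above.  With K==1000 rows and D==3 distinct values:
**
**      iVal = (1000 + 3 - 1) / 3  ==  334
**
** With K==105 and D==100 the round-up would give 2, but because
** 105*10 <= 100*11 (the estimate is within 10% of 1.0) the test above
** keeps the reported value at 1.
*/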
@@ -112086,6 +114874,7 @@ static void analyzeVdbeCommentIndexWithColumnName(
if( NEVER(i==XN_ROWID) ){
VdbeComment((v,"%s.rowid",pIdx->zName));
}else if( i==XN_EXPR ){
+ assert( pIdx->bHasExpr );
VdbeComment((v,"%s.expr(%d)",pIdx->zName, k));
}else{
VdbeComment((v,"%s.%s", pIdx->zName, pIdx->pTable->aCol[i].zCnName));
@@ -112162,7 +114951,7 @@ static void analyzeOneTable(
memcpy(pStat1->zName, "sqlite_stat1", 13);
pStat1->nCol = 3;
pStat1->iPKey = -1;
- sqlite3VdbeAddOp4(pParse->pVdbe, OP_Noop, 0, 0, 0,(char*)pStat1,P4_DYNBLOB);
+ sqlite3VdbeAddOp4(pParse->pVdbe, OP_Noop, 0, 0, 0,(char*)pStat1,P4_DYNAMIC);
}
#endif
@@ -113552,7 +116341,11 @@ static int fixSelectCb(Walker *p, Select *pSelect){
pItem->fg.fromDDL = 1;
}
#if !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_TRIGGER)
- if( sqlite3WalkExpr(&pFix->w, pList->a[i].pOn) ) return WRC_Abort;
+ if( pList->a[i].fg.isUsing==0
+ && sqlite3WalkExpr(&pFix->w, pList->a[i].u3.pOn)
+ ){
+ return WRC_Abort;
+ }
#endif
}
if( pSelect->pWith ){
@@ -114084,6 +116877,7 @@ SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask m){
SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
sqlite3 *db;
Vdbe *v;
+ int iDb, i;
assert( pParse->pToplevel==0 );
db = pParse->db;
@@ -114113,12 +116907,9 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
if( pParse->bReturning ){
Returning *pReturning = pParse->u1.pReturning;
int addrRewind;
- int i;
int reg;
- if( NEVER(pReturning->nRetCol==0) ){
- assert( CORRUPT_DB );
- }else{
+ if( pReturning->nRetCol ){
sqlite3VdbeAddOp0(v, OP_FkCheck);
addrRewind =
sqlite3VdbeAddOp1(v, OP_Rewind, pReturning->iRetCur);
@@ -114152,76 +116943,69 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){
** transaction on each used database and to verify the schema cookie
** on each used database.
*/
- if( db->mallocFailed==0
- && (DbMaskNonZero(pParse->cookieMask) || pParse->pConstExpr)
- ){
- int iDb, i;
- assert( sqlite3VdbeGetOp(v, 0)->opcode==OP_Init );
- sqlite3VdbeJumpHere(v, 0);
-    for(iDb=0; iDb<db->nDb; iDb++){
- Schema *pSchema;
- if( DbMaskTest(pParse->cookieMask, iDb)==0 ) continue;
- sqlite3VdbeUsesBtree(v, iDb);
- pSchema = db->aDb[iDb].pSchema;
- sqlite3VdbeAddOp4Int(v,
- OP_Transaction, /* Opcode */
- iDb, /* P1 */
- DbMaskTest(pParse->writeMask,iDb), /* P2 */
- pSchema->schema_cookie, /* P3 */
- pSchema->iGeneration /* P4 */
- );
- if( db->init.busy==0 ) sqlite3VdbeChangeP5(v, 1);
- VdbeComment((v,
- "usesStmtJournal=%d", pParse->mayAbort && pParse->isMultiWrite));
- }
+ assert( pParse->nErr>0 || sqlite3VdbeGetOp(v, 0)->opcode==OP_Init );
+ sqlite3VdbeJumpHere(v, 0);
+ assert( db->nDb>0 );
+ iDb = 0;
+ do{
+ Schema *pSchema;
+ if( DbMaskTest(pParse->cookieMask, iDb)==0 ) continue;
+ sqlite3VdbeUsesBtree(v, iDb);
+ pSchema = db->aDb[iDb].pSchema;
+ sqlite3VdbeAddOp4Int(v,
+ OP_Transaction, /* Opcode */
+ iDb, /* P1 */
+ DbMaskTest(pParse->writeMask,iDb), /* P2 */
+ pSchema->schema_cookie, /* P3 */
+ pSchema->iGeneration /* P4 */
+ );
+ if( db->init.busy==0 ) sqlite3VdbeChangeP5(v, 1);
+ VdbeComment((v,
+ "usesStmtJournal=%d", pParse->mayAbort && pParse->isMultiWrite));
+  }while( ++iDb<db->nDb );
#ifndef SQLITE_OMIT_VIRTUALTABLE
-    for(i=0; i<pParse->nVtabLock; i++){
- char *vtab = (char *)sqlite3GetVTable(db, pParse->apVtabLock[i]);
- sqlite3VdbeAddOp4(v, OP_VBegin, 0, 0, 0, vtab, P4_VTAB);
- }
- pParse->nVtabLock = 0;
+  for(i=0; i<pParse->nVtabLock; i++){
+ char *vtab = (char *)sqlite3GetVTable(db, pParse->apVtabLock[i]);
+ sqlite3VdbeAddOp4(v, OP_VBegin, 0, 0, 0, vtab, P4_VTAB);
+ }
+ pParse->nVtabLock = 0;
#endif
- /* Once all the cookies have been verified and transactions opened,
- ** obtain the required table-locks. This is a no-op unless the
- ** shared-cache feature is enabled.
- */
- codeTableLocks(pParse);
+ /* Once all the cookies have been verified and transactions opened,
+ ** obtain the required table-locks. This is a no-op unless the
+ ** shared-cache feature is enabled.
+ */
+ codeTableLocks(pParse);
- /* Initialize any AUTOINCREMENT data structures required.
- */
- sqlite3AutoincrementBegin(pParse);
+ /* Initialize any AUTOINCREMENT data structures required.
+ */
+ sqlite3AutoincrementBegin(pParse);
- /* Code constant expressions that where factored out of inner loops.
- **
- ** The pConstExpr list might also contain expressions that we simply
- ** want to keep around until the Parse object is deleted. Such
- ** expressions have iConstExprReg==0. Do not generate code for
- ** those expressions, of course.
- */
- if( pParse->pConstExpr ){
- ExprList *pEL = pParse->pConstExpr;
- pParse->okConstFactor = 0;
-      for(i=0; i<pEL->nExpr; i++){
- int iReg = pEL->a[i].u.iConstExprReg;
- if( iReg>0 ){
- sqlite3ExprCode(pParse, pEL->a[i].pExpr, iReg);
- }
- }
+  /* Code constant expressions that were factored out of inner loops.
+ **
+ ** The pConstExpr list might also contain expressions that we simply
+ ** want to keep around until the Parse object is deleted. Such
+ ** expressions have iConstExprReg==0. Do not generate code for
+ ** those expressions, of course.
+ */
+ if( pParse->pConstExpr ){
+ ExprList *pEL = pParse->pConstExpr;
+ pParse->okConstFactor = 0;
+    for(i=0; i<pEL->nExpr; i++){
+ int iReg = pEL->a[i].u.iConstExprReg;
+ sqlite3ExprCode(pParse, pEL->a[i].pExpr, iReg);
}
+ }
- if( pParse->bReturning ){
- Returning *pRet = pParse->u1.pReturning;
- if( NEVER(pRet->nRetCol==0) ){
- assert( CORRUPT_DB );
- }else{
- sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRet->iRetCur, pRet->nRetCol);
- }
+ if( pParse->bReturning ){
+ Returning *pRet = pParse->u1.pReturning;
+ if( pRet->nRetCol ){
+ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRet->iRetCur, pRet->nRetCol);
}
-
- /* Finally, jump back to the beginning of the executable code. */
- sqlite3VdbeGoto(v, 1);
}
+
+ /* Finally, jump back to the beginning of the executable code. */
+ sqlite3VdbeGoto(v, 1);
}
/* Get the VDBE program ready for execution
@@ -114277,8 +117061,6 @@ SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){
memset(PARSE_TAIL(pParse), 0, PARSE_TAIL_SZ);
db->mDbFlags |= DBFLAG_PreferBuiltin;
sqlite3RunParser(pParse, zSql);
- sqlite3DbFree(db, pParse->zErrMsg);
- pParse->zErrMsg = 0;
db->mDbFlags = savedDbFlags;
sqlite3DbFree(db, zSql);
memcpy(PARSE_TAIL(pParse), saveBuf, PARSE_TAIL_SZ);
@@ -114408,7 +117190,7 @@ SQLITE_PRIVATE Table *sqlite3LocateTable(
  /* If zName is not the name of a table in the schema created using
  ** CREATE, then check to see if it is the name of a virtual table that
** can be an eponymous virtual table. */
- if( pParse->disableVtab==0 && db->init.busy==0 ){
+ if( (pParse->prepFlags & SQLITE_PREPARE_NO_VTAB)==0 && db->init.busy==0 ){
Module *pMod = (Module*)sqlite3HashFind(&db->aModule, zName);
if( pMod==0 && sqlite3_strnicmp(zName, "pragma_", 7)==0 ){
pMod = sqlite3PragmaVtabRegister(db, zName);
@@ -114421,7 +117203,7 @@ SQLITE_PRIVATE Table *sqlite3LocateTable(
#endif
if( flags & LOCATE_NOERR ) return 0;
pParse->checkSchema = 1;
- }else if( IsVirtual(p) && pParse->disableVtab ){
+ }else if( IsVirtual(p) && (pParse->prepFlags & SQLITE_PREPARE_NO_VTAB)!=0 ){
p = 0;
}
@@ -114730,16 +117512,17 @@ SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3 *db, Table *pTable){
int i;
Column *pCol;
assert( pTable!=0 );
+ assert( db!=0 );
if( (pCol = pTable->aCol)!=0 ){
    for(i=0; i<pTable->nCol; i++, pCol++){
assert( pCol->zCnName==0 || pCol->hName==sqlite3StrIHash(pCol->zCnName) );
sqlite3DbFree(db, pCol->zCnName);
}
- sqlite3DbFree(db, pTable->aCol);
+ sqlite3DbNNFreeNN(db, pTable->aCol);
if( IsOrdinaryTable(pTable) ){
sqlite3ExprListDelete(db, pTable->u.tab.pDfltList);
}
- if( db==0 || db->pnBytesFreed==0 ){
+ if( db->pnBytesFreed==0 ){
pTable->aCol = 0;
pTable->nCol = 0;
if( IsOrdinaryTable(pTable) ){
@@ -114776,7 +117559,8 @@ static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){
** a Table object that was going to be marked ephemeral. So do not check
** that no lookaside memory is used in this case either. */
int nLookaside = 0;
- if( db && !db->mallocFailed && (pTable->tabFlags & TF_Ephemeral)==0 ){
+ assert( db!=0 );
+ if( !db->mallocFailed && (pTable->tabFlags & TF_Ephemeral)==0 ){
nLookaside = sqlite3LookasideUsed(db, 0);
}
#endif
@@ -114786,7 +117570,7 @@ static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){
pNext = pIndex->pNext;
assert( pIndex->pSchema==pTable->pSchema
|| (IsVirtual(pTable) && pIndex->idxType!=SQLITE_IDXTYPE_APPDEF) );
- if( (db==0 || db->pnBytesFreed==0) && !IsVirtual(pTable) ){
+ if( db->pnBytesFreed==0 && !IsVirtual(pTable) ){
char *zName = pIndex->zName;
TESTONLY ( Index *pOld = ) sqlite3HashInsert(
&pIndex->pSchema->idxHash, zName, 0
@@ -114823,8 +117607,9 @@ static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){
}
SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){
/* Do not delete the table until the reference count reaches zero. */
+ assert( db!=0 );
if( !pTable ) return;
- if( ((!db || db->pnBytesFreed==0) && (--pTable->nTabRef)>0) ) return;
+ if( db->pnBytesFreed==0 && (--pTable->nTabRef)>0 ) return;
deleteTable(db, pTable);
}
@@ -115836,7 +118621,7 @@ SQLITE_PRIVATE void sqlite3AddPrimaryKey(
pTab->keyConf = (u8)onError;
assert( autoInc==0 || autoInc==1 );
pTab->tabFlags |= autoInc*TF_Autoincrement;
- if( pList ) pParse->iPkSortOrder = pList->a[0].sortFlags;
+ if( pList ) pParse->iPkSortOrder = pList->a[0].fg.sortFlags;
(void)sqlite3HasExplicitNulls(pParse, pList);
}else if( autoInc ){
#ifndef SQLITE_OMIT_AUTOINCREMENT
@@ -116228,7 +119013,8 @@ static int isDupColumn(Index *pIdx, int nKey, Index *pPk, int iCol){
/* Recompute the colNotIdxed field of the Index.
**
** colNotIdxed is a bitmask that has a 0 bit representing each indexed
-** columns that are within the first 63 columns of the table. The
+** column that is within the first 63 columns of the table, and a 1 for
+** all other bits (all columns that are not in the index). The
** high-order bit of colNotIdxed is always 1. All unindexed columns
** of the table have a 1.
**
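** For example (a hypothetical index): an index covering columns 1 and 3
** of its table produces m = 0x0a, so colNotIdxed = ~m leaves bits 1 and 3
** clear and sets every other bit, including the high-order bit, to 1.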
@@ -116256,7 +119042,7 @@ static void recomputeColumnsNotIndexed(Index *pIdx){
}
}
pIdx->colNotIdxed = ~m;
- assert( (pIdx->colNotIdxed>>63)==1 );
+ assert( (pIdx->colNotIdxed>>63)==1 ); /* See note-20221022-a */
}
/*
@@ -116330,7 +119116,7 @@ static void convertToWithoutRowidTable(Parse *pParse, Table *pTab){
if( IN_RENAME_OBJECT ){
sqlite3RenameTokenRemap(pParse, pList->a[0].pExpr, &pTab->iPKey);
}
- pList->a[0].sortFlags = pParse->iPkSortOrder;
+ pList->a[0].fg.sortFlags = pParse->iPkSortOrder;
assert( pParse->pNewTable==pTab );
pTab->iPKey = -1;
sqlite3CreateIndex(pParse, 0, 0, 0, pList, pTab->keyConf, 0, 0, 0, 0,
@@ -116997,11 +119783,10 @@ create_view_fail:
** the columns of the view in the pTable structure. Return the number
** of errors. If an error is seen leave an error message in pParse->zErrMsg.
*/
-SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
+static SQLITE_NOINLINE int viewGetColumnNames(Parse *pParse, Table *pTable){
Table *pSelTab; /* A fake table from which we get the result set */
Select *pSel; /* Copy of the SELECT that implements the view */
int nErr = 0; /* Number of errors encountered */
- int n; /* Temporarily holds the number of cursors assigned */
sqlite3 *db = pParse->db; /* Database connection for malloc errors */
#ifndef SQLITE_OMIT_VIRTUALTABLE
int rc;
@@ -117023,9 +119808,10 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
#ifndef SQLITE_OMIT_VIEW
  /* A positive nCol means the column names for this view are
- ** already known.
+ ** already known. This routine is not called unless either the
+ ** table is virtual or nCol is zero.
*/
- if( pTable->nCol>0 ) return 0;
+ assert( pTable->nCol<=0 );
/* A negative nCol is a special marker meaning that we are currently
** trying to compute the column names. If we enter this routine with
@@ -117059,8 +119845,9 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
pSel = sqlite3SelectDup(db, pTable->u.view.pSelect, 0);
if( pSel ){
u8 eParseMode = pParse->eParseMode;
+ int nTab = pParse->nTab;
+ int nSelect = pParse->nSelect;
pParse->eParseMode = PARSE_MODE_NORMAL;
- n = pParse->nTab;
sqlite3SrcListAssignCursors(pParse, pSel->pSrc);
pTable->nCol = -1;
DisableLookaside;
@@ -117072,7 +119859,8 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
#else
pSelTab = sqlite3ResultSetOfSelect(pParse, pSel, SQLITE_AFF_NONE);
#endif
- pParse->nTab = n;
+ pParse->nTab = nTab;
+ pParse->nSelect = nSelect;
if( pSelTab==0 ){
pTable->nCol = 0;
nErr++;
@@ -117119,6 +119907,11 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
#endif /* SQLITE_OMIT_VIEW */
return nErr;
}
+SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
+ assert( pTable!=0 );
+ if( !IsVirtual(pTable) && pTable->nCol>0 ) return 0;
+ return viewGetColumnNames(pParse, pTable);
+}
#endif /* !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) */
#ifndef SQLITE_OMIT_VIEW
@@ -117817,8 +120610,8 @@ SQLITE_PRIVATE int sqlite3HasExplicitNulls(Parse *pParse, ExprList *pList){
if( pList ){
int i;
    for(i=0; i<pList->nExpr; i++){
- if( pList->a[i].bNulls ){
- u8 sf = pList->a[i].sortFlags;
+ if( pList->a[i].fg.bNulls ){
+ u8 sf = pList->a[i].fg.sortFlags;
sqlite3ErrorMsg(pParse, "unsupported use of NULLS %s",
(sf==0 || sf==3) ? "FIRST" : "LAST"
);
@@ -117984,7 +120777,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
}
if( !IN_RENAME_OBJECT ){
if( !db->init.busy ){
- if( sqlite3FindTable(db, zName, 0)!=0 ){
+ if( sqlite3FindTable(db, zName, pDb->zDbSName)!=0 ){
sqlite3ErrorMsg(pParse, "there is already a table named %s", zName);
goto exit_create_index;
}
@@ -118137,6 +120930,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
j = XN_EXPR;
pIndex->aiColumn[i] = XN_EXPR;
pIndex->uniqNotNull = 0;
+ pIndex->bHasExpr = 1;
}else{
j = pCExpr->iColumn;
assert( j<=0x7fff );
@@ -118148,6 +120942,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
}
if( pTab->aCol[j].colFlags & COLFLAG_VIRTUAL ){
pIndex->bHasVCol = 1;
+ pIndex->bHasExpr = 1;
}
}
pIndex->aiColumn[i] = (i16)j;
@@ -118171,7 +120966,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
goto exit_create_index;
}
pIndex->azColl[i] = zColl;
- requestedSortOrder = pListItem->sortFlags & sortOrderMask;
+ requestedSortOrder = pListItem->fg.sortFlags & sortOrderMask;
pIndex->aSortOrder[i] = (u8)requestedSortOrder;
}
@@ -118614,18 +121409,17 @@ SQLITE_PRIVATE IdList *sqlite3IdListAppend(Parse *pParse, IdList *pList, Token *
if( pList==0 ){
pList = sqlite3DbMallocZero(db, sizeof(IdList) );
if( pList==0 ) return 0;
+ }else{
+ IdList *pNew;
+ pNew = sqlite3DbRealloc(db, pList,
+ sizeof(IdList) + pList->nId*sizeof(pList->a));
+ if( pNew==0 ){
+ sqlite3IdListDelete(db, pList);
+ return 0;
+ }
+ pList = pNew;
}
- pList->a = sqlite3ArrayAllocate(
- db,
- pList->a,
- sizeof(pList->a[0]),
- &pList->nId,
- &i
- );
- if( i<0 ){
- sqlite3IdListDelete(db, pList);
- return 0;
- }
+ i = pList->nId++;
pList->a[i].zName = sqlite3NameFromToken(db, pToken);
if( IN_RENAME_OBJECT && pList->a[i].zName ){
sqlite3RenameTokenMap(pParse, (void*)pList->a[i].zName, pToken);
@@ -118638,12 +121432,13 @@ SQLITE_PRIVATE IdList *sqlite3IdListAppend(Parse *pParse, IdList *pList, Token *
*/
SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3 *db, IdList *pList){
int i;
+ assert( db!=0 );
if( pList==0 ) return;
+ assert( pList->eU4!=EU4_EXPR ); /* EU4_EXPR mode is not currently used */
  for(i=0; i<pList->nId; i++){
sqlite3DbFree(db, pList->a[i].zName);
}
- sqlite3DbFree(db, pList->a);
- sqlite3DbFreeNN(db, pList);
+ sqlite3DbNNFreeNN(db, pList);
}
/*
@@ -118652,7 +121447,7 @@ SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3 *db, IdList *pList){
*/
SQLITE_PRIVATE int sqlite3IdListIndex(IdList *pList, const char *zName){
int i;
- if( pList==0 ) return -1;
+ assert( pList!=0 );
  for(i=0; i<pList->nId; i++){
if( sqlite3StrICmp(pList->a[i].zName, zName)==0 ) return i;
}
@@ -118846,19 +121641,23 @@ SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse *pParse, SrcList *pList){
SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){
int i;
SrcItem *pItem;
+ assert( db!=0 );
if( pList==0 ) return;
  for(pItem=pList->a, i=0; i<pList->nSrc; i++, pItem++){
- if( pItem->zDatabase ) sqlite3DbFreeNN(db, pItem->zDatabase);
- sqlite3DbFree(db, pItem->zName);
- if( pItem->zAlias ) sqlite3DbFreeNN(db, pItem->zAlias);
+ if( pItem->zDatabase ) sqlite3DbNNFreeNN(db, pItem->zDatabase);
+ if( pItem->zName ) sqlite3DbNNFreeNN(db, pItem->zName);
+ if( pItem->zAlias ) sqlite3DbNNFreeNN(db, pItem->zAlias);
if( pItem->fg.isIndexedBy ) sqlite3DbFree(db, pItem->u1.zIndexedBy);
if( pItem->fg.isTabFunc ) sqlite3ExprListDelete(db, pItem->u1.pFuncArg);
sqlite3DeleteTable(db, pItem->pTab);
if( pItem->pSelect ) sqlite3SelectDelete(db, pItem->pSelect);
- if( pItem->pOn ) sqlite3ExprDelete(db, pItem->pOn);
- if( pItem->pUsing ) sqlite3IdListDelete(db, pItem->pUsing);
+ if( pItem->fg.isUsing ){
+ sqlite3IdListDelete(db, pItem->u3.pUsing);
+ }else if( pItem->u3.pOn ){
+ sqlite3ExprDelete(db, pItem->u3.pOn);
+ }
}
- sqlite3DbFreeNN(db, pList);
+ sqlite3DbNNFreeNN(db, pList);
}
/*
@@ -118884,14 +121683,13 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm(
Token *pDatabase, /* Name of the database containing pTable */
Token *pAlias, /* The right-hand side of the AS subexpression */
Select *pSubquery, /* A subquery used in place of a table name */
- Expr *pOn, /* The ON clause of a join */
- IdList *pUsing /* The USING clause of a join */
+ OnOrUsing *pOnUsing /* Either the ON clause or the USING clause */
){
SrcItem *pItem;
sqlite3 *db = pParse->db;
- if( !p && (pOn || pUsing) ){
+ if( !p && pOnUsing!=0 && (pOnUsing->pOn || pOnUsing->pUsing) ){
sqlite3ErrorMsg(pParse, "a JOIN clause is required before %s",
- (pOn ? "ON" : "USING")
+ (pOnUsing->pOn ? "ON" : "USING")
);
goto append_from_error;
}
@@ -118911,15 +121709,27 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendFromTerm(
if( pAlias->n ){
pItem->zAlias = sqlite3NameFromToken(db, pAlias);
}
- pItem->pSelect = pSubquery;
- pItem->pOn = pOn;
- pItem->pUsing = pUsing;
+ if( pSubquery ){
+ pItem->pSelect = pSubquery;
+ if( pSubquery->selFlags & SF_NestedFrom ){
+ pItem->fg.isNestedFrom = 1;
+ }
+ }
+ assert( pOnUsing==0 || pOnUsing->pOn==0 || pOnUsing->pUsing==0 );
+ assert( pItem->fg.isUsing==0 );
+ if( pOnUsing==0 ){
+ pItem->u3.pOn = 0;
+ }else if( pOnUsing->pUsing ){
+ pItem->fg.isUsing = 1;
+ pItem->u3.pUsing = pOnUsing->pUsing;
+ }else{
+ pItem->u3.pOn = pOnUsing->pOn;
+ }
return p;
append_from_error:
assert( p==0 );
- sqlite3ExprDelete(db, pOn);
- sqlite3IdListDelete(db, pUsing);
+ sqlite3ClearOnOrUsing(db, pOnUsing);
sqlite3SelectDelete(db, pSubquery);
return 0;
}
@@ -118964,6 +121774,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppendList(Parse *pParse, SrcList *p1, Src
p1 = pNew;
memcpy(&p1->a[1], p2->a, p2->nSrc*sizeof(SrcItem));
sqlite3DbFree(pParse->db, p2);
+ p1->a[0].fg.jointype |= (JT_LTORJ & p1->a[1].fg.jointype);
}
}
return p1;
@@ -119000,14 +121811,34 @@ SQLITE_PRIVATE void sqlite3SrcListFuncArgs(Parse *pParse, SrcList *p, ExprList *
** The operator is "natural cross join". The A and B operands are stored
** in p->a[0] and p->a[1], respectively. The parser initially stores the
** operator with A. This routine shifts that operator over to B.
+**
+** Additional changes:
+**
+** * All tables to the left of the right-most RIGHT JOIN are tagged with
+** JT_LTORJ (mnemonic: Left Table Of Right Join) so that the
+** code generator can easily tell that the table is part of
+** the left operand of at least one RIGHT JOIN.
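+**
+**       For example (illustrative): in "A JOIN B RIGHT JOIN C" the
+**       JT_RIGHT flag ends up on the C term after the shift, so the
+**       A and B terms are both tagged with JT_LTORJ.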
*/
-SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(SrcList *p){
- if( p ){
- int i;
- for(i=p->nSrc-1; i>0; i--){
- p->a[i].fg.jointype = p->a[i-1].fg.jointype;
- }
+SQLITE_PRIVATE void sqlite3SrcListShiftJoinType(Parse *pParse, SrcList *p){
+ (void)pParse;
+ if( p && p->nSrc>1 ){
+ int i = p->nSrc-1;
+ u8 allFlags = 0;
+ do{
+ allFlags |= p->a[i].fg.jointype = p->a[i-1].fg.jointype;
+ }while( (--i)>0 );
p->a[0].fg.jointype = 0;
+
+ /* All terms to the left of a RIGHT JOIN should be tagged with the
+ ** JT_LTORJ flags */
+ if( allFlags & JT_RIGHT ){
+ for(i=p->nSrc-1; ALWAYS(i>0) && (p->a[i].fg.jointype&JT_RIGHT)==0; i--){}
+ i--;
+ assert( i>=0 );
+ do{
+ p->a[i].fg.jointype |= JT_LTORJ;
+ }while( (--i)>=0 );
+ }
}
}
@@ -120078,19 +122909,21 @@ SQLITE_PRIVATE void sqlite3SchemaClear(void *p){
Hash temp2;
HashElem *pElem;
Schema *pSchema = (Schema *)p;
+ sqlite3 xdb;
+ memset(&xdb, 0, sizeof(xdb));
temp1 = pSchema->tblHash;
temp2 = pSchema->trigHash;
sqlite3HashInit(&pSchema->trigHash);
sqlite3HashClear(&pSchema->idxHash);
for(pElem=sqliteHashFirst(&temp2); pElem; pElem=sqliteHashNext(pElem)){
- sqlite3DeleteTrigger(0, (Trigger*)sqliteHashData(pElem));
+ sqlite3DeleteTrigger(&xdb, (Trigger*)sqliteHashData(pElem));
}
sqlite3HashClear(&temp2);
sqlite3HashInit(&pSchema->tblHash);
for(pElem=sqliteHashFirst(&temp1); pElem; pElem=sqliteHashNext(pElem)){
Table *pTab = sqliteHashData(pElem);
- sqlite3DeleteTable(0, pTab);
+ sqlite3DeleteTable(&xdb, pTab);
}
sqlite3HashClear(&temp1);
sqlite3HashClear(&pSchema->fkeyHash);
@@ -120189,18 +123022,42 @@ SQLITE_PRIVATE void sqlite3CodeChangeCount(Vdbe *v, int regCounter, const char *
** 1) It is a virtual table and no implementation of the xUpdate method
** has been provided
**
-** 2) It is a system table (i.e. sqlite_schema), this call is not
+** 2) A trigger is currently being coded and the table is a virtual table
+** that is SQLITE_VTAB_DIRECTONLY or if PRAGMA trusted_schema=OFF and
+** the table is not SQLITE_VTAB_INNOCUOUS.
+**
+** 3) It is a system table (i.e. sqlite_schema), this call is not
** part of a nested parse and writable_schema pragma has not
** been specified
**
-** 3) The table is a shadow table, the database connection is in
+** 4) The table is a shadow table, the database connection is in
** defensive mode, and the current sqlite3_prepare()
** is for a top-level SQL statement.
*/
+static int vtabIsReadOnly(Parse *pParse, Table *pTab){
+ if( sqlite3GetVTable(pParse->db, pTab)->pMod->pModule->xUpdate==0 ){
+ return 1;
+ }
+
+ /* Within triggers:
+ ** * Do not allow DELETE, INSERT, or UPDATE of SQLITE_VTAB_DIRECTONLY
+ ** virtual tables
+ ** * Only allow DELETE, INSERT, or UPDATE of non-SQLITE_VTAB_INNOCUOUS
+ ** virtual tables if PRAGMA trusted_schema=ON.
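+  **
+  ** Illustrative consequence: with PRAGMA trusted_schema=OFF, a trigger
+  ** that writes to a virtual table that is neither SQLITE_VTAB_INNOCUOUS
+  ** nor SQLITE_VTAB_DIRECTONLY still gets the "unsafe use of virtual
+  ** table" error generated below.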
+ */
+ if( pParse->pToplevel!=0
+ && pTab->u.vtab.p->eVtabRisk >
+ ((pParse->db->flags & SQLITE_TrustedSchema)!=0)
+ ){
+ sqlite3ErrorMsg(pParse, "unsafe use of virtual table \"%s\"",
+ pTab->zName);
+ }
+ return 0;
+}
static int tabIsReadOnly(Parse *pParse, Table *pTab){
sqlite3 *db;
if( IsVirtual(pTab) ){
- return sqlite3GetVTable(pParse->db, pTab)->pMod->pModule->xUpdate==0;
+ return vtabIsReadOnly(pParse, pTab);
}
if( (pTab->tabFlags & (TF_Readonly|TF_Shadow))==0 ) return 0;
db = pParse->db;
@@ -120212,9 +123069,11 @@ static int tabIsReadOnly(Parse *pParse, Table *pTab){
}
/*
-** Check to make sure the given table is writable. If it is not
-** writable, generate an error message and return 1. If it is
-** writable return 0;
+** Check to make sure the given table is writable.
+**
+** If pTab is not writable -> generate an error message and return 1.
+** If pTab is writable but other errors have occurred -> return 1.
+** If pTab is writable and no prior errors -> return 0;
*/
SQLITE_PRIVATE int sqlite3IsReadOnly(Parse *pParse, Table *pTab, int viewOk){
if( tabIsReadOnly(pParse, pTab) ){
@@ -120256,8 +123115,8 @@ SQLITE_PRIVATE void sqlite3MaterializeView(
assert( pFrom->nSrc==1 );
pFrom->a[0].zName = sqlite3DbStrDup(db, pView->zName);
pFrom->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName);
- assert( pFrom->a[0].pOn==0 );
- assert( pFrom->a[0].pUsing==0 );
+ assert( pFrom->a[0].fg.isUsing==0 );
+ assert( pFrom->a[0].u3.pOn==0 );
}
pSel = sqlite3SelectNew(pParse, 0, pFrom, pWhere, 0, 0, pOrderBy,
SF_IncludeHidden, pLimit);
@@ -120428,7 +123287,6 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
assert( db->mallocFailed==0 );
assert( pTabList->nSrc==1 );
-
/* Locate the table which we want to delete. This table has to be
** put in an SrcList structure because some of the subroutines we
** will be calling are designed to work with multiple tables and expect
@@ -120453,6 +123311,14 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
# define isView 0
#endif
+#if TREETRACE_ENABLED
+ if( sqlite3TreeTrace & 0x10000 ){
+ sqlite3TreeViewLine(0, "In sqlite3Delete() at %s:%d", __FILE__, __LINE__);
+ sqlite3TreeViewDelete(pParse->pWith, pTabList, pWhere,
+ pOrderBy, pLimit, pTrigger);
+ }
+#endif
+
#ifdef SQLITE_ENABLE_UPDATE_DELETE_LIMIT
if( !isView ){
pWhere = sqlite3LimitWhere(
@@ -120568,9 +123434,10 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
}
for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
assert( pIdx->pSchema==pTab->pSchema );
- sqlite3VdbeAddOp2(v, OP_Clear, pIdx->tnum, iDb);
if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){
- sqlite3VdbeChangeP3(v, -1, memCnt ? memCnt : -1);
+ sqlite3VdbeAddOp3(v, OP_Clear, pIdx->tnum, iDb, memCnt ? memCnt : -1);
+ }else{
+ sqlite3VdbeAddOp2(v, OP_Clear, pIdx->tnum, iDb);
}
}
}else
@@ -120770,7 +123637,7 @@ delete_from_cleanup:
sqlite3ExprListDelete(db, pOrderBy);
sqlite3ExprDelete(db, pLimit);
#endif
- sqlite3DbFree(db, aToOpen);
+ if( aToOpen ) sqlite3DbNNFreeNN(db, aToOpen);
return;
}
/* Make sure "isView" and other macros defined above are undefined. Otherwise
@@ -121853,7 +124720,7 @@ static int patternCompare(
** c but in the other case and search the input string for either
** c or cx.
*/
- if( c<=0x80 ){
+ if( c<0x80 ){
char zStop[3];
int bMatch;
if( noCase ){
@@ -121936,7 +124803,13 @@ static int patternCompare(
** non-zero if there is no match.
*/
SQLITE_API int sqlite3_strglob(const char *zGlobPattern, const char *zString){
- return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, '[');
+ if( zString==0 ){
+ return zGlobPattern!=0;
+ }else if( zGlobPattern==0 ){
+ return 1;
+ }else {
+ return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, '[');
+ }
}
/*
@@ -121944,7 +124817,13 @@ SQLITE_API int sqlite3_strglob(const char *zGlobPattern, const char *zString){
** a miss - like strcmp().
*/
SQLITE_API int sqlite3_strlike(const char *zPattern, const char *zStr, unsigned int esc){
- return patternCompare((u8*)zPattern, (u8*)zStr, &likeInfoNorm, esc);
+ if( zStr==0 ){
+ return zPattern!=0;
+ }else if( zPattern==0 ){
+ return 1;
+ }else{
+ return patternCompare((u8*)zPattern, (u8*)zStr, &likeInfoNorm, esc);
+ }
}
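
/*
** Illustrative behavior of the NULL checks above: calling either
** sqlite3_strglob() or sqlite3_strlike() with a non-NULL pattern and a
** NULL string returns 1 (no match); calling either with both the pattern
** and the string NULL returns 0.
*/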
/*
@@ -123211,11 +126090,11 @@ static void logFunc(
switch( SQLITE_PTR_TO_INT(sqlite3_user_data(context)) ){
case 1:
/* Convert from natural logarithm to log base 10 */
- ans *= 1.0/M_LN10;
+ ans /= M_LN10;
break;
case 2:
/* Convert from natural logarithm to log base 2 */
- ans *= 1.0/M_LN2;
+ ans /= M_LN2;
break;
default:
break;
@@ -123354,8 +126233,7 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
INLINE_FUNC(likelihood, 2, INLINEFUNC_unlikely, SQLITE_FUNC_UNLIKELY),
INLINE_FUNC(likely, 1, INLINEFUNC_unlikely, SQLITE_FUNC_UNLIKELY),
#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC
- {1, SQLITE_FUNC_BUILTIN|SQLITE_UTF8|SQLITE_FUNC_OFFSET|SQLITE_FUNC_TYPEOF,
- 0, 0, noopFunc, 0, 0, 0, "sqlite_offset", {0} },
+ INLINE_FUNC(sqlite_offset, 1, INLINEFUNC_sqlite_offset, 0 ),
#endif
FUNCTION(ltrim, 1, 1, 0, trimFunc ),
FUNCTION(ltrim, 2, 1, 0, trimFunc ),
@@ -123890,7 +126768,6 @@ static void fkLookupParent(
}else{
int nCol = pFKey->nCol;
int regTemp = sqlite3GetTempRange(pParse, nCol);
- int regRec = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp3(v, OP_OpenRead, iCur, pIdx->tnum, iDb);
sqlite3VdbeSetP4KeyInfo(pParse, pIdx);
@@ -123930,11 +126807,10 @@ static void fkLookupParent(
sqlite3VdbeGoto(v, iOk);
}
- sqlite3VdbeAddOp4(v, OP_MakeRecord, regTemp, nCol, regRec,
+ sqlite3VdbeAddOp4(v, OP_Affinity, regTemp, nCol, 0,
sqlite3IndexAffinityStr(pParse->db,pIdx), nCol);
- sqlite3VdbeAddOp4Int(v, OP_Found, iCur, iOk, regRec, 0); VdbeCoverage(v);
-
- sqlite3ReleaseTempReg(pParse, regRec);
+ sqlite3VdbeAddOp4Int(v, OP_Found, iCur, iOk, regTemp, nCol);
+ VdbeCoverage(v);
sqlite3ReleaseTempRange(pParse, regTemp, nCol);
}
}
@@ -124036,14 +126912,10 @@ static Expr *exprTableColumn(
** Operation | FK type | Action taken
** --------------------------------------------------------------------------
** DELETE immediate Increment the "immediate constraint counter".
-** Or, if the ON (UPDATE|DELETE) action is RESTRICT,
-** throw a "FOREIGN KEY constraint failed" exception.
**
** INSERT immediate Decrement the "immediate constraint counter".
**
** DELETE deferred Increment the "deferred constraint counter".
-** Or, if the ON (UPDATE|DELETE) action is RESTRICT,
-** throw a "FOREIGN KEY constraint failed" exception.
**
** INSERT deferred Decrement the "deferred constraint counter".
**
@@ -124691,9 +127563,9 @@ SQLITE_PRIVATE int sqlite3FkRequired(
**
** It returns a pointer to a Trigger structure containing a trigger
** equivalent to the ON UPDATE or ON DELETE action specified by pFKey.
-** If the action is "NO ACTION" or "RESTRICT", then a NULL pointer is
-** returned (these actions require no special handling by the triggers
-** sub-system, code for them is created by fkScanChildren()).
+** If the action is "NO ACTION" then a NULL pointer is returned (these actions
+** require no special handling by the triggers sub-system, code for them is
+** created by fkScanChildren()).
**
** For example, if pFKey is the foreign key and pTab is table "p" in
** the following schema:
@@ -124822,18 +127694,23 @@ static Trigger *fkActionTrigger(
nFrom = sqlite3Strlen30(zFrom);
if( action==OE_Restrict ){
+ int iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
Token tFrom;
+ Token tDb;
Expr *pRaise;
tFrom.z = zFrom;
tFrom.n = nFrom;
+ tDb.z = db->aDb[iDb].zDbSName;
+ tDb.n = sqlite3Strlen30(tDb.z);
+
pRaise = sqlite3Expr(db, TK_RAISE, "FOREIGN KEY constraint failed");
if( pRaise ){
pRaise->affExpr = OE_Abort;
}
pSelect = sqlite3SelectNew(pParse,
sqlite3ExprListAppend(pParse, 0, pRaise),
- sqlite3SrcListAppend(pParse, 0, &tFrom, 0),
+ sqlite3SrcListAppend(pParse, 0, &tDb, &tFrom),
pWhere,
0, 0, 0, 0, 0
);
@@ -124940,11 +127817,12 @@ SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *db, Table *pTab){
FKey *pNext; /* Copy of pFKey->pNextFrom */
assert( IsOrdinaryTable(pTab) );
+ assert( db!=0 );
for(pFKey=pTab->u.tab.pFKey; pFKey; pFKey=pNext){
assert( db==0 || sqlite3SchemaMutexHeld(db, 0, pTab->pSchema) );
/* Remove the FK from the fkeyHash hash table. */
- if( !db || db->pnBytesFreed==0 ){
+ if( db->pnBytesFreed==0 ){
if( pFKey->pPrevTo ){
pFKey->pPrevTo->pNextTo = pFKey->pNextTo;
}else{
@@ -125074,6 +127952,7 @@ SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){
aff = SQLITE_AFF_INTEGER;
}else{
assert( x==XN_EXPR );
+ assert( pIdx->bHasExpr );
assert( pIdx->aColExpr!=0 );
aff = sqlite3ExprAffinity(pIdx->aColExpr->a[n].pExpr);
}
@@ -125087,6 +127966,28 @@ SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){
return pIdx->zColAff;
}
+/*
+** Compute an affinity string for a table. Space is obtained
+** from sqlite3DbMalloc(). The caller is responsible for freeing
+** the space when done.
+*/
+SQLITE_PRIVATE char *sqlite3TableAffinityStr(sqlite3 *db, const Table *pTab){
+ char *zColAff;
+ zColAff = (char *)sqlite3DbMallocRaw(db, pTab->nCol+1);
+ if( zColAff ){
+ int i, j;
+    for(i=j=0; i<pTab->nCol; i++){
+ if( (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ){
+ zColAff[j++] = pTab->aCol[i].affinity;
+ }
+ }
+ do{
+ zColAff[j--] = 0;
+ }while( j>=0 && zColAff[j]<=SQLITE_AFF_BLOB );
+ }
+ return zColAff;
+}
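+
+/*
+** Illustrative example for sqlite3TableAffinityStr(): for a table whose
+** non-virtual columns carry INTEGER, TEXT, and BLOB affinity, the loop
+** above stores one affinity character per column and the trailing
+** do/while then trims the final BLOB entry, since trailing affinities of
+** SQLITE_AFF_BLOB or less have no effect when applied.
+*/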
+
/*
** Make changes to the evolving bytecode to do affinity transformations
** of values that are about to be gathered into a row for table pTab.
@@ -125128,7 +128029,7 @@ SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){
** Apply the type checking to that array of registers.
*/
SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){
- int i, j;
+ int i;
char *zColAff;
if( pTab->tabFlags & TF_Strict ){
if( iReg==0 ){
@@ -125137,7 +128038,7 @@ SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){
** OP_MakeRecord is found */
VdbeOp *pPrev;
sqlite3VdbeAppendP4(v, pTab, P4_TABLE);
- pPrev = sqlite3VdbeGetOp(v, -1);
+ pPrev = sqlite3VdbeGetLastOp(v);
assert( pPrev!=0 );
assert( pPrev->opcode==OP_MakeRecord || sqlite3VdbeDb(v)->mallocFailed );
pPrev->opcode = OP_TypeCheck;
@@ -125151,22 +128052,11 @@ SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){
}
zColAff = pTab->zColAff;
if( zColAff==0 ){
- sqlite3 *db = sqlite3VdbeDb(v);
- zColAff = (char *)sqlite3DbMallocRaw(0, pTab->nCol+1);
+ zColAff = sqlite3TableAffinityStr(0, pTab);
if( !zColAff ){
- sqlite3OomFault(db);
+ sqlite3OomFault(sqlite3VdbeDb(v));
return;
}
-
-    for(i=j=0; i<pTab->nCol; i++){
- assert( pTab->aCol[i].affinity!=0 || sqlite3VdbeParser(v)->nErr>0 );
- if( (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ){
- zColAff[j++] = pTab->aCol[i].affinity;
- }
- }
- do{
- zColAff[j--] = 0;
- }while( j>=0 && zColAff[j]<=SQLITE_AFF_BLOB );
pTab->zColAff = zColAff;
}
assert( zColAff!=0 );
@@ -125175,7 +128065,7 @@ SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){
if( iReg ){
sqlite3VdbeAddOp4(v, OP_Affinity, iReg, i, 0, zColAff, i);
}else{
- assert( sqlite3VdbeGetOp(v, -1)->opcode==OP_MakeRecord
+ assert( sqlite3VdbeGetLastOp(v)->opcode==OP_MakeRecord
|| sqlite3VdbeDb(v)->mallocFailed );
sqlite3VdbeChangeP4(v, -1, zColAff, i);
}
@@ -125261,7 +128151,7 @@ SQLITE_PRIVATE void sqlite3ComputeGeneratedColumns(
*/
sqlite3TableAffinity(pParse->pVdbe, pTab, iRegStore);
if( (pTab->tabFlags & TF_HasStored)!=0 ){
- pOp = sqlite3VdbeGetOp(pParse->pVdbe,-1);
+ pOp = sqlite3VdbeGetLastOp(pParse->pVdbe);
if( pOp->opcode==OP_Affinity ){
/* Change the OP_Affinity argument to '@' (NONE) for all stored
** columns. '@' is the no-op affinity and those columns have not
@@ -125743,6 +128633,14 @@ SQLITE_PRIVATE void sqlite3Insert(
#endif
assert( (pTrigger && tmask) || (pTrigger==0 && tmask==0) );
+#if TREETRACE_ENABLED
+ if( sqlite3TreeTrace & 0x10000 ){
+ sqlite3TreeViewLine(0, "In sqlite3Insert() at %s:%d", __FILE__, __LINE__);
+ sqlite3TreeViewInsert(pParse->pWith, pTabList, pColumn, pSelect, pList,
+ onError, pUpsert, pTrigger);
+ }
+#endif
+
/* If pTab is really a view, make sure it has been initialized.
** ViewGetColumnNames() is a no-op if pTab is not a view.
*/
@@ -125821,13 +128719,15 @@ SQLITE_PRIVATE void sqlite3Insert(
*/
bIdListInOrder = (pTab->tabFlags & (TF_OOOHidden|TF_HasStored))==0;
if( pColumn ){
+ assert( pColumn->eU4!=EU4_EXPR );
+ pColumn->eU4 = EU4_IDX;
    for(i=0; i<pColumn->nId; i++){
- pColumn->a[i].idx = -1;
+ pColumn->a[i].u4.idx = -1;
}
    for(i=0; i<pColumn->nId; i++){
      for(j=0; j<pTab->nCol; j++){
if( sqlite3StrICmp(pColumn->a[i].zName, pTab->aCol[j].zCnName)==0 ){
- pColumn->a[i].idx = j;
+ pColumn->a[i].u4.idx = j;
if( i!=j ) bIdListInOrder = 0;
if( j==pTab->iPKey ){
ipkColumn = i; assert( !withoutRowid );
@@ -126129,7 +129029,8 @@ SQLITE_PRIVATE void sqlite3Insert(
}
}
if( pColumn ){
-      for(j=0; j<pColumn->nId && pColumn->a[j].idx!=i; j++){}
+ assert( pColumn->eU4==EU4_IDX );
+      for(j=0; j<pColumn->nId && pColumn->a[j].u4.idx!=i; j++){}
if( j>=pColumn->nId ){
/* A column not named in the insert column list gets its
** default value */
@@ -126156,7 +129057,12 @@ SQLITE_PRIVATE void sqlite3Insert(
sqlite3VdbeAddOp2(v, OP_SCopy, regFromSelect+k, iRegStore);
}
}else{
- sqlite3ExprCode(pParse, pList->a[k].pExpr, iRegStore);
+ Expr *pX = pList->a[k].pExpr;
+ int y = sqlite3ExprCodeTarget(pParse, pX, iRegStore);
+ if( y!=iRegStore ){
+ sqlite3VdbeAddOp2(v,
+ ExprHasProperty(pX, EP_Subquery) ? OP_Copy : OP_SCopy, y, iRegStore);
+ }
}
}
@@ -126293,7 +129199,9 @@ SQLITE_PRIVATE void sqlite3Insert(
sqlite3GenerateConstraintChecks(pParse, pTab, aRegIdx, iDataCur, iIdxCur,
regIns, 0, ipkColumn>=0, onError, endOfLoop, &isReplace, 0, pUpsert
);
- sqlite3FkCheck(pParse, pTab, 0, regIns, 0, 0);
+ if( db->flags & SQLITE_ForeignKeys ){
+ sqlite3FkCheck(pParse, pTab, 0, regIns, 0, 0);
+ }
/* Set the OPFLAG_USESEEKRESULT flag if either (a) there are no REPLACE
** constraints or (b) there are no triggers and this table is not a
@@ -126377,7 +129285,7 @@ insert_cleanup:
sqlite3UpsertDelete(db, pUpsert);
sqlite3SelectDelete(db, pSelect);
sqlite3IdListDelete(db, pColumn);
- sqlite3DbFree(db, aRegIdx);
+ if( aRegIdx ) sqlite3DbNNFreeNN(db, aRegIdx);
}
/* Make sure "isView" and other macros defined above are undefined. Otherwise
@@ -127260,7 +130168,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
if( isUpdate ){
/* If currently processing the PRIMARY KEY of a WITHOUT ROWID
** table, only conflict if the new PRIMARY KEY values are actually
- ** different from the old.
+ ** different from the old. See TH3 withoutrowid04.test.
**
** For a UNIQUE index, only conflict if the PRIMARY KEY values
** of the matched index row are different from the original PRIMARY
@@ -128604,9 +131512,9 @@ struct sqlite3_api_routines {
const char *(*filename_journal)(const char*);
const char *(*filename_wal)(const char*);
/* Version 3.32.0 and later */
- char *(*create_filename)(const char*,const char*,const char*,
+ const char *(*create_filename)(const char*,const char*,const char*,
int,const char**);
- void (*free_filename)(char*);
+ void (*free_filename)(const char*);
sqlite3_file *(*database_file_object)(const char*);
/* Version 3.34.0 and later */
int (*txn_state)(sqlite3*,const char*);
@@ -128624,6 +131532,14 @@ struct sqlite3_api_routines {
int (*vtab_in)(sqlite3_index_info*,int,int);
int (*vtab_in_first)(sqlite3_value*,sqlite3_value**);
int (*vtab_in_next)(sqlite3_value*,sqlite3_value**);
+ /* Version 3.39.0 and later */
+ int (*deserialize)(sqlite3*,const char*,unsigned char*,
+ sqlite3_int64,sqlite3_int64,unsigned);
+ unsigned char *(*serialize)(sqlite3*,const char *,sqlite3_int64*,
+ unsigned int);
+ const char *(*db_name)(sqlite3*,int);
+ /* Version 3.40.0 and later */
+ int (*value_encoding)(sqlite3_value*);
};
/*
@@ -128942,6 +131858,14 @@ typedef int (*sqlite3_loadext_entry)(
#define sqlite3_vtab_in sqlite3_api->vtab_in
#define sqlite3_vtab_in_first sqlite3_api->vtab_in_first
#define sqlite3_vtab_in_next sqlite3_api->vtab_in_next
+/* Version 3.39.0 and later */
+#ifndef SQLITE_OMIT_DESERIALIZE
+#define sqlite3_deserialize sqlite3_api->deserialize
+#define sqlite3_serialize sqlite3_api->serialize
+#endif
+#define sqlite3_db_name sqlite3_api->db_name
+/* Version 3.40.0 and later */
+#define sqlite3_value_encoding sqlite3_api->value_encoding
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
@@ -129433,11 +132357,30 @@ static const sqlite3_api_routines sqlite3Apis = {
sqlite3_autovacuum_pages,
/* Version 3.38.0 and later */
sqlite3_error_offset,
+#ifndef SQLITE_OMIT_VIRTUALTABLE
sqlite3_vtab_rhs_value,
sqlite3_vtab_distinct,
sqlite3_vtab_in,
sqlite3_vtab_in_first,
- sqlite3_vtab_in_next
+ sqlite3_vtab_in_next,
+#else
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+#endif
+ /* Version 3.39.0 and later */
+#ifndef SQLITE_OMIT_DESERIALIZE
+ sqlite3_deserialize,
+ sqlite3_serialize,
+#else
+ 0,
+ 0,
+#endif
+ sqlite3_db_name,
+ /* Version 3.40.0 and later */
+ sqlite3_value_encoding
};
/* True if x is the directory separator character
@@ -130108,7 +133051,7 @@ static const PragmaName aPragmaName[] = {
#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
{/* zName: */ "database_list",
/* ePragTyp: */ PragTyp_DATABASE_LIST,
- /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0,
+ /* ePragFlg: */ PragFlg_Result0,
/* ColNames: */ 47, 3,
/* iArg: */ 0 },
#endif
@@ -130796,15 +133739,16 @@ static void pragmaFunclistLine(
int isBuiltin, /* True if this is a built-in function */
int showInternFuncs /* True if showing internal functions */
){
+ u32 mask =
+ SQLITE_DETERMINISTIC |
+ SQLITE_DIRECTONLY |
+ SQLITE_SUBTYPE |
+ SQLITE_INNOCUOUS |
+ SQLITE_FUNC_INTERNAL
+ ;
+ if( showInternFuncs ) mask = 0xffffffff;
for(; p; p=p->pNext){
const char *zType;
- static const u32 mask =
- SQLITE_DETERMINISTIC |
- SQLITE_DIRECTONLY |
- SQLITE_SUBTYPE |
- SQLITE_INNOCUOUS |
- SQLITE_FUNC_INTERNAL
- ;
static const char *azEnc[] = { 0, "utf8", "utf16le", "utf16be" };
assert( SQLITE_FUNC_ENCMASK==0x3 );
@@ -131296,7 +134240,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
*/
#ifndef SQLITE_OMIT_AUTOVACUUM
case PragTyp_INCREMENTAL_VACUUM: {
- int iLimit, addr;
+ int iLimit = 0, addr;
if( zRight==0 || !sqlite3GetInt32(zRight, &iLimit) || iLimit<=0 ){
iLimit = 0x7fffffff;
}
@@ -131453,6 +134397,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
**
*/
case PragTyp_TEMP_STORE_DIRECTORY: {
+ sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
if( !zRight ){
returnSingleText(v, sqlite3_temp_directory);
}else{
@@ -131462,6 +134407,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res);
if( rc!=SQLITE_OK || res==0 ){
sqlite3ErrorMsg(pParse, "not a writable directory");
+ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
goto pragma_out;
}
}
@@ -131479,6 +134425,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
}
#endif /* SQLITE_OMIT_WSD */
}
+ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
break;
}
@@ -131497,6 +134444,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
**
*/
case PragTyp_DATA_STORE_DIRECTORY: {
+ sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
if( !zRight ){
returnSingleText(v, sqlite3_data_directory);
}else{
@@ -131506,6 +134454,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
rc = sqlite3OsAccess(db->pVfs, zRight, SQLITE_ACCESS_READWRITE, &res);
if( rc!=SQLITE_OK || res==0 ){
sqlite3ErrorMsg(pParse, "not a writable directory");
+ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
goto pragma_out;
}
}
@@ -131517,6 +134466,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
}
#endif /* SQLITE_OMIT_WSD */
}
+ sqlite3_mutex_leave(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR));
break;
}
#endif
@@ -131984,7 +134934,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
HashElem *k; /* Loop counter: Next table in schema */
int x; /* result variable */
int regResult; /* 3 registers to hold a result row */
- int regKey; /* Register to hold key for checking the FK */
int regRow; /* Registers to hold a row from pTab */
int addrTop; /* Top of a loop checking foreign keys */
int addrOk; /* Jump here if the key is OK */
@@ -131992,7 +134941,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
regResult = pParse->nMem+1;
pParse->nMem += 4;
- regKey = ++pParse->nMem;
regRow = ++pParse->nMem;
k = sqliteHashFirst(&db->aDb[iDb].pSchema->tblHash);
while( k ){
@@ -132059,9 +135007,9 @@ SQLITE_PRIVATE void sqlite3Pragma(
/* Generate code to query the parent index for a matching parent
** key. If a match is found, jump to addrOk. */
if( pIdx ){
- sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, pFK->nCol, regKey,
+ sqlite3VdbeAddOp4(v, OP_Affinity, regRow, pFK->nCol, 0,
sqlite3IndexAffinityStr(db,pIdx), pFK->nCol);
- sqlite3VdbeAddOp4Int(v, OP_Found, i, addrOk, regKey, 0);
+ sqlite3VdbeAddOp4Int(v, OP_Found, i, addrOk, regRow, pFK->nCol);
VdbeCoverage(v);
}else if( pParent ){
int jmp = sqlite3VdbeCurrentAddr(v)+2;
@@ -132232,15 +135180,24 @@ SQLITE_PRIVATE void sqlite3Pragma(
for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){
Table *pTab = sqliteHashData(x);
Index *pIdx, *pPk;
- Index *pPrior = 0;
+ Index *pPrior = 0; /* Previous index */
int loopTop;
int iDataCur, iIdxCur;
int r1 = -1;
- int bStrict;
+ int bStrict; /* True for a STRICT table */
+ int r2; /* Previous key for WITHOUT ROWID tables */
+ int mxCol; /* Maximum non-virtual column number */
if( !IsOrdinaryTable(pTab) ) continue;
if( pObjTab && pObjTab!=pTab ) continue;
- pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab);
+ if( isQuick || HasRowid(pTab) ){
+ pPk = 0;
+ r2 = 0;
+ }else{
+ pPk = sqlite3PrimaryKeyIndex(pTab);
+ r2 = sqlite3GetTempRange(pParse, pPk->nKeyCol);
+ sqlite3VdbeAddOp3(v, OP_Null, 1, r2, r2+pPk->nKeyCol-1);
+ }
sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenRead, 0,
1, 0, &iDataCur, &iIdxCur);
/* reg[7] counts the number of entries in the table.
@@ -132254,52 +135211,157 @@ SQLITE_PRIVATE void sqlite3Pragma(
assert( sqlite3NoTempsInRange(pParse,1,7+j) );
sqlite3VdbeAddOp2(v, OP_Rewind, iDataCur, 0); VdbeCoverage(v);
loopTop = sqlite3VdbeAddOp2(v, OP_AddImm, 7, 1);
+
+ /* Fetch the right-most column from the table. This will cause
+ ** the entire record header to be parsed and sanity checked. It
+ ** will also prepopulate the cursor column cache that is used
+ ** by the OP_IsType code, so it is a required step.
+ */
+ mxCol = pTab->nCol-1;
+ while( mxCol>=0
+ && ((pTab->aCol[mxCol].colFlags & COLFLAG_VIRTUAL)!=0
+ || pTab->iPKey==mxCol) ) mxCol--;
+ if( mxCol>=0 ){
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, mxCol, 3);
+ sqlite3VdbeTypeofColumn(v, 3);
+ }
+
if( !isQuick ){
- /* Sanity check on record header decoding */
- sqlite3VdbeAddOp3(v, OP_Column, iDataCur, pTab->nNVCol-1,3);
- sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG);
- VdbeComment((v, "(right-most column)"));
+ if( pPk ){
+ /* Verify WITHOUT ROWID keys are in ascending order */
+ int a1;
+ char *zErr;
+ a1 = sqlite3VdbeAddOp4Int(v, OP_IdxGT, iDataCur, 0,r2,pPk->nKeyCol);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp1(v, OP_IsNull, r2); VdbeCoverage(v);
+ zErr = sqlite3MPrintf(db,
+ "row not in PRIMARY KEY order for %s",
+ pTab->zName);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC);
+ integrityCheckResultRow(v);
+ sqlite3VdbeJumpHere(v, a1);
+ sqlite3VdbeJumpHere(v, a1+1);
+          for(j=0; j<pPk->nKeyCol; j++){
+ sqlite3ExprCodeLoadIndexColumn(pParse, pPk, iDataCur, j, r2+j);
+ }
+ }
}
- /* Verify that all NOT NULL columns really are NOT NULL. At the
- ** same time verify the type of the content of STRICT tables */
+ /* Verify datatypes for all columns:
+ **
+ ** (1) NOT NULL columns may not contain a NULL
+ ** (2) Datatype must be exact for non-ANY columns in STRICT tables
+ ** (3) Datatype for TEXT columns in non-STRICT tables must be
+ ** NULL, TEXT, or BLOB.
+ ** (4) Datatype for numeric columns in non-STRICT tables must not
+ ** be a TEXT value that can be losslessly converted to numeric.
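+    **
+    ** Illustrative outcomes: an INTEGER value stored in a TEXT column of
+    ** a non-STRICT table is reported by rule (3) as a "NUMERIC value in"
+    ** error, and a TEXT value such as '123' stored in an INTEGER column
+    ** is reported by rule (4) as a "TEXT value in" error, because it
+    ** converts losslessly to a number.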
+ */
bStrict = (pTab->tabFlags & TF_Strict)!=0;
      for(j=0; j<pTab->nCol; j++){
char *zErr;
- Column *pCol = pTab->aCol + j;
- int doError, jmp2;
+ Column *pCol = pTab->aCol + j; /* The column to be checked */
+ int labelError; /* Jump here to report an error */
+ int labelOk; /* Jump here if all looks ok */
+ int p1, p3, p4; /* Operands to the OP_IsType opcode */
+ int doTypeCheck; /* Check datatypes (besides NOT NULL) */
+
if( j==pTab->iPKey ) continue;
- if( pCol->notNull==0 && !bStrict ) continue;
- doError = bStrict ? sqlite3VdbeMakeLabel(pParse) : 0;
- sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3);
- if( sqlite3VdbeGetOp(v,-1)->opcode==OP_Column ){
- sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG);
+ if( bStrict ){
+ doTypeCheck = pCol->eCType>COLTYPE_ANY;
+ }else{
+ doTypeCheck = pCol->affinity>SQLITE_AFF_BLOB;
}
+ if( pCol->notNull==0 && !doTypeCheck ) continue;
+
+ /* Compute the operands that will be needed for OP_IsType */
+ p4 = SQLITE_NULL;
+ if( pCol->colFlags & COLFLAG_VIRTUAL ){
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3);
+ p1 = -1;
+ p3 = 3;
+ }else{
+ if( pCol->iDflt ){
+ sqlite3_value *pDfltValue = 0;
+ sqlite3ValueFromExpr(db, sqlite3ColumnExpr(pTab,pCol), ENC(db),
+ pCol->affinity, &pDfltValue);
+ if( pDfltValue ){
+ p4 = sqlite3_value_type(pDfltValue);
+ sqlite3ValueFree(pDfltValue);
+ }
+ }
+ p1 = iDataCur;
+ if( !HasRowid(pTab) ){
+ testcase( j!=sqlite3TableColumnToStorage(pTab, j) );
+ p3 = sqlite3TableColumnToIndex(sqlite3PrimaryKeyIndex(pTab), j);
+ }else{
+ p3 = sqlite3TableColumnToStorage(pTab,j);
+ testcase( p3!=j);
+ }
+ }
+
+ labelError = sqlite3VdbeMakeLabel(pParse);
+ labelOk = sqlite3VdbeMakeLabel(pParse);
if( pCol->notNull ){
- jmp2 = sqlite3VdbeAddOp1(v, OP_NotNull, 3); VdbeCoverage(v);
+ /* (1) NOT NULL columns may not contain a NULL */
+ int jmp2 = sqlite3VdbeAddOp4Int(v, OP_IsType, p1, labelOk, p3, p4);
+ sqlite3VdbeChangeP5(v, 0x0f);
+ VdbeCoverage(v);
zErr = sqlite3MPrintf(db, "NULL value in %s.%s", pTab->zName,
pCol->zCnName);
sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC);
- if( bStrict && pCol->eCType!=COLTYPE_ANY ){
- sqlite3VdbeGoto(v, doError);
+ if( doTypeCheck ){
+ sqlite3VdbeGoto(v, labelError);
+ sqlite3VdbeJumpHere(v, jmp2);
}else{
- integrityCheckResultRow(v);
+ /* VDBE byte code will fall thru */
}
- sqlite3VdbeJumpHere(v, jmp2);
}
- if( (pTab->tabFlags & TF_Strict)!=0
- && pCol->eCType!=COLTYPE_ANY
- ){
- jmp2 = sqlite3VdbeAddOp3(v, OP_IsNullOrType, 3, 0,
- sqlite3StdTypeMap[pCol->eCType-1]);
+ if( bStrict && doTypeCheck ){
+ /* (2) Datatype must be exact for non-ANY columns in STRICT tables*/
+ static unsigned char aStdTypeMask[] = {
+ 0x1f, /* ANY */
+ 0x18, /* BLOB */
+ 0x11, /* INT */
+ 0x11, /* INTEGER */
+ 0x13, /* REAL */
+ 0x14 /* TEXT */
+ };
+ sqlite3VdbeAddOp4Int(v, OP_IsType, p1, labelOk, p3, p4);
+ assert( pCol->eCType>=1 && pCol->eCType<=sizeof(aStdTypeMask) );
+ sqlite3VdbeChangeP5(v, aStdTypeMask[pCol->eCType-1]);
VdbeCoverage(v);
zErr = sqlite3MPrintf(db, "non-%s value in %s.%s",
sqlite3StdType[pCol->eCType-1],
pTab->zName, pTab->aCol[j].zCnName);
sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC);
- sqlite3VdbeResolveLabel(v, doError);
- integrityCheckResultRow(v);
- sqlite3VdbeJumpHere(v, jmp2);
+ }else if( !bStrict && pCol->affinity==SQLITE_AFF_TEXT ){
+ /* (3) Datatype for TEXT columns in non-STRICT tables must be
+ ** NULL, TEXT, or BLOB. */
+ sqlite3VdbeAddOp4Int(v, OP_IsType, p1, labelOk, p3, p4);
+ sqlite3VdbeChangeP5(v, 0x1c); /* NULL, TEXT, or BLOB */
+ VdbeCoverage(v);
+ zErr = sqlite3MPrintf(db, "NUMERIC value in %s.%s",
+ pTab->zName, pTab->aCol[j].zCnName);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC);
+ }else if( !bStrict && pCol->affinity>=SQLITE_AFF_NUMERIC ){
+ /* (4) Datatype for numeric columns in non-STRICT tables must not
+ ** be a TEXT value that can be converted to numeric. */
+ sqlite3VdbeAddOp4Int(v, OP_IsType, p1, labelOk, p3, p4);
+ sqlite3VdbeChangeP5(v, 0x1b); /* NULL, INT, FLOAT, or BLOB */
+ VdbeCoverage(v);
+ if( p1>=0 ){
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3);
+ }
+ sqlite3VdbeAddOp4(v, OP_Affinity, 3, 1, 0, "C", P4_STATIC);
+ sqlite3VdbeAddOp4Int(v, OP_IsType, -1, labelOk, 3, p4);
+ sqlite3VdbeChangeP5(v, 0x1c); /* NULL, TEXT, or BLOB */
+ VdbeCoverage(v);
+ zErr = sqlite3MPrintf(db, "TEXT value in %s.%s",
+ pTab->zName, pTab->aCol[j].zCnName);
+ sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC);
}
+ sqlite3VdbeResolveLabel(v, labelError);
+ integrityCheckResultRow(v);
+ sqlite3VdbeResolveLabel(v, labelOk);
}
/* Verify CHECK constraints */
if( pTab->pCheck && (db->flags & SQLITE_IgnoreChecks)==0 ){
@@ -132387,6 +135449,9 @@ SQLITE_PRIVATE void sqlite3Pragma(
integrityCheckResultRow(v);
sqlite3VdbeJumpHere(v, addr);
}
+ if( pPk ){
+ sqlite3ReleaseTempRange(pParse, r2, pPk->nKeyCol);
+ }
}
}
}
@@ -132537,6 +135602,11 @@ SQLITE_PRIVATE void sqlite3Pragma(
aOp[1].p2 = iCookie;
aOp[1].p3 = sqlite3Atoi(zRight);
aOp[1].p5 = 1;
+ if( iCookie==BTREE_SCHEMA_VERSION && (db->flags & SQLITE_Defensive)!=0 ){
+ /* Do not allow the use of PRAGMA schema_version=VALUE in defensive
+ ** mode. Change the OP_SetCookie opcode into a no-op. */
+ aOp[1].opcode = OP_Noop;
+ }
}else{
/* Read the specified cookie value */
static const VdbeOpList readCookie[] = {
@@ -133785,15 +136855,15 @@ SQLITE_PRIVATE void sqlite3ParseObjectReset(Parse *pParse){
assert( db->pParse==pParse );
assert( pParse->nested==0 );
#ifndef SQLITE_OMIT_SHARED_CACHE
- sqlite3DbFree(db, pParse->aTableLock);
+ if( pParse->aTableLock ) sqlite3DbNNFreeNN(db, pParse->aTableLock);
#endif
while( pParse->pCleanup ){
ParseCleanup *pCleanup = pParse->pCleanup;
pParse->pCleanup = pCleanup->pNext;
pCleanup->xCleanup(db, pCleanup->pPtr);
- sqlite3DbFreeNN(db, pCleanup);
+ sqlite3DbNNFreeNN(db, pCleanup);
}
- sqlite3DbFree(db, pParse->aLabel);
+ if( pParse->aLabel ) sqlite3DbNNFreeNN(db, pParse->aLabel);
if( pParse->pConstExpr ){
sqlite3ExprListDelete(db, pParse->pConstExpr);
}
@@ -133916,7 +136986,7 @@ static int sqlite3Prepare(
sParse.disableLookaside++;
DisableLookaside;
}
- sParse.disableVtab = (prepFlags & SQLITE_PREPARE_NO_VTAB)!=0;
+ sParse.prepFlags = prepFlags & 0xff;
/* Check to verify that it is possible to get a read lock on all
** database schemas. The inability to get a read lock indicates that
@@ -133957,7 +137027,9 @@ static int sqlite3Prepare(
}
}
- sqlite3VtabUnlockList(db);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+ if( db->pDisconnect ) sqlite3VtabUnlockList(db);
+#endif
if( nBytes>=0 && (nBytes==0 || zSql[nBytes-1]!=0) ){
char *zSqlCopy;
@@ -134297,7 +137369,7 @@ SQLITE_API int sqlite3_prepare16_v3(
*/
typedef struct DistinctCtx DistinctCtx;
struct DistinctCtx {
- u8 isTnct; /* True if the DISTINCT keyword is present */
+  u8 isTnct;      /* 0: Not distinct.   1: DISTINCT   2: DISTINCT and ORDER BY */
u8 eTnctType; /* One of the WHERE_DISTINCT_* operators */
int tabTnct; /* Ephemeral table used for DISTINCT processing */
int addrTnct; /* Address of OP_OpenEphemeral opcode for tabTnct */
@@ -134352,6 +137424,7 @@ struct SortCtx {
** If bFree==0, Leave the first Select object unfreed
*/
static void clearSelect(sqlite3 *db, Select *p, int bFree){
+ assert( db!=0 );
while( p ){
Select *pPrior = p->pPrior;
sqlite3ExprListDelete(db, p->pEList);
@@ -134371,7 +137444,7 @@ static void clearSelect(sqlite3 *db, Select *p, int bFree){
sqlite3WindowUnlinkFromSelect(p->pWin);
}
#endif
- if( bFree ) sqlite3DbFreeNN(db, p);
+ if( bFree ) sqlite3DbNNFreeNN(db, p);
p = pPrior;
bFree = 1;
}
@@ -134480,6 +137553,52 @@ static Select *findRightmost(Select *p){
**
** If an illegal or unsupported join type is seen, then still return
** a join type, but put an error in the pParse structure.
+**
+** These are the valid join types:
+**
+**
+** pA pB pC Return Value
+** ------- ----- ----- ------------
+** CROSS - - JT_CROSS
+** INNER - - JT_INNER
+** LEFT - - JT_LEFT|JT_OUTER
+** LEFT OUTER - JT_LEFT|JT_OUTER
+** RIGHT - - JT_RIGHT|JT_OUTER
+** RIGHT OUTER - JT_RIGHT|JT_OUTER
+** FULL - - JT_LEFT|JT_RIGHT|JT_OUTER
+** FULL OUTER - JT_LEFT|JT_RIGHT|JT_OUTER
+** NATURAL INNER - JT_NATURAL|JT_INNER
+** NATURAL LEFT - JT_NATURAL|JT_LEFT|JT_OUTER
+** NATURAL LEFT OUTER JT_NATURAL|JT_LEFT|JT_OUTER
+** NATURAL RIGHT - JT_NATURAL|JT_RIGHT|JT_OUTER
+** NATURAL RIGHT OUTER JT_NATURAL|JT_RIGHT|JT_OUTER
+** NATURAL FULL - JT_NATURAL|JT_LEFT|JT_RIGHT
+**     NATURAL FULL OUTER           JT_NATURAL|JT_LEFT|JT_RIGHT
+**
+** To preserve historical compatibility, SQLite also accepts a variety
+** of other non-standard and in many cases nonsensical join types.
+** This routine makes as much sense as it can from the nonsense join
+** type and returns a result. Examples of accepted nonsense join types
+** include but are not limited to:
+**
+** INNER CROSS JOIN -> same as JOIN
+** NATURAL CROSS JOIN -> same as NATURAL JOIN
+** OUTER LEFT JOIN -> same as LEFT JOIN
+** LEFT NATURAL JOIN -> same as NATURAL LEFT JOIN
+** LEFT RIGHT JOIN -> same as FULL JOIN
+** RIGHT OUTER FULL JOIN -> same as FULL JOIN
+** CROSS CROSS CROSS JOIN -> same as JOIN
+**
+** The only restrictions on the join type name are:
+**
+** * "INNER" cannot appear together with "OUTER", "LEFT", "RIGHT",
+** or "FULL".
+**
+** * "CROSS" cannot appear together with "OUTER", "LEFT", "RIGHT,
+** or "FULL".
+**
+** * If "OUTER" is present then there must also be one of
+** "LEFT", "RIGHT", or "FULL"
*/
SQLITE_PRIVATE int sqlite3JoinType(Parse *pParse, Token *pA, Token *pB, Token *pC){
int jointype = 0;
@@ -134492,13 +137611,13 @@ SQLITE_PRIVATE int sqlite3JoinType(Parse *pParse, Token *pA, Token *pB, Token *p
u8 nChar; /* Length of the keyword in characters */
u8 code; /* Join type mask */
} aKeyword[] = {
- /* natural */ { 0, 7, JT_NATURAL },
- /* left */ { 6, 4, JT_LEFT|JT_OUTER },
- /* outer */ { 10, 5, JT_OUTER },
- /* right */ { 14, 5, JT_RIGHT|JT_OUTER },
- /* full */ { 19, 4, JT_LEFT|JT_RIGHT|JT_OUTER },
- /* inner */ { 23, 5, JT_INNER },
- /* cross */ { 28, 5, JT_INNER|JT_CROSS },
+ /* (0) natural */ { 0, 7, JT_NATURAL },
+ /* (1) left */ { 6, 4, JT_LEFT|JT_OUTER },
+ /* (2) outer */ { 10, 5, JT_OUTER },
+ /* (3) right */ { 14, 5, JT_RIGHT|JT_OUTER },
+ /* (4) full */ { 19, 4, JT_LEFT|JT_RIGHT|JT_OUTER },
+ /* (5) inner */ { 23, 5, JT_INNER },
+ /* (6) cross */ { 28, 5, JT_INNER|JT_CROSS },
};
int i, j;
apAll[0] = pA;
@@ -134521,18 +137640,15 @@ SQLITE_PRIVATE int sqlite3JoinType(Parse *pParse, Token *pA, Token *pB, Token *p
}
if(
(jointype & (JT_INNER|JT_OUTER))==(JT_INNER|JT_OUTER) ||
- (jointype & JT_ERROR)!=0
+ (jointype & JT_ERROR)!=0 ||
+ (jointype & (JT_OUTER|JT_LEFT|JT_RIGHT))==JT_OUTER
){
- const char *zSp = " ";
- assert( pB!=0 );
- if( pC==0 ){ zSp++; }
- sqlite3ErrorMsg(pParse, "unknown or unsupported join type: "
- "%T %T%s%T", pA, pB, zSp, pC);
- jointype = JT_INNER;
- }else if( (jointype & JT_OUTER)!=0
- && (jointype & (JT_LEFT|JT_RIGHT))!=JT_LEFT ){
- sqlite3ErrorMsg(pParse,
- "RIGHT and FULL OUTER JOINs are not currently supported");
+ const char *zSp1 = " ";
+ const char *zSp2 = " ";
+ if( pB==0 ){ zSp1++; }
+ if( pC==0 ){ zSp2++; }
+ sqlite3ErrorMsg(pParse, "unknown join type: "
+ "%T%s%T%s%T", pA, zSp1, pB, zSp2, pC);
jointype = JT_INNER;
}
return jointype;
@@ -134553,8 +137669,25 @@ SQLITE_PRIVATE int sqlite3ColumnIndex(Table *pTab, const char *zCol){
}
/*
-** Search the first N tables in pSrc, from left to right, looking for a
-** table that has a column named zCol.
+** Mark a subquery result column as having been used.
+*/
+SQLITE_PRIVATE void sqlite3SrcItemColumnUsed(SrcItem *pItem, int iCol){
+ assert( pItem!=0 );
+ assert( (int)pItem->fg.isNestedFrom == IsNestedFrom(pItem->pSelect) );
+ if( pItem->fg.isNestedFrom ){
+ ExprList *pResults;
+ assert( pItem->pSelect!=0 );
+ pResults = pItem->pSelect->pEList;
+ assert( pResults!=0 );
+ assert( iCol>=0 && iCol<pResults->nExpr );
+ pResults->a[iCol].fg.bUsed = 1;
+ }
+}
+
+/*
+** Search the tables iStart..iEnd (inclusive) in pSrc, looking for a
+** table that has a column named zCol. The search is left-to-right.
+** The first match found is returned.
**
** When found, set *piTab and *piCol to the table index and column index
** of the matching column and return TRUE.
@@ -134563,22 +137696,27 @@ SQLITE_PRIVATE int sqlite3ColumnIndex(Table *pTab, const char *zCol){
*/
static int tableAndColumnIndex(
SrcList *pSrc, /* Array of tables to search */
- int N, /* Number of tables in pSrc->a[] to search */
+ int iStart, /* First member of pSrc->a[] to check */
+ int iEnd, /* Last member of pSrc->a[] to check */
const char *zCol, /* Name of the column we are looking for */
int *piTab, /* Write index of pSrc->a[] here */
int *piCol, /* Write index of pSrc->a[*piTab].pTab->aCol[] here */
- int bIgnoreHidden /* True to ignore hidden columns */
+ int bIgnoreHidden /* Ignore hidden columns */
){
int i; /* For looping over tables in pSrc */
int iCol; /* Index of column matching zCol */
+ assert( iEnd<pSrc->nSrc );
+ assert( iStart>=0 );
assert( (piTab==0)==(piCol==0) ); /* Both or neither are NULL */
- for(i=0; i<N; i++){
+ for(i=iStart; i<=iEnd; i++){
iCol = sqlite3ColumnIndex(pSrc->a[i].pTab, zCol);
if( iCol>=0
&& (bIgnoreHidden==0 || IsHiddenColumn(&pSrc->a[i].pTab->aCol[iCol])==0)
){
if( piTab ){
+ sqlite3SrcItemColumnUsed(&pSrc->a[i], iCol);
*piTab = i;
*piCol = iCol;
}
@@ -134589,66 +137727,19 @@ static int tableAndColumnIndex(
}
/*
-** This function is used to add terms implied by JOIN syntax to the
-** WHERE clause expression of a SELECT statement. The new term, which
-** is ANDed with the existing WHERE clause, is of the form:
-**
-** (tab1.col1 = tab2.col2)
-**
-** where tab1 is the iSrc'th table in SrcList pSrc and tab2 is the
-** (iSrc+1)'th. Column col1 is column iColLeft of tab1, and col2 is
-** column iColRight of tab2.
-*/
-static void addWhereTerm(
- Parse *pParse, /* Parsing context */
- SrcList *pSrc, /* List of tables in FROM clause */
- int iLeft, /* Index of first table to join in pSrc */
- int iColLeft, /* Index of column in first table */
- int iRight, /* Index of second table in pSrc */
- int iColRight, /* Index of column in second table */
- int isOuterJoin, /* True if this is an OUTER join */
- Expr **ppWhere /* IN/OUT: The WHERE clause to add to */
-){
- sqlite3 *db = pParse->db;
- Expr *pE1;
- Expr *pE2;
- Expr *pEq;
-
- assert( iLeft<iRight );
- assert( pSrc->nSrc>iRight );
- assert( pSrc->a[iLeft].pTab );
- assert( pSrc->a[iRight].pTab );
-
- pE1 = sqlite3CreateColumnExpr(db, pSrc, iLeft, iColLeft);
- pE2 = sqlite3CreateColumnExpr(db, pSrc, iRight, iColRight);
-
- pEq = sqlite3PExpr(pParse, TK_EQ, pE1, pE2);
- assert( pE2!=0 || pEq==0 ); /* Due to db->mallocFailed test
- ** in sqlite3DbMallocRawNN() called from
- ** sqlite3PExpr(). */
- if( pEq && isOuterJoin ){
- ExprSetProperty(pEq, EP_FromJoin);
- assert( !ExprHasProperty(pEq, EP_TokenOnly|EP_Reduced) );
- ExprSetVVAProperty(pEq, EP_NoReduce);
- pEq->w.iRightJoinTable = pE2->iTable;
- }
- *ppWhere = sqlite3ExprAnd(pParse, *ppWhere, pEq);
-}
-
-/*
-** Set the EP_FromJoin property on all terms of the given expression.
-** And set the Expr.w.iRightJoinTable to iTable for every term in the
+** Set the EP_OuterON property on all terms of the given expression.
+** And set the Expr.w.iJoin to iTable for every term in the
** expression.
**
-** The EP_FromJoin property is used on terms of an expression to tell
-** the LEFT OUTER JOIN processing logic that this term is part of the
+** The EP_OuterON property is used on terms of an expression to tell
+** the OUTER JOIN processing logic that this term is part of the
** join restriction specified in the ON or USING clause and not a part
** of the more general WHERE clause. These terms are moved over to the
** WHERE clause during join processing but we need to remember that they
** originated in the ON or USING clause.
**
-** The Expr.w.iRightJoinTable tells the WHERE clause processing that the
-** expression depends on table w.iRightJoinTable even if that table is not
+** The Expr.w.iJoin tells the WHERE clause processing that the
+** expression depends on table w.iJoin even if that table is not
** explicitly mentioned in the expression. That information is needed
** for cases like this:
**
@@ -134661,39 +137752,48 @@ static void addWhereTerm(
** after the t1 loop and rows with t1.x!=5 will never appear in
** the output, which is incorrect.
*/
-SQLITE_PRIVATE void sqlite3SetJoinExpr(Expr *p, int iTable){
+SQLITE_PRIVATE void sqlite3SetJoinExpr(Expr *p, int iTable, u32 joinFlag){
+ assert( joinFlag==EP_OuterON || joinFlag==EP_InnerON );
while( p ){
- ExprSetProperty(p, EP_FromJoin);
+ ExprSetProperty(p, joinFlag);
assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) );
ExprSetVVAProperty(p, EP_NoReduce);
- p->w.iRightJoinTable = iTable;
+ p->w.iJoin = iTable;
if( p->op==TK_FUNCTION ){
assert( ExprUseXList(p) );
if( p->x.pList ){
int i;
for(i=0; i<p->x.pList->nExpr; i++){
- sqlite3SetJoinExpr(p->x.pList->a[i].pExpr, iTable);
+ sqlite3SetJoinExpr(p->x.pList->a[i].pExpr, iTable, joinFlag);
}
}
}
- sqlite3SetJoinExpr(p->pLeft, iTable);
+ sqlite3SetJoinExpr(p->pLeft, iTable, joinFlag);
p = p->pRight;
}
}
-/* Undo the work of sqlite3SetJoinExpr(). In the expression p, convert every
-** term that is marked with EP_FromJoin and w.iRightJoinTable==iTable into
-** an ordinary term that omits the EP_FromJoin mark.
+/* Undo the work of sqlite3SetJoinExpr(). This is used when a LEFT JOIN
+** is simplified into an ordinary JOIN, and when an ON expression is
+** "pushed down" into the WHERE clause of a subquery.
+**
+** Convert every term that is marked with EP_OuterON and w.iJoin==iTable into
+** an ordinary term that omits the EP_OuterON mark. Or if iTable<0, then
+** just clear every EP_OuterON and EP_InnerON mark from the expression tree.
**
-** This happens when a LEFT JOIN is simplified into an ordinary JOIN.
+** If nullable is true, that means that Expr p might evaluate to NULL even
+** if it is a reference to a NOT NULL column. This can happen, for example,
+** if the table that p references is on the left side of a RIGHT JOIN.
+** If nullable is true, then take care to not remove the EP_CanBeNull bit.
+** See forum thread https://sqlite.org/forum/forumpost/b40696f50145d21c
*/
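/*
** A small example of the LEFT JOIN simplification case, with hypothetical
** tables t1 and t2 (a sketch only):
**
**     SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.b WHERE t2.b>10;
**
** The WHERE term "t2.b>10" can never be true for the all-NULL row that the
** LEFT JOIN would otherwise supply, so the join can be simplified into an
** ordinary JOIN, after which this routine is run over the WHERE clause to
** strip the EP_OuterON marks that were placed on the ON terms.
*/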
-static void unsetJoinExpr(Expr *p, int iTable){
+static void unsetJoinExpr(Expr *p, int iTable, int nullable){
while( p ){
- if( ExprHasProperty(p, EP_FromJoin)
- && (iTable<0 || p->w.iRightJoinTable==iTable) ){
- ExprClearProperty(p, EP_FromJoin);
+ if( iTable<0 || (ExprHasProperty(p, EP_OuterON) && p->w.iJoin==iTable) ){
+ ExprClearProperty(p, EP_OuterON|EP_InnerON);
+ if( iTable>=0 ) ExprSetProperty(p, EP_InnerON);
}
- if( p->op==TK_COLUMN && p->iTable==iTable ){
+ if( p->op==TK_COLUMN && p->iTable==iTable && !nullable ){
ExprClearProperty(p, EP_CanBeNull);
}
if( p->op==TK_FUNCTION ){
@@ -134701,30 +137801,37 @@ static void unsetJoinExpr(Expr *p, int iTable){
if( p->x.pList ){
int i;
for(i=0; i<p->x.pList->nExpr; i++){
- unsetJoinExpr(p->x.pList->a[i].pExpr, iTable);
+ unsetJoinExpr(p->x.pList->a[i].pExpr, iTable, nullable);
}
}
}
- unsetJoinExpr(p->pLeft, iTable);
+ unsetJoinExpr(p->pLeft, iTable, nullable);
p = p->pRight;
}
}
/*
** This routine processes the join information for a SELECT statement.
-** ON and USING clauses are converted into extra terms of the WHERE clause.
-** NATURAL joins also create extra WHERE clause terms.
+**
+** * A NATURAL join is converted into a USING join. After that, we
+** do not need to be concerned with NATURAL joins and we only have
+** to think about USING joins.
+**
+** * ON and USING clauses result in extra terms being added to the
+** WHERE clause to enforce the specified constraints. The extra
+** WHERE clause terms will be tagged with EP_OuterON or
+** EP_InnerON so that we know that they originated in ON/USING.
**
** The terms of a FROM clause are contained in the Select.pSrc structure.
** The left most table is the first entry in Select.pSrc. The right-most
** table is the last entry. The join operator is held in the entry to
-** the left. Thus entry 0 contains the join operator for the join between
+** the right. Thus entry 1 contains the join operator for the join between
** entries 0 and 1. Any ON or USING clauses associated with the join are
-** also attached to the left entry.
+** also attached to the right entry.
**
** This routine returns the number of errors encountered.
*/
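/*
** To make the steps above concrete, consider hypothetical tables t1(a,b)
** and t2(b,c) (a sketch, not taken from any test):
**
**     SELECT * FROM t1 NATURAL LEFT JOIN t2;
**
** The NATURAL keyword is first rewritten as if the query had been entered as
**
**     SELECT * FROM t1 LEFT JOIN t2 USING(b);
**
** and the USING(b) clause then causes the constraint "t1.b==t2.b" to be
** added to the WHERE clause, tagged with EP_OuterON because this is an
** outer join.
*/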
-static int sqliteProcessJoin(Parse *pParse, Select *p){
+static int sqlite3ProcessJoin(Parse *pParse, Select *p){
SrcList *pSrc; /* All tables in the FROM clause */
int i, j; /* Loop counters */
SrcItem *pLeft; /* Left table being joined */
@@ -134735,49 +137842,41 @@ static int sqliteProcessJoin(Parse *pParse, Select *p){
pRight = &pLeft[1];
for(i=0; i<pSrc->nSrc-1; i++, pRight++, pLeft++){
Table *pRightTab = pRight->pTab;
- int isOuter;
+ u32 joinType;
if( NEVER(pLeft->pTab==0 || pRightTab==0) ) continue;
- isOuter = (pRight->fg.jointype & JT_OUTER)!=0;
+ joinType = (pRight->fg.jointype & JT_OUTER)!=0 ? EP_OuterON : EP_InnerON;
- /* When the NATURAL keyword is present, add WHERE clause terms for
- ** every column that the two tables have in common.
+ /* If this is a NATURAL join, synthesize an appropriate USING clause
+ ** to specify which columns should be joined.
*/
if( pRight->fg.jointype & JT_NATURAL ){
- if( pRight->pOn || pRight->pUsing ){
+ IdList *pUsing = 0;
+ if( pRight->fg.isUsing || pRight->u3.pOn ){
sqlite3ErrorMsg(pParse, "a NATURAL join may not have "
"an ON or USING clause", 0);
return 1;
}
for(j=0; j<pRightTab->nCol; j++){
char *zName; /* Name of column in the right table */
- int iLeft; /* Matching left table */
- int iLeftCol; /* Matching column in the left table */
if( IsHiddenColumn(&pRightTab->aCol[j]) ) continue;
zName = pRightTab->aCol[j].zCnName;
- if( tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol, 1) ){
- addWhereTerm(pParse, pSrc, iLeft, iLeftCol, i+1, j,
- isOuter, &p->pWhere);
+ if( tableAndColumnIndex(pSrc, 0, i, zName, 0, 0, 1) ){
+ pUsing = sqlite3IdListAppend(pParse, pUsing, 0);
+ if( pUsing ){
+ assert( pUsing->nId>0 );
+ assert( pUsing->a[pUsing->nId-1].zName==0 );
+ pUsing->a[pUsing->nId-1].zName = sqlite3DbStrDup(pParse->db, zName);
+ }
}
}
- }
-
- /* Disallow both ON and USING clauses in the same join
- */
- if( pRight->pOn && pRight->pUsing ){
- sqlite3ErrorMsg(pParse, "cannot have both ON and USING "
- "clauses in the same join");
- return 1;
- }
-
- /* Add the ON clause to the end of the WHERE clause, connected by
- ** an AND operator.
- */
- if( pRight->pOn ){
- if( isOuter ) sqlite3SetJoinExpr(pRight->pOn, pRight->iCursor);
- p->pWhere = sqlite3ExprAnd(pParse, p->pWhere, pRight->pOn);
- pRight->pOn = 0;
+ if( pUsing ){
+ pRight->fg.isUsing = 1;
+ pRight->fg.isSynthUsing = 1;
+ pRight->u3.pUsing = pUsing;
+ }
+ if( pParse->nErr ) return 1;
}
/* Create extra terms on the WHERE clause for each column named
@@ -134787,27 +137886,88 @@ static int sqliteProcessJoin(Parse *pParse, Select *p){
** Report an error if any column mentioned in the USING clause is
** not contained in both tables to be joined.
*/
- if( pRight->pUsing ){
- IdList *pList = pRight->pUsing;
+ if( pRight->fg.isUsing ){
+ IdList *pList = pRight->u3.pUsing;
+ sqlite3 *db = pParse->db;
+ assert( pList!=0 );
for(j=0; j<pList->nId; j++){
char *zName; /* Name of the term in the USING clause */
int iLeft; /* Table on the left with matching column name */
int iLeftCol; /* Column number of matching column on the left */
int iRightCol; /* Column number of matching column on the right */
+ Expr *pE1; /* Reference to the column on the LEFT of the join */
+ Expr *pE2; /* Reference to the column on the RIGHT of the join */
+ Expr *pEq; /* Equality constraint. pE1 == pE2 */
zName = pList->a[j].zName;
iRightCol = sqlite3ColumnIndex(pRightTab, zName);
if( iRightCol<0
- || !tableAndColumnIndex(pSrc, i+1, zName, &iLeft, &iLeftCol, 0)
+ || tableAndColumnIndex(pSrc, 0, i, zName, &iLeft, &iLeftCol,
+ pRight->fg.isSynthUsing)==0
){
sqlite3ErrorMsg(pParse, "cannot join using column %s - column "
"not present in both tables", zName);
return 1;
}
- addWhereTerm(pParse, pSrc, iLeft, iLeftCol, i+1, iRightCol,
- isOuter, &p->pWhere);
+ pE1 = sqlite3CreateColumnExpr(db, pSrc, iLeft, iLeftCol);
+ sqlite3SrcItemColumnUsed(&pSrc->a[iLeft], iLeftCol);
+ if( (pSrc->a[0].fg.jointype & JT_LTORJ)!=0 ){
+ /* This branch runs if the query contains one or more RIGHT or FULL
+ ** JOINs. If only a single table on the left side of this join
+ ** contains the zName column, then this branch is a no-op.
+ ** But if there are two or more tables on the left side
+ ** of the join, construct a coalesce() function that gathers all
+ ** such tables. Raise an error if more than one of those references
+ ** to zName is not also within a prior USING clause.
+ **
+ ** We really ought to raise an error if there are two or more
+ ** non-USING references to zName on the left of an INNER or LEFT
+ ** JOIN. But older versions of SQLite do not do that, so we avoid
+ ** adding a new error so as to not break legacy applications.
+ */
+ ExprList *pFuncArgs = 0; /* Arguments to the coalesce() */
+ static const Token tkCoalesce = { "coalesce", 8 };
+ while( tableAndColumnIndex(pSrc, iLeft+1, i, zName, &iLeft, &iLeftCol,
+ pRight->fg.isSynthUsing)!=0 ){
+ if( pSrc->a[iLeft].fg.isUsing==0
+ || sqlite3IdListIndex(pSrc->a[iLeft].u3.pUsing, zName)<0
+ ){
+ sqlite3ErrorMsg(pParse, "ambiguous reference to %s in USING()",
+ zName);
+ break;
+ }
+ pFuncArgs = sqlite3ExprListAppend(pParse, pFuncArgs, pE1);
+ pE1 = sqlite3CreateColumnExpr(db, pSrc, iLeft, iLeftCol);
+ sqlite3SrcItemColumnUsed(&pSrc->a[iLeft], iLeftCol);
+ }
+ if( pFuncArgs ){
+ pFuncArgs = sqlite3ExprListAppend(pParse, pFuncArgs, pE1);
+ pE1 = sqlite3ExprFunction(pParse, pFuncArgs, &tkCoalesce, 0);
+ }
+ }
+ pE2 = sqlite3CreateColumnExpr(db, pSrc, i+1, iRightCol);
+ sqlite3SrcItemColumnUsed(pRight, iRightCol);
+ pEq = sqlite3PExpr(pParse, TK_EQ, pE1, pE2);
+ assert( pE2!=0 || pEq==0 );
+ if( pEq ){
+ ExprSetProperty(pEq, joinType);
+ assert( !ExprHasProperty(pEq, EP_TokenOnly|EP_Reduced) );
+ ExprSetVVAProperty(pEq, EP_NoReduce);
+ pEq->w.iJoin = pE2->iTable;
+ }
+ p->pWhere = sqlite3ExprAnd(pParse, p->pWhere, pEq);
}
}
+
+ /* Add the ON clause to the end of the WHERE clause, connected by
+ ** an AND operator.
+ */
+ else if( pRight->u3.pOn ){
+ sqlite3SetJoinExpr(pRight->u3.pOn, pRight->iCursor, joinType);
+ p->pWhere = sqlite3ExprAnd(pParse, p->pWhere, pRight->u3.pOn);
+ pRight->u3.pOn = 0;
+ pRight->fg.isOn = 1;
+ }
}
return 0;
}
@@ -135196,7 +138356,7 @@ static void fixDistinctOpenEph(
** retrieved directly from table t1. If the values are very large, this
** can be more efficient than storing them directly in the sorter records.
**
-** The ExprList_item.bSorterRef flag is set for each expression in pEList
+** The ExprList_item.fg.bSorterRef flag is set for each expression in pEList
** for which the sorter-reference optimization should be enabled.
** Additionally, the pSort->aDefer[] array is populated with entries
** for all cursors required to evaluate all selected expressions. Finally.
@@ -135256,7 +138416,7 @@ static void selectExprDefer(
nDefer++;
}
}
- pItem->bSorterRef = 1;
+ pItem->fg.bSorterRef = 1;
}
}
}
@@ -135387,7 +138547,7 @@ static void selectInnerLoop(
for(i=0; i<pEList->nExpr; i++){
if( pEList->a[i].u.x.iOrderByCol>0
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
- || pEList->a[i].bSorterRef
+ || pEList->a[i].fg.bSorterRef
#endif
){
nResultCol--;
@@ -135475,6 +138635,9 @@ static void selectInnerLoop(
testcase( eDest==SRT_Fifo );
testcase( eDest==SRT_DistFifo );
sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1+nPrefixReg);
+ if( pDest->zAffSdst ){
+ sqlite3VdbeChangeP4(v, -1, pDest->zAffSdst, nResultCol);
+ }
#ifndef SQLITE_OMIT_CTE
if( eDest==SRT_DistFifo ){
/* If the destination is DistFifo, then cursor (iParm+1) is open
@@ -135690,9 +138853,10 @@ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){
*/
SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo *p){
if( p ){
+ assert( p->db!=0 );
assert( p->nRef>0 );
p->nRef--;
- if( p->nRef==0 ) sqlite3DbFreeNN(p->db, p);
+ if( p->nRef==0 ) sqlite3DbNNFreeNN(p->db, p);
}
}
@@ -135749,7 +138913,7 @@ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoFromExprList(
assert( sqlite3KeyInfoIsWriteable(pInfo) );
for(i=iStart, pItem=pList->a+iStart; i<nExpr; i++, pItem++){
pInfo->aColl[i-iStart] = sqlite3ExprNNCollSeq(pParse, pItem->pExpr);
- pInfo->aSortFlags[i-iStart] = pItem->sortFlags;
+ pInfo->aSortFlags[i-iStart] = pItem->fg.sortFlags;
}
}
return pInfo;
@@ -135877,7 +139041,7 @@ static void generateSortTail(
if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce);
addr = 1 + sqlite3VdbeAddOp2(v, OP_SorterSort, iTab, addrBreak);
VdbeCoverage(v);
- codeOffset(v, p->iOffset, addrContinue);
+ assert( p->iLimit==0 && p->iOffset==0 );
sqlite3VdbeAddOp3(v, OP_SorterData, iTab, regSortOut, iSortTab);
bSeq = 0;
}else{
@@ -135885,10 +139049,13 @@ static void generateSortTail(
codeOffset(v, p->iOffset, addrContinue);
iSortTab = iTab;
bSeq = 1;
+ if( p->iOffset>0 ){
+ sqlite3VdbeAddOp2(v, OP_AddImm, p->iLimit, -1);
+ }
}
for(i=0, iCol=nKey+bSeq-1; i<nColumn; i++){
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
- if( aOutEx[i].bSorterRef ) continue;
+ if( aOutEx[i].fg.bSorterRef ) continue;
#endif
if( aOutEx[i].u.x.iOrderByCol==0 ) iCol++;
}
for(i=nColumn-1; i>=0; i--){
#ifdef SQLITE_ENABLE_SORTER_REFERENCES
- if( aOutEx[i].bSorterRef ){
+ if( aOutEx[i].fg.bSorterRef ){
sqlite3ExprCode(pParse, aOutEx[i].pExpr, regRow+i);
}else
#endif
@@ -136009,9 +139176,6 @@ static void generateSortTail(
** Return a pointer to a string containing the 'declaration type' of the
** expression pExpr. The string may be treated as static by the caller.
**
-** Also try to estimate the size of the returned value and return that
-** result in *pEstWidth.
-**
** The declaration type is the exact datatype definition extracted from the
** original CREATE TABLE statement if the expression is a column. The
** declaration type for a ROWID field is INTEGER. Exactly when an expression
@@ -136291,7 +139455,7 @@ SQLITE_PRIVATE void sqlite3GenerateColumnNames(
assert( p->op!=TK_AGG_COLUMN ); /* Agg processing has not run yet */
assert( p->op!=TK_COLUMN
|| (ExprUseYTab(p) && p->y.pTab!=0) ); /* Covering idx not yet coded */
- if( pEList->a[i].zEName && pEList->a[i].eEName==ENAME_NAME ){
+ if( pEList->a[i].zEName && pEList->a[i].fg.eEName==ENAME_NAME ){
/* An AS clause always takes first priority */
char *zName = pEList->a[i].zEName;
sqlite3VdbeSetColName(v, i, COLNAME_NAME, zName, SQLITE_TRANSIENT);
@@ -136376,22 +139540,25 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList(
*paCol = aCol;
for(i=0, pCol=aCol; i<nCol && !db->mallocFailed; i++, pCol++){
+ struct ExprList_item *pX = &pEList->a[i];
+ struct ExprList_item *pCollide;
/* Get an appropriate name for the column
*/
- if( (zName = pEList->a[i].zEName)!=0 && pEList->a[i].eEName==ENAME_NAME ){
+ if( (zName = pX->zEName)!=0 && pX->fg.eEName==ENAME_NAME ){
/* If the column contains an "AS " phrase, use as the name */
}else{
- Expr *pColExpr = sqlite3ExprSkipCollateAndLikely(pEList->a[i].pExpr);
+ Expr *pColExpr = sqlite3ExprSkipCollateAndLikely(pX->pExpr);
while( ALWAYS(pColExpr!=0) && pColExpr->op==TK_DOT ){
pColExpr = pColExpr->pRight;
assert( pColExpr!=0 );
}
if( pColExpr->op==TK_COLUMN
&& ALWAYS( ExprUseYTab(pColExpr) )
- && (pTab = pColExpr->y.pTab)!=0
+ && ALWAYS( pColExpr->y.pTab!=0 )
){
/* For columns use the column name */
int iCol = pColExpr->iColumn;
+ pTab = pColExpr->y.pTab;
if( iCol<0 ) iCol = pTab->iPKey;
zName = iCol>=0 ? pTab->aCol[iCol].zCnName : "rowid";
}else if( pColExpr->op==TK_ID ){
@@ -136399,7 +139566,7 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList(
zName = pColExpr->u.zToken;
}else{
/* Use the original text of the column expression as its name */
- zName = pEList->a[i].zEName;
+ assert( zName==pX->zEName ); /* pointer comparison intended */
}
}
if( zName && !sqlite3IsTrueOrFalse(zName) ){
@@ -136412,7 +139579,10 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList(
** append an integer to the name so that it becomes unique.
*/
cnt = 0;
- while( zName && sqlite3HashFind(&ht, zName)!=0 ){
+ while( zName && (pCollide = sqlite3HashFind(&ht, zName))!=0 ){
+ if( pCollide->fg.bUsingTerm ){
+ pCol->colFlags |= COLFLAG_NOEXPAND;
+ }
nName = sqlite3Strlen30(zName);
if( nName>0 ){
for(j=nName-1; j>0 && sqlite3Isdigit(zName[j]); j--){}
@@ -136423,8 +139593,11 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList(
}
pCol->zCnName = zName;
pCol->hName = sqlite3StrIHash(zName);
+ if( pX->fg.bNoExpand ){
+ pCol->colFlags |= COLFLAG_NOEXPAND;
+ }
sqlite3ColumnPropertiesFromName(0, pCol);
- if( zName && sqlite3HashInsert(&ht, zName, pCol)==pCol ){
+ if( zName && sqlite3HashInsert(&ht, zName, pX)==pX ){
sqlite3OomFault(db);
}
}
@@ -136681,7 +139854,7 @@ static KeyInfo *multiSelectOrderByKeyInfo(Parse *pParse, Select *p, int nExtra){
}
assert( sqlite3KeyInfoIsWriteable(pRet) );
pRet->aColl[i] = pColl;
- pRet->aSortFlags[i] = pOrderBy->a[i].sortFlags;
+ pRet->aSortFlags[i] = pOrderBy->a[i].fg.sortFlags;
}
}
@@ -136899,7 +140072,7 @@ static int multiSelectOrderBy(
** The "LIMIT of exactly 1" case of condition (1) comes about when a VALUES
** clause occurs within scalar expression (ex: "SELECT (VALUES(1),(2),(3))").
** The sqlite3CodeSubselect will have added the LIMIT 1 clause in that case.
-** Since the limit is exactly 1, we only need to evalutes the left-most VALUES.
+** Since the limit is exactly 1, we only need to evaluate the left-most VALUES.
*/
static int multiSelectValues(
Parse *pParse, /* Parsing context */
@@ -137868,10 +141041,11 @@ static int multiSelectOrderBy(
*/
sqlite3VdbeResolveLabel(v, labelEnd);
- /* Reassembly the compound query so that it will be freed correctly
- ** by the calling function */
+ /* Make arrangements to free the 2nd and subsequent arms of the compound
+ ** after the parse has finished */
if( pSplit->pPrior ){
- sqlite3SelectDelete(db, pSplit->pPrior);
+ sqlite3ParserAddCleanup(pParse,
+ (void(*)(sqlite3*,void*))sqlite3SelectDelete, pSplit->pPrior);
}
pSplit->pPrior = pPrior;
pPrior->pNext = pSplit;
@@ -137892,13 +141066,42 @@ static int multiSelectOrderBy(
**
** All references to columns in table iTable are to be replaced by corresponding
** expressions in pEList.
+**
+** ## About "isOuterJoin":
+**
+** The isOuterJoin field indicates that the replacement will occur into a
+** position in the parent that is NULL-able due to an OUTER JOIN. Either the
+** target slot in the parent is the right operand of a LEFT JOIN, or one of
+** the left operands of a RIGHT JOIN. In either case, we need to potentially
+** bypass the substituted expression with OP_IfNullRow.
+**
+** Suppose the original expression is an integer constant. Even though the table
+** has the nullRow flag set, because the expression is an integer constant,
+** it will not be NULLed out. So instead, we insert an OP_IfNullRow opcode
+** that checks to see if the nullRow flag is set on the table. If the nullRow
+** flag is set, then the value in the register is set to NULL and the original
+** expression is bypassed. If the nullRow flag is not set, then the original
+** expression runs to populate the register.
+**
+** Example where this is needed:
+**
+** CREATE TABLE t1(a INTEGER PRIMARY KEY, b INT);
+** CREATE TABLE t2(x INT UNIQUE);
+**
+** SELECT a,b,m,x FROM t1 LEFT JOIN (SELECT 59 AS m,x FROM t2) ON b=x;
+**
+** When the subquery on the right side of the LEFT JOIN is flattened, we
+** have to add OP_IfNullRow in front of the OP_Integer that implements the
+** "m" value of the subquery so that a NULL will be loaded instead of 59
+** when processing a non-matched row of the left.
*/
typedef struct SubstContext {
Parse *pParse; /* The parsing context */
int iTable; /* Replace references to this table */
int iNewTable; /* New table number */
- int isLeftJoin; /* Add TK_IF_NULL_ROW opcodes on each replacement */
+ int isOuterJoin; /* Add TK_IF_NULL_ROW opcodes on each replacement */
ExprList *pEList; /* Replacement expressions */
+ ExprList *pCList; /* Collation sequences for replacement expr */
} SubstContext;
/* Forward Declarations */
@@ -137923,10 +141126,11 @@ static Expr *substExpr(
Expr *pExpr /* Expr in which substitution occurs */
){
if( pExpr==0 ) return 0;
- if( ExprHasProperty(pExpr, EP_FromJoin)
- && pExpr->w.iRightJoinTable==pSubst->iTable
+ if( ExprHasProperty(pExpr, EP_OuterON|EP_InnerON)
+ && pExpr->w.iJoin==pSubst->iTable
){
- pExpr->w.iRightJoinTable = pSubst->iNewTable;
+ testcase( ExprHasProperty(pExpr, EP_InnerON) );
+ pExpr->w.iJoin = pSubst->iNewTable;
}
if( pExpr->op==TK_COLUMN
&& pExpr->iTable==pSubst->iTable
@@ -137939,19 +141143,21 @@ static Expr *substExpr(
#endif
{
Expr *pNew;
- Expr *pCopy = pSubst->pEList->a[pExpr->iColumn].pExpr;
+ int iColumn = pExpr->iColumn;
+ Expr *pCopy = pSubst->pEList->a[iColumn].pExpr;
Expr ifNullRow;
- assert( pSubst->pEList!=0 && pExpr->iColumn<pSubst->pEList->nExpr );
+ assert( pSubst->pEList!=0 && iColumn<pSubst->pEList->nExpr );
assert( pExpr->pRight==0 );
if( sqlite3ExprIsVector(pCopy) ){
sqlite3VectorErrorMsg(pSubst->pParse, pCopy);
}else{
sqlite3 *db = pSubst->pParse->db;
- if( pSubst->isLeftJoin && pCopy->op!=TK_COLUMN ){
+ if( pSubst->isOuterJoin && pCopy->op!=TK_COLUMN ){
memset(&ifNullRow, 0, sizeof(ifNullRow));
ifNullRow.op = TK_IF_NULL_ROW;
ifNullRow.pLeft = pCopy;
ifNullRow.iTable = pSubst->iNewTable;
+ ifNullRow.iColumn = -99;
ifNullRow.flags = EP_IfNullRow;
pCopy = &ifNullRow;
}
@@ -137961,22 +141167,33 @@ static Expr *substExpr(
sqlite3ExprDelete(db, pNew);
return pExpr;
}
- if( pSubst->isLeftJoin ){
+ if( pSubst->isOuterJoin ){
ExprSetProperty(pNew, EP_CanBeNull);
}
- if( ExprHasProperty(pExpr,EP_FromJoin) ){
- sqlite3SetJoinExpr(pNew, pExpr->w.iRightJoinTable);
+ if( ExprHasProperty(pExpr,EP_OuterON|EP_InnerON) ){
+ sqlite3SetJoinExpr(pNew, pExpr->w.iJoin,
+ pExpr->flags & (EP_OuterON|EP_InnerON));
}
sqlite3ExprDelete(db, pExpr);
pExpr = pNew;
+ if( pExpr->op==TK_TRUEFALSE ){
+ pExpr->u.iValue = sqlite3ExprTruthValue(pExpr);
+ pExpr->op = TK_INTEGER;
+ ExprSetProperty(pExpr, EP_IntValue);
+ }
/* Ensure that the expression now has an implicit collation sequence,
** just as it did when it was a column of a view or sub-query. */
- if( pExpr->op!=TK_COLUMN && pExpr->op!=TK_COLLATE ){
- CollSeq *pColl = sqlite3ExprCollSeq(pSubst->pParse, pExpr);
- pExpr = sqlite3ExprAddCollateString(pSubst->pParse, pExpr,
- (pColl ? pColl->zName : "BINARY")
+ {
+ CollSeq *pNat = sqlite3ExprCollSeq(pSubst->pParse, pExpr);
+ CollSeq *pColl = sqlite3ExprCollSeq(pSubst->pParse,
+ pSubst->pCList->a[iColumn].pExpr
);
+ if( pNat!=pColl || (pExpr->op!=TK_COLUMN && pExpr->op!=TK_COLLATE) ){
+ pExpr = sqlite3ExprAddCollateString(pSubst->pParse, pExpr,
+ (pColl ? pColl->zName : "BINARY")
+ );
+ }
}
ExprClearProperty(pExpr, EP_Collate);
}
@@ -138129,8 +141346,8 @@ static int renumberCursorsCb(Walker *pWalker, Expr *pExpr){
if( op==TK_COLUMN || op==TK_IF_NULL_ROW ){
renumberCursorDoMapping(pWalker, &pExpr->iTable);
}
- if( ExprHasProperty(pExpr, EP_FromJoin) ){
- renumberCursorDoMapping(pWalker, &pExpr->w.iRightJoinTable);
+ if( ExprHasProperty(pExpr, EP_OuterON) ){
+ renumberCursorDoMapping(pWalker, &pExpr->w.iJoin);
}
return WRC_Continue;
}
@@ -138169,6 +141386,18 @@ static void renumberCursors(
}
#endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */
+/*
+** If pSel is not part of a compound SELECT, return a pointer to its
+** expression list. Otherwise, return a pointer to the expression list
+** of the leftmost SELECT in the compound.
+*/
+static ExprList *findLeftmostExprlist(Select *pSel){
+ while( pSel->pPrior ){
+ pSel = pSel->pPrior;
+ }
+ return pSel->pEList;
+}
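+/* For instance (hypothetical tables; a sketch of why the left-most arm
+** matters):
+**
+**     SELECT a COLLATE nocase FROM t1 UNION ALL SELECT b FROM t2
+**
+** The implicit collation of the compound's single result column comes from
+** the left-most arm ("nocase" here). When such a subquery is flattened, an
+** expression substituted from any arm must be given that collation
+** explicitly, which is what SubstContext.pCList is used for.
+*/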
+
#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
/*
** This routine attempts to flatten subqueries as a performance optimization.
@@ -138213,8 +141442,10 @@ static void renumberCursors(
** (3a) the subquery may not be a join and
** (3b) the FROM clause of the subquery may not contain a virtual
** table and
-** (3c) the outer query may not be an aggregate.
+** (**) Was: "The outer query may not have a GROUP BY." This case
+** is now managed correctly
** (3d) the outer query may not be DISTINCT.
+** See also (26) for restrictions on RIGHT JOIN.
**
** (4) The subquery can not be DISTINCT.
**
@@ -138266,6 +141497,11 @@ static void renumberCursors(
** (17d2) DISTINCT
** (17e) the subquery may not contain window functions, and
** (17f) the subquery must not be the RHS of a LEFT JOIN.
+** (17g) either the subquery is the first element of the outer
+** query or there are no RIGHT or FULL JOINs in any arm
+** of the subquery. (This is a duplicate of condition (27b).)
+** (17h) The corresponding result set expressions in all arms of the
+** compound must have the same affinity.
**
** The parent and sub-query may contain WHERE clauses. Subject to
** rules (11), (13) and (14), they may also contain ORDER BY,
@@ -138313,6 +141549,17 @@ static void renumberCursors(
** function in the select list or ORDER BY clause, flattening
** is not attempted.
**
+** (26) The subquery may not be the right operand of a RIGHT JOIN.
+** See also (3) for restrictions on LEFT JOIN.
+**
+** (27) The subquery may not contain a FULL or RIGHT JOIN unless it
+** is the first element of the parent query. Two subcases:
+** (27a) the subquery is not a compound query.
+** (27b) the subquery is a compound query and the RIGHT JOIN occurs
+** in any arm of the compound query. (See also (17g).)
+**
+** (28) The subquery is not a MATERIALIZED CTE.
+**
**
** In this routine, the "p" parameter is a pointer to the outer query.
** The subquery is p->pSrc->a[iFrom]. isAgg is true if the outer query
@@ -138338,7 +141585,7 @@ static int flattenSubquery(
SrcList *pSubSrc; /* The FROM clause of the subquery */
int iParent; /* VDBE cursor number of the pSub result set temp table */
int iNewParent = -1;/* Replacement table for iParent */
- int isLeftJoin = 0; /* True if pSub is the right side of a LEFT JOIN */
+ int isOuterJoin = 0; /* True if pSub is the right side of a LEFT JOIN */
int i; /* Loop counter */
Expr *pWhere; /* The WHERE clause */
SrcItem *pSubitem; /* The subquery */
@@ -138404,32 +141651,26 @@ static int flattenSubquery(
**
** which is not at all the same thing.
**
- ** If the subquery is the right operand of a LEFT JOIN, then the outer
- ** query cannot be an aggregate. (3c) This is an artifact of the way
- ** aggregates are processed - there is no mechanism to determine if
- ** the LEFT JOIN table should be all-NULL.
- **
** See also tickets #306, #350, and #3300.
*/
- if( (pSubitem->fg.jointype & JT_OUTER)!=0 ){
- isLeftJoin = 1;
- if( pSubSrc->nSrc>1 /* (3a) */
- || isAgg /* (3b) */
- || IsVirtual(pSubSrc->a[0].pTab) /* (3c) */
- || (p->selFlags & SF_Distinct)!=0 /* (3d) */
+ if( (pSubitem->fg.jointype & (JT_OUTER|JT_LTORJ))!=0 ){
+ if( pSubSrc->nSrc>1 /* (3a) */
+ || IsVirtual(pSubSrc->a[0].pTab) /* (3b) */
+ || (p->selFlags & SF_Distinct)!=0 /* (3d) */
+ || (pSubitem->fg.jointype & JT_RIGHT)!=0 /* (26) */
){
return 0;
}
+ isOuterJoin = 1;
}
-#ifdef SQLITE_EXTRA_IFNULLROW
- else if( iFrom>0 && !isAgg ){
- /* Setting isLeftJoin to -1 causes OP_IfNullRow opcodes to be generated for
- ** every reference to any result column from subquery in a join, even
- ** though they are not necessary. This will stress-test the OP_IfNullRow
- ** opcode. */
- isLeftJoin = -1;
+
+ assert( pSubSrc->nSrc>0 ); /* True by restriction (7) */
+ if( iFrom>0 && (pSubSrc->a[0].fg.jointype & JT_LTORJ)!=0 ){
+ return 0; /* Restriction (27a) */
+ }
+ if( pSubitem->fg.isCte && pSubitem->u2.pCteUse->eM10d==M10d_Yes ){
+ return 0; /* (28) */
}
-#endif
/* Restriction (17): If the sub-query is a compound SELECT, then it must
** use only the UNION ALL operator. And none of the simple select queries
@@ -138437,10 +141678,11 @@ static int flattenSubquery(
** queries.
*/
if( pSub->pPrior ){
+ int ii;
if( pSub->pOrderBy ){
return 0; /* Restriction (20) */
}
- if( isAgg || (p->selFlags & SF_Distinct)!=0 || isLeftJoin>0 ){
+ if( isAgg || (p->selFlags & SF_Distinct)!=0 || isOuterJoin>0 ){
return 0; /* (17d1), (17d2), or (17f) */
}
for(pSub1=pSub; pSub1; pSub1=pSub1->pPrior){
@@ -138458,12 +141700,17 @@ static int flattenSubquery(
){
return 0;
}
+ if( iFrom>0 && (pSub1->pSrc->a[0].fg.jointype & JT_LTORJ)!=0 ){
+ /* Without this restriction, the JT_LTORJ flag would end up being
+ ** omitted on left-hand tables of the right join that is being
+ ** flattened. */
+ return 0; /* Restrictions (17g), (27b) */
+ }
testcase( pSub1->pSrc->nSrc>1 );
}
/* Restriction (18). */
if( p->pOrderBy ){
- int ii;
for(ii=0; ii<p->pOrderBy->nExpr; ii++){
if( p->pOrderBy->a[ii].u.x.iOrderByCol==0 ) return 0;
}
@@ -138472,8 +141719,24 @@ static int flattenSubquery(
/* Restriction (23) */
if( (p->selFlags & SF_Recursive) ) return 0;
+ /* Restriction (17h) */
+ for(ii=0; ii<pSub->pEList->nExpr; ii++){
+ char aff;
+ assert( pSub->pEList->a[ii].pExpr!=0 );
+ aff = sqlite3ExprAffinity(pSub->pEList->a[ii].pExpr);
+ for(pSub1=pSub->pPrior; pSub1; pSub1=pSub1->pPrior){
+ assert( pSub1->pEList!=0 );
+ assert( pSub1->pEList->nExpr>ii );
+ assert( pSub1->pEList->a[ii].pExpr!=0 );
+ if( sqlite3ExprAffinity(pSub1->pEList->a[ii].pExpr)!=aff ){
+ return 0;
+ }
+ }
+ }
+
if( pSrc->nSrc>1 ){
if( pParse->nSelect>500 ) return 0;
+ if( OptimizationDisabled(db, SQLITE_FlttnUnionAll) ) return 0;
aCsrMap = sqlite3DbMallocZero(db, ((i64)pParse->nTab+1)*sizeof(int));
if( aCsrMap ) aCsrMap[0] = pParse->nTab;
}
@@ -138498,7 +141761,7 @@ static int flattenSubquery(
pSubitem->zName = 0;
pSubitem->zAlias = 0;
pSubitem->pSelect = 0;
- assert( pSubitem->pOn==0 );
+ assert( pSubitem->fg.isUsing!=0 || pSubitem->u3.pOn==0 );
/* If the sub-query is a compound SELECT statement, then (by restrictions
** 17 and 18 above) it must be a UNION ALL and the parent query must
@@ -138608,6 +141871,7 @@ static int flattenSubquery(
for(pParent=p; pParent; pParent=pParent->pPrior, pSub=pSub->pPrior){
int nSubSrc;
u8 jointype = 0;
+ u8 ltorj = pSrc->a[iFrom].fg.jointype & JT_LTORJ;
assert( pSub!=0 );
pSubSrc = pSub->pSrc; /* FROM clause of subquery */
nSubSrc = pSubSrc->nSrc; /* Number of terms in subquery FROM clause */
@@ -138642,13 +141906,16 @@ static int flattenSubquery(
** outer query.
*/
for(i=0; i<nSubSrc; i++){
- sqlite3IdListDelete(db, pSrc->a[i+iFrom].pUsing);
- assert( pSrc->a[i+iFrom].fg.isTabFunc==0 );
- pSrc->a[i+iFrom] = pSubSrc->a[i];
+ SrcItem *pItem = &pSrc->a[i+iFrom];
+ if( pItem->fg.isUsing ) sqlite3IdListDelete(db, pItem->u3.pUsing);
+ assert( pItem->fg.isTabFunc==0 );
+ *pItem = pSubSrc->a[i];
+ pItem->fg.jointype |= ltorj;
iNewParent = pSubSrc->a[i].iCursor;
memset(&pSubSrc->a[i], 0, sizeof(pSubSrc->a[i]));
}
- pSrc->a[iFrom].fg.jointype = jointype;
+ pSrc->a[iFrom].fg.jointype &= JT_LTORJ;
+ pSrc->a[iFrom].fg.jointype |= jointype | ltorj;
/* Now begin substituting subquery result set expressions for
** references to the iParent in the outer query.
@@ -138683,8 +141950,8 @@ static int flattenSubquery(
}
pWhere = pSub->pWhere;
pSub->pWhere = 0;
- if( isLeftJoin>0 ){
- sqlite3SetJoinExpr(pWhere, iNewParent);
+ if( isOuterJoin>0 ){
+ sqlite3SetJoinExpr(pWhere, iNewParent, EP_OuterON);
}
if( pWhere ){
if( pParent->pWhere ){
@@ -138698,8 +141965,9 @@ static int flattenSubquery(
x.pParse = pParse;
x.iTable = iParent;
x.iNewTable = iNewParent;
- x.isLeftJoin = isLeftJoin;
+ x.isOuterJoin = isOuterJoin;
x.pEList = pSub->pEList;
+ x.pCList = findLeftmostExprlist(pSub);
substSelect(&x, pParent, 0);
}
@@ -138719,7 +141987,7 @@ static int flattenSubquery(
pSub->pLimit = 0;
}
- /* Recompute the SrcList_item.colUsed masks for the flattened
+ /* Recompute the SrcItem.colUsed masks for the flattened
** tables. */
for(i=0; i<nSubSrc; i++){
recomputeColumnsUsed(pParent, &pSrc->a[i+iFrom]);
@@ -138733,8 +142001,8 @@ static int flattenSubquery(
sqlite3WalkSelect(&w,pSub1);
sqlite3SelectDelete(db, pSub1);
-#if SELECTTRACE_ENABLED
- if( sqlite3SelectTrace & 0x100 ){
+#if TREETRACE_ENABLED
+ if( sqlite3TreeTrace & 0x100 ){
SELECTTRACE(0x100,pParse,p,("After flattening:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
@@ -138755,6 +142023,8 @@ struct WhereConst {
int nConst; /* Number for COLUMN=CONSTANT terms */
int nChng; /* Number of times a constant is propagated */
int bHasAffBlob; /* At least one column in apExpr[] as affinity BLOB */
+ u32 mExcludeOn; /* Which ON expressions to exclude from consideration.
+ ** Either EP_OuterON or EP_InnerON|EP_OuterON */
Expr **apExpr; /* [i*2] is COLUMN and [i*2+1] is VALUE */
};
@@ -138817,7 +142087,11 @@ static void constInsert(
static void findConstInWhere(WhereConst *pConst, Expr *pExpr){
Expr *pRight, *pLeft;
if( NEVER(pExpr==0) ) return;
- if( ExprHasProperty(pExpr, EP_FromJoin) ) return;
+ if( ExprHasProperty(pExpr, pConst->mExcludeOn) ){
+ testcase( ExprHasProperty(pExpr, EP_OuterON) );
+ testcase( ExprHasProperty(pExpr, EP_InnerON) );
+ return;
+ }
if( pExpr->op==TK_AND ){
findConstInWhere(pConst, pExpr->pRight);
findConstInWhere(pConst, pExpr->pLeft);
@@ -138853,9 +142127,10 @@ static int propagateConstantExprRewriteOne(
int i;
if( pConst->pOomFault[0] ) return WRC_Prune;
if( pExpr->op!=TK_COLUMN ) return WRC_Continue;
- if( ExprHasProperty(pExpr, EP_FixedCol|EP_FromJoin) ){
+ if( ExprHasProperty(pExpr, EP_FixedCol|pConst->mExcludeOn) ){
testcase( ExprHasProperty(pExpr, EP_FixedCol) );
- testcase( ExprHasProperty(pExpr, EP_FromJoin) );
+ testcase( ExprHasProperty(pExpr, EP_OuterON) );
+ testcase( ExprHasProperty(pExpr, EP_InnerON) );
return WRC_Continue;
}
for(i=0; i<pConst->nConst; i++){
@@ -138979,6 +142254,17 @@ static int propagateConstants(
x.nChng = 0;
x.apExpr = 0;
x.bHasAffBlob = 0;
+ if( ALWAYS(p->pSrc!=0)
+ && p->pSrc->nSrc>0
+ && (p->pSrc->a[0].fg.jointype & JT_LTORJ)!=0
+ ){
+ /* Do not propagate constants on any ON clause if there is a
+ ** RIGHT JOIN anywhere in the query */
+ x.mExcludeOn = EP_InnerON | EP_OuterON;
+ }else{
+ /* Do not propagate constants through the ON clause of a LEFT JOIN */
+ x.mExcludeOn = EP_OuterON;
+ }
findConstInWhere(&x, p->pWhere);
if( x.nConst ){
memset(&w, 0, sizeof(w));
@@ -139091,6 +142377,13 @@ static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){
** be materialized. (This restriction is implemented in the calling
** routine.)
**
+** (8) The subquery may not be a compound that uses UNION, INTERSECT,
+** or EXCEPT. (We could, perhaps, relax this restriction to allow
+** this case if none of the comparison operators between left and
+** right arms of the compound use a collation other than BINARY.
+** But it is a lot of work to check that case for an obscure and
+** minor optimization, so we omit it for now.)
+**
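+** As an illustration of the push-down itself (hypothetical tables t1 and
+** t2; a sketch only):
+**
+**     SELECT * FROM (SELECT a,b FROM t1 UNION ALL SELECT a,b FROM t2)
+**      WHERE a=5;
+**
+** The term "a=5" is duplicated into the WHERE clause of both arms of the
+** UNION ALL, allowing each arm to use an index on its own "a" column.
+** Had the compound used UNION instead of UNION ALL, restriction (8) above
+** would block the push-down.
+**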
** Return 0 if no changes are made and non-zero if one or more WHERE clause
** terms are duplicated into the subquery.
*/
@@ -139104,11 +142397,16 @@ static int pushDownWhereTerms(
int nChng = 0;
if( pWhere==0 ) return 0;
if( pSubq->selFlags & (SF_Recursive|SF_MultiPart) ) return 0;
+ if( pSrc->fg.jointype & (JT_LTORJ|JT_RIGHT) ) return 0;
#ifndef SQLITE_OMIT_WINDOWFUNC
if( pSubq->pPrior ){
Select *pSel;
for(pSel=pSubq; pSel; pSel=pSel->pPrior){
+ u8 op = pSel->op;
+ assert( op==TK_ALL || op==TK_SELECT
+ || op==TK_UNION || op==TK_INTERSECT || op==TK_EXCEPT );
+ if( op!=TK_ALL && op!=TK_SELECT ) return 0; /* restriction (8) */
if( pSel->pWin ) return 0; /* restriction (6b) */
}
}else{
@@ -139139,13 +142437,13 @@ static int pushDownWhereTerms(
#if 0 /* Legacy code. Checks now done by sqlite3ExprIsTableConstraint() */
if( isLeftJoin
- && (ExprHasProperty(pWhere,EP_FromJoin)==0
- || pWhere->w.iRightJoinTable!=iCursor)
+ && (ExprHasProperty(pWhere,EP_OuterON)==0
+ || pWhere->w.iJoin!=iCursor)
){
return 0; /* restriction (4) */
}
- if( ExprHasProperty(pWhere,EP_FromJoin)
- && pWhere->w.iRightJoinTable!=iCursor
+ if( ExprHasProperty(pWhere,EP_OuterON)
+ && pWhere->w.iJoin!=iCursor
){
return 0; /* restriction (5) */
}
@@ -139157,12 +142455,13 @@ static int pushDownWhereTerms(
while( pSubq ){
SubstContext x;
pNew = sqlite3ExprDup(pParse->db, pWhere, 0);
- unsetJoinExpr(pNew, -1);
+ unsetJoinExpr(pNew, -1, 1);
x.pParse = pParse;
x.iTable = pSrc->iCursor;
x.iNewTable = pSrc->iCursor;
- x.isLeftJoin = 0;
+ x.isOuterJoin = 0;
x.pEList = pSubq->pEList;
+ x.pCList = findLeftmostExprlist(pSubq);
pNew = substExpr(&x, pNew);
#ifndef SQLITE_OMIT_WINDOWFUNC
if( pSubq->pWin && 0==pushDownWindowCheck(pParse, pSubq, pNew) ){
@@ -139234,7 +142533,7 @@ static u8 minMaxQuery(sqlite3 *db, Expr *pFunc, ExprList **ppMinMax){
}
*ppMinMax = pOrderBy = sqlite3ExprListDup(db, pEList, 0);
assert( pOrderBy!=0 || db->mallocFailed );
- if( pOrderBy ) pOrderBy->a[0].sortFlags = sortFlags;
+ if( pOrderBy ) pOrderBy->a[0].fg.sortFlags = sortFlags;
return eRet;
}
@@ -139266,6 +142565,7 @@ static Table *isSimpleCount(Select *p, AggInfo *pAggInfo){
|| p->pSrc->nSrc!=1
|| p->pSrc->a[0].pSelect
|| pAggInfo->nFunc!=1
+ || p->pHaving
){
return 0;
}
@@ -139370,7 +142670,7 @@ static int convertCompoundSelectToSubquery(Walker *pWalker, Select *p){
pNew = sqlite3DbMallocZero(db, sizeof(*pNew) );
if( pNew==0 ) return WRC_Abort;
memset(&dummy, 0, sizeof(dummy));
- pNewSrc = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&dummy,pNew,0,0);
+ pNewSrc = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&dummy,pNew,0);
if( pNewSrc==0 ) return WRC_Abort;
*pNew = *p;
p->pSrc = pNewSrc;
@@ -139686,9 +142986,9 @@ SQLITE_PRIVATE void sqlite3SelectPopWith(Walker *pWalker, Select *p){
#endif
/*
-** The SrcList_item structure passed as the second argument represents a
+** The SrcItem structure passed as the second argument represents a
** sub-query in the FROM clause of a SELECT statement. This function
-** allocates and populates the SrcList_item.pTab object. If successful,
+** allocates and populates the SrcItem.pTab object. If successful,
** SQLITE_OK is returned. Otherwise, if an OOM error is encountered,
** SQLITE_NOMEM.
*/
@@ -139703,7 +143003,7 @@ SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, SrcItem *pFrom){
if( pFrom->zAlias ){
pTab->zName = sqlite3DbStrDup(pParse->db, pFrom->zAlias);
}else{
- pTab->zName = sqlite3MPrintf(pParse->db, "subquery_%u", pSel->selId);
+ pTab->zName = sqlite3MPrintf(pParse->db, "%!S", pFrom);
}
while( pSel->pPrior ){ pSel = pSel->pPrior; }
sqlite3ColumnsFromExprList(pParse, pSel->pEList,&pTab->nCol,&pTab->aCol);
@@ -139715,11 +143015,35 @@ SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, SrcItem *pFrom){
#else
pTab->tabFlags |= TF_Ephemeral; /* Legacy compatibility mode */
#endif
+ return pParse->nErr ? SQLITE_ERROR : SQLITE_OK;
+}
- return pParse->nErr ? SQLITE_ERROR : SQLITE_OK;
+/*
+** Check the N SrcItem objects to the right of pBase. (N might be zero!)
+** If any of those SrcItem objects have a USING clause containing zName
+** then return true.
+**
+** If N is zero, or none of the N SrcItem objects to the right of pBase
+** contains a USING clause, or if none of the USING clauses contain zName,
+** then return false.
+*/
+static int inAnyUsingClause(
+ const char *zName, /* Name we are looking for */
+ SrcItem *pBase, /* The base SrcItem. Looking at pBase[1] and following */
+ int N /* How many SrcItems to check */
+){
+ while( N>0 ){
+ N--;
+ pBase++;
+ if( pBase->fg.isUsing==0 ) continue;
+ if( NEVER(pBase->u3.pUsing==0) ) continue;
+ if( sqlite3IdListIndex(pBase->u3.pUsing, zName)>=0 ) return 1;
+ }
+ return 0;
}
+
/*
** This routine is a Walker callback for "expanding" a SELECT statement.
** "Expanding" means to do the following:
@@ -139869,7 +143193,7 @@ static int selectExpander(Walker *pWalker, Select *p){
/* Process NATURAL keywords, and ON and USING clauses of joins.
*/
assert( db->mallocFailed==0 || pParse->nErr!=0 );
- if( pParse->nErr || sqliteProcessJoin(pParse, p) ){
+ if( pParse->nErr || sqlite3ProcessJoin(pParse, p) ){
return WRC_Abort;
}
@@ -139917,7 +143241,7 @@ static int selectExpander(Walker *pWalker, Select *p){
pNew = sqlite3ExprListAppend(pParse, pNew, a[k].pExpr);
if( pNew ){
pNew->a[pNew->nExpr-1].zEName = a[k].zEName;
- pNew->a[pNew->nExpr-1].eEName = a[k].eEName;
+ pNew->a[pNew->nExpr-1].fg.eEName = a[k].fg.eEName;
a[k].zEName = 0;
}
a[k].pExpr = 0;
@@ -139932,32 +143256,60 @@ static int selectExpander(Walker *pWalker, Select *p){
zTName = pE->pLeft->u.zToken;
}
for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){
- Table *pTab = pFrom->pTab;
- Select *pSub = pFrom->pSelect;
- char *zTabName = pFrom->zAlias;
- const char *zSchemaName = 0;
- int iDb;
- if( zTabName==0 ){
+ Table *pTab = pFrom->pTab; /* Table for this data source */
+ ExprList *pNestedFrom; /* Result-set of a nested FROM clause */
+ char *zTabName; /* AS name for this data source */
+ const char *zSchemaName = 0; /* Schema name for this data source */
+ int iDb; /* Schema index for this data src */
+ IdList *pUsing; /* USING clause for pFrom[1] */
+
+ if( (zTabName = pFrom->zAlias)==0 ){
zTabName = pTab->zName;
}
if( db->mallocFailed ) break;
- if( pSub==0 || (pSub->selFlags & SF_NestedFrom)==0 ){
- pSub = 0;
+ assert( (int)pFrom->fg.isNestedFrom == IsNestedFrom(pFrom->pSelect) );
+ if( pFrom->fg.isNestedFrom ){
+ assert( pFrom->pSelect!=0 );
+ pNestedFrom = pFrom->pSelect->pEList;
+ assert( pNestedFrom!=0 );
+ assert( pNestedFrom->nExpr==pTab->nCol );
+ }else{
if( zTName && sqlite3StrICmp(zTName, zTabName)!=0 ){
continue;
}
+ pNestedFrom = 0;
iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
zSchemaName = iDb>=0 ? db->aDb[iDb].zDbSName : "*";
}
+ if( i+1<pTabList->nSrc
+ && pFrom[1].fg.isUsing
+ && (selFlags & SF_NestedFrom)!=0
+ ){
+ int ii;
+ pUsing = pFrom[1].u3.pUsing;
+ for(ii=0; ii<pUsing->nId; ii++){
+ const char *zUName = pUsing->a[ii].zName;
+ pRight = sqlite3Expr(db, TK_ID, zUName);
+ pNew = sqlite3ExprListAppend(pParse, pNew, pRight);
+ if( pNew ){
+ struct ExprList_item *pX = &pNew->a[pNew->nExpr-1];
+ assert( pX->zEName==0 );
+ pX->zEName = sqlite3MPrintf(db,"..%s", zUName);
+ pX->fg.eEName = ENAME_TAB;
+ pX->fg.bUsingTerm = 1;
+ }
+ }
+ }else{
+ pUsing = 0;
+ }
for(j=0; j<pTab->nCol; j++){
char *zName = pTab->aCol[j].zCnName;
- char *zColname; /* The computed column name */
- char *zToFree; /* Malloced string that needs to be freed */
- Token sColname; /* Computed column name as a token */
+ struct ExprList_item *pX; /* Newly added ExprList term */
assert( zName );
- if( zTName && pSub
- && sqlite3MatchEName(&pSub->pEList->a[j], 0, zTName, 0)==0
+ if( zTName
+ && pNestedFrom
+ && sqlite3MatchEName(&pNestedFrom->a[j], 0, zTName, 0)==0
){
continue;
}
@@ -139971,57 +143323,75 @@ static int selectExpander(Walker *pWalker, Select *p){
){
continue;
}
+ if( (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)!=0
+ && zTName==0
+ && (selFlags & (SF_NestedFrom))==0
+ ){
+ continue;
+ }
tableSeen = 1;
- if( i>0 && zTName==0 ){
- if( (pFrom->fg.jointype & JT_NATURAL)!=0
- && tableAndColumnIndex(pTabList, i, zName, 0, 0, 1)
+ if( i>0 && zTName==0 && (selFlags & SF_NestedFrom)==0 ){
+ if( pFrom->fg.isUsing
+ && sqlite3IdListIndex(pFrom->u3.pUsing, zName)>=0
){
- /* In a NATURAL join, omit the join columns from the
- ** table to the right of the join */
- continue;
- }
- if( sqlite3IdListIndex(pFrom->pUsing, zName)>=0 ){
/* In a join with a USING clause, omit columns in the
** using clause from the table on the right. */
continue;
}
}
pRight = sqlite3Expr(db, TK_ID, zName);
- zColname = zName;
- zToFree = 0;
- if( longNames || pTabList->nSrc>1 ){
+ if( (pTabList->nSrc>1
+ && ( (pFrom->fg.jointype & JT_LTORJ)==0
+ || (selFlags & SF_NestedFrom)!=0
+ || !inAnyUsingClause(zName,pFrom,pTabList->nSrc-i-1)
+ )
+ )
+ || IN_RENAME_OBJECT
+ ){
Expr *pLeft;
pLeft = sqlite3Expr(db, TK_ID, zTabName);
pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight);
+ if( IN_RENAME_OBJECT && pE->pLeft ){
+ sqlite3RenameTokenRemap(pParse, pLeft, pE->pLeft);
+ }
if( zSchemaName ){
pLeft = sqlite3Expr(db, TK_ID, zSchemaName);
pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pExpr);
}
- if( longNames ){
- zColname = sqlite3MPrintf(db, "%s.%s", zTabName, zName);
- zToFree = zColname;
- }
}else{
pExpr = pRight;
}
pNew = sqlite3ExprListAppend(pParse, pNew, pExpr);
- sqlite3TokenInit(&sColname, zColname);
- sqlite3ExprListSetName(pParse, pNew, &sColname, 0);
- if( pNew && (p->selFlags & SF_NestedFrom)!=0 && !IN_RENAME_OBJECT ){
- struct ExprList_item *pX = &pNew->a[pNew->nExpr-1];
- sqlite3DbFree(db, pX->zEName);
- if( pSub ){
- pX->zEName = sqlite3DbStrDup(db, pSub->pEList->a[j].zEName);
+ if( pNew==0 ){
+ break; /* OOM */
+ }
+ pX = &pNew->a[pNew->nExpr-1];
+ assert( pX->zEName==0 );
+ if( (selFlags & SF_NestedFrom)!=0 && !IN_RENAME_OBJECT ){
+ if( pNestedFrom ){
+ pX->zEName = sqlite3DbStrDup(db, pNestedFrom->a[j].zEName);
testcase( pX->zEName==0 );
}else{
pX->zEName = sqlite3MPrintf(db, "%s.%s.%s",
- zSchemaName, zTabName, zColname);
+ zSchemaName, zTabName, zName);
testcase( pX->zEName==0 );
}
- pX->eEName = ENAME_TAB;
+ pX->fg.eEName = ENAME_TAB;
+ if( (pFrom->fg.isUsing
+ && sqlite3IdListIndex(pFrom->u3.pUsing, zName)>=0)
+ || (pUsing && sqlite3IdListIndex(pUsing, zName)>=0)
+ || (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)!=0
+ ){
+ pX->fg.bNoExpand = 1;
+ }
+ }else if( longNames ){
+ pX->zEName = sqlite3MPrintf(db, "%s.%s", zTabName, zName);
+ pX->fg.eEName = ENAME_NAME;
+ }else{
+ pX->zEName = sqlite3DbStrDup(db, zName);
+ pX->fg.eEName = ENAME_NAME;
}
- sqlite3DbFree(db, zToFree);
}
}
if( !tableSeen ){
@@ -140045,6 +143415,12 @@ static int selectExpander(Walker *pWalker, Select *p){
p->selFlags |= SF_ComplexResult;
}
}
+#if TREETRACE_ENABLED
+ if( sqlite3TreeTrace & 0x100 ){
+ SELECTTRACE(0x100,pParse,p,("After result-set wildcard expansion:\n"));
+ sqlite3TreeViewSelect(0, p, 0);
+ }
+#endif
return WRC_Continue;
}
@@ -140435,8 +143811,8 @@ static void havingToWhere(Parse *pParse, Select *p){
sWalker.xExprCallback = havingToWhereExprCb;
sWalker.u.pSelect = p;
sqlite3WalkExpr(&sWalker, p->pHaving);
-#if SELECTTRACE_ENABLED
- if( sWalker.eCode && (sqlite3SelectTrace & 0x100)!=0 ){
+#if TREETRACE_ENABLED
+ if( sWalker.eCode && (sqlite3TreeTrace & 0x100)!=0 ){
SELECTTRACE(0x100,pParse,p,("Move HAVING terms into WHERE:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
@@ -140445,7 +143821,7 @@ static void havingToWhere(Parse *pParse, Select *p){
/*
** Check to see if the pThis entry of pTabList is a self-join of a prior view.
-** If it is, then return the SrcList_item for the prior view. If it is not,
+** If it is, then return the SrcItem for the prior view. If it is not,
** then return 0.
*/
static SrcItem *isSelfJoinView(
@@ -140568,8 +143944,8 @@ static int countOfViewOptimization(Parse *pParse, Select *p){
p->pEList->a[0].pExpr = pExpr;
p->selFlags &= ~SF_Aggregate;
-#if SELECTTRACE_ENABLED
- if( sqlite3SelectTrace & 0x400 ){
+#if TREETRACE_ENABLED
+ if( sqlite3TreeTrace & 0x400 ){
SELECTTRACE(0x400,pParse,p,("After count-of-view optimization:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
@@ -140578,6 +143954,29 @@ static int countOfViewOptimization(Parse *pParse, Select *p){
}
#endif /* SQLITE_COUNTOFVIEW_OPTIMIZATION */
+/*
+** If any term of pSrc, or of any SF_NestedFrom sub-query, is a different
+** SrcItem than p0 but uses the same table and alias as p0, then return true.
+** Otherwise return false.
+*/
+static int sameSrcAlias(SrcItem *p0, SrcList *pSrc){
+ int i;
+ for(i=0; i<pSrc->nSrc; i++){
+ SrcItem *p1 = &pSrc->a[i];
+ if( p1==p0 ) continue;
+ if( p0->pTab==p1->pTab && 0==sqlite3_stricmp(p0->zAlias, p1->zAlias) ){
+ return 1;
+ }
+ if( p1->pSelect
+ && (p1->pSelect->selFlags & SF_NestedFrom)!=0
+ && sameSrcAlias(p0, p1->pSelect->pSrc)
+ ){
+ return 1;
+ }
+ }
+ return 0;
+}
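+/* An example of the pattern this check is aimed at (hypothetical table t1;
+** a sketch only):
+**
+**     UPDATE t1 SET a=1 FROM t1;
+**
+** The UPDATE target and the FROM clause both name t1 with no distinguishing
+** alias, which sqlite3Select() rejects below with the error
+** "target object/alias may not appear in FROM clause".
+*/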
+
/*
** Generate code for the SELECT statement given in the p argument.
**
@@ -140622,10 +144021,14 @@ SQLITE_PRIVATE int sqlite3Select(
}
assert( db->mallocFailed==0 );
if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1;
-#if SELECTTRACE_ENABLED
+#if TREETRACE_ENABLED
SELECTTRACE(1,pParse,p, ("begin processing:\n", pParse->addrExplain));
- if( sqlite3SelectTrace & 0x100 ){
- sqlite3TreeViewSelect(0, p, 0);
+ if( sqlite3TreeTrace & 0x10100 ){
+ if( (sqlite3TreeTrace & 0x10001)==0x10000 ){
+ sqlite3TreeViewLine(0, "In sqlite3Select() at %s:%d",
+ __FILE__, __LINE__);
+ }
+ sqlite3ShowSelect(p);
}
#endif
@@ -140639,9 +144042,9 @@ SQLITE_PRIVATE int sqlite3Select(
pDest->eDest==SRT_DistQueue || pDest->eDest==SRT_DistFifo );
/* All of these destinations are also able to ignore the ORDER BY clause */
if( p->pOrderBy ){
-#if SELECTTRACE_ENABLED
+#if TREETRACE_ENABLED
SELECTTRACE(1,pParse,p, ("dropping superfluous ORDER BY:\n"));
- if( sqlite3SelectTrace & 0x100 ){
+ if( sqlite3TreeTrace & 0x100 ){
sqlite3TreeViewExprList(0, p->pOrderBy, 0, "ORDERBY");
}
#endif
@@ -140660,8 +144063,8 @@ SQLITE_PRIVATE int sqlite3Select(
}
assert( db->mallocFailed==0 );
assert( p->pEList!=0 );
-#if SELECTTRACE_ENABLED
- if( sqlite3SelectTrace & 0x104 ){
+#if TREETRACE_ENABLED
+ if( sqlite3TreeTrace & 0x104 ){
SELECTTRACE(0x104,pParse,p, ("after name resolution:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
@@ -140678,15 +144081,12 @@ SQLITE_PRIVATE int sqlite3Select(
** disallow it altogether. */
if( p->selFlags & SF_UFSrcCheck ){
SrcItem *p0 = &p->pSrc->a[0];
- for(i=1; i<p->pSrc->nSrc; i++){
- SrcItem *p1 = &p->pSrc->a[i];
- if( p0->pTab==p1->pTab && 0==sqlite3_stricmp(p0->zAlias, p1->zAlias) ){
- sqlite3ErrorMsg(pParse,
- "target object/alias may not appear in FROM clause: %s",
- p0->zAlias ? p0->zAlias : p0->pTab->zName
- );
- goto select_end;
- }
+ if( sameSrcAlias(p0, p->pSrc) ){
+ sqlite3ErrorMsg(pParse,
+ "target object/alias may not appear in FROM clause: %s",
+ p0->zAlias ? p0->zAlias : p0->pTab->zName
+ );
+ goto select_end;
}
/* Clear the SF_UFSrcCheck flag. The check has already been performed,
@@ -140705,8 +144105,8 @@ SQLITE_PRIVATE int sqlite3Select(
assert( pParse->nErr );
goto select_end;
}
-#if SELECTTRACE_ENABLED
- if( p->pWin && (sqlite3SelectTrace & 0x108)!=0 ){
+#if TREETRACE_ENABLED
+ if( p->pWin && (sqlite3TreeTrace & 0x108)!=0 ){
SELECTTRACE(0x104,pParse,p, ("after window rewrite:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
@@ -140734,14 +144134,16 @@ SQLITE_PRIVATE int sqlite3Select(
/* Convert LEFT JOIN into JOIN if there are terms of the right table
** of the LEFT JOIN used in the WHERE clause.
*/
- if( (pItem->fg.jointype & JT_LEFT)!=0
+ if( (pItem->fg.jointype & (JT_LEFT|JT_RIGHT))==JT_LEFT
&& sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor)
&& OptimizationEnabled(db, SQLITE_SimplifyJoin)
){
SELECTTRACE(0x100,pParse,p,
("LEFT-JOIN simplifies to JOIN on term %d\n",i));
pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER);
- unsetJoinExpr(p->pWhere, pItem->iCursor);
+ assert( pItem->iCursor>=0 );
+ unsetJoinExpr(p->pWhere, pItem->iCursor,
+ pTabList->a[0].fg.jointype & JT_LTORJ);
}
/* No further action if this term of the FROM clause is not a subquery */
@@ -140794,7 +144196,9 @@ SQLITE_PRIVATE int sqlite3Select(
){
SELECTTRACE(0x100,pParse,p,
("omit superfluous ORDER BY on %r FROM-clause subquery\n",i+1));
- sqlite3ExprListDelete(db, pSub->pOrderBy);
+ sqlite3ParserAddCleanup(pParse,
+ (void(*)(sqlite3*,void*))sqlite3ExprListDelete,
+ pSub->pOrderBy);
pSub->pOrderBy = 0;
}
@@ -140820,7 +144224,7 @@ SQLITE_PRIVATE int sqlite3Select(
&& i==0
&& (p->selFlags & SF_ComplexResult)!=0
&& (pTabList->nSrc==1
- || (pTabList->a[1].fg.jointype&(JT_LEFT|JT_CROSS))!=0)
+ || (pTabList->a[1].fg.jointype&(JT_OUTER|JT_CROSS))!=0)
){
continue;
}
@@ -140844,9 +144248,9 @@ SQLITE_PRIVATE int sqlite3Select(
*/
if( p->pPrior ){
rc = multiSelect(pParse, p, pDest);
-#if SELECTTRACE_ENABLED
+#if TREETRACE_ENABLED
SELECTTRACE(0x1,pParse,p,("end compound-select processing\n"));
- if( (sqlite3SelectTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){
+ if( (sqlite3TreeTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){
sqlite3TreeViewSelect(0, p, 0);
}
#endif
@@ -140865,8 +144269,8 @@ SQLITE_PRIVATE int sqlite3Select(
&& OptimizationEnabled(db, SQLITE_PropagateConst)
&& propagateConstants(pParse, p)
){
-#if SELECTTRACE_ENABLED
- if( sqlite3SelectTrace & 0x100 ){
+#if TREETRACE_ENABLED
+ if( sqlite3TreeTrace & 0x100 ){
SELECTTRACE(0x100,pParse,p,("After constant propagation:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
@@ -140944,8 +144348,8 @@ SQLITE_PRIVATE int sqlite3Select(
|| (pItem->u2.pCteUse->eM10d!=M10d_Yes && pItem->u2.pCteUse->nUse<2))
&& pushDownWhereTerms(pParse, pSub, p->pWhere, pItem)
){
-#if SELECTTRACE_ENABLED
- if( sqlite3SelectTrace & 0x100 ){
+#if TREETRACE_ENABLED
+ if( sqlite3TreeTrace & 0x100 ){
SELECTTRACE(0x100,pParse,p,
("After WHERE-clause push-down into subquery %d:\n", pSub->selId));
sqlite3TreeViewSelect(0, p, 0);
@@ -140961,18 +144365,19 @@ SQLITE_PRIVATE int sqlite3Select(
/* Generate code to implement the subquery
**
- ** The subquery is implemented as a co-routine if:
+ ** The subquery is implemented as a co-routine if all of the following are
+ ** true:
+ **
** (1) the subquery is guaranteed to be the outer loop (so that
** it does not need to be computed more than once), and
** (2) the subquery is not a CTE that should be materialized
- **
- ** TODO: Are there other reasons beside (1) and (2) to use a co-routine
- ** implementation?
+ ** (3) the subquery is not part of a left operand for a RIGHT JOIN
*/
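/* For example (hypothetical tables t1 and t2; a sketch of the three
** conditions, not original text):
**
**     SELECT * FROM (SELECT x FROM t1 LIMIT 10) AS s
**       LEFT JOIN t2 ON t2.y=s.x;
**
** The subquery cannot be flattened (it has a LIMIT and the outer query is
** a join), yet it is the left-most FROM term and the next term joins with
** LEFT JOIN, so it remains the outer loop (1); it is not a materialized
** CTE (2); and it is not part of the left operand of a RIGHT JOIN (3). It
** is therefore coded as a co-routine that hands rows to the outer loop one
** at a time instead of being stored in a transient table first.
*/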
if( i==0
&& (pTabList->nSrc==1
- || (pTabList->a[1].fg.jointype&(JT_LEFT|JT_CROSS))!=0) /* (1) */
- && (pItem->fg.isCte==0 || pItem->u2.pCteUse->eM10d!=M10d_Yes) /* (2) */
+ || (pTabList->a[1].fg.jointype&(JT_OUTER|JT_CROSS))!=0) /* (1) */
+ && (pItem->fg.isCte==0 || pItem->u2.pCteUse->eM10d!=M10d_Yes) /* (2) */
+ && (pTabList->a[0].fg.jointype & JT_LTORJ)==0 /* (3) */
){
/* Implement a co-routine that will return a single row of the result
** set on each invocation.
@@ -141018,11 +144423,11 @@ SQLITE_PRIVATE int sqlite3Select(
** the same view can reuse the materialization. */
int topAddr;
int onceAddr = 0;
- int retAddr;
pItem->regReturn = ++pParse->nMem;
- topAddr = sqlite3VdbeAddOp2(v, OP_Integer, 0, pItem->regReturn);
+ topAddr = sqlite3VdbeAddOp0(v, OP_Goto);
pItem->addrFillSub = topAddr+1;
+ pItem->fg.isMaterialized = 1;
if( pItem->fg.isCorrelated==0 ){
/* If the subquery is not correlated and if we are not inside of
** a trigger, then we only need to compute the value of the subquery
@@ -141034,12 +144439,15 @@ SQLITE_PRIVATE int sqlite3Select(
}
sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor);
ExplainQueryPlan((pParse, 1, "MATERIALIZE %!S", pItem));
+ dest.zAffSdst = sqlite3TableAffinityStr(db, pItem->pTab);
sqlite3Select(pParse, pSub, &dest);
+ sqlite3DbFree(db, dest.zAffSdst);
+ dest.zAffSdst = 0;
pItem->pTab->nRowLogEst = pSub->nSelectRow;
if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr);
- retAddr = sqlite3VdbeAddOp1(v, OP_Return, pItem->regReturn);
+ sqlite3VdbeAddOp2(v, OP_Return, pItem->regReturn, topAddr+1);
VdbeComment((v, "end %!S", pItem));
- sqlite3VdbeChangeP1(v, topAddr, retAddr);
+ sqlite3VdbeJumpHere(v, topAddr);
sqlite3ClearTempRegCache(pParse);
if( pItem->fg.isCte && pItem->fg.isCorrelated==0 ){
CteUse *pCteUse = pItem->u2.pCteUse;
@@ -141063,8 +144471,8 @@ SQLITE_PRIVATE int sqlite3Select(
pHaving = p->pHaving;
sDistinct.isTnct = (p->selFlags & SF_Distinct)!=0;
-#if SELECTTRACE_ENABLED
- if( sqlite3SelectTrace & 0x400 ){
+#if TREETRACE_ENABLED
+ if( sqlite3TreeTrace & 0x400 ){
SELECTTRACE(0x400,pParse,p,("After all FROM-clause analysis:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
@@ -141098,9 +144506,10 @@ SQLITE_PRIVATE int sqlite3Select(
** the sDistinct.isTnct is still set. Hence, isTnct represents the
** original setting of the SF_Distinct flag, not the current setting */
assert( sDistinct.isTnct );
+ sDistinct.isTnct = 2;
-#if SELECTTRACE_ENABLED
- if( sqlite3SelectTrace & 0x400 ){
+#if TREETRACE_ENABLED
+ if( sqlite3TreeTrace & 0x400 ){
SELECTTRACE(0x400,pParse,p,("Transform DISTINCT into GROUP BY:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
@@ -141133,6 +144542,18 @@ SQLITE_PRIVATE int sqlite3Select(
*/
if( pDest->eDest==SRT_EphemTab ){
sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pDest->iSDParm, pEList->nExpr);
+ if( p->selFlags & SF_NestedFrom ){
+ /* Delete or NULL-out result columns that will never be used */
+ int ii;
+ for(ii=pEList->nExpr-1; ii>0 && pEList->a[ii].fg.bUsed==0; ii--){
+ sqlite3ExprDelete(db, pEList->a[ii].pExpr);
+ sqlite3DbFree(db, pEList->a[ii].zEName);
+ pEList->nExpr--;
+ }
+ for(ii=0; ii<pEList->nExpr; ii++){
+ if( pEList->a[ii].fg.bUsed==0 ) pEList->a[ii].pExpr->op = TK_NULL;
+ }
+ }
}
/* Set the limiter.
@@ -141141,7 +144562,7 @@ SQLITE_PRIVATE int sqlite3Select(
if( (p->selFlags & SF_FixedLimit)==0 ){
p->nSelectRow = 320; /* 4 billion rows */
}
- computeLimitRegisters(pParse, p, iEnd);
+ if( p->pLimit ) computeLimitRegisters(pParse, p, iEnd);
if( p->iLimit==0 && sSort.addrSortIndex>=0 ){
sqlite3VdbeChangeOpcode(v, sSort.addrSortIndex, OP_SorterOpen);
sSort.sortFlags |= SORTFLAG_UseSorter;
@@ -141282,8 +144703,9 @@ SQLITE_PRIVATE int sqlite3Select(
** ORDER BY to maximize the chances of rows being delivered in an
** order that makes the ORDER BY redundant. */
for(ii=0; ii<pGroupBy->nExpr; ii++){
- u8 sortFlags = sSort.pOrderBy->a[ii].sortFlags & KEYINFO_ORDER_DESC;
- pGroupBy->a[ii].sortFlags = sortFlags;
+ u8 sortFlags;
+ sortFlags = sSort.pOrderBy->a[ii].fg.sortFlags & KEYINFO_ORDER_DESC;
+ pGroupBy->a[ii].fg.sortFlags = sortFlags;
}
if( sqlite3ExprListCompare(pGroupBy, sSort.pOrderBy, -1)==0 ){
orderByGrp = 1;
@@ -141352,8 +144774,8 @@ SQLITE_PRIVATE int sqlite3Select(
}
pAggInfo->mxReg = pParse->nMem;
if( db->mallocFailed ) goto select_end;
-#if SELECTTRACE_ENABLED
- if( sqlite3SelectTrace & 0x400 ){
+#if TREETRACE_ENABLED
+ if( sqlite3TreeTrace & 0x400 ){
int ii;
SELECTTRACE(0x400,pParse,p,("After aggregate analysis %p:\n", pAggInfo));
sqlite3TreeViewSelect(0, p, 0);
@@ -141362,8 +144784,13 @@ SQLITE_PRIVATE int sqlite3Select(
sqlite3TreeViewExprList(0, pMinMaxOrderBy, 0, "ORDERBY");
}
for(ii=0; ii<pAggInfo->nColumn; ii++){
- sqlite3DebugPrintf("agg-column[%d] iMem=%d\n",
- ii, pAggInfo->aCol[ii].iMem);
+ struct AggInfo_col *pCol = &pAggInfo->aCol[ii];
+ sqlite3DebugPrintf(
+ "agg-column[%d] pTab=%s iTable=%d iColumn=%d iMem=%d"
+ " iSorterColumn=%d\n",
+ ii, pCol->pTab ? pCol->pTab->zName : "NULL",
+ pCol->iTable, pCol->iColumn, pCol->iMem,
+ pCol->iSorterColumn);
sqlite3TreeViewExpr(0, pAggInfo->aCol[ii].pCExpr, 0);
}
for(ii=0; ii<pAggInfo->nFunc; ii++){
@@ -141441,7 +144868,8 @@ SQLITE_PRIVATE int sqlite3Select(
sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset);
SELECTTRACE(1,pParse,p,("WhereBegin\n"));
pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, pDistinct,
- 0, (WHERE_GROUPBY|(orderByGrp ? WHERE_SORTBYGROUP : 0)|distFlag), 0
+ p, (sDistinct.isTnct==2 ? WHERE_DISTINCTBY : WHERE_GROUPBY)
+ | (orderByGrp ? WHERE_SORTBYGROUP : 0) | distFlag, 0
);
if( pWInfo==0 ){
sqlite3ExprListDelete(db, pDistinct);
@@ -141483,15 +144911,15 @@ SQLITE_PRIVATE int sqlite3Select(
regBase = sqlite3GetTempRange(pParse, nCol);
sqlite3ExprCodeExprList(pParse, pGroupBy, regBase, 0, 0);
j = nGroupBy;
+ pAggInfo->directMode = 1;
for(i=0; i<pAggInfo->nColumn; i++){
struct AggInfo_col *pCol = &pAggInfo->aCol[i];
if( pCol->iSorterColumn>=j ){
- int r1 = j + regBase;
- sqlite3ExprCodeGetColumnOfTable(v,
- pCol->pTab, pCol->iTable, pCol->iColumn, r1);
+ sqlite3ExprCode(pParse, pCol->pCExpr, j + regBase);
j++;
}
}
+ pAggInfo->directMode = 0;
regRecord = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regRecord);
sqlite3VdbeAddOp2(v, OP_SorterInsert, pAggInfo->sortingIdx, regRecord);
@@ -141623,7 +145051,7 @@ SQLITE_PRIVATE int sqlite3Select(
VdbeComment((v, "indicate accumulator empty"));
sqlite3VdbeAddOp1(v, OP_Return, regReset);
- if( eDist!=WHERE_DISTINCT_NOOP ){
+ if( distFlag!=0 && eDist!=WHERE_DISTINCT_NOOP ){
struct AggInfo_func *pF = &pAggInfo->aFunc[0];
fixDistinctOpenEph(pParse, eDist, pF->iDistinct, pF->iDistAddr);
}
@@ -141739,7 +145167,7 @@ SQLITE_PRIVATE int sqlite3Select(
SELECTTRACE(1,pParse,p,("WhereBegin\n"));
pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMaxOrderBy,
- pDistinct, 0, minMaxFlag|distFlag, 0);
+ pDistinct, p, minMaxFlag|distFlag, 0);
if( pWInfo==0 ){
goto select_end;
}
@@ -141747,8 +145175,10 @@ SQLITE_PRIVATE int sqlite3Select(
eDist = sqlite3WhereIsDistinct(pWInfo);
updateAccumulator(pParse, regAcc, pAggInfo, eDist);
if( eDist!=WHERE_DISTINCT_NOOP ){
- struct AggInfo_func *pF = &pAggInfo->aFunc[0];
- fixDistinctOpenEph(pParse, eDist, pF->iDistinct, pF->iDistAddr);
+ struct AggInfo_func *pF = pAggInfo->aFunc;
+ if( pF ){
+ fixDistinctOpenEph(pParse, eDist, pF->iDistinct, pF->iDistAddr);
+ }
}
if( regAcc ) sqlite3VdbeAddOp2(v, OP_Integer, 1, regAcc);
@@ -141815,9 +145245,9 @@ select_end:
}
#endif
-#if SELECTTRACE_ENABLED
+#if TREETRACE_ENABLED
SELECTTRACE(0x1,pParse,p,("end processing\n"));
- if( (sqlite3SelectTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){
+ if( (sqlite3TreeTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){
sqlite3TreeViewSelect(0, p, 0);
}
#endif
@@ -142082,9 +145512,7 @@ SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *pParse, Table *pTab){
Trigger *pList; /* List of triggers to return */
HashElem *p; /* Loop variable for TEMP triggers */
- if( pParse->disableTriggers ){
- return 0;
- }
+ assert( pParse->disableTriggers==0 );
pTmpSchema = pParse->db->aDb[1].pSchema;
p = sqliteHashFirst(&pTmpSchema->trigHash);
pList = pTab->pTrigger;
@@ -142093,15 +145521,14 @@ SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *pParse, Table *pTab){
if( pTrig->pTabSchema==pTab->pSchema
&& pTrig->table
&& 0==sqlite3StrICmp(pTrig->table, pTab->zName)
- && pTrig->pTabSchema!=pTmpSchema
+ && (pTrig->pTabSchema!=pTmpSchema || pTrig->bReturning)
){
pTrig->pNext = pList;
pList = pTrig;
- }else if( pTrig->op==TK_RETURNING
+ }else if( pTrig->op==TK_RETURNING ){
#ifndef SQLITE_OMIT_VIRTUALTABLE
- && pParse->db->pVtabCtx==0
+ assert( pParse->db->pVtabCtx==0 );
#endif
- ){
assert( pParse->bReturning );
assert( &(pParse->u1.pReturning->retTrig) == pTrig );
pTrig->table = pTab->zName;
@@ -142384,6 +145811,23 @@ SQLITE_PRIVATE void sqlite3FinishTrigger(
Vdbe *v;
char *z;
+ /* If this is a new CREATE TRIGGER statement, and if shadow tables
+ ** are read-only, and the trigger makes a change to a shadow table,
+ ** then raise an error - do not allow the trigger to be created. */
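+ /* For example, assuming a hypothetical FTS5 table "ft", a statement like
+ **
+ **     CREATE TRIGGER tr1 AFTER INSERT ON t1 BEGIN
+ **       DELETE FROM ft_data;
+ **     END;
+ **
+ ** is rejected here when shadow tables are read-only (for example under
+ ** SQLITE_DBCONFIG_DEFENSIVE), because "ft_data" is a shadow table of "ft".
+ */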
+ if( sqlite3ReadOnlyShadowTables(db) ){
+ TriggerStep *pStep;
+ for(pStep=pTrig->step_list; pStep; pStep=pStep->pNext){
+ if( pStep->zTarget!=0
+ && sqlite3ShadowTableName(db, pStep->zTarget)
+ ){
+ sqlite3ErrorMsg(pParse,
+ "trigger \"%s\" may not write to shadow table \"%s\"",
+ pTrig->zName, pStep->zTarget);
+ goto triggerfinish_cleanup;
+ }
+ }
+ }
+
/* Make an entry in the sqlite_schema table */
v = sqlite3GetVdbe(pParse);
if( v==0 ) goto triggerfinish_cleanup;
@@ -142547,7 +145991,7 @@ SQLITE_PRIVATE TriggerStep *sqlite3TriggerInsertStep(
SQLITE_PRIVATE TriggerStep *sqlite3TriggerUpdateStep(
Parse *pParse, /* Parser */
Token *pTableName, /* Name of the table to be updated */
- SrcList *pFrom,
+ SrcList *pFrom, /* FROM clause for an UPDATE-FROM, or NULL */
ExprList *pEList, /* The SET clause: list of column and new values */
Expr *pWhere, /* The WHERE clause */
u8 orconf, /* The conflict algorithm. (OE_Abort, OE_Ignore, etc) */
@@ -142760,13 +146204,22 @@ static int checkColumnOverlap(IdList *pIdList, ExprList *pEList){
return 0;
}
+/*
+** Return true if any TEMP triggers exist
+*/
+static int tempTriggersExist(sqlite3 *db){
+ if( NEVER(db->aDb[1].pSchema==0) ) return 0;
+ if( sqliteHashFirst(&db->aDb[1].pSchema->trigHash)==0 ) return 0;
+ return 1;
+}
+
/*
** Return a list of all triggers on table pTab if there exists at least
** one trigger that must be fired when an operation of type 'op' is
** performed on the table, and, if that operation is an UPDATE, if at
** least one of the columns in pChanges is being modified.
*/
-SQLITE_PRIVATE Trigger *sqlite3TriggersExist(
+static SQLITE_NOINLINE Trigger *triggersReallyExist(
Parse *pParse, /* Parse context */
Table *pTab, /* The table that contains the triggers */
int op, /* one of TK_DELETE, TK_INSERT, TK_UPDATE */
@@ -142829,6 +146282,22 @@ exit_triggers_exist:
}
return (mask ? pList : 0);
}
+SQLITE_PRIVATE Trigger *sqlite3TriggersExist(
+ Parse *pParse, /* Parse context */
+ Table *pTab, /* The table that contains the triggers */
+ int op, /* one of TK_DELETE, TK_INSERT, TK_UPDATE */
+ ExprList *pChanges, /* Columns that change in an UPDATE statement */
+ int *pMask /* OUT: Mask of TRIGGER_BEFORE|TRIGGER_AFTER */
+){
+ assert( pTab!=0 );
+ if( (pTab->pTrigger==0 && !tempTriggersExist(pParse->db))
+ || pParse->disableTriggers
+ ){
+ if( pMask ) *pMask = 0;
+ return 0;
+ }
+ return triggersReallyExist(pParse,pTab,op,pChanges,pMask);
+}
/*
** Convert the pStep->zTarget string into a SrcList and return a pointer
@@ -142858,6 +146327,14 @@ SQLITE_PRIVATE SrcList *sqlite3TriggerStepSrc(
}
if( pStep->pFrom ){
SrcList *pDup = sqlite3SrcListDup(db, pStep->pFrom, 0);
+ if( pDup && pDup->nSrc>1 && !IN_RENAME_OBJECT ){
+ Select *pSubquery;
+ Token as;
+ pSubquery = sqlite3SelectNew(pParse,0,pDup,0,0,0,0,SF_NestedFrom,0);
+ as.n = 0;
+ as.z = 0;
+ pDup = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&as,pSubquery,0);
+ }
pSrc = sqlite3SrcListAppendList(pParse, pSrc, pDup);
}
}else{
@@ -142913,7 +146390,7 @@ static ExprList *sqlite3ExpandReturning(
if( !db->mallocFailed ){
struct ExprList_item *pItem = &pNew->a[pNew->nExpr-1];
pItem->zEName = sqlite3DbStrDup(db, pTab->aCol[jj].zCnName);
- pItem->eEName = ENAME_NAME;
+ pItem->fg.eEName = ENAME_NAME;
}
}
}else{
@@ -142922,7 +146399,7 @@ static ExprList *sqlite3ExpandReturning(
if( !db->mallocFailed && ALWAYS(pList->a[i].zEName!=0) ){
struct ExprList_item *pItem = &pNew->a[pNew->nExpr-1];
pItem->zEName = sqlite3DbStrDup(db, pList->a[i].zEName);
- pItem->eEName = pList->a[i].eEName;
+ pItem->fg.eEName = pList->a[i].fg.eEName;
}
}
}
@@ -143174,7 +146651,7 @@ static TriggerPrg *codeRowTrigger(
sSubParse.zAuthContext = pTrigger->zName;
sSubParse.eTriggerOp = pTrigger->op;
sSubParse.nQueryLoop = pParse->nQueryLoop;
- sSubParse.disableVtab = pParse->disableVtab;
+ sSubParse.prepFlags = pParse->prepFlags;
v = sqlite3GetVdbe(&sSubParse);
if( v ){
@@ -143520,11 +146997,14 @@ static void updateVirtualTable(
** it has been converted into REAL.
*/
SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *v, Table *pTab, int i, int iReg){
+ Column *pCol;
assert( pTab!=0 );
- if( !IsView(pTab) ){
+ assert( pTab->nCol>i );
+ pCol = &pTab->aCol[i];
+ if( pCol->iDflt ){
sqlite3_value *pValue = 0;
u8 enc = ENC(sqlite3VdbeDb(v));
- Column *pCol = &pTab->aCol[i];
+ assert( !IsView(pTab) );
VdbeComment((v, "%s.%s", pTab->zName, pCol->zCnName));
assert( i<pTab->nCol );
sqlite3ValueFromExpr(sqlite3VdbeDb(v),
@@ -143535,7 +147015,7 @@ SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *v, Table *pTab, int i, int iReg){
}
}
#ifndef SQLITE_OMIT_FLOATING_POINT
- if( pTab->aCol[i].affinity==SQLITE_AFF_REAL && !IsVirtual(pTab) ){
+ if( pCol->affinity==SQLITE_AFF_REAL && !IsVirtual(pTab) ){
sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg);
}
#endif
@@ -143837,6 +147317,14 @@ SQLITE_PRIVATE void sqlite3Update(
# define isView 0
#endif
+#if TREETRACE_ENABLED
+ if( sqlite3TreeTrace & 0x10000 ){
+ sqlite3TreeViewLine(0, "In sqlite3Update() at %s:%d", __FILE__, __LINE__);
+ sqlite3TreeViewUpdate(pParse->pWith, pTabList, pChanges, pWhere,
+ onError, pOrderBy, pLimit, pUpsert, pTrigger);
+ }
+#endif
+
/* If there was a FROM clause, set nChangeFrom to the number of expressions
** in the change-list. Otherwise, set it to 0. There cannot be a FROM
** clause if this function is being called to generate code for part of
@@ -144481,7 +147969,7 @@ SQLITE_PRIVATE void sqlite3Update(
}else{
sqlite3VdbeAddOp3(v, OP_NotExists, iDataCur, labelContinue,regOldRowid);
}
- VdbeCoverageNeverTaken(v);
+ VdbeCoverage(v);
}
/* Do FK constraint checks. */
@@ -144967,6 +148455,7 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget(
if( pIdx->aiColumn[ii]==XN_EXPR ){
assert( pIdx->aColExpr!=0 );
assert( pIdx->aColExpr->nExpr>ii );
+ assert( pIdx->bHasExpr );
pExpr = pIdx->aColExpr->a[ii].pExpr;
if( pExpr->op!=TK_COLLATE ){
sCol[0].pLeft = pExpr;
@@ -145280,6 +148769,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum(
int nDb; /* Number of attached databases */
const char *zDbMain; /* Schema name of database to vacuum */
const char *zOut; /* Name of output file */
+ u32 pgflags = PAGER_SYNCHRONOUS_OFF; /* sync flags for output db */
if( !db->autoCommit ){
sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction");
@@ -145351,12 +148841,17 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum(
goto end_of_vacuum;
}
db->mDbFlags |= DBFLAG_VacuumInto;
+
+ /* For a VACUUM INTO, the pager-flags are set to the same values as
+ ** they are for the database being vacuumed, except that PAGER_CACHESPILL
+ ** is always set. */
+ pgflags = db->aDb[iDb].safety_level | (db->flags & PAGER_FLAGS_MASK);
}
nRes = sqlite3BtreeGetRequestedReserve(pMain);
sqlite3BtreeSetCacheSize(pTemp, db->aDb[iDb].pSchema->cache_size);
sqlite3BtreeSetSpillSize(pTemp, sqlite3BtreeSetSpillSize(pMain,0));
- sqlite3BtreeSetPagerFlags(pTemp, PAGER_SYNCHRONOUS_OFF|PAGER_CACHESPILL);
+ sqlite3BtreeSetPagerFlags(pTemp, pgflags|PAGER_CACHESPILL);
/* Begin a transaction and take an exclusive lock on the main database
** file. This is done before the sqlite3BtreeGetPageSize(pMain) call below,
@@ -145487,6 +148982,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum(
assert( rc==SQLITE_OK );
if( pOut==0 ){
+ nRes = sqlite3BtreeGetRequestedReserve(pTemp);
rc = sqlite3BtreeSetPageSize(pMain, sqlite3BtreeGetPageSize(pTemp), nRes,1);
}
@@ -145868,7 +149364,8 @@ SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3 *db){
*/
SQLITE_PRIVATE void sqlite3VtabClear(sqlite3 *db, Table *p){
assert( IsVirtual(p) );
- if( !db || db->pnBytesFreed==0 ) vtabDisconnectAll(0, p);
+ assert( db!=0 );
+ if( db->pnBytesFreed==0 ) vtabDisconnectAll(0, p);
if( p->u.vtab.azArg ){
int i;
for(i=0; i<p->u.vtab.nArg; i++){
@@ -146668,7 +150165,7 @@ SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction(
if( pExpr->op!=TK_COLUMN ) return pDef;
assert( ExprUseYTab(pExpr) );
pTab = pExpr->y.pTab;
- if( pTab==0 ) return pDef;
+ if( NEVER(pTab==0) ) return pDef;
if( !IsVirtual(pTab) ) return pDef;
pVtab = sqlite3GetVTable(db, pTab)->pVtab;
assert( pVtab!=0 );
@@ -146929,6 +150426,28 @@ typedef struct WhereLoopBuilder WhereLoopBuilder;
typedef struct WhereScan WhereScan;
typedef struct WhereOrCost WhereOrCost;
typedef struct WhereOrSet WhereOrSet;
+typedef struct WhereMemBlock WhereMemBlock;
+typedef struct WhereRightJoin WhereRightJoin;
+
+/*
+** This object is a header on a block of allocated memory that will be
+** automatically freed when its WhereInfo object is destroyed.
+*/
+struct WhereMemBlock {
+ WhereMemBlock *pNext; /* Next block in the chain */
+ u64 sz; /* Bytes of space */
+};
+
+/*
+** Extra information attached to a WhereLevel that is a RIGHT JOIN.
+*/
+struct WhereRightJoin {
+ int iMatch; /* Cursor used to determine prior matched rows */
+ int regBloom; /* Bloom filter for iRJMatch */
+ int regReturn; /* Return register for the interior subroutine */
+ int addrSubrtn; /* Starting address for the interior subroutine */
+ int endSubrtn; /* The last opcode in the interior subroutine */
+};
/*
** This object contains information needed to implement a single nested
@@ -146962,6 +150481,7 @@ struct WhereLevel {
int addrLikeRep; /* LIKE range processing address */
#endif
int regFilter; /* Bloom filter */
+ WhereRightJoin *pRJ; /* Extra information for RIGHT JOIN */
u8 iFrom; /* Which entry in the FROM clause */
u8 op, p3, p5; /* Opcode, P3 & P5 of the opcode that ends the loop */
int p1, p2; /* Operands of the opcode used to end the loop */
@@ -147252,7 +150772,7 @@ struct WhereAndInfo {
** between VDBE cursor numbers and bits of the bitmasks in WhereTerm.
**
** The VDBE cursor numbers are small integers contained in
-** SrcList_item.iCursor and Expr.iTable fields. For any given WHERE
+** SrcItem.iCursor and Expr.iTable fields. For any given WHERE
** clause, the cursor numbers might not begin with 0 and they might
** contain gaps in the numbering sequence. But we want to make maximum
** use of the bits in our bitmasks. This structure provides a mapping
@@ -147323,20 +150843,6 @@ struct WhereLoopBuilder {
# define SQLITE_QUERY_PLANNER_LIMIT_INCR 1000
#endif
-/*
-** Each instance of this object records a change to a single node
-** in an expression tree to cause that node to point to a column
-** of an index rather than an expression or a virtual column. All
-** such transformations need to be undone at the end of WHERE clause
-** processing.
-*/
-typedef struct WhereExprMod WhereExprMod;
-struct WhereExprMod {
- WhereExprMod *pNext; /* Next translation on a list of them all */
- Expr *pExpr; /* The Expr node that was transformed */
- Expr orig; /* Original value of the Expr node */
-};
-
/*
** The WHERE clause processing routine has two halves. The
** first part does the start of the WHERE loop and the second
@@ -147352,10 +150858,10 @@ struct WhereInfo {
SrcList *pTabList; /* List of tables in the join */
ExprList *pOrderBy; /* The ORDER BY clause or NULL */
ExprList *pResultSet; /* Result set of the query */
+#if WHERETRACE_ENABLED
Expr *pWhere; /* The complete WHERE clause */
-#ifndef SQLITE_OMIT_VIRTUALTABLE
- Select *pLimit; /* Used to access LIMIT expr/registers for vtabs */
#endif
+ Select *pSelect; /* The entire SELECT statement containing WHERE */
int aiCurOnePass[2]; /* OP_OpenWrite cursors for the ONEPASS opt */
int iContinue; /* Jump here to continue with next record */
int iBreak; /* Jump here to break out of the loop */
@@ -147374,7 +150880,7 @@ struct WhereInfo {
int iTop; /* The very beginning of the WHERE loop */
int iEndWhere; /* End of the WHERE clause itself */
WhereLoop *pLoops; /* List of all WhereLoop objects */
- WhereExprMod *pExprMods; /* Expression modifications */
+ WhereMemBlock *pMemToFree;/* Memory to free when this object destroyed */
Bitmask revMask; /* Mask of ORDER BY terms that need reversing */
WhereClause sWC; /* Decomposition of the WHERE clause */
WhereMaskSet sMaskSet; /* Map cursor numbers to bitmasks */
@@ -147400,6 +150906,8 @@ SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm(
u32 op, /* Mask of WO_xx values describing operator */
Index *pIdx /* Must be compatible with this index, if not NULL */
);
+SQLITE_PRIVATE void *sqlite3WhereMalloc(WhereInfo *pWInfo, u64 nByte);
+SQLITE_PRIVATE void *sqlite3WhereRealloc(WhereInfo *pWInfo, void *pOld, u64 nByte);
/* wherecode.c: */
#ifndef SQLITE_OMIT_EXPLAIN
@@ -147436,6 +150944,11 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
WhereLevel *pLevel, /* The current level pointer */
Bitmask notReady /* Which tables are currently available */
);
+SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop(
+ WhereInfo *pWInfo,
+ int iLevel,
+ WhereLevel *pLevel
+);
/* whereexpr.c: */
SQLITE_PRIVATE void sqlite3WhereClauseInit(WhereClause*,WhereInfo*);
@@ -147478,8 +150991,9 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, SrcItem*, WhereClause*);
#define WO_AND 0x0400 /* Two or more AND-connected terms */
#define WO_EQUIV 0x0800 /* Of the form A==B, both columns */
#define WO_NOOP 0x1000 /* This term does not restrict search space */
+#define WO_ROWVAL 0x2000 /* A row-value term */
-#define WO_ALL 0x1fff /* Mask of all possible WO_* values */
+#define WO_ALL 0x3fff /* Mask of all possible WO_* values */
#define WO_SINGLE 0x01ff /* Mask of all non-compound WO_* values */
/*
@@ -147513,6 +151027,7 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, SrcItem*, WhereClause*);
#define WHERE_BLOOMFILTER 0x00400000 /* Consider using a Bloom-filter */
#define WHERE_SELFCULL 0x00800000 /* nOut reduced by extra WHERE terms */
#define WHERE_OMIT_OFFSET 0x01000000 /* Set offset counter to zero */
+#define WHERE_VIEWSCAN 0x02000000 /* A full-scan of a VIEW or subquery */
#endif /* !defined(SQLITE_WHEREINT_H) */
@@ -147703,6 +151218,9 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan(
pLoop->u.vtab.idxNum, pLoop->u.vtab.idxStr);
}
#endif
+ if( pItem->fg.jointype & JT_LEFT ){
+ sqlite3_str_appendf(&str, " LEFT-JOIN");
+ }
#ifdef SQLITE_EXPLAIN_ESTIMATED_ROWS
if( pLoop->nOut>=10 ){
sqlite3_str_appendf(&str, " (~%llu rows)",
@@ -147846,7 +151364,7 @@ static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){
int nLoop = 0;
assert( pTerm!=0 );
while( (pTerm->wtFlags & TERM_CODED)==0
- && (pLevel->iLeftJoin==0 || ExprHasProperty(pTerm->pExpr, EP_FromJoin))
+ && (pLevel->iLeftJoin==0 || ExprHasProperty(pTerm->pExpr, EP_OuterON))
&& (pLevel->notReady & pTerm->prereqAll)==0
){
if( nLoop && (pTerm->wtFlags & TERM_LIKE)!=0 ){
@@ -148107,16 +151625,22 @@ static int codeEqualityTerm(
if( !ExprUseXSelect(pX) || pX->x.pSelect->pEList->nExpr==1 ){
eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, 0, &iTab);
}else{
- sqlite3 *db = pParse->db;
- pX = removeUnindexableInClauseTerms(pParse, iEq, pLoop, pX);
-
- if( !db->mallocFailed ){
- aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*nEq);
+ Expr *pExpr = pTerm->pExpr;
+ if( pExpr->iTable==0 || !ExprHasProperty(pExpr, EP_Subrtn) ){
+ sqlite3 *db = pParse->db;
+ pX = removeUnindexableInClauseTerms(pParse, iEq, pLoop, pX);
+ if( !db->mallocFailed ){
+ aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*nEq);
+ eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap,&iTab);
+ pExpr->iTable = iTab;
+ }
+ sqlite3ExprDelete(db, pX);
+ }else{
+ int n = sqlite3ExprVectorSize(pX->pLeft);
+ aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*MAX(nEq,n));
eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap, &iTab);
- pTerm->pExpr->iTable = iTab;
}
- sqlite3ExprDelete(db, pX);
- pX = pTerm->pExpr;
+ pX = pExpr;
}
if( eType==IN_INDEX_INDEX_DESC ){
@@ -148139,8 +151663,9 @@ static int codeEqualityTerm(
i = pLevel->u.in.nIn;
pLevel->u.in.nIn += nEq;
pLevel->u.in.aInLoop =
- sqlite3DbReallocOrFree(pParse->db, pLevel->u.in.aInLoop,
- sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn);
+ sqlite3WhereRealloc(pTerm->pWC->pWInfo,
+ pLevel->u.in.aInLoop,
+ sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn);
pIn = pLevel->u.in.aInLoop;
if( pIn ){
int iMap = 0; /* Index in aiMap[] */
@@ -148382,7 +151907,7 @@ static void whereLikeOptimizationStringFixup(
if( pTerm->wtFlags & TERM_LIKEOPT ){
VdbeOp *pOp;
assert( pLevel->iLikeRepCntr>0 );
- pOp = sqlite3VdbeGetOp(v, -1);
+ pOp = sqlite3VdbeGetLastOp(v);
assert( pOp!=0 );
assert( pOp->opcode==OP_String8
|| pTerm->pWC->pWInfo->pParse->db->mallocFailed );
@@ -148561,8 +152086,8 @@ static void codeCursorHint(
*/
if( pTabItem->fg.jointype & JT_LEFT ){
Expr *pExpr = pTerm->pExpr;
- if( !ExprHasProperty(pExpr, EP_FromJoin)
- || pExpr->w.iRightJoinTable!=pTabItem->iCursor
+ if( !ExprHasProperty(pExpr, EP_OuterON)
+ || pExpr->w.iJoin!=pTabItem->iCursor
){
sWalker.eCode = 0;
sWalker.xExprCallback = codeCursorHintIsOrFunction;
@@ -148570,7 +152095,7 @@ static void codeCursorHint(
if( sWalker.eCode ) continue;
}
}else{
- if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) continue;
+ if( ExprHasProperty(pTerm->pExpr, EP_OuterON) ) continue;
}
/* All terms in pWLoop->aLTerm[] except pEndRange are used to initialize
@@ -148618,13 +152143,21 @@ static void codeCursorHint(
**
** OP_DeferredSeek $iCur $iRowid
**
+** Which causes a seek on $iCur to the row with rowid $iRowid.
+**
** However, if the scan currently being coded is a branch of an OR-loop and
-** the statement currently being coded is a SELECT, then P3 of OP_DeferredSeek
-** is set to iIdxCur and P4 is set to point to an array of integers
-** containing one entry for each column of the table cursor iCur is open
-** on. For each table column, if the column is the i'th column of the
-** index, then the corresponding array entry is set to (i+1). If the column
-** does not appear in the index at all, the array entry is set to 0.
+** the statement currently being coded is a SELECT, then additional information
+** is added that might allow OP_Column to omit the seek and instead do its
+** lookup on the index, thus avoiding an expensive seek operation. To
+** enable this optimization, the P3 of OP_DeferredSeek is set to iIdxCur
+** and P4 is set to an array of integers containing one entry for each column
+** in the table. For each table column, if the column is the i'th
+** column of the index, then the corresponding array entry is set to (i+1).
+** If the column does not appear in the index at all, the array entry is set
+** to 0. The OP_Column opcode can check this array to see if the column it
+** wants is in the index and if it is, it will substitute the index cursor
+** and column number and continue with those new values, rather than seeking
+** the table cursor.
*/
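/* For example, with a hypothetical table t1(a,b,c,d) and an index on
** (c,a), the P4 array would be {2,0,1,0}: "a" is the second column of the
** index (entry 1+1), "c" is the first (entry 0+1), and "b" and "d" do not
** appear in the index at all, so their entries are 0.
*/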
static void codeDeferredSeek(
WhereInfo *pWInfo, /* Where clause context */
@@ -148640,7 +152173,7 @@ static void codeDeferredSeek(
pWInfo->bDeferredSeek = 1;
sqlite3VdbeAddOp3(v, OP_DeferredSeek, iIdxCur, 0, iCur);
- if( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)
+ if( (pWInfo->wctrlFlags & (WHERE_OR_SUBCLAUSE|WHERE_RIGHT_JOIN))
&& DbMaskAllZero(sqlite3ParseToplevel(pParse)->writeMask)
){
int i;
@@ -148698,144 +152231,6 @@ static void codeExprOrVector(Parse *pParse, Expr *p, int iReg, int nReg){
}
}
-/* An instance of the IdxExprTrans object carries information about a
-** mapping from an expression on table columns into a column in an index
-** down through the Walker.
-*/
-typedef struct IdxExprTrans {
- Expr *pIdxExpr; /* The index expression */
- int iTabCur; /* The cursor of the corresponding table */
- int iIdxCur; /* The cursor for the index */
- int iIdxCol; /* The column for the index */
- int iTabCol; /* The column for the table */
- WhereInfo *pWInfo; /* Complete WHERE clause information */
- sqlite3 *db; /* Database connection (for malloc()) */
-} IdxExprTrans;
-
-/*
-** Preserve pExpr on the WhereETrans list of the WhereInfo.
-*/
-static void preserveExpr(IdxExprTrans *pTrans, Expr *pExpr){
- WhereExprMod *pNew;
- pNew = sqlite3DbMallocRaw(pTrans->db, sizeof(*pNew));
- if( pNew==0 ) return;
- pNew->pNext = pTrans->pWInfo->pExprMods;
- pTrans->pWInfo->pExprMods = pNew;
- pNew->pExpr = pExpr;
- memcpy(&pNew->orig, pExpr, sizeof(*pExpr));
-}
-
-/* The walker node callback used to transform matching expressions into
-** a reference to an index column for an index on an expression.
-**
-** If pExpr matches, then transform it into a reference to the index column
-** that contains the value of pExpr.
-*/
-static int whereIndexExprTransNode(Walker *p, Expr *pExpr){
- IdxExprTrans *pX = p->u.pIdxTrans;
- if( sqlite3ExprCompare(0, pExpr, pX->pIdxExpr, pX->iTabCur)==0 ){
- pExpr = sqlite3ExprSkipCollate(pExpr);
- preserveExpr(pX, pExpr);
- pExpr->affExpr = sqlite3ExprAffinity(pExpr);
- pExpr->op = TK_COLUMN;
- pExpr->iTable = pX->iIdxCur;
- pExpr->iColumn = pX->iIdxCol;
- testcase( ExprHasProperty(pExpr, EP_Skip) );
- testcase( ExprHasProperty(pExpr, EP_Unlikely) );
- ExprClearProperty(pExpr, EP_Skip|EP_Unlikely|EP_WinFunc|EP_Subrtn);
- pExpr->y.pTab = 0;
- return WRC_Prune;
- }else{
- return WRC_Continue;
- }
-}
-
-#ifndef SQLITE_OMIT_GENERATED_COLUMNS
-/* A walker node callback that translates a column reference to a table
-** into a corresponding column reference of an index.
-*/
-static int whereIndexExprTransColumn(Walker *p, Expr *pExpr){
- if( pExpr->op==TK_COLUMN ){
- IdxExprTrans *pX = p->u.pIdxTrans;
- if( pExpr->iTable==pX->iTabCur && pExpr->iColumn==pX->iTabCol ){
- assert( ExprUseYTab(pExpr) && pExpr->y.pTab!=0 );
- preserveExpr(pX, pExpr);
- pExpr->affExpr = sqlite3TableColumnAffinity(pExpr->y.pTab,pExpr->iColumn);
- pExpr->iTable = pX->iIdxCur;
- pExpr->iColumn = pX->iIdxCol;
- pExpr->y.pTab = 0;
- }
- }
- return WRC_Continue;
-}
-#endif /* SQLITE_OMIT_GENERATED_COLUMNS */
-
-/*
-** For an indexes on expression X, locate every instance of expression X
-** in pExpr and change that subexpression into a reference to the appropriate
-** column of the index.
-**
-** 2019-10-24: Updated to also translate references to a VIRTUAL column in
-** the table into references to the corresponding (stored) column of the
-** index.
-*/
-static void whereIndexExprTrans(
- Index *pIdx, /* The Index */
- int iTabCur, /* Cursor of the table that is being indexed */
- int iIdxCur, /* Cursor of the index itself */
- WhereInfo *pWInfo /* Transform expressions in this WHERE clause */
-){
- int iIdxCol; /* Column number of the index */
- ExprList *aColExpr; /* Expressions that are indexed */
- Table *pTab;
- Walker w;
- IdxExprTrans x;
- aColExpr = pIdx->aColExpr;
- if( aColExpr==0 && !pIdx->bHasVCol ){
- /* The index does not reference any expressions or virtual columns
- ** so no translations are needed. */
- return;
- }
- pTab = pIdx->pTable;
- memset(&w, 0, sizeof(w));
- w.u.pIdxTrans = &x;
- x.iTabCur = iTabCur;
- x.iIdxCur = iIdxCur;
- x.pWInfo = pWInfo;
- x.db = pWInfo->pParse->db;
- for(iIdxCol=0; iIdxCol<pIdx->nColumn; iIdxCol++){
- i16 iRef = pIdx->aiColumn[iIdxCol];
- if( iRef==XN_EXPR ){
- assert( aColExpr!=0 && aColExpr->a[iIdxCol].pExpr!=0 );
- x.pIdxExpr = aColExpr->a[iIdxCol].pExpr;
- if( sqlite3ExprIsConstant(x.pIdxExpr) ) continue;
- w.xExprCallback = whereIndexExprTransNode;
-#ifndef SQLITE_OMIT_GENERATED_COLUMNS
- }else if( iRef>=0
- && (pTab->aCol[iRef].colFlags & COLFLAG_VIRTUAL)!=0
- && ((pTab->aCol[iRef].colFlags & COLFLAG_HASCOLL)==0
- || sqlite3StrICmp(sqlite3ColumnColl(&pTab->aCol[iRef]),
- sqlite3StrBINARY)==0)
- ){
- /* Check to see if there are direct references to generated columns
- ** that are contained in the index. Pulling the generated column
- ** out of the index is an optimization only - the main table is always
- ** available if the index cannot be used. To avoid unnecessary
- ** complication, omit this optimization if the collating sequence for
- ** the column is non-standard */
- x.iTabCol = iRef;
- w.xExprCallback = whereIndexExprTransColumn;
-#endif /* SQLITE_OMIT_GENERATED_COLUMNS */
- }else{
- continue;
- }
- x.iIdxCol = iIdxCol;
- sqlite3WalkExpr(&w, pWInfo->pWhere);
- sqlite3WalkExprList(&w, pWInfo->pOrderBy);
- sqlite3WalkExprList(&w, pWInfo->pResultSet);
- }
-}
-
/*
** The pTruth expression is always true because it is the WHERE clause
** a partial index that is driving a query loop. Look through all of the
@@ -148904,6 +152299,8 @@ static SQLITE_NOINLINE void filterPullDown(
testcase( pTerm->wtFlags & TERM_VIRTUAL );
regRowid = sqlite3GetTempReg(pParse);
regRowid = codeEqualityTerm(pParse, pTerm, pLevel, 0, 0, regRowid);
+ sqlite3VdbeAddOp2(pParse->pVdbe, OP_MustBeInt, regRowid, addrNxt);
+ VdbeCoverage(pParse->pVdbe);
sqlite3VdbeAddOp4Int(pParse->pVdbe, OP_Filter, pLevel->regFilter,
addrNxt, regRowid, 1);
VdbeCoverage(pParse->pVdbe);
@@ -148996,7 +152393,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
** initialize a memory cell that records if this table matches any
** row of the left table of the join.
*/
- assert( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)
+ assert( (pWInfo->wctrlFlags & (WHERE_OR_SUBCLAUSE|WHERE_RIGHT_JOIN))
|| pLevel->iFrom>0 || (pTabItem[0].fg.jointype & JT_LEFT)==0
);
if( pLevel->iFrom>0 && (pTabItem[0].fg.jointype & JT_LEFT)!=0 ){
@@ -149007,7 +152404,10 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
/* Compute a safe address to jump to if we discover that the table for
** this loop is empty and can never contribute content. */
- for(j=iLevel; j>0 && pWInfo->a[j].iLeftJoin==0; j--){}
+ for(j=iLevel; j>0; j--){
+ if( pWInfo->a[j].iLeftJoin ) break;
+ if( pWInfo->a[j].pRJ ) break;
+ }
addrHalt = pWInfo->a[j].addrBrk;
/* Special case of a FROM clause subquery implemented as a co-routine */
@@ -149052,9 +152452,9 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
&& pLoop->u.vtab.bOmitOffset
){
assert( pTerm->eOperator==WO_AUX );
- assert( pWInfo->pLimit!=0 );
- assert( pWInfo->pLimit->iOffset>0 );
- sqlite3VdbeAddOp2(v, OP_Integer, 0, pWInfo->pLimit->iOffset);
+ assert( pWInfo->pSelect!=0 );
+ assert( pWInfo->pSelect->iOffset>0 );
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, pWInfo->pSelect->iOffset);
VdbeComment((v,"Zero OFFSET counter"));
}
}
@@ -149162,6 +152562,8 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
if( iRowidReg!=iReleaseReg ) sqlite3ReleaseTempReg(pParse, iReleaseReg);
addrNxt = pLevel->addrNxt;
if( pLevel->regFilter ){
+ sqlite3VdbeAddOp2(v, OP_MustBeInt, iRowidReg, addrNxt);
+ VdbeCoverage(v);
sqlite3VdbeAddOp4Int(v, OP_Filter, pLevel->regFilter, addrNxt,
iRowidReg, 1);
VdbeCoverage(v);
@@ -149513,6 +152915,11 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
** guess. */
addrSeekScan = sqlite3VdbeAddOp1(v, OP_SeekScan,
(pIdx->aiRowLogEst[0]+9)/10);
+ if( pRangeStart ){
+ sqlite3VdbeChangeP5(v, 1);
+ sqlite3VdbeChangeP2(v, addrSeekScan, sqlite3VdbeCurrentAddr(v)+1);
+ addrSeekScan = 0;
+ }
VdbeCoverage(v);
}
sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
@@ -149588,8 +152995,8 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
}
nConstraint++;
}
- sqlite3DbFree(db, zStartAff);
- sqlite3DbFree(db, zEndAff);
+ if( zStartAff ) sqlite3DbNNFreeNN(db, zStartAff);
+ if( zEndAff ) sqlite3DbNNFreeNN(db, zEndAff);
/* Top of the loop body */
if( pLevel->p2==0 ) pLevel->p2 = sqlite3VdbeCurrentAddr(v);
@@ -149634,7 +153041,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
/* Seek the table cursor, if required */
omitTable = (pLoop->wsFlags & WHERE_IDX_ONLY)!=0
- && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0;
+ && (pWInfo->wctrlFlags & (WHERE_OR_SUBCLAUSE|WHERE_RIGHT_JOIN))==0;
if( omitTable ){
/* pIdx is a covering index. No need to access the main table. */
}else if( HasRowid(pIdx->pTable) ){
@@ -149651,27 +153058,6 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
}
if( pLevel->iLeftJoin==0 ){
- /* If pIdx is an index on one or more expressions, then look through
- ** all the expressions in pWInfo and try to transform matching expressions
- ** into reference to index columns. Also attempt to translate references
- ** to virtual columns in the table into references to (stored) columns
- ** of the index.
- **
- ** Do not do this for the RHS of a LEFT JOIN. This is because the
- ** expression may be evaluated after OP_NullRow has been executed on
- ** the cursor. In this case it is important to do the full evaluation,
- ** as the result of the expression may not be NULL, even if all table
- ** column values are. https://www.sqlite.org/src/info/7fa8049685b50b5a
- **
- ** Also, do not do this when processing one index an a multi-index
- ** OR clause, since the transformation will become invalid once we
- ** move forward to the next index.
- ** https://sqlite.org/src/info/4e8e4857d32d401f
- */
- if( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0 ){
- whereIndexExprTrans(pIdx, iCur, iIdxCur, pWInfo);
- }
-
/* If a partial index is driving the loop, try to eliminate WHERE clause
** terms from the query that must be true due to the WHERE clause of
** the partial index.
@@ -149687,7 +153073,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
/* The following assert() is not a requirement, merely an observation:
** The OR-optimization doesn't work for the right hand table of
** a LEFT JOIN: */
- assert( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0 );
+ assert( (pWInfo->wctrlFlags & (WHERE_OR_SUBCLAUSE|WHERE_RIGHT_JOIN))==0 );
}
/* Record the instruction used to terminate the loop. */
@@ -149784,7 +153170,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
int nNotReady; /* The number of notReady tables */
SrcItem *origSrc; /* Original list of tables */
nNotReady = pWInfo->nLevel - iLevel - 1;
- pOrTab = sqlite3StackAllocRaw(db,
+ pOrTab = sqlite3DbMallocRawNN(db,
sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0]));
if( pOrTab==0 ) return notReady;
pOrTab->nAlloc = (u8)(nNotReady + 1);
@@ -149891,7 +153277,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
Expr *pDelete; /* Local copy of OR clause term */
int jmp1 = 0; /* Address of jump operation */
testcase( (pTabItem[0].fg.jointype & JT_LEFT)!=0
- && !ExprHasProperty(pOrExpr, EP_FromJoin)
+ && !ExprHasProperty(pOrExpr, EP_OuterON)
); /* See TH3 vtab25.400 and ticket 614b25314c766238 */
pDelete = pOrExpr = sqlite3ExprDup(db, pOrExpr, 0);
if( db->mallocFailed ){
@@ -150029,7 +153415,15 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
sqlite3VdbeGoto(v, pLevel->addrBrk);
sqlite3VdbeResolveLabel(v, iLoopBody);
- if( pWInfo->nLevel>1 ){ sqlite3StackFree(db, pOrTab); }
+ /* Set the P2 operand of the OP_Return opcode that will end the current
+ ** loop to point to this spot, which is the top of the next containing
+ ** loop. The byte-code formatter will use that P2 value as a hint to
+ ** indent everything between this point and the final OP_Return.
+ ** See tag-20220407a in vdbe.c and shell.c */
+ assert( pLevel->op==OP_Return );
+ pLevel->p2 = sqlite3VdbeCurrentAddr(v);
+
+ if( pWInfo->nLevel>1 ){ sqlite3DbFreeNN(db, pOrTab); }
if( !untestedTerms ) disableTerm(pLevel, pTerm);
}else
#endif /* SQLITE_OMIT_OR_OPTIMIZATION */
@@ -150091,10 +153485,22 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
}
pE = pTerm->pExpr;
assert( pE!=0 );
- if( (pTabItem->fg.jointype&JT_LEFT) && !ExprHasProperty(pE,EP_FromJoin) ){
- continue;
+ if( pTabItem->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT) ){
+ if( !ExprHasProperty(pE,EP_OuterON|EP_InnerON) ){
+ /* Defer processing WHERE clause constraints until after outer
+ ** join processing. tag-20220513a */
+ continue;
+ }else if( (pTabItem->fg.jointype & JT_LEFT)==JT_LEFT
+ && !ExprHasProperty(pE,EP_OuterON) ){
+ continue;
+ }else{
+ Bitmask m = sqlite3WhereGetMask(&pWInfo->sMaskSet, pE->w.iJoin);
+ if( m & pLevel->notReady ){
+ /* An ON clause that is not ripe */
+ continue;
+ }
+ }
}
-
if( iLoop==1 && !sqlite3ExprCoveredByIndex(pE, pLevel->iTabCur, pIdx) ){
iNext = 2;
continue;
@@ -150153,7 +153559,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) continue;
if( (pTerm->eOperator & WO_EQUIV)==0 ) continue;
if( pTerm->leftCursor!=iCur ) continue;
- if( pTabItem->fg.jointype & JT_LEFT ) continue;
+ if( pTabItem->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT) ) continue;
pE = pTerm->pExpr;
#ifdef WHERETRACE_ENABLED /* 0x800 */
if( sqlite3WhereTrace & 0x800 ){
@@ -150161,7 +153567,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
sqlite3WhereTermPrint(pTerm, pWC->nTerm-j);
}
#endif
- assert( !ExprHasProperty(pE, EP_FromJoin) );
+ assert( !ExprHasProperty(pE, EP_OuterON) );
assert( (pTerm->prereqRight & pLevel->notReady)!=0 );
assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 );
pAlt = sqlite3WhereFindTerm(pWC, iCur, pTerm->u.x.leftColumn, notReady,
@@ -150184,6 +153590,47 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
pAlt->wtFlags |= TERM_CODED;
}
+ /* For a RIGHT OUTER JOIN, record the fact that the current row has
+ ** been matched at least once.
+ */
+ if( pLevel->pRJ ){
+ Table *pTab;
+ int nPk;
+ int r;
+ int jmp1 = 0;
+ WhereRightJoin *pRJ = pLevel->pRJ;
+
+ /* pTab is the right-hand table of the RIGHT JOIN. Generate code that
+ ** will record that the current row of that table has been matched at
+ ** least once. This is accomplished by storing the PK for the row in
+ ** both the iMatch index and the regBloom Bloom filter.
+ */
+ pTab = pWInfo->pTabList->a[pLevel->iFrom].pTab;
+ if( HasRowid(pTab) ){
+ r = sqlite3GetTempRange(pParse, 2);
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, pLevel->iTabCur, -1, r+1);
+ nPk = 1;
+ }else{
+ int iPk;
+ Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+ nPk = pPk->nKeyCol;
+ r = sqlite3GetTempRange(pParse, nPk+1);
+ for(iPk=0; iPk<nPk; iPk++){
+ int iCol = pPk->aiColumn[iPk];
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, iCol,r+1+iPk);
+ }
+ }
+ jmp1 = sqlite3VdbeAddOp4Int(v, OP_Found, pRJ->iMatch, 0, r+1, nPk);
+ VdbeCoverage(v);
+ VdbeComment((v, "match against %s", pTab->zName));
+ sqlite3VdbeAddOp3(v, OP_MakeRecord, r+1, nPk, r);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pRJ->iMatch, r, r+1, nPk);
+ sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pRJ->regBloom, 0, r+1, nPk);
+ sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
+ sqlite3VdbeJumpHere(v, jmp1);
+ sqlite3ReleaseTempRange(pParse, r, nPk+1);
+ }
+
/* For a LEFT OUTER JOIN, generate code that will record the fact that
** at least one row of the right table has matched the left table.
*/
@@ -150191,6 +153638,30 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
pLevel->addrFirst = sqlite3VdbeCurrentAddr(v);
sqlite3VdbeAddOp2(v, OP_Integer, 1, pLevel->iLeftJoin);
VdbeComment((v, "record LEFT JOIN hit"));
+ if( pLevel->pRJ==0 ){
+ goto code_outer_join_constraints; /* WHERE clause constraints */
+ }
+ }
+
+ if( pLevel->pRJ ){
+ /* Create a subroutine used to process all interior loops and code
+ ** of the RIGHT JOIN. During normal operation, the subroutine will
+ ** be in-line with the rest of the code. But at the end, a separate
+ ** loop will run that invokes this subroutine for unmatched rows
+ ** of pTab, with all tables to the left being set to NULL.
+ */
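+ /* In rough outline, for a hypothetical "t1 RIGHT JOIN t2" the generated
+ ** code looks something like:
+ **
+ **     -- main join loop --
+ **     for each row of t1, each matching row of t2:
+ **       record t2's key in pRJ->iMatch and pRJ->regBloom
+ **       Gosub regReturn -> subroutine (interior loops + result row)
+ **     -- sqlite3WhereRightJoinLoop() --
+ **     for each row of t2 whose key is not in pRJ->iMatch:
+ **       NullRow every cursor to the left, then Gosub the same subroutine
+ */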
+ WhereRightJoin *pRJ = pLevel->pRJ;
+ sqlite3VdbeAddOp2(v, OP_BeginSubrtn, 0, pRJ->regReturn);
+ pRJ->addrSubrtn = sqlite3VdbeCurrentAddr(v);
+ assert( pParse->withinRJSubrtn < 255 );
+ pParse->withinRJSubrtn++;
+
+ /* WHERE clause constraints must be deferred until after outer join
+ ** row elimination has completed, since WHERE clause constraints apply
+ ** to the results of the OUTER JOIN. The following loop generates the
+ ** appropriate WHERE clause constraint checks. tag-20220513a.
+ */
+ code_outer_join_constraints:
for(pTerm=pWC->a, j=0; j<pWC->nBase; j++, pTerm++){
testcase( pTerm->wtFlags & TERM_VIRTUAL );
testcase( pTerm->wtFlags & TERM_CODED );
@@ -150199,6 +153670,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
assert( pWInfo->untestedTerms );
continue;
}
+ if( pTabItem->fg.jointype & JT_LTORJ ) continue;
assert( pTerm->pExpr );
sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL);
pTerm->wtFlags |= TERM_CODED;
@@ -150219,6 +153691,96 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
return pLevel->notReady;
}
+/*
+** Generate the code for the loop that finds all non-matched terms
+** for a RIGHT JOIN.
+*/
+SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop(
+ WhereInfo *pWInfo,
+ int iLevel,
+ WhereLevel *pLevel
+){
+ Parse *pParse = pWInfo->pParse;
+ Vdbe *v = pParse->pVdbe;
+ WhereRightJoin *pRJ = pLevel->pRJ;
+ Expr *pSubWhere = 0;
+ WhereClause *pWC = &pWInfo->sWC;
+ WhereInfo *pSubWInfo;
+ WhereLoop *pLoop = pLevel->pWLoop;
+ SrcItem *pTabItem = &pWInfo->pTabList->a[pLevel->iFrom];
+ SrcList sFrom;
+ Bitmask mAll = 0;
+ int k;
+
+ ExplainQueryPlan((pParse, 1, "RIGHT-JOIN %s", pTabItem->pTab->zName));
+ sqlite3VdbeNoJumpsOutsideSubrtn(v, pRJ->addrSubrtn, pRJ->endSubrtn,
+ pRJ->regReturn);
+ for(k=0; k<iLevel; k++){
+ int iIdxCur;
+ mAll |= pWInfo->a[k].pWLoop->maskSelf;
+ sqlite3VdbeAddOp1(v, OP_NullRow, pWInfo->a[k].iTabCur);
+ iIdxCur = pWInfo->a[k].iIdxCur;
+ if( iIdxCur ){
+ sqlite3VdbeAddOp1(v, OP_NullRow, iIdxCur);
+ }
+ }
+ if( (pTabItem->fg.jointype & JT_LTORJ)==0 ){
+ mAll |= pLoop->maskSelf;
+ for(k=0; k<pWC->nTerm; k++){
+ WhereTerm *pTerm = &pWC->a[k];
+ if( (pTerm->wtFlags & (TERM_VIRTUAL|TERM_SLICE))!=0
+ && pTerm->eOperator!=WO_ROWVAL
+ ){
+ break;
+ }
+ if( pTerm->prereqAll & ~mAll ) continue;
+ if( ExprHasProperty(pTerm->pExpr, EP_OuterON|EP_InnerON) ) continue;
+ pSubWhere = sqlite3ExprAnd(pParse, pSubWhere,
+ sqlite3ExprDup(pParse->db, pTerm->pExpr, 0));
+ }
+ }
+ sFrom.nSrc = 1;
+ sFrom.nAlloc = 1;
+ memcpy(&sFrom.a[0], pTabItem, sizeof(SrcItem));
+ sFrom.a[0].fg.jointype = 0;
+ assert( pParse->withinRJSubrtn < 100 );
+ pParse->withinRJSubrtn++;
+ pSubWInfo = sqlite3WhereBegin(pParse, &sFrom, pSubWhere, 0, 0, 0,
+ WHERE_RIGHT_JOIN, 0);
+ if( pSubWInfo ){
+ int iCur = pLevel->iTabCur;
+ int r = ++pParse->nMem;
+ int nPk;
+ int jmp;
+ int addrCont = sqlite3WhereContinueLabel(pSubWInfo);
+ Table *pTab = pTabItem->pTab;
+ if( HasRowid(pTab) ){
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, -1, r);
+ nPk = 1;
+ }else{
+ int iPk;
+ Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+ nPk = pPk->nKeyCol;
+ pParse->nMem += nPk - 1;
+ for(iPk=0; iPk<nPk; iPk++){
+ int iCol = pPk->aiColumn[iPk];
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, iCol,r+iPk);
+ }
+ }
+ jmp = sqlite3VdbeAddOp4Int(v, OP_Filter, pRJ->regBloom, 0, r, nPk);
+ VdbeCoverage(v);
+ sqlite3VdbeAddOp4Int(v, OP_Found, pRJ->iMatch, addrCont, r, nPk);
+ VdbeCoverage(v);
+ sqlite3VdbeJumpHere(v, jmp);
+ sqlite3VdbeAddOp2(v, OP_Gosub, pRJ->regReturn, pRJ->addrSubrtn);
+ sqlite3WhereEnd(pSubWInfo);
+ }
+ sqlite3ExprDelete(pParse->db, pSubWhere);
+ ExplainQueryPlanPop(pParse);
+ assert( pParse->withinRJSubrtn>0 );
+ pParse->withinRJSubrtn--;
+}
+
/************** End of wherecode.c *******************************************/
/************** Begin file whereexpr.c ***************************************/
/*
@@ -150287,7 +153849,7 @@ static int whereClauseInsert(WhereClause *pWC, Expr *p, u16 wtFlags){
if( pWC->nTerm>=pWC->nSlot ){
WhereTerm *pOld = pWC->a;
sqlite3 *db = pWC->pWInfo->pParse->db;
- pWC->a = sqlite3DbMallocRawNN(db, sizeof(pWC->a[0])*pWC->nSlot*2 );
+ pWC->a = sqlite3WhereMalloc(pWC->pWInfo, sizeof(pWC->a[0])*pWC->nSlot*2 );
if( pWC->a==0 ){
if( wtFlags & TERM_DYNAMIC ){
sqlite3ExprDelete(db, p);
@@ -150296,10 +153858,7 @@ static int whereClauseInsert(WhereClause *pWC, Expr *p, u16 wtFlags){
return 0;
}
memcpy(pWC->a, pOld, sizeof(pWC->a[0])*pWC->nTerm);
- if( pOld!=pWC->aStatic ){
- sqlite3DbFree(db, pOld);
- }
- pWC->nSlot = sqlite3DbMallocSize(db, pWC->a)/sizeof(pWC->a[0]);
+ pWC->nSlot = pWC->nSlot*2;
}
pTerm = &pWC->a[idx = pWC->nTerm++];
if( (wtFlags & TERM_VIRTUAL)==0 ) pWC->nBase = pWC->nTerm;
@@ -150492,7 +154051,7 @@ static int isLikeOrGlob(
if( pLeft->op!=TK_COLUMN
|| sqlite3ExprAffinity(pLeft)!=SQLITE_AFF_TEXT
|| (ALWAYS( ExprUseYTab(pLeft) )
- && pLeft->y.pTab
+ && ALWAYS(pLeft->y.pTab)
&& IsVirtual(pLeft->y.pTab)) /* Might be numeric */
){
int isNum;
@@ -150609,8 +154168,7 @@ static int isAuxiliaryVtabOperator(
** MATCH(expression,vtab_column)
*/
pCol = pList->a[1].pExpr;
- assert( pCol->op!=TK_COLUMN || ExprUseYTab(pCol) );
- testcase( pCol->op==TK_COLUMN && pCol->y.pTab==0 );
+ assert( pCol->op!=TK_COLUMN || (ExprUseYTab(pCol) && pCol->y.pTab!=0) );
if( ExprIsVtab(pCol) ){
for(i=0; i<ArraySize(aOp); i++){

pCol = pList->a[0].pExpr;
assert( pCol->op!=TK_COLUMN || ExprUseYTab(pCol) );
- testcase( pCol->op==TK_COLUMN && pCol->y.pTab==0 );
+ assert( pCol->op!=TK_COLUMN || (ExprUseYTab(pCol) && pCol->y.pTab!=0) );
if( ExprIsVtab(pCol) ){
sqlite3_vtab *pVtab;
sqlite3_module *pMod;
@@ -150660,13 +154218,12 @@ static int isAuxiliaryVtabOperator(
int res = 0;
Expr *pLeft = pExpr->pLeft;
Expr *pRight = pExpr->pRight;
- assert( pLeft->op!=TK_COLUMN || ExprUseYTab(pLeft) );
- testcase( pLeft->op==TK_COLUMN && pLeft->y.pTab==0 );
+ assert( pLeft->op!=TK_COLUMN || (ExprUseYTab(pLeft) && pLeft->y.pTab!=0) );
if( ExprIsVtab(pLeft) ){
res++;
}
- assert( pRight==0 || pRight->op!=TK_COLUMN || ExprUseYTab(pRight) );
- testcase( pRight && pRight->op==TK_COLUMN && pRight->y.pTab==0 );
+ assert( pRight==0 || pRight->op!=TK_COLUMN
+ || (ExprUseYTab(pRight) && pRight->y.pTab!=0) );
if( pRight && ExprIsVtab(pRight) ){
res++;
SWAP(Expr*, pLeft, pRight);
@@ -150687,9 +154244,9 @@ static int isAuxiliaryVtabOperator(
** a join, then transfer the appropriate markings over to derived.
*/
static void transferJoinMarkings(Expr *pDerived, Expr *pBase){
- if( pDerived ){
- pDerived->flags |= pBase->flags & EP_FromJoin;
- pDerived->w.iRightJoinTable = pBase->w.iRightJoinTable;
+ if( pDerived && ExprHasProperty(pBase, EP_OuterON|EP_InnerON) ){
+ pDerived->flags |= pBase->flags & (EP_OuterON|EP_InnerON);
+ pDerived->w.iJoin = pBase->w.iJoin;
}
}
@@ -151143,7 +154700,7 @@ static int termIsEquivalence(Parse *pParse, Expr *pExpr){
CollSeq *pColl;
if( !OptimizationEnabled(pParse->db, SQLITE_Transitive) ) return 0;
if( pExpr->op!=TK_EQ && pExpr->op!=TK_IS ) return 0;
- if( ExprHasProperty(pExpr, EP_FromJoin) ) return 0;
+ if( ExprHasProperty(pExpr, EP_OuterON) ) return 0;
aff1 = sqlite3ExprAffinity(pExpr->pLeft);
aff2 = sqlite3ExprAffinity(pExpr->pRight);
if( aff1!=aff2
@@ -151174,7 +154731,9 @@ static Bitmask exprSelectUsage(WhereMaskSet *pMaskSet, Select *pS){
int i;
for(i=0; i<pSrc->nSrc; i++){
mask |= exprSelectUsage(pMaskSet, pSrc->a[i].pSelect);
- mask |= sqlite3WhereExprUsage(pMaskSet, pSrc->a[i].pOn);
+ if( pSrc->a[i].fg.isUsing==0 ){
+ mask |= sqlite3WhereExprUsage(pMaskSet, pSrc->a[i].u3.pOn);
+ }
if( pSrc->a[i].fg.isTabFunc ){
mask |= sqlite3WhereExprListUsage(pMaskSet, pSrc->a[i].u1.pFuncArg);
}
@@ -151213,6 +154772,7 @@ static SQLITE_NOINLINE int exprMightBeIndexed2(
if( pIdx->aColExpr==0 ) continue;
for(i=0; i<pIdx->nKeyCol; i++){
if( pIdx->aiColumn[i]!=XN_EXPR ) continue;
+ assert( pIdx->bHasExpr );
if( sqlite3ExprCompareSkip(pExpr, pIdx->aColExpr->a[i].pExpr, iCur)==0 ){
aiCurCol[0] = iCur;
aiCurCol[1] = XN_EXPR;
@@ -151329,18 +154889,32 @@ static void exprAnalyze(
if( prereqAll!=sqlite3WhereExprUsageNN(pMaskSet, pExpr) ){
printf("\n*** Incorrect prereqAll computed for:\n");
sqlite3TreeViewExpr(0,pExpr,0);
- abort();
+ assert( 0 );
}
#endif
- if( ExprHasProperty(pExpr, EP_FromJoin) ){
- Bitmask x = sqlite3WhereGetMask(pMaskSet, pExpr->w.iRightJoinTable);
- prereqAll |= x;
- extraRight = x-1; /* ON clause terms may not be used with an index
- ** on left table of a LEFT JOIN. Ticket #3015 */
- if( (prereqAll>>1)>=x ){
- sqlite3ErrorMsg(pParse, "ON clause references tables to its right");
- return;
+ if( ExprHasProperty(pExpr, EP_OuterON|EP_InnerON) ){
+ Bitmask x = sqlite3WhereGetMask(pMaskSet, pExpr->w.iJoin);
+ if( ExprHasProperty(pExpr, EP_OuterON) ){
+ prereqAll |= x;
+ extraRight = x-1; /* ON clause terms may not be used with an index
+ ** on left table of a LEFT JOIN. Ticket #3015 */
+ if( (prereqAll>>1)>=x ){
+ sqlite3ErrorMsg(pParse, "ON clause references tables to its right");
+ return;
+ }
+ }else if( (prereqAll>>1)>=x ){
+ /* The ON clause of an INNER JOIN references a table to its right.
+ ** Most other SQL database engines raise an error. But SQLite versions
+ ** 3.0 through 3.38 just put the ON clause constraint into the WHERE
+ ** clause and carried on. Beginning with 3.39, raise an error only
+ ** if there is a RIGHT or FULL JOIN in the query. This makes SQLite
+ ** more like other systems, and also preserves legacy. */
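+ /* For example, assuming hypothetical tables t1, t2, t3:
+ **
+ **     SELECT * FROM t1 JOIN t2 ON t2.x=t3.x JOIN t3;
+ **
+ ** is still accepted (the ON term is simply treated as a WHERE term),
+ ** but the same ON clause is an error if the FROM clause also contains
+ ** a RIGHT or FULL JOIN. */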
+ if( ALWAYS(pSrc->nSrc>0) && (pSrc->a[0].fg.jointype & JT_LTORJ)!=0 ){
+ sqlite3ErrorMsg(pParse, "ON clause references tables to its right");
+ return;
+ }
+ ExprClearProperty(pExpr, EP_InnerON);
}
}
pTerm->prereqAll = prereqAll;
@@ -151408,7 +154982,7 @@ static void exprAnalyze(
pNew->eOperator = (operatorMask(pDup->op) + eExtraOp) & opMask;
}else
if( op==TK_ISNULL
- && !ExprHasProperty(pExpr,EP_FromJoin)
+ && !ExprHasProperty(pExpr,EP_OuterON)
&& 0==sqlite3ExprCanBeNull(pLeft)
){
assert( !ExprHasProperty(pExpr, EP_IntValue) );
@@ -151479,7 +155053,7 @@ static void exprAnalyze(
else if( pExpr->op==TK_NOTNULL ){
if( pExpr->pLeft->op==TK_COLUMN
&& pExpr->pLeft->iColumn>=0
- && !ExprHasProperty(pExpr, EP_FromJoin)
+ && !ExprHasProperty(pExpr, EP_OuterON)
){
Expr *pNewExpr;
Expr *pLeft = pExpr->pLeft;
@@ -151627,7 +155201,7 @@ static void exprAnalyze(
}
pTerm = &pWC->a[idxTerm];
pTerm->wtFlags |= TERM_CODED|TERM_VIRTUAL; /* Disable the original */
- pTerm->eOperator = 0;
+ pTerm->eOperator = WO_ROWVAL;
}
/* If there is a vector IN term - e.g. "(a, b) IN (SELECT ...)" - create
@@ -151683,9 +155257,9 @@ static void exprAnalyze(
Expr *pNewExpr;
pNewExpr = sqlite3PExpr(pParse, TK_MATCH,
0, sqlite3ExprDup(db, pRight, 0));
- if( ExprHasProperty(pExpr, EP_FromJoin) && pNewExpr ){
- ExprSetProperty(pNewExpr, EP_FromJoin);
- pNewExpr->w.iRightJoinTable = pExpr->w.iRightJoinTable;
+ if( ExprHasProperty(pExpr, EP_OuterON) && pNewExpr ){
+ ExprSetProperty(pNewExpr, EP_OuterON);
+ pNewExpr->w.iJoin = pExpr->w.iJoin;
}
idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC);
testcase( idxNew==0 );
@@ -151812,9 +155386,9 @@ static void whereAddLimitExpr(
** exist only so that they may be passed to the xBestIndex method of the
** single virtual table in the FROM clause of the SELECT.
*/
-SQLITE_PRIVATE void sqlite3WhereAddLimit(WhereClause *pWC, Select *p){
- assert( p==0 || (p->pGroupBy==0 && (p->selFlags & SF_Aggregate)==0) );
- if( (p && p->pLimit) /* 1 */
+SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3WhereAddLimit(WhereClause *pWC, Select *p){
+ assert( p!=0 && p->pLimit!=0 ); /* 1 -- checked by caller */
+ if( p->pGroupBy==0
&& (p->selFlags & (SF_Distinct|SF_Aggregate))==0 /* 2 */
&& (p->pSrc->nSrc==1 && IsVirtual(p->pSrc->a[0].pTab)) /* 3 */
){
@@ -151828,7 +155402,7 @@ SQLITE_PRIVATE void sqlite3WhereAddLimit(WhereClause *pWC, Select *p){
/* This term is a vector operation that has been decomposed into
** other, subsequent terms. It can be ignored. See tag-20220128a */
assert( pWC->a[ii].wtFlags & TERM_VIRTUAL );
- assert( pWC->a[ii].eOperator==0 );
+ assert( pWC->a[ii].eOperator==WO_ROWVAL );
continue;
}
if( pWC->a[ii].leftCursor!=iCsr ) return;
@@ -151840,7 +155414,7 @@ SQLITE_PRIVATE void sqlite3WhereAddLimit(WhereClause *pWC, Select *p){
Expr *pExpr = pOrderBy->a[ii].pExpr;
if( pExpr->op!=TK_COLUMN ) return;
if( pExpr->iTable!=iCsr ) return;
- if( pOrderBy->a[ii].sortFlags & KEYINFO_ORDER_BIGNULL ) return;
+ if( pOrderBy->a[ii].fg.sortFlags & KEYINFO_ORDER_BIGNULL ) return;
}
}
@@ -151907,9 +155481,6 @@ SQLITE_PRIVATE void sqlite3WhereClauseClear(WhereClause *pWC){
a++;
}
}
- if( pWC->a!=pWC->aStatic ){
- sqlite3DbFree(db, pWC->a);
- }
}
@@ -152036,6 +155607,7 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(
if( pArgs==0 ) return;
for(j=k=0; j<pArgs->nExpr; j++){
Expr *pRhs;
+ u32 joinType;
while( k<pTab->nCol && (pTab->aCol[k].colFlags & COLFLAG_HIDDEN)==0 ){k++;}
if( k>=pTab->nCol ){
sqlite3ErrorMsg(pParse, "too many arguments on %s() - max %d",
@@ -152052,9 +155624,12 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(
pRhs = sqlite3PExpr(pParse, TK_UPLUS,
sqlite3ExprDup(pParse->db, pArgs->a[j].pExpr, 0), 0);
pTerm = sqlite3PExpr(pParse, TK_EQ, pColRef, pRhs);
- if( pItem->fg.jointype & JT_LEFT ){
- sqlite3SetJoinExpr(pTerm, pItem->iCursor);
+ if( pItem->fg.jointype & (JT_LEFT|JT_LTORJ) ){
+ joinType = EP_OuterON;
+ }else{
+ joinType = EP_InnerON;
}
+ sqlite3SetJoinExpr(pTerm, pItem->iCursor, joinType);
whereClauseInsert(pWC, pTerm, TERM_DYNAMIC);
}
}
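/* For example, a call such as generate_series(1,100,5) in the FROM clause is
** rewritten here into virtual equality terms on the function's hidden
** columns (roughly start=1 AND stop=100 AND step=5).  When the function is
** the right operand of a LEFT JOIN those terms are tagged EP_OuterON so that
** they behave like ON-clause constraints; otherwise they are EP_InnerON. */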
@@ -152130,7 +155705,7 @@ SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo *pWInfo){
** block sorting is required.
*/
SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo *pWInfo){
- return pWInfo->nOBSat;
+ return pWInfo->nOBSat<0 ? 0 : pWInfo->nOBSat;
}
/*
@@ -152165,7 +155740,7 @@ SQLITE_PRIVATE int sqlite3WhereOrderByLimitOptLabel(WhereInfo *pWInfo){
}
pInner = &pWInfo->a[pWInfo->nLevel-1];
assert( pInner->addrNxt!=0 );
- return pInner->addrNxt;
+ return pInner->pRJ ? pWInfo->iContinue : pInner->addrNxt;
}
/*
@@ -152316,6 +155891,30 @@ SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet *pMaskSet, int iCursor){
return 0;
}
+/* Allocate memory that is automatically freed when pWInfo is freed.
+*/
+SQLITE_PRIVATE void *sqlite3WhereMalloc(WhereInfo *pWInfo, u64 nByte){
+ WhereMemBlock *pBlock;
+ pBlock = sqlite3DbMallocRawNN(pWInfo->pParse->db, nByte+sizeof(*pBlock));
+ if( pBlock ){
+ pBlock->pNext = pWInfo->pMemToFree;
+ pBlock->sz = nByte;
+ pWInfo->pMemToFree = pBlock;
+ pBlock++;
+ }
+ return (void*)pBlock;
+}
+SQLITE_PRIVATE void *sqlite3WhereRealloc(WhereInfo *pWInfo, void *pOld, u64 nByte){
+ void *pNew = sqlite3WhereMalloc(pWInfo, nByte);
+ if( pNew && pOld ){
+ WhereMemBlock *pOldBlk = (WhereMemBlock*)pOld;
+ pOldBlk--;
+    assert( pOldBlk->sz<nByte );
+    memcpy(pNew, pOld, pOldBlk->sz);
+ }
+ return pNew;
+}
+
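+/* Usage sketch (informal): callers obtain scratch memory that lives as long
+** as the WHERE processing with, for example,
+**
+**     pLevel->pRJ = sqlite3WhereMalloc(pWInfo, sizeof(WhereRightJoin));
+**
+** No explicit free is needed; whereInfoFree() walks pWInfo->pMemToFree and
+** releases every block when the WhereInfo object itself is destroyed. */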
/*
** Create a new mask for cursor iCursor.
**
@@ -152369,7 +155968,7 @@ static WhereTerm *whereScanNext(WhereScan *pScan){
&& (iColumn!=XN_EXPR
|| sqlite3ExprCompareSkip(pTerm->pExpr->pLeft,
pScan->pIdxExpr,iCur)==0)
- && (pScan->iEquiv<=1 || !ExprHasProperty(pTerm->pExpr, EP_FromJoin))
+ && (pScan->iEquiv<=1 || !ExprHasProperty(pTerm->pExpr, EP_OuterON))
){
if( (pTerm->eOperator & WO_EQUIV)!=0
         && pScan->nEquiv<ArraySize(pScan->aiCur)
@@ -152721,6 +156320,7 @@ static void translateColumnToCopy(
pOp->p1 = pOp->p2 + iRegister;
pOp->p2 = pOp->p3;
pOp->p3 = 0;
+ pOp->p5 = 2; /* Cause the MEM_Subtype flag to be cleared */
}else if( pOp->opcode==OP_Rowid ){
pOp->opcode = OP_Sequence;
pOp->p1 = iAutoidxCur;
@@ -152781,6 +156381,43 @@ static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){
#define whereTraceIndexInfoOutputs(A)
#endif
+/*
+** We know that pSrc is an operand of an outer join. Return true if
+** pTerm is a constraint that is compatible with that join.
+**
+** pTerm must be EP_OuterON if pSrc is the right operand of an
+** outer join. pTerm can be either EP_OuterON or EP_InnerON if pSrc
+** is the left operand of a RIGHT join.
+**
+** See https://sqlite.org/forum/forumpost/206d99a16dd9212f
+** for an example of a WHERE clause constraint that may not be used on
+** the right table of a RIGHT JOIN because the constraint implies a
+** not-NULL condition on the left table of the RIGHT JOIN.
+*/
+static int constraintCompatibleWithOuterJoin(
+ const WhereTerm *pTerm, /* WHERE clause term to check */
+ const SrcItem *pSrc /* Table we are trying to access */
+){
+ assert( (pSrc->fg.jointype&(JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 ); /* By caller */
+ testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_LEFT );
+ testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_LTORJ );
+  testcase( ExprHasProperty(pTerm->pExpr, EP_OuterON) );
+ testcase( ExprHasProperty(pTerm->pExpr, EP_InnerON) );
+ if( !ExprHasProperty(pTerm->pExpr, EP_OuterON|EP_InnerON)
+ || pTerm->pExpr->w.iJoin != pSrc->iCursor
+ ){
+ return 0;
+ }
+ if( (pSrc->fg.jointype & (JT_LEFT|JT_RIGHT))!=0
+ && ExprHasProperty(pTerm->pExpr, EP_InnerON)
+ ){
+ return 0;
+ }
+ return 1;
+}
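+/* For illustration: in
+**
+**     SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.a WHERE t2.b=5;
+**
+** the term "t2.b=5" originates in the WHERE clause and so carries neither
+** EP_OuterON nor EP_InnerON.  This routine therefore reports it as
+** incompatible, and it will not be used to drive an index on t2, the right
+** operand of the outer join. */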
+
+
+
#ifndef SQLITE_OMIT_AUTOMATIC_INDEX
/*
** Return TRUE if the WHERE clause term pTerm is of a form where it
@@ -152795,14 +156432,11 @@ static int termCanDriveIndex(
char aff;
if( pTerm->leftCursor!=pSrc->iCursor ) return 0;
if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) return 0;
- if( (pSrc->fg.jointype & JT_LEFT)
- && !ExprHasProperty(pTerm->pExpr, EP_FromJoin)
- && (pTerm->eOperator & WO_IS)
+ assert( (pSrc->fg.jointype & JT_RIGHT)==0 );
+ if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0
+ && !constraintCompatibleWithOuterJoin(pTerm,pSrc)
){
- /* Cannot use an IS term from the WHERE clause as an index driver for
- ** the RHS of a LEFT JOIN. Such a term can only be used if it is from
- ** the ON clause. */
- return 0;
+ return 0; /* See https://sqlite.org/forum/forumpost/51e6959f61 */
}
if( (pTerm->prereqRight & notReady)!=0 ) return 0;
assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 );
@@ -153143,7 +156777,7 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter(
const SrcItem *pTabItem;
pLevel = &pWInfo->a[iLevel];
pTabItem = &pWInfo->pTabList->a[pLevel->iFrom];
- if( pTabItem->fg.jointype & JT_LEFT ) continue;
+ if( pTabItem->fg.jointype & (JT_LEFT|JT_LTORJ) ) continue;
pLoop = pLevel->pWLoop;
if( NEVER(pLoop==0) ) continue;
if( pLoop->prereq & notReady ) continue;
@@ -153214,12 +156848,8 @@ static sqlite3_index_info *allocateIndexInfo(
assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 );
assert( pTerm->u.x.leftColumn>=XN_ROWID );
      assert( pTerm->u.x.leftColumn<pTab->nCol );
-
- /* tag-20191211-002: WHERE-clause constraints are not useful to the
- ** right-hand table of a LEFT JOIN. See tag-20191211-001 for the
- ** equivalent restriction for ordinary tables. */
- if( (pSrc->fg.jointype & JT_LEFT)!=0
- && !ExprHasProperty(pTerm->pExpr, EP_FromJoin)
+ if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0
+ && !constraintCompatibleWithOuterJoin(pTerm,pSrc)
){
continue;
}
@@ -153244,7 +156874,7 @@ static sqlite3_index_info *allocateIndexInfo(
}
/* Virtual tables are unable to deal with NULLS FIRST */
- if( pOrderBy->a[i].sortFlags & KEYINFO_ORDER_BIGNULL ) break;
+ if( pOrderBy->a[i].fg.sortFlags & KEYINFO_ORDER_BIGNULL ) break;
    /* First case - a direct column reference without a COLLATE operator */
if( pExpr->op==TK_COLUMN && pExpr->iTable==pSrc->iCursor ){
@@ -153274,8 +156904,10 @@ static sqlite3_index_info *allocateIndexInfo(
}
if( i==n ){
nOrderBy = n;
- if( (pWInfo->wctrlFlags & (WHERE_GROUPBY|WHERE_DISTINCTBY)) ){
- eDistinct = 1 + ((pWInfo->wctrlFlags & WHERE_DISTINCTBY)!=0);
+ if( (pWInfo->wctrlFlags & WHERE_DISTINCTBY) ){
+ eDistinct = 2 + ((pWInfo->wctrlFlags & WHERE_SORTBYGROUP)!=0);
+ }else if( pWInfo->wctrlFlags & WHERE_GROUPBY ){
+ eDistinct = 1;
}
}
}
@@ -153354,7 +156986,7 @@ static sqlite3_index_info *allocateIndexInfo(
|| (pExpr->op==TK_COLLATE && pExpr->pLeft->op==TK_COLUMN
&& pExpr->iColumn==pExpr->pLeft->iColumn) );
pIdxOrderBy[j].iColumn = pExpr->iColumn;
- pIdxOrderBy[j].desc = pOrderBy->a[i].sortFlags & KEYINFO_ORDER_DESC;
+ pIdxOrderBy[j].desc = pOrderBy->a[i].fg.sortFlags & KEYINFO_ORDER_DESC;
j++;
}
pIdxInfo->nOrderBy = j;
@@ -153458,7 +157090,7 @@ static int whereKeyStats(
#endif
assert( pRec!=0 );
assert( pIdx->nSample>0 );
- assert( pRec->nField>0 && pRec->nField<=pIdx->nSampleCol );
+ assert( pRec->nField>0 );
/* Do a binary search to find the first sample greater than or equal
** to pRec. If pRec contains a single field, the set of samples to search
@@ -153504,7 +157136,7 @@ static int whereKeyStats(
** it is extended to two fields. The duplicates that this creates do not
** cause any problems.
*/
- nField = pRec->nField;
+ nField = MIN(pRec->nField, pIdx->nSample);
iCol = 0;
iSample = pIdx->nSample * nField;
do{
@@ -153592,7 +157224,7 @@ static int whereKeyStats(
** is larger than all samples in the array. */
tRowcnt iUpper, iGap;
if( i>=pIdx->nSample ){
- iUpper = sqlite3LogEstToInt(pIdx->aiRowLogEst[0]);
+ iUpper = pIdx->nRowEst0;
}else{
iUpper = aSample[i].anLt[iCol];
}
@@ -154095,7 +157727,7 @@ SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm){
memcpy(zType, "....", 5);
if( pTerm->wtFlags & TERM_VIRTUAL ) zType[0] = 'V';
if( pTerm->eOperator & WO_EQUIV ) zType[1] = 'E';
- if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) zType[2] = 'L';
+ if( ExprHasProperty(pTerm->pExpr, EP_OuterON) ) zType[2] = 'L';
if( pTerm->wtFlags & TERM_CODED ) zType[3] = 'C';
if( pTerm->eOperator & WO_SINGLE ){
assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 );
@@ -154221,12 +157853,18 @@ static void whereLoopClearUnion(sqlite3 *db, WhereLoop *p){
}
/*
-** Deallocate internal memory used by a WhereLoop object
+** Deallocate internal memory used by a WhereLoop object. Leave the
+** object in an initialized state, as if it had been newly allocated.
*/
static void whereLoopClear(sqlite3 *db, WhereLoop *p){
- if( p->aLTerm!=p->aLTermSpace ) sqlite3DbFreeNN(db, p->aLTerm);
+ if( p->aLTerm!=p->aLTermSpace ){
+ sqlite3DbFreeNN(db, p->aLTerm);
+ p->aLTerm = p->aLTermSpace;
+ p->nLSlot = ArraySize(p->aLTermSpace);
+ }
whereLoopClearUnion(db, p);
- whereLoopInit(p);
+ p->nLTerm = 0;
+ p->wsFlags = 0;
}
/*
@@ -154250,7 +157888,9 @@ static int whereLoopResize(sqlite3 *db, WhereLoop *p, int n){
*/
static int whereLoopXfer(sqlite3 *db, WhereLoop *pTo, WhereLoop *pFrom){
whereLoopClearUnion(db, pTo);
- if( whereLoopResize(db, pTo, pFrom->nLTerm) ){
+ if( pFrom->nLTerm > pTo->nLSlot
+ && whereLoopResize(db, pTo, pFrom->nLTerm)
+ ){
memset(pTo, 0, WHERE_LOOP_XFER_SZ);
return SQLITE_NOMEM_BKPT;
}
@@ -154268,42 +157908,29 @@ static int whereLoopXfer(sqlite3 *db, WhereLoop *pTo, WhereLoop *pFrom){
** Delete a WhereLoop object
*/
static void whereLoopDelete(sqlite3 *db, WhereLoop *p){
+ assert( db!=0 );
whereLoopClear(db, p);
- sqlite3DbFreeNN(db, p);
+ sqlite3DbNNFreeNN(db, p);
}
/*
** Free a WhereInfo structure
*/
static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){
- int i;
assert( pWInfo!=0 );
-  for(i=0; i<pWInfo->nLevel; i++){
- WhereLevel *pLevel = &pWInfo->a[i];
- if( pLevel->pWLoop && (pLevel->pWLoop->wsFlags & WHERE_IN_ABLE)!=0 ){
- assert( (pLevel->pWLoop->wsFlags & WHERE_MULTI_OR)==0 );
- sqlite3DbFree(db, pLevel->u.in.aInLoop);
- }
- }
+ assert( db!=0 );
sqlite3WhereClauseClear(&pWInfo->sWC);
while( pWInfo->pLoops ){
WhereLoop *p = pWInfo->pLoops;
pWInfo->pLoops = p->pNextLoop;
whereLoopDelete(db, p);
}
- assert( pWInfo->pExprMods==0 );
- sqlite3DbFreeNN(db, pWInfo);
-}
-
-/* Undo all Expr node modifications
-*/
-static void whereUndoExprMods(WhereInfo *pWInfo){
- while( pWInfo->pExprMods ){
- WhereExprMod *p = pWInfo->pExprMods;
- pWInfo->pExprMods = p->pNext;
- memcpy(p->pExpr, &p->orig, sizeof(p->orig));
- sqlite3DbFree(pWInfo->pParse->db, p);
+ while( pWInfo->pMemToFree ){
+ WhereMemBlock *pNext = pWInfo->pMemToFree->pNext;
+ sqlite3DbNNFreeNN(db, pWInfo->pMemToFree);
+ pWInfo->pMemToFree = pNext;
}
+ sqlite3DbNNFreeNN(db, pWInfo);
}
/*
@@ -154660,10 +158287,11 @@ static void whereLoopOutputAdjust(
**
** 2022-03-24: Self-culling only applies if either the extra terms
** are straight comparison operators that are non-true with NULL
- ** operand, or if the loop is not a LEFT JOIN.
+ ** operand, or if the loop is not an OUTER JOIN.
*/
if( (pTerm->eOperator & 0x3f)!=0
- || (pWC->pWInfo->pTabList->a[pLoop->iTab].fg.jointype & JT_LEFT)==0
+ || (pWC->pWInfo->pTabList->a[pLoop->iTab].fg.jointype
+ & (JT_LEFT|JT_LTORJ))==0
){
pLoop->wsFlags |= WHERE_SELFCULL;
}
@@ -154869,15 +158497,11 @@ static int whereLoopAddBtreeIndex(
** to mix with a lower range bound from some other source */
if( pTerm->wtFlags & TERM_LIKEOPT && pTerm->eOperator==WO_LT ) continue;
- /* tag-20191211-001: Do not allow constraints from the WHERE clause to
- ** be used by the right table of a LEFT JOIN. Only constraints in the
- ** ON clause are allowed. See tag-20191211-002 for the vtab equivalent. */
- if( (pSrc->fg.jointype & JT_LEFT)!=0
- && !ExprHasProperty(pTerm->pExpr, EP_FromJoin)
+ if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0
+ && !constraintCompatibleWithOuterJoin(pTerm,pSrc)
){
continue;
}
-
if( IsUniqueIndex(pProbe) && saved_nEq==pProbe->nKeyCol-1 ){
pBuilder->bldFlags1 |= SQLITE_BLDF1_UNIQUE;
}else{
@@ -154888,7 +158512,11 @@ static int whereLoopAddBtreeIndex(
pNew->u.btree.nBtm = saved_nBtm;
pNew->u.btree.nTop = saved_nTop;
pNew->nLTerm = saved_nLTerm;
- if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */
+ if( pNew->nLTerm>=pNew->nLSlot
+ && whereLoopResize(db, pNew, pNew->nLTerm+1)
+ ){
+ break; /* OOM while trying to enlarge the pNew->aLTerm array */
+ }
pNew->aLTerm[pNew->nLTerm++] = pTerm;
pNew->prereq = (saved_prereq | pTerm->prereqRight) & ~pNew->maskSelf;
@@ -154981,38 +158609,39 @@ static int whereLoopAddBtreeIndex(
if( scan.iEquiv>1 ) pNew->wsFlags |= WHERE_TRANSCONS;
}else if( eOp & WO_ISNULL ){
pNew->wsFlags |= WHERE_COLUMN_NULL;
- }else if( eOp & (WO_GT|WO_GE) ){
- testcase( eOp & WO_GT );
- testcase( eOp & WO_GE );
- pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_BTM_LIMIT;
- pNew->u.btree.nBtm = whereRangeVectorLen(
- pParse, pSrc->iCursor, pProbe, saved_nEq, pTerm
- );
- pBtm = pTerm;
- pTop = 0;
- if( pTerm->wtFlags & TERM_LIKEOPT ){
- /* Range constraints that come from the LIKE optimization are
- ** always used in pairs. */
- pTop = &pTerm[1];
-        assert( (pTop-(pTerm->pWC->a))<pTerm->pWC->nTerm );
- assert( pTop->wtFlags & TERM_LIKEOPT );
- assert( pTop->eOperator==WO_LT );
- if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */
- pNew->aLTerm[pNew->nLTerm++] = pTop;
- pNew->wsFlags |= WHERE_TOP_LIMIT;
- pNew->u.btree.nTop = 1;
- }
- }else{
- assert( eOp & (WO_LT|WO_LE) );
- testcase( eOp & WO_LT );
- testcase( eOp & WO_LE );
- pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_TOP_LIMIT;
- pNew->u.btree.nTop = whereRangeVectorLen(
+ }else{
+ int nVecLen = whereRangeVectorLen(
pParse, pSrc->iCursor, pProbe, saved_nEq, pTerm
);
- pTop = pTerm;
- pBtm = (pNew->wsFlags & WHERE_BTM_LIMIT)!=0 ?
- pNew->aLTerm[pNew->nLTerm-2] : 0;
+ if( eOp & (WO_GT|WO_GE) ){
+ testcase( eOp & WO_GT );
+ testcase( eOp & WO_GE );
+ pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_BTM_LIMIT;
+ pNew->u.btree.nBtm = nVecLen;
+ pBtm = pTerm;
+ pTop = 0;
+ if( pTerm->wtFlags & TERM_LIKEOPT ){
+ /* Range constraints that come from the LIKE optimization are
+ ** always used in pairs. */
+ pTop = &pTerm[1];
+        assert( (pTop-(pTerm->pWC->a))<pTerm->pWC->nTerm );
+ assert( pTop->wtFlags & TERM_LIKEOPT );
+ assert( pTop->eOperator==WO_LT );
+ if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */
+ pNew->aLTerm[pNew->nLTerm++] = pTop;
+ pNew->wsFlags |= WHERE_TOP_LIMIT;
+ pNew->u.btree.nTop = 1;
+ }
+ }else{
+ assert( eOp & (WO_LT|WO_LE) );
+ testcase( eOp & WO_LT );
+ testcase( eOp & WO_LE );
+ pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_TOP_LIMIT;
+ pNew->u.btree.nTop = nVecLen;
+ pTop = pTerm;
+ pBtm = (pNew->wsFlags & WHERE_BTM_LIMIT)!=0 ?
+ pNew->aLTerm[pNew->nLTerm-2] : 0;
+ }
}
/* At this point pNew->nOut is set to the number of rows expected to
@@ -155227,23 +158856,26 @@ static int indexMightHelpWithOrderBy(
*/
static int whereUsablePartialIndex(
int iTab, /* The table for which we want an index */
- int isLeft, /* True if iTab is the right table of a LEFT JOIN */
+ u8 jointype, /* The JT_* flags on the join */
WhereClause *pWC, /* The WHERE clause of the query */
Expr *pWhere /* The WHERE clause from the partial index */
){
int i;
WhereTerm *pTerm;
- Parse *pParse = pWC->pWInfo->pParse;
+ Parse *pParse;
+
+ if( jointype & JT_LTORJ ) return 0;
+ pParse = pWC->pWInfo->pParse;
while( pWhere->op==TK_AND ){
- if( !whereUsablePartialIndex(iTab,isLeft,pWC,pWhere->pLeft) ) return 0;
+ if( !whereUsablePartialIndex(iTab,jointype,pWC,pWhere->pLeft) ) return 0;
pWhere = pWhere->pRight;
}
if( pParse->db->flags & SQLITE_EnableQPSG ) pParse = 0;
  for(i=0, pTerm=pWC->a; i<pWC->nTerm; i++, pTerm++){
Expr *pExpr;
pExpr = pTerm->pExpr;
- if( (!ExprHasProperty(pExpr, EP_FromJoin) || pExpr->w.iRightJoinTable==iTab)
- && (isLeft==0 || ExprHasProperty(pExpr, EP_FromJoin))
+ if( (!ExprHasProperty(pExpr, EP_OuterON) || pExpr->w.iJoin==iTab)
+ && ((jointype & JT_OUTER)==0 || ExprHasProperty(pExpr, EP_OuterON))
&& sqlite3ExprImpliesExpr(pParse, pExpr, pWhere, iTab)
&& (pTerm->wtFlags & TERM_VNULL)==0
){
@@ -155253,6 +158885,94 @@ static int whereUsablePartialIndex(
return 0;
}
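/* For illustration: given "CREATE INDEX i1 ON t1(a) WHERE b>0", index i1 is
** usable for "SELECT ... FROM t1 WHERE b>0 AND a=?" because a WHERE-clause
** term implies the index's WHERE clause.  With the JT_LTORJ test above,
** such a partial index is never considered for a table on the left side of
** a RIGHT JOIN, presumably because its WHERE terms cannot be assumed true
** while the unmatched rows of the RIGHT JOIN are being generated. */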
+/*
+** Structure passed to the whereIsCoveringIndex Walker callback.
+*/
+struct CoveringIndexCheck {
+ Index *pIdx; /* The index */
+ int iTabCur; /* Cursor number for the corresponding table */
+};
+
+/*
+** Information passed in is pWalk->u.pCovIdxCk. Call it pCk.
+**
+** If the Expr node references the table with cursor pCk->iTabCur, then
+** make sure that column is covered by the index pCk->pIdx. We know that
+** all columns less than 63 (really BMS-1) are covered, so we don't need
+** to check them. But we do need to check any column at 63 or greater.
+**
+** If the index does not cover the column, then set pWalk->eCode to
+** non-zero and return WRC_Abort to stop the search.
+**
+** If this node does not disprove that the index can be a covering index,
+** then just return WRC_Continue, to continue the search.
+*/
+static int whereIsCoveringIndexWalkCallback(Walker *pWalk, Expr *pExpr){
+ int i; /* Loop counter */
+ const Index *pIdx; /* The index of interest */
+ const i16 *aiColumn; /* Columns contained in the index */
+ u16 nColumn; /* Number of columns in the index */
+ if( pExpr->op!=TK_COLUMN && pExpr->op!=TK_AGG_COLUMN ) return WRC_Continue;
+ if( pExpr->iColumn<(BMS-1) ) return WRC_Continue;
+ if( pExpr->iTable!=pWalk->u.pCovIdxCk->iTabCur ) return WRC_Continue;
+ pIdx = pWalk->u.pCovIdxCk->pIdx;
+ aiColumn = pIdx->aiColumn;
+ nColumn = pIdx->nColumn;
+  for(i=0; i<nColumn; i++){
+    if( aiColumn[i]==pExpr->iColumn ) return WRC_Continue;
+ }
+ pWalk->eCode = 1;
+ return WRC_Abort;
+}
+
+
+/*
+** pIdx is an index that covers all of the low-number columns used by
+** pWInfo->pSelect (columns from 0 through 62). But there are columns
+** in pWInfo->pSelect beyond 62. This routine tries to answer the question
+** of whether pIdx covers *all* columns in the query.
+**
+** Return 0 if pIdx is a covering index. Return non-zero if pIdx is
+** not a covering index or if we are unable to determine if pIdx is a
+** covering index.
+**
+** This routine is an optimization. It is always safe to return non-zero.
+** But returning zero when non-zero should have been returned can lead to
+** incorrect bytecode and assertion faults.
+*/
+static SQLITE_NOINLINE u32 whereIsCoveringIndex(
+ WhereInfo *pWInfo, /* The WHERE clause context */
+ Index *pIdx, /* Index that is being tested */
+ int iTabCur /* Cursor for the table being indexed */
+){
+ int i;
+ struct CoveringIndexCheck ck;
+ Walker w;
+ if( pWInfo->pSelect==0 ){
+ /* We don't have access to the full query, so we cannot check to see
+ ** if pIdx is covering. Assume it is not. */
+ return 1;
+ }
+  for(i=0; i<pIdx->nColumn; i++){
+ if( pIdx->aiColumn[i]>=BMS-1 ) break;
+ }
+ if( i>=pIdx->nColumn ){
+ /* pIdx does not index any columns greater than 62, but we know from
+ ** colMask that columns greater than 62 are used, so this is not a
+ ** covering index */
+ return 1;
+ }
+ ck.pIdx = pIdx;
+ ck.iTabCur = iTabCur;
+ memset(&w, 0, sizeof(w));
+ w.xExprCallback = whereIsCoveringIndexWalkCallback;
+ w.xSelectCallback = sqlite3SelectWalkNoop;
+ w.u.pCovIdxCk = &ck;
+ w.eCode = 0;
+ sqlite3WalkSelect(&w, pWInfo->pSelect);
+ return w.eCode;
+}
+
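+/* Informal example of the case handled above: if t1 has 100 columns, the
+** colUsed/colNotIdxed bitmasks can only track columns 0 through 62
+** individually; a reference to any higher-numbered column merely sets the
+** topmost bit.  So when m==TOPBIT the bitmask cannot prove coverage by
+** itself, and the Walker inspects the query expressions directly to see
+** whether every high-numbered column actually used appears in pIdx. */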
/*
** Add all WhereLoop objects for a single table of the join where the table
** is identified by pBuilder->pNew->iTab. That table is guaranteed to be
@@ -155352,13 +159072,14 @@ static int whereLoopAddBtree(
#ifndef SQLITE_OMIT_AUTOMATIC_INDEX
/* Automatic indexes */
if( !pBuilder->pOrSet /* Not part of an OR optimization */
- && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0
+ && (pWInfo->wctrlFlags & (WHERE_RIGHT_JOIN|WHERE_OR_SUBCLAUSE))==0
&& (pWInfo->pParse->db->flags & SQLITE_AutoIndex)!=0
&& !pSrc->fg.isIndexedBy /* Has no INDEXED BY clause */
&& !pSrc->fg.notIndexed /* Has no NOT INDEXED clause */
&& HasRowid(pTab) /* Not WITHOUT ROWID table. (FIXME: Why not?) */
&& !pSrc->fg.isCorrelated /* Not a correlated subquery */
&& !pSrc->fg.isRecursive /* Not a recursive common table expression. */
+ && (pSrc->fg.jointype & JT_RIGHT)==0 /* Not the right tab of a RIGHT JOIN */
){
/* Generate auto-index WhereLoops */
LogEst rLogSize; /* Logarithm of the number of rows in the table */
@@ -155408,9 +159129,8 @@ static int whereLoopAddBtree(
for(; rc==SQLITE_OK && pProbe;
pProbe=(pSrc->fg.isIndexedBy ? 0 : pProbe->pNext), iSortIdx++
){
- int isLeft = (pSrc->fg.jointype & JT_OUTER)!=0;
if( pProbe->pPartIdxWhere!=0
- && !whereUsablePartialIndex(pSrc->iCursor, isLeft, pWC,
+ && !whereUsablePartialIndex(pSrc->iCursor, pSrc->fg.jointype, pWC,
pProbe->pPartIdxWhere)
){
testcase( pNew->iTab!=pSrc->iCursor ); /* See ticket [98d973b8f5] */
@@ -155455,6 +159175,9 @@ static int whereLoopAddBtree(
#else
pNew->rRun = rSize + 16;
#endif
+ if( IsView(pTab) || (pTab->tabFlags & TF_Ephemeral)!=0 ){
+ pNew->wsFlags |= WHERE_VIEWSCAN;
+ }
ApplyCostMultiplier(pNew->rRun, pTab->costMult);
whereLoopOutputAdjust(pWC, pNew, rSize);
rc = whereLoopInsert(pBuilder, pNew);
@@ -155467,6 +159190,9 @@ static int whereLoopAddBtree(
m = 0;
}else{
m = pSrc->colUsed & pProbe->colNotIdxed;
+ if( m==TOPBIT ){
+ m = whereIsCoveringIndex(pWInfo, pProbe, pSrc->iCursor);
+ }
pNew->wsFlags = (m==0) ? (WHERE_IDX_ONLY|WHERE_INDEXED) : WHERE_INDEXED;
}
@@ -155518,7 +159244,14 @@ static int whereLoopAddBtree(
}
ApplyCostMultiplier(pNew->rRun, pTab->costMult);
whereLoopOutputAdjust(pWC, pNew, rSize);
- rc = whereLoopInsert(pBuilder, pNew);
+ if( (pSrc->fg.jointype & JT_RIGHT)!=0 && pProbe->aColExpr ){
+      /* Do not do a SCAN of an index-on-expression in a RIGHT JOIN
+ ** because the cursor used to access the index might not be
+ ** positioned to the correct row during the right-join no-match
+ ** loop. */
+ }else{
+ rc = whereLoopInsert(pBuilder, pNew);
+ }
pNew->nOut = rSize;
if( rc ) break;
}
@@ -155693,6 +159426,7 @@ static int whereLoopAddVirtualOne(
*pbIn = 1; assert( (mExclude & WO_IN)==0 );
}
+ assert( pbRetryLimit || !isLimitTerm(pTerm) );
if( isLimitTerm(pTerm) && *pbIn ){
/* If there is an IN(...) term handled as an == (separate call to
** xFilter for each value on the RHS of the IN) and a LIMIT or
@@ -155840,9 +159574,7 @@ SQLITE_API int sqlite3_vtab_rhs_value(
*/
SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info *pIdxInfo){
HiddenIndexInfo *pHidden = (HiddenIndexInfo*)&pIdxInfo[1];
- assert( pHidden->eDistinct==0
- || pHidden->eDistinct==1
- || pHidden->eDistinct==2 );
+ assert( pHidden->eDistinct>=0 && pHidden->eDistinct<=3 );
return pHidden->eDistinct;
}
@@ -155850,15 +159582,26 @@ SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info *pIdxInfo){
&& !defined(SQLITE_OMIT_VIRTUALTABLE)
/*
** Cause the prepared statement that is associated with a call to
-** xBestIndex to open write transactions on all attached schemas.
+** xBestIndex to potentially use all schemas. If the statement being
+** prepared is read-only, then just start read transactions on all
+** schemas. But if this is a write operation, start writes on all
+** schemas.
+**
** This is used by the (built-in) sqlite_dbpage virtual table.
*/
-SQLITE_PRIVATE void sqlite3VtabWriteAll(sqlite3_index_info *pIdxInfo){
+SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(sqlite3_index_info *pIdxInfo){
HiddenIndexInfo *pHidden = (HiddenIndexInfo*)&pIdxInfo[1];
Parse *pParse = pHidden->pParse;
int nDb = pParse->db->nDb;
int i;
-  for(i=0; i<nDb; i++) sqlite3BeginWriteOperation(pParse, 0, i);
+  for(i=0; i<nDb; i++){
+    sqlite3CodeVerifySchema(pParse, i);
+  }
+  if( DbMaskNonZero(pParse->writeMask) ){
+    for(i=0; i<nDb; i++){
+      sqlite3BeginWriteOperation(pParse, 0, i);
+    }
+  }
 }
  pItem = pWInfo->pTabList->a + pNew->iTab;
iCur = pItem->iCursor;
+ /* The multi-index OR optimization does not work for RIGHT and FULL JOIN */
+ if( pItem->fg.jointype & JT_RIGHT ) return SQLITE_OK;
+
  for(pTerm=pWC->a; pTerm<pWCEnd; pTerm++){
    if( (pTerm->eOperator & WO_OR)!=0
&& (pTerm->u.pOrInfo->indexable & pNew->maskSelf)!=0
@@ -156154,29 +159900,50 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){
SrcItem *pEnd = &pTabList->a[pWInfo->nLevel];
sqlite3 *db = pWInfo->pParse->db;
int rc = SQLITE_OK;
+ int bFirstPastRJ = 0;
+ int hasRightJoin = 0;
WhereLoop *pNew;
+
/* Loop over the tables in the join, from left to right */
pNew = pBuilder->pNew;
- whereLoopInit(pNew);
+
+ /* Verify that pNew has already been initialized */
+ assert( pNew->nLTerm==0 );
+ assert( pNew->wsFlags==0 );
+ assert( pNew->nLSlot>=ArraySize(pNew->aLTermSpace) );
+ assert( pNew->aLTerm!=0 );
+
pBuilder->iPlanLimit = SQLITE_QUERY_PLANNER_LIMIT;
  for(iTab=0, pItem=pTabList->a; pItem<pEnd; iTab++, pItem++){
    pNew->iTab = iTab;
pBuilder->iPlanLimit += SQLITE_QUERY_PLANNER_LIMIT_INCR;
pNew->maskSelf = sqlite3WhereGetMask(&pWInfo->sMaskSet, pItem->iCursor);
- if( (pItem->fg.jointype & (JT_LEFT|JT_CROSS))!=0 ){
- /* This condition is true when pItem is the FROM clause term on the
- ** right-hand-side of a LEFT or CROSS JOIN. */
- mPrereq = mPrior;
- }else{
+ if( bFirstPastRJ
+ || (pItem->fg.jointype & (JT_OUTER|JT_CROSS|JT_LTORJ))!=0
+ ){
+ /* Add prerequisites to prevent reordering of FROM clause terms
+ ** across CROSS joins and outer joins. The bFirstPastRJ boolean
+ ** prevents the right operand of a RIGHT JOIN from being swapped with
+ ** other elements even further to the right.
+ **
+ ** The JT_LTORJ case and the hasRightJoin flag work together to
+ ** prevent FROM-clause terms from moving from the right side of
+ ** a LEFT JOIN over to the left side of that join if the LEFT JOIN
+ ** is itself on the left side of a RIGHT JOIN.
+ */
+ if( pItem->fg.jointype & JT_LTORJ ) hasRightJoin = 1;
+ mPrereq |= mPrior;
+ bFirstPastRJ = (pItem->fg.jointype & JT_RIGHT)!=0;
+ }else if( !hasRightJoin ){
mPrereq = 0;
}
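/* For example, in "SELECT * FROM t1 CROSS JOIN t2" the t2 loop picks up t1
** as a prerequisite here, so the planner cannot reorder t2 ahead of t1.
** The JT_LTORJ/JT_RIGHT handling extends the same pinning to the operands
** of RIGHT JOINs. */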
#ifndef SQLITE_OMIT_VIRTUALTABLE
if( IsVirtual(pItem->pTab) ){
SrcItem *p;
      for(p=&pItem[1]; p<pEnd; p++){
-        if( mUnusable || (p->fg.jointype & (JT_LEFT|JT_CROSS)) ){
+ if( mUnusable || (p->fg.jointype & (JT_OUTER|JT_CROSS)) ){
mUnusable |= sqlite3WhereGetMask(&pWInfo->sMaskSet, p->iCursor);
}
}
@@ -156301,7 +160068,9 @@ static i8 wherePathSatisfiesOrderBy(
pLoop = pLast;
}
if( pLoop->wsFlags & WHERE_VIRTUALTABLE ){
- if( pLoop->u.vtab.isOrdered && (wctrlFlags & WHERE_DISTINCTBY)==0 ){
+ if( pLoop->u.vtab.isOrdered
+ && ((wctrlFlags&(WHERE_DISTINCTBY|WHERE_SORTBYGROUP))!=WHERE_DISTINCTBY)
+ ){
obSat = obDone;
}
break;
@@ -156479,16 +160248,18 @@ static i8 wherePathSatisfiesOrderBy(
/* Make sure the sort order is compatible in an ORDER BY clause.
** Sort order is irrelevant for a GROUP BY clause. */
if( revSet ){
- if( (rev ^ revIdx)!=(pOrderBy->a[i].sortFlags&KEYINFO_ORDER_DESC) ){
+ if( (rev ^ revIdx)
+ != (pOrderBy->a[i].fg.sortFlags&KEYINFO_ORDER_DESC)
+ ){
isMatch = 0;
}
}else{
- rev = revIdx ^ (pOrderBy->a[i].sortFlags & KEYINFO_ORDER_DESC);
+ rev = revIdx ^ (pOrderBy->a[i].fg.sortFlags & KEYINFO_ORDER_DESC);
if( rev ) *pRevMask |= MASKBIT(iLoop);
revSet = 1;
}
}
- if( isMatch && (pOrderBy->a[i].sortFlags & KEYINFO_ORDER_BIGNULL) ){
+ if( isMatch && (pOrderBy->a[i].fg.sortFlags & KEYINFO_ORDER_BIGNULL) ){
if( j==pLoop->u.btree.nEq ){
pLoop->wsFlags |= WHERE_BIGNULL_SORT;
}else{
@@ -156568,7 +160339,7 @@ static i8 wherePathSatisfiesOrderBy(
** SELECT * FROM t1 GROUP BY y,x ORDER BY y,x; -- IsSorted()==0
*/
SQLITE_PRIVATE int sqlite3WhereIsSorted(WhereInfo *pWInfo){
- assert( pWInfo->wctrlFlags & WHERE_GROUPBY );
+ assert( pWInfo->wctrlFlags & (WHERE_GROUPBY|WHERE_DISTINCTBY) );
assert( pWInfo->wctrlFlags & WHERE_SORTBYGROUP );
return pWInfo->sorted;
}
@@ -156647,7 +160418,6 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
int mxChoice; /* Maximum number of simultaneous paths tracked */
int nLoop; /* Number of terms in the join */
Parse *pParse; /* Parsing context */
- sqlite3 *db; /* The database connection */
int iLoop; /* Loop counter over the terms of the join */
int ii, jj; /* Loop counters */
int mxI = 0; /* Index of next entry to replace */
@@ -156666,7 +160436,6 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
int nSpace; /* Bytes of space allocated at pSpace */
pParse = pWInfo->pParse;
- db = pParse->db;
nLoop = pWInfo->nLevel;
/* TUNING: For simple queries, only the best path is tracked.
** For 2-way joins, the 5 best paths are followed.
@@ -156689,7 +160458,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
/* Allocate and initialize space for aTo, aFrom and aSortCost[] */
nSpace = (sizeof(WherePath)+sizeof(WhereLoop*)*nLoop)*mxChoice*2;
nSpace += sizeof(LogEst) * nOrderBy;
- pSpace = sqlite3DbMallocRawNN(db, nSpace);
+ pSpace = sqlite3StackAllocRawNN(pParse->db, nSpace);
if( pSpace==0 ) return SQLITE_NOMEM_BKPT;
aTo = (WherePath*)pSpace;
aFrom = aTo+mxChoice;
@@ -156739,9 +160508,9 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
LogEst nOut; /* Rows visited by (pFrom+pWLoop) */
LogEst rCost; /* Cost of path (pFrom+pWLoop) */
LogEst rUnsorted; /* Unsorted cost of (pFrom+pWLoop) */
- i8 isOrdered = pFrom->isOrdered; /* isOrdered for (pFrom+pWLoop) */
+ i8 isOrdered; /* isOrdered for (pFrom+pWLoop) */
Bitmask maskNew; /* Mask of src visited by (..) */
- Bitmask revMask = 0; /* Mask of rev-order loops for (..) */
+ Bitmask revMask; /* Mask of rev-order loops for (..) */
if( (pWLoop->prereq & ~pFrom->maskLoop)!=0 ) continue;
if( (pWLoop->maskSelf & pFrom->maskLoop)!=0 ) continue;
@@ -156760,7 +160529,9 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
rUnsorted = sqlite3LogEstAdd(rUnsorted, pFrom->rUnsorted);
nOut = pFrom->nRow + pWLoop->nOut;
maskNew = pFrom->maskLoop | pWLoop->maskSelf;
+ isOrdered = pFrom->isOrdered;
if( isOrdered<0 ){
+ revMask = 0;
isOrdered = wherePathSatisfiesOrderBy(pWInfo,
pWInfo->pOrderBy, pFrom, pWInfo->wctrlFlags,
iLoop, pWLoop, &revMask);
@@ -156788,6 +160559,13 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
rUnsorted -= 2; /* TUNING: Slight bias in favor of no-sort plans */
}
+ /* TUNING: A full-scan of a VIEW or subquery in the outer loop
+ ** is not so bad. */
+ if( iLoop==0 && (pWLoop->wsFlags & WHERE_VIEWSCAN)!=0 ){
+ rCost += -10;
+ nOut += -30;
+ }
+
/* Check to see if pWLoop should be added to the set of
** mxChoice best-so-far paths.
**
@@ -156938,7 +160716,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
if( nFrom==0 ){
sqlite3ErrorMsg(pParse, "no query solution");
- sqlite3DbFreeNN(db, pSpace);
+ sqlite3StackFreeNN(pParse->db, pSpace);
return SQLITE_ERROR;
}
@@ -156969,12 +160747,12 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
}
pWInfo->bOrderedInnerLoop = 0;
if( pWInfo->pOrderBy ){
+ pWInfo->nOBSat = pFrom->isOrdered;
if( pWInfo->wctrlFlags & WHERE_DISTINCTBY ){
if( pFrom->isOrdered==pWInfo->pOrderBy->nExpr ){
pWInfo->eDistinct = WHERE_DISTINCT_ORDERED;
}
}else{
- pWInfo->nOBSat = pFrom->isOrdered;
pWInfo->revMask = pFrom->revLoop;
if( pWInfo->nOBSat<=0 ){
pWInfo->nOBSat = 0;
@@ -157020,7 +160798,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
pWInfo->nRowOut = pFrom->nRow;
/* Free temporary memory and return success */
- sqlite3DbFreeNN(db, pSpace);
+ sqlite3StackFreeNN(pParse->db, pSpace);
return SQLITE_OK;
}
@@ -157053,7 +160831,11 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){
pItem = pWInfo->pTabList->a;
pTab = pItem->pTab;
if( IsVirtual(pTab) ) return 0;
- if( pItem->fg.isIndexedBy ) return 0;
+ if( pItem->fg.isIndexedBy || pItem->fg.notIndexed ){
+ testcase( pItem->fg.isIndexedBy );
+ testcase( pItem->fg.notIndexed );
+ return 0;
+ }
iCur = pItem->iCursor;
pWC = &pWInfo->sWC;
pLoop = pBuilder->pNew;
@@ -157226,7 +161008,7 @@ static SQLITE_NOINLINE Bitmask whereOmitNoopJoin(
WhereLoop *pLoop;
pLoop = pWInfo->a[i].pWLoop;
pItem = &pWInfo->pTabList->a[pLoop->iTab];
- if( (pItem->fg.jointype & JT_LEFT)==0 ) continue;
+ if( (pItem->fg.jointype & (JT_LEFT|JT_RIGHT))!=JT_LEFT ) continue;
if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT)==0
&& (pLoop->wsFlags & WHERE_ONEROW)==0
){
@@ -157236,8 +161018,8 @@ static SQLITE_NOINLINE Bitmask whereOmitNoopJoin(
pEnd = pWInfo->sWC.a + pWInfo->sWC.nTerm;
    for(pTerm=pWInfo->sWC.a; pTerm<pEnd; pTerm++){
      if( (pTerm->prereqAll & pLoop->maskSelf)!=0 ){
- if( !ExprHasProperty(pTerm->pExpr, EP_FromJoin)
- || pTerm->pExpr->w.iRightJoinTable!=pItem->iCursor
+ if( !ExprHasProperty(pTerm->pExpr, EP_OuterON)
+ || pTerm->pExpr->w.iJoin!=pItem->iCursor
){
break;
}
@@ -157315,6 +161097,77 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful(
}
}
+/*
+** This is an sqlite3ParserAddCleanup() callback that is invoked to
+** free the Parse->pIdxExpr list when the Parse object is destroyed.
+*/
+static void whereIndexedExprCleanup(sqlite3 *db, void *pObject){
+ Parse *pParse = (Parse*)pObject;
+ while( pParse->pIdxExpr!=0 ){
+ IndexedExpr *p = pParse->pIdxExpr;
+ pParse->pIdxExpr = p->pIENext;
+ sqlite3ExprDelete(db, p->pExpr);
+ sqlite3DbFreeNN(db, p);
+ }
+}
+
+/*
+** The index pIdx is used by a query and contains one or more expressions.
+** In other words pIdx is an index on an expression. iIdxCur is the cursor
+** number for the index and iDataCur is the cursor number for the corresponding
+** table.
+**
+** This routine adds IndexedExpr entries to the Parse->pIdxExpr field for
+** each of the expressions in the index so that the expression code generator
+** will know to replace occurrences of the indexed expression with
+** references to the corresponding column of the index.
+*/
+static SQLITE_NOINLINE void whereAddIndexedExpr(
+ Parse *pParse, /* Add IndexedExpr entries to pParse->pIdxExpr */
+ Index *pIdx, /* The index-on-expression that contains the expressions */
+ int iIdxCur, /* Cursor number for pIdx */
+ SrcItem *pTabItem /* The FROM clause entry for the table */
+){
+ int i;
+ IndexedExpr *p;
+ Table *pTab;
+ assert( pIdx->bHasExpr );
+ pTab = pIdx->pTable;
+  for(i=0; i<pIdx->nColumn; i++){
+ Expr *pExpr;
+ int j = pIdx->aiColumn[i];
+ int bMaybeNullRow;
+ if( j==XN_EXPR ){
+ pExpr = pIdx->aColExpr->a[i].pExpr;
+ testcase( pTabItem->fg.jointype & JT_LEFT );
+ testcase( pTabItem->fg.jointype & JT_RIGHT );
+ testcase( pTabItem->fg.jointype & JT_LTORJ );
+ bMaybeNullRow = (pTabItem->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0;
+ }else if( j>=0 && (pTab->aCol[j].colFlags & COLFLAG_VIRTUAL)!=0 ){
+ pExpr = sqlite3ColumnExpr(pTab, &pTab->aCol[j]);
+ bMaybeNullRow = 0;
+ }else{
+ continue;
+ }
+ if( sqlite3ExprIsConstant(pExpr) ) continue;
+ p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr));
+ if( p==0 ) break;
+ p->pIENext = pParse->pIdxExpr;
+ p->pExpr = sqlite3ExprDup(pParse->db, pExpr, 0);
+ p->iDataCur = pTabItem->iCursor;
+ p->iIdxCur = iIdxCur;
+ p->iIdxCol = i;
+ p->bMaybeNullRow = bMaybeNullRow;
+#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
+ p->zIdxName = pIdx->zName;
+#endif
+ pParse->pIdxExpr = p;
+ if( p->pIENext==0 ){
+ sqlite3ParserAddCleanup(pParse, whereIndexedExprCleanup, pParse);
+ }
+ }
+}
+
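+/* For illustration: with "CREATE INDEX i1 ON t1(a+b)" and a query such as
+** "SELECT a+b FROM t1 WHERE a+b>10", the IndexedExpr entries added above
+** let the code generator read the value of a+b out of the corresponding
+** index column rather than recomputing the expression from the table row. */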
/*
** Generate the beginning of the loop used for WHERE clause processing.
** The return value is a pointer to an opaque structure that contains
@@ -157409,7 +161262,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
Expr *pWhere, /* The WHERE clause */
ExprList *pOrderBy, /* An ORDER BY (or GROUP BY) clause, or NULL */
ExprList *pResultSet, /* Query result set. Req'd for DISTINCT */
- Select *pLimit, /* Use this LIMIT/OFFSET clause, if any */
+ Select *pSelect, /* The entire SELECT statement */
u16 wctrlFlags, /* The WHERE_* flags defined in sqliteInt.h */
int iAuxArg /* If WHERE_OR_SUBCLAUSE is set, index cursor number
** If WHERE_USE_LIMIT, then the limit amount */
@@ -157468,7 +161321,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
** field (type Bitmask) it must be aligned on an 8-byte boundary on
** some architectures. Hence the ROUND8() below.
*/
- nByteWInfo = ROUND8(sizeof(WhereInfo)+(nTabList-1)*sizeof(WhereLevel));
+ nByteWInfo = ROUND8P(sizeof(WhereInfo)+(nTabList-1)*sizeof(WhereLevel));
pWInfo = sqlite3DbMallocRawNN(db, nByteWInfo + sizeof(WhereLoop));
if( db->mallocFailed ){
sqlite3DbFree(db, pWInfo);
@@ -157478,7 +161331,9 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
pWInfo->pParse = pParse;
pWInfo->pTabList = pTabList;
pWInfo->pOrderBy = pOrderBy;
+#if WHERETRACE_ENABLED
pWInfo->pWhere = pWhere;
+#endif
pWInfo->pResultSet = pResultSet;
pWInfo->aiCurOnePass[0] = pWInfo->aiCurOnePass[1] = -1;
pWInfo->nLevel = nTabList;
@@ -157486,9 +161341,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
pWInfo->wctrlFlags = wctrlFlags;
pWInfo->iLimit = iAuxArg;
pWInfo->savedNQueryLoop = pParse->nQueryLoop;
-#ifndef SQLITE_OMIT_VIRTUALTABLE
- pWInfo->pLimit = pLimit;
-#endif
+ pWInfo->pSelect = pSelect;
memset(&pWInfo->nOBSat, 0,
offsetof(WhereInfo,sWC) - offsetof(WhereInfo,nOBSat));
memset(&pWInfo->a[0], 0, sizeof(WhereLoop)+nTabList*sizeof(WhereLevel));
@@ -157557,8 +161410,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
/* Analyze all of the subexpressions. */
sqlite3WhereExprAnalyze(pTabList, &pWInfo->sWC);
- sqlite3WhereAddLimit(&pWInfo->sWC, pLimit);
- if( db->mallocFailed ) goto whereBeginError;
+ if( pSelect && pSelect->pLimit ){
+ sqlite3WhereAddLimit(&pWInfo->sWC, pSelect);
+ }
+ if( pParse->nErr ) goto whereBeginError;
/* Special case: WHERE terms that do not refer to any tables in the join
** (constant expressions). Evaluate each such term, and jump over all the
@@ -157790,8 +161645,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
/* noop */
}else
#endif
- if( (pLoop->wsFlags & WHERE_IDX_ONLY)==0
- && (wctrlFlags & WHERE_OR_SUBCLAUSE)==0 ){
+ if( ((pLoop->wsFlags & WHERE_IDX_ONLY)==0
+ && (wctrlFlags & WHERE_OR_SUBCLAUSE)==0)
+ || (pTabItem->fg.jointype & (JT_LTORJ|JT_RIGHT))!=0
+ ){
int op = OP_OpenRead;
if( pWInfo->eOnePass!=ONEPASS_OFF ){
op = OP_OpenWrite;
@@ -157858,8 +161715,12 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
op = OP_ReopenIdx;
}else{
iIndexCur = pParse->nTab++;
+ if( pIx->bHasExpr && OptimizationEnabled(db, SQLITE_IndexedExpr) ){
+ whereAddIndexedExpr(pParse, pIx, iIndexCur, pTabItem);
+ }
}
pLevel->iIdxCur = iIndexCur;
+ assert( pIx!=0 );
assert( pIx->pSchema==pTab->pSchema );
assert( iIndexCur>=0 );
if( op ){
@@ -157893,6 +161754,37 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
}
}
if( iDb>=0 ) sqlite3CodeVerifySchema(pParse, iDb);
+ if( (pTabItem->fg.jointype & JT_RIGHT)!=0
+ && (pLevel->pRJ = sqlite3WhereMalloc(pWInfo, sizeof(WhereRightJoin)))!=0
+ ){
+ WhereRightJoin *pRJ = pLevel->pRJ;
+ pRJ->iMatch = pParse->nTab++;
+ pRJ->regBloom = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Blob, 65536, pRJ->regBloom);
+ pRJ->regReturn = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Null, 0, pRJ->regReturn);
+ assert( pTab==pTabItem->pTab );
+ if( HasRowid(pTab) ){
+ KeyInfo *pInfo;
+ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRJ->iMatch, 1);
+ pInfo = sqlite3KeyInfoAlloc(pParse->db, 1, 0);
+ if( pInfo ){
+ pInfo->aColl[0] = 0;
+ pInfo->aSortFlags[0] = 0;
+ sqlite3VdbeAppendP4(v, pInfo, P4_KEYINFO);
+ }
+ }else{
+ Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+ sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRJ->iMatch, pPk->nKeyCol);
+ sqlite3VdbeSetP4KeyInfo(pParse, pPk);
+ }
+ pLoop->wsFlags &= ~WHERE_IDX_ONLY;
+ /* The nature of RIGHT JOIN processing is such that it messes up
+ ** the output order. So omit any ORDER BY/GROUP BY elimination
+ ** optimizations. We need to do an actual sort for RIGHT JOIN. */
+ pWInfo->nOBSat = 0;
+ pWInfo->eDistinct = WHERE_DISTINCT_UNORDERED;
+ }
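+    /* In outline (informal): pRJ->iMatch is an ephemeral table, keyed by
+    ** rowid or by the table's primary key, that records which rows of the
+    ** right operand found a match, with pRJ->regBloom acting as a quick
+    ** Bloom-filter pre-check.  sqlite3WhereEnd() later uses this
+    ** bookkeeping to emit the rows that never matched. */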
}
pWInfo->iTop = sqlite3VdbeCurrentAddr(v);
if( db->mallocFailed ) goto whereBeginError;
@@ -157904,9 +161796,20 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
  for(ii=0; ii<nTabList; ii++){
    int addrExplain;
    int wsFlags;
+    SrcItem *pSrc;
    if( pParse->nErr ) goto whereBeginError;
pLevel = &pWInfo->a[ii];
wsFlags = pLevel->pWLoop->wsFlags;
+ pSrc = &pTabList->a[pLevel->iFrom];
+ if( pSrc->fg.isMaterialized ){
+ if( pSrc->fg.isCorrelated ){
+ sqlite3VdbeAddOp2(v, OP_Gosub, pSrc->regReturn, pSrc->addrFillSub);
+ }else{
+ int iOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+ sqlite3VdbeAddOp2(v, OP_Gosub, pSrc->regReturn, pSrc->addrFillSub);
+ sqlite3VdbeJumpHere(v, iOnce);
+ }
+ }
if( (wsFlags & (WHERE_AUTO_INDEX|WHERE_BLOOMFILTER))!=0 ){
if( (wsFlags & WHERE_AUTO_INDEX)!=0 ){
#ifndef SQLITE_OMIT_AUTOMATIC_INDEX
@@ -157937,8 +161840,6 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
/* Jump here if malloc fails */
whereBeginError:
if( pWInfo ){
- testcase( pWInfo->pExprMods!=0 );
- whereUndoExprMods(pWInfo);
pParse->nQueryLoop = pWInfo->savedNQueryLoop;
whereInfoFree(db, pWInfo);
}
@@ -157998,6 +161899,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
SrcList *pTabList = pWInfo->pTabList;
sqlite3 *db = pParse->db;
int iEnd = sqlite3VdbeCurrentAddr(v);
+ int nRJ = 0;
/* Generate loop termination code.
*/
@@ -158005,6 +161907,17 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
for(i=pWInfo->nLevel-1; i>=0; i--){
int addr;
pLevel = &pWInfo->a[i];
+ if( pLevel->pRJ ){
+ /* Terminate the subroutine that forms the interior of the loop of
+ ** the RIGHT JOIN table */
+ WhereRightJoin *pRJ = pLevel->pRJ;
+ sqlite3VdbeResolveLabel(v, pLevel->addrCont);
+ pLevel->addrCont = 0;
+ pRJ->endSubrtn = sqlite3VdbeCurrentAddr(v);
+ sqlite3VdbeAddOp3(v, OP_Return, pRJ->regReturn, pRJ->addrSubrtn, 1);
+ VdbeCoverage(v);
+ nRJ++;
+ }
pLoop = pLevel->pWLoop;
if( pLevel->op!=OP_Noop ){
#ifndef SQLITE_DISABLE_SKIPAHEAD_DISTINCT
@@ -158032,7 +161945,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
}
#endif /* SQLITE_DISABLE_SKIPAHEAD_DISTINCT */
/* The common case: Advance to the next row */
- sqlite3VdbeResolveLabel(v, pLevel->addrCont);
+ if( pLevel->addrCont ) sqlite3VdbeResolveLabel(v, pLevel->addrCont);
sqlite3VdbeAddOp3(v, pLevel->op, pLevel->p1, pLevel->p2, pLevel->p3);
sqlite3VdbeChangeP5(v, pLevel->p5);
VdbeCoverage(v);
@@ -158047,7 +161960,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
#ifndef SQLITE_DISABLE_SKIPAHEAD_DISTINCT
if( addrSeek ) sqlite3VdbeJumpHere(v, addrSeek);
#endif
- }else{
+ }else if( pLevel->addrCont ){
sqlite3VdbeResolveLabel(v, pLevel->addrCont);
}
if( (pLoop->wsFlags & WHERE_IN_ABLE)!=0 && pLevel->u.in.nIn>0 ){
@@ -158097,6 +162010,10 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
}
}
sqlite3VdbeResolveLabel(v, pLevel->addrBrk);
+ if( pLevel->pRJ ){
+ sqlite3VdbeAddOp3(v, OP_Return, pLevel->pRJ->regReturn, 0, 1);
+ VdbeCoverage(v);
+ }
if( pLevel->addrSkip ){
sqlite3VdbeGoto(v, pLevel->addrSkip);
VdbeComment((v, "next skip-scan on %s", pLoop->u.btree.pIndex->zName));
@@ -158140,11 +162057,6 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
pWInfo->pTabList->a[pLevel->iFrom].pTab->zName));
}
- /* The "break" point is here, just past the end of the outer loop.
- ** Set it.
- */
- sqlite3VdbeResolveLabel(v, pWInfo->iBreak);
-
assert( pWInfo->nLevel<=pTabList->nSrc );
  for(i=0, pLevel=pWInfo->a; i<pWInfo->nLevel; i++, pLevel++){
int k, last;
@@ -158155,6 +162067,15 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
assert( pTab!=0 );
pLoop = pLevel->pWLoop;
+ /* Do RIGHT JOIN processing. Generate code that will output the
+ ** unmatched rows of the right operand of the RIGHT JOIN with
+ ** all of the columns of the left operand set to NULL.
+ */
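+    /* E.g. for "SELECT * FROM t1 RIGHT JOIN t2 ON t1.a=t2.a", each t2 row
+    ** that never matched a t1 row is emitted here with the t1 columns all
+    ** set to NULL. */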
+ if( pLevel->pRJ ){
+ sqlite3WhereRightJoinLoop(pWInfo, i, pLevel);
+ continue;
+ }
+
/* For a co-routine, change all OP_Column references to the table of
** the co-routine into OP_Copy of result contained in a register.
** OP_Rowid becomes OP_Null.
@@ -158166,29 +162087,6 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
continue;
}
-#ifdef SQLITE_ENABLE_EARLY_CURSOR_CLOSE
- /* Close all of the cursors that were opened by sqlite3WhereBegin.
- ** Except, do not close cursors that will be reused by the OR optimization
- ** (WHERE_OR_SUBCLAUSE). And do not close the OP_OpenWrite cursors
- ** created for the ONEPASS optimization.
- */
- if( (pTab->tabFlags & TF_Ephemeral)==0
- && !IsView(pTab)
- && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0
- ){
- int ws = pLoop->wsFlags;
- if( pWInfo->eOnePass==ONEPASS_OFF && (ws & WHERE_IDX_ONLY)==0 ){
- sqlite3VdbeAddOp1(v, OP_Close, pTabItem->iCursor);
- }
- if( (ws & WHERE_INDEXED)!=0
- && (ws & (WHERE_IPK|WHERE_AUTO_INDEX))==0
- && pLevel->iIdxCur!=pWInfo->aiCurOnePass[1]
- ){
- sqlite3VdbeAddOp1(v, OP_Close, pLevel->iIdxCur);
- }
- }
-#endif
-
/* If this scan uses an index, make VDBE code substitutions to read data
** from the index instead of from the table where possible. In some cases
** this optimization prevents the table from ever being read, which can
@@ -158213,6 +162111,16 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
}else{
last = pWInfo->iEndWhere;
}
+ if( pIdx->bHasExpr ){
+ IndexedExpr *p = pParse->pIdxExpr;
+ while( p ){
+ if( p->iIdxCur==pLevel->iIdxCur ){
+ p->iDataCur = -1;
+ p->iIdxCur = -1;
+ }
+ p = p->pIENext;
+ }
+ }
k = pLevel->addrBody + 1;
#ifdef SQLITE_DEBUG
if( db->flags & SQLITE_VdbeAddopTrace ){
@@ -158289,11 +162197,16 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
}
}
+ /* The "break" point is here, just past the end of the outer loop.
+ ** Set it.
+ */
+ sqlite3VdbeResolveLabel(v, pWInfo->iBreak);
+
/* Final cleanup
*/
- if( pWInfo->pExprMods ) whereUndoExprMods(pWInfo);
pParse->nQueryLoop = pWInfo->savedNQueryLoop;
whereInfoFree(db, pWInfo);
+ pParse->withinRJSubrtn -= nRJ;
return;
}
@@ -159025,7 +162938,7 @@ SQLITE_PRIVATE void sqlite3WindowUpdate(
}
}
}
- pWin->pFunc = pFunc;
+ pWin->pWFunc = pFunc;
}
/*
@@ -159201,7 +163114,6 @@ static ExprList *exprListAppendList(
  for(i=0; i<pAppend->nExpr; i++){
sqlite3 *db = pParse->db;
Expr *pDup = sqlite3ExprDup(db, pAppend->a[i].pExpr, 0);
- assert( pDup==0 || !ExprHasProperty(pDup, EP_MemToken) );
if( db->mallocFailed ){
sqlite3ExprDelete(db, pDup);
break;
@@ -159217,7 +163129,7 @@ static ExprList *exprListAppendList(
}
}
pList = sqlite3ExprListAppend(pParse, pList, pDup);
- if( pList ) pList->a[nInit+i].sortFlags = pAppend->a[i].sortFlags;
+ if( pList ) pList->a[nInit+i].fg.sortFlags = pAppend->a[i].fg.sortFlags;
}
}
return pList;
@@ -159337,9 +163249,9 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){
for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
ExprList *pArgs;
assert( ExprUseXList(pWin->pOwner) );
- assert( pWin->pFunc!=0 );
+ assert( pWin->pWFunc!=0 );
pArgs = pWin->pOwner->x.pList;
- if( pWin->pFunc->funcFlags & SQLITE_FUNC_SUBTYPE ){
+ if( pWin->pWFunc->funcFlags & SQLITE_FUNC_SUBTYPE ){
selectWindowRewriteEList(pParse, pMWin, pSrc, pArgs, pTab, &pSublist);
pWin->iArgCol = (pSublist ? pSublist->nExpr : 0);
pWin->bExprArgs = 1;
@@ -159721,7 +163633,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeInit(Parse *pParse, Select *pSelect){
}
for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
- FuncDef *p = pWin->pFunc;
+ FuncDef *p = pWin->pWFunc;
if( (p->funcFlags & SQLITE_FUNC_MINMAX) && pWin->eStart!=TK_UNBOUNDED ){
/* The inline versions of min() and max() require a single ephemeral
** table and 3 registers. The registers are used as follows:
@@ -159738,7 +163650,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeInit(Parse *pParse, Select *pSelect){
pWin->csrApp = pParse->nTab++;
pWin->regApp = pParse->nMem+1;
pParse->nMem += 3;
- if( pKeyInfo && pWin->pFunc->zName[1]=='i' ){
+ if( pKeyInfo && pWin->pWFunc->zName[1]=='i' ){
assert( pKeyInfo->aSortFlags[0]==0 );
pKeyInfo->aSortFlags[0] = KEYINFO_ORDER_DESC;
}
@@ -159961,7 +163873,7 @@ static void windowAggStep(
Vdbe *v = sqlite3GetVdbe(pParse);
Window *pWin;
for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
- FuncDef *pFunc = pWin->pFunc;
+ FuncDef *pFunc = pWin->pWFunc;
int regArg;
int nArg = pWin->bExprArgs ? 0 : windowArgCount(pWin);
int i;
@@ -160075,7 +163987,7 @@ static void windowAggFinal(WindowCodeArg *p, int bFin){
for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
if( pMWin->regStartRowid==0
- && (pWin->pFunc->funcFlags & SQLITE_FUNC_MINMAX)
+ && (pWin->pWFunc->funcFlags & SQLITE_FUNC_MINMAX)
&& (pWin->eStart!=TK_UNBOUNDED)
){
sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regResult);
@@ -160089,12 +164001,12 @@ static void windowAggFinal(WindowCodeArg *p, int bFin){
int nArg = windowArgCount(pWin);
if( bFin ){
sqlite3VdbeAddOp2(v, OP_AggFinal, pWin->regAccum, nArg);
- sqlite3VdbeAppendP4(v, pWin->pFunc, P4_FUNCDEF);
+ sqlite3VdbeAppendP4(v, pWin->pWFunc, P4_FUNCDEF);
sqlite3VdbeAddOp2(v, OP_Copy, pWin->regAccum, pWin->regResult);
sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regAccum);
}else{
sqlite3VdbeAddOp3(v, OP_AggValue,pWin->regAccum,nArg,pWin->regResult);
- sqlite3VdbeAppendP4(v, pWin->pFunc, P4_FUNCDEF);
+ sqlite3VdbeAppendP4(v, pWin->pWFunc, P4_FUNCDEF);
}
}
}
@@ -160223,7 +164135,7 @@ static void windowReturnOneRow(WindowCodeArg *p){
Window *pWin;
for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
- FuncDef *pFunc = pWin->pFunc;
+ FuncDef *pFunc = pWin->pWFunc;
assert( ExprUseXList(pWin->pOwner) );
if( pFunc->zName==nth_valueName
|| pFunc->zName==first_valueName
@@ -160295,7 +164207,7 @@ static int windowInitAccum(Parse *pParse, Window *pMWin){
int nArg = 0;
Window *pWin;
for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
- FuncDef *pFunc = pWin->pFunc;
+ FuncDef *pFunc = pWin->pWFunc;
assert( pWin->regAccum );
sqlite3VdbeAddOp2(v, OP_Null, 0, pWin->regAccum);
nArg = MAX(nArg, windowArgCount(pWin));
@@ -160325,7 +164237,7 @@ static int windowCacheFrame(Window *pMWin){
Window *pWin;
if( pMWin->regStartRowid ) return 1;
for(pWin=pMWin; pWin; pWin=pWin->pNextWin){
- FuncDef *pFunc = pWin->pFunc;
+ FuncDef *pFunc = pWin->pWFunc;
if( (pFunc->zName==nth_valueName)
|| (pFunc->zName==first_valueName)
|| (pFunc->zName==leadName)
@@ -160418,7 +164330,7 @@ static void windowCodeRangeTest(
assert( op==OP_Ge || op==OP_Gt || op==OP_Le );
assert( pOrderBy && pOrderBy->nExpr==1 );
- if( pOrderBy->a[0].sortFlags & KEYINFO_ORDER_DESC ){
+ if( pOrderBy->a[0].fg.sortFlags & KEYINFO_ORDER_DESC ){
switch( op ){
case OP_Ge: op = OP_Le; break;
case OP_Gt: op = OP_Lt; break;
@@ -160451,7 +164363,7 @@ static void windowCodeRangeTest(
** Additionally, if either reg1 or reg2 are NULL but the jump to lbl is
** not taken, control jumps over the comparison operator coded below this
** block. */
- if( pOrderBy->a[0].sortFlags & KEYINFO_ORDER_BIGNULL ){
+ if( pOrderBy->a[0].fg.sortFlags & KEYINFO_ORDER_BIGNULL ){
/* This block runs if reg1 contains a NULL. */
int addr = sqlite3VdbeAddOp1(v, OP_NotNull, reg1); VdbeCoverage(v);
switch( op ){
@@ -160472,10 +164384,9 @@ static void windowCodeRangeTest(
/* This block runs if reg1 is not NULL, but reg2 is. */
sqlite3VdbeJumpHere(v, addr);
- sqlite3VdbeAddOp2(v, OP_IsNull, reg2, lbl); VdbeCoverage(v);
- if( op==OP_Gt || op==OP_Ge ){
- sqlite3VdbeChangeP2(v, -1, addrDone);
- }
+ sqlite3VdbeAddOp2(v, OP_IsNull, reg2,
+ (op==OP_Gt || op==OP_Ge) ? addrDone : lbl);
+ VdbeCoverage(v);
}
/* Register reg1 currently contains csr1.peerVal (the peer-value from csr1).
@@ -160683,7 +164594,7 @@ SQLITE_PRIVATE Window *sqlite3WindowDup(sqlite3 *db, Expr *pOwner, Window *p){
pNew->zName = sqlite3DbStrDup(db, p->zName);
pNew->zBase = sqlite3DbStrDup(db, p->zBase);
pNew->pFilter = sqlite3ExprDup(db, p->pFilter, 0);
- pNew->pFunc = p->pFunc;
+ pNew->pWFunc = p->pWFunc;
pNew->pPartition = sqlite3ExprListDup(db, p->pPartition, 0);
pNew->pOrderBy = sqlite3ExprListDup(db, p->pOrderBy, 0);
pNew->eFrmType = p->eFrmType;
@@ -161569,7 +165480,7 @@ static void updateDeleteLimitError(
p->affExpr = 0;
p->flags = EP_Leaf;
ExprClearVVAProperties(p);
- p->iAgg = -1;
+ /* p->iAgg = -1; // Not required */
p->pLeft = p->pRight = 0;
p->pAggInfo = 0;
memset(&p->x, 0, sizeof(p->x));
@@ -161902,6 +165813,7 @@ typedef union {
With* yy521;
const char* yy522;
Expr* yy528;
+ OnOrUsing yy561;
struct FrameBound yy595;
} YYMINORTYPE;
#ifndef YYSTACKDEPTH
@@ -161918,18 +165830,18 @@ typedef union {
#define sqlite3ParserCTX_FETCH Parse *pParse=yypParser->pParse;
#define sqlite3ParserCTX_STORE yypParser->pParse=pParse;
#define YYFALLBACK 1
-#define YYNSTATE 578
-#define YYNRULE 402
-#define YYNRULE_WITH_ACTION 340
+#define YYNSTATE 580
+#define YYNRULE 405
+#define YYNRULE_WITH_ACTION 342
#define YYNTOKEN 185
-#define YY_MAX_SHIFT 577
-#define YY_MIN_SHIFTREDUCE 835
-#define YY_MAX_SHIFTREDUCE 1236
-#define YY_ERROR_ACTION 1237
-#define YY_ACCEPT_ACTION 1238
-#define YY_NO_ACTION 1239
-#define YY_MIN_REDUCE 1240
-#define YY_MAX_REDUCE 1641
+#define YY_MAX_SHIFT 579
+#define YY_MIN_SHIFTREDUCE 839
+#define YY_MAX_SHIFTREDUCE 1243
+#define YY_ERROR_ACTION 1244
+#define YY_ACCEPT_ACTION 1245
+#define YY_NO_ACTION 1246
+#define YY_MIN_REDUCE 1247
+#define YY_MAX_REDUCE 1651
/************* End control #defines *******************************************/
#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
@@ -161996,429 +165908,432 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (2071)
+#define YY_ACTTAB_COUNT (2101)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 570, 1311, 570, 1290, 201, 201, 570, 116, 112, 222,
- /* 10 */ 570, 1311, 381, 570, 116, 112, 222, 401, 412, 413,
- /* 20 */ 1264, 382, 1273, 41, 41, 41, 41, 1416, 1521, 71,
- /* 30 */ 71, 971, 1262, 41, 41, 495, 71, 71, 272, 972,
- /* 40 */ 298, 480, 298, 123, 124, 114, 1214, 1214, 1048, 1051,
- /* 50 */ 1040, 1040, 121, 121, 122, 122, 122, 122, 547, 413,
- /* 60 */ 1238, 1, 1, 577, 2, 1242, 552, 116, 112, 222,
- /* 70 */ 309, 484, 142, 552, 1276, 528, 116, 112, 222, 1324,
- /* 80 */ 421, 527, 551, 123, 124, 114, 1214, 1214, 1048, 1051,
- /* 90 */ 1040, 1040, 121, 121, 122, 122, 122, 122, 428, 116,
- /* 100 */ 112, 222, 120, 120, 120, 120, 119, 119, 118, 118,
- /* 110 */ 118, 117, 113, 448, 277, 277, 277, 277, 564, 564,
- /* 120 */ 564, 1562, 380, 1564, 1190, 379, 1161, 567, 1161, 567,
- /* 130 */ 413, 1562, 541, 252, 219, 1557, 99, 141, 453, 6,
- /* 140 */ 369, 233, 120, 120, 120, 120, 119, 119, 118, 118,
- /* 150 */ 118, 117, 113, 448, 123, 124, 114, 1214, 1214, 1048,
- /* 160 */ 1051, 1040, 1040, 121, 121, 122, 122, 122, 122, 138,
- /* 170 */ 289, 1190, 1550, 452, 118, 118, 118, 117, 113, 448,
- /* 180 */ 125, 1190, 1191, 1192, 144, 469, 338, 570, 150, 127,
- /* 190 */ 448, 122, 122, 122, 122, 115, 120, 120, 120, 120,
- /* 200 */ 119, 119, 118, 118, 118, 117, 113, 448, 458, 423,
- /* 210 */ 13, 13, 215, 120, 120, 120, 120, 119, 119, 118,
- /* 220 */ 118, 118, 117, 113, 448, 426, 308, 561, 1190, 1191,
- /* 230 */ 1192, 445, 444, 413, 1275, 122, 122, 122, 122, 120,
- /* 240 */ 120, 120, 120, 119, 119, 118, 118, 118, 117, 113,
- /* 250 */ 448, 1547, 98, 1037, 1037, 1049, 1052, 123, 124, 114,
- /* 260 */ 1214, 1214, 1048, 1051, 1040, 1040, 121, 121, 122, 122,
- /* 270 */ 122, 122, 570, 410, 409, 1190, 570, 413, 1221, 319,
- /* 280 */ 1221, 80, 81, 120, 120, 120, 120, 119, 119, 118,
- /* 290 */ 118, 118, 117, 113, 448, 70, 70, 1190, 1608, 71,
- /* 300 */ 71, 123, 124, 114, 1214, 1214, 1048, 1051, 1040, 1040,
- /* 310 */ 121, 121, 122, 122, 122, 122, 120, 120, 120, 120,
- /* 320 */ 119, 119, 118, 118, 118, 117, 113, 448, 1041, 210,
- /* 330 */ 1190, 369, 1190, 1191, 1192, 245, 552, 403, 508, 505,
- /* 340 */ 504, 108, 562, 138, 4, 520, 937, 437, 503, 217,
- /* 350 */ 518, 526, 356, 883, 1190, 1191, 1192, 387, 565, 570,
- /* 360 */ 120, 120, 120, 120, 119, 119, 118, 118, 118, 117,
- /* 370 */ 113, 448, 277, 277, 16, 16, 1602, 445, 444, 153,
- /* 380 */ 413, 449, 13, 13, 1283, 567, 1218, 1190, 1191, 1192,
- /* 390 */ 1007, 1220, 264, 559, 1578, 186, 570, 431, 138, 1219,
- /* 400 */ 308, 561, 476, 138, 123, 124, 114, 1214, 1214, 1048,
- /* 410 */ 1051, 1040, 1040, 121, 121, 122, 122, 122, 122, 55,
- /* 420 */ 55, 417, 1027, 511, 1221, 1190, 1221, 478, 106, 106,
- /* 430 */ 1316, 1316, 1190, 171, 570, 388, 107, 384, 449, 572,
- /* 440 */ 571, 434, 1547, 1017, 336, 553, 569, 263, 280, 364,
- /* 450 */ 514, 359, 513, 250, 495, 308, 561, 71, 71, 355,
- /* 460 */ 308, 561, 378, 120, 120, 120, 120, 119, 119, 118,
- /* 470 */ 118, 118, 117, 113, 448, 1017, 1017, 1019, 1020, 27,
- /* 480 */ 277, 277, 1190, 1191, 1192, 1156, 570, 532, 413, 1190,
- /* 490 */ 1191, 1192, 352, 567, 552, 1264, 537, 521, 1156, 1520,
- /* 500 */ 317, 1156, 285, 554, 489, 573, 570, 573, 486, 51,
- /* 510 */ 51, 207, 123, 124, 114, 1214, 1214, 1048, 1051, 1040,
- /* 520 */ 1040, 121, 121, 122, 122, 122, 122, 171, 1416, 13,
- /* 530 */ 13, 413, 277, 277, 1190, 509, 119, 119, 118, 118,
- /* 540 */ 118, 117, 113, 448, 433, 567, 522, 220, 519, 1556,
- /* 550 */ 369, 550, 1190, 6, 536, 123, 124, 114, 1214, 1214,
- /* 560 */ 1048, 1051, 1040, 1040, 121, 121, 122, 122, 122, 122,
- /* 570 */ 145, 120, 120, 120, 120, 119, 119, 118, 118, 118,
- /* 580 */ 117, 113, 448, 245, 570, 478, 508, 505, 504, 570,
- /* 590 */ 1485, 1190, 1191, 1192, 1314, 1314, 503, 1190, 149, 429,
- /* 600 */ 1190, 484, 413, 274, 369, 956, 876, 56, 56, 1190,
- /* 610 */ 1191, 1192, 71, 71, 120, 120, 120, 120, 119, 119,
- /* 620 */ 118, 118, 118, 117, 113, 448, 123, 124, 114, 1214,
- /* 630 */ 1214, 1048, 1051, 1040, 1040, 121, 121, 122, 122, 122,
- /* 640 */ 122, 413, 545, 1556, 83, 869, 98, 6, 932, 533,
- /* 650 */ 852, 547, 151, 931, 1190, 1191, 1192, 1190, 1191, 1192,
- /* 660 */ 290, 1547, 187, 1637, 399, 123, 124, 114, 1214, 1214,
- /* 670 */ 1048, 1051, 1040, 1040, 121, 121, 122, 122, 122, 122,
- /* 680 */ 570, 958, 570, 457, 957, 120, 120, 120, 120, 119,
- /* 690 */ 119, 118, 118, 118, 117, 113, 448, 1156, 221, 1190,
- /* 700 */ 335, 457, 456, 13, 13, 13, 13, 1007, 369, 467,
- /* 710 */ 1156, 193, 413, 1156, 386, 1547, 1174, 32, 297, 478,
- /* 720 */ 195, 1531, 5, 956, 120, 120, 120, 120, 119, 119,
- /* 730 */ 118, 118, 118, 117, 113, 448, 123, 124, 114, 1214,
- /* 740 */ 1214, 1048, 1051, 1040, 1040, 121, 121, 122, 122, 122,
- /* 750 */ 122, 413, 1071, 423, 1190, 1028, 1190, 1191, 1192, 1190,
- /* 760 */ 423, 336, 464, 322, 548, 1549, 446, 446, 446, 570,
- /* 770 */ 3, 117, 113, 448, 457, 123, 124, 114, 1214, 1214,
- /* 780 */ 1048, 1051, 1040, 1040, 121, 121, 122, 122, 122, 122,
- /* 790 */ 1477, 570, 15, 15, 293, 120, 120, 120, 120, 119,
- /* 800 */ 119, 118, 118, 118, 117, 113, 448, 1190, 570, 1490,
- /* 810 */ 1416, 1190, 1191, 1192, 13, 13, 1190, 1191, 1192, 1548,
- /* 820 */ 271, 271, 413, 286, 308, 561, 1012, 1490, 1492, 196,
- /* 830 */ 288, 71, 71, 567, 120, 120, 120, 120, 119, 119,
- /* 840 */ 118, 118, 118, 117, 113, 448, 123, 124, 114, 1214,
- /* 850 */ 1214, 1048, 1051, 1040, 1040, 121, 121, 122, 122, 122,
- /* 860 */ 122, 413, 201, 1091, 1190, 1191, 1192, 1328, 304, 1533,
- /* 870 */ 392, 278, 278, 454, 568, 406, 926, 926, 570, 567,
- /* 880 */ 570, 430, 495, 484, 567, 123, 124, 114, 1214, 1214,
- /* 890 */ 1048, 1051, 1040, 1040, 121, 121, 122, 122, 122, 122,
- /* 900 */ 1490, 71, 71, 13, 13, 120, 120, 120, 120, 119,
- /* 910 */ 119, 118, 118, 118, 117, 113, 448, 570, 549, 570,
- /* 920 */ 1581, 577, 2, 1242, 1096, 1096, 492, 1484, 309, 1529,
- /* 930 */ 142, 328, 413, 840, 841, 842, 312, 1324, 305, 367,
- /* 940 */ 43, 43, 57, 57, 120, 120, 120, 120, 119, 119,
- /* 950 */ 118, 118, 118, 117, 113, 448, 123, 124, 114, 1214,
- /* 960 */ 1214, 1048, 1051, 1040, 1040, 121, 121, 122, 122, 122,
- /* 970 */ 122, 12, 277, 277, 570, 1156, 413, 576, 432, 1242,
- /* 980 */ 469, 338, 296, 478, 309, 567, 142, 249, 1156, 308,
- /* 990 */ 561, 1156, 325, 1324, 327, 495, 459, 71, 71, 233,
- /* 1000 */ 283, 101, 114, 1214, 1214, 1048, 1051, 1040, 1040, 121,
- /* 1010 */ 121, 122, 122, 122, 122, 120, 120, 120, 120, 119,
- /* 1020 */ 119, 118, 118, 118, 117, 113, 448, 1112, 277, 277,
- /* 1030 */ 1416, 452, 398, 1234, 443, 277, 277, 248, 247, 246,
- /* 1040 */ 1323, 567, 1113, 313, 198, 294, 495, 1322, 567, 468,
- /* 1050 */ 570, 1431, 398, 1134, 1027, 233, 418, 1114, 295, 120,
- /* 1060 */ 120, 120, 120, 119, 119, 118, 118, 118, 117, 113,
- /* 1070 */ 448, 1018, 104, 71, 71, 1017, 326, 500, 912, 570,
- /* 1080 */ 277, 277, 277, 277, 1112, 1265, 419, 452, 913, 365,
- /* 1090 */ 1575, 1319, 413, 567, 956, 567, 9, 202, 255, 1113,
- /* 1100 */ 316, 491, 44, 44, 249, 563, 419, 1017, 1017, 1019,
- /* 1110 */ 447, 1235, 413, 1607, 1114, 901, 123, 124, 114, 1214,
- /* 1120 */ 1214, 1048, 1051, 1040, 1040, 121, 121, 122, 122, 122,
- /* 1130 */ 122, 1235, 413, 1211, 215, 558, 123, 124, 114, 1214,
- /* 1140 */ 1214, 1048, 1051, 1040, 1040, 121, 121, 122, 122, 122,
- /* 1150 */ 122, 1135, 1635, 474, 1635, 255, 123, 111, 114, 1214,
- /* 1160 */ 1214, 1048, 1051, 1040, 1040, 121, 121, 122, 122, 122,
- /* 1170 */ 122, 1135, 1636, 418, 1636, 120, 120, 120, 120, 119,
- /* 1180 */ 119, 118, 118, 118, 117, 113, 448, 221, 209, 355,
- /* 1190 */ 1211, 1211, 147, 1430, 495, 120, 120, 120, 120, 119,
- /* 1200 */ 119, 118, 118, 118, 117, 113, 448, 1260, 543, 523,
- /* 1210 */ 892, 555, 956, 12, 570, 120, 120, 120, 120, 119,
- /* 1220 */ 119, 118, 118, 118, 117, 113, 448, 542, 570, 864,
- /* 1230 */ 1133, 365, 1575, 350, 1360, 413, 1167, 58, 58, 343,
- /* 1240 */ 1359, 512, 277, 277, 277, 277, 277, 277, 1211, 893,
- /* 1250 */ 1133, 59, 59, 463, 367, 567, 570, 567, 96, 567,
- /* 1260 */ 124, 114, 1214, 1214, 1048, 1051, 1040, 1040, 121, 121,
- /* 1270 */ 122, 122, 122, 122, 570, 1416, 570, 281, 1190, 60,
- /* 1280 */ 60, 110, 396, 396, 395, 266, 393, 864, 1167, 849,
- /* 1290 */ 570, 485, 570, 440, 345, 1156, 348, 61, 61, 62,
- /* 1300 */ 62, 971, 227, 1554, 315, 435, 544, 6, 1156, 972,
- /* 1310 */ 570, 1156, 314, 45, 45, 46, 46, 516, 120, 120,
- /* 1320 */ 120, 120, 119, 119, 118, 118, 118, 117, 113, 448,
- /* 1330 */ 420, 173, 1536, 47, 47, 1190, 1191, 1192, 108, 562,
- /* 1340 */ 329, 4, 229, 1555, 932, 570, 441, 6, 570, 931,
- /* 1350 */ 164, 570, 1294, 137, 1194, 565, 570, 1553, 570, 1093,
- /* 1360 */ 570, 6, 570, 1093, 535, 570, 872, 8, 49, 49,
- /* 1370 */ 228, 50, 50, 570, 63, 63, 570, 461, 449, 64,
- /* 1380 */ 64, 65, 65, 14, 14, 66, 66, 411, 129, 129,
- /* 1390 */ 559, 570, 462, 570, 1509, 490, 67, 67, 570, 52,
- /* 1400 */ 52, 550, 411, 471, 539, 414, 226, 1027, 570, 538,
- /* 1410 */ 308, 561, 1194, 411, 68, 68, 69, 69, 570, 1027,
- /* 1420 */ 570, 53, 53, 872, 1018, 106, 106, 529, 1017, 570,
- /* 1430 */ 1508, 159, 159, 107, 455, 449, 572, 571, 475, 307,
- /* 1440 */ 1017, 160, 160, 76, 76, 570, 1552, 470, 411, 411,
- /* 1450 */ 6, 1229, 54, 54, 482, 276, 219, 570, 891, 890,
- /* 1460 */ 1017, 1017, 1019, 84, 206, 1210, 230, 282, 72, 72,
- /* 1470 */ 333, 487, 1017, 1017, 1019, 1020, 27, 1580, 1178, 451,
- /* 1480 */ 130, 130, 281, 148, 105, 38, 103, 396, 396, 395,
- /* 1490 */ 266, 393, 570, 1130, 849, 400, 570, 108, 562, 570,
- /* 1500 */ 4, 311, 570, 30, 17, 570, 279, 227, 570, 315,
- /* 1510 */ 108, 562, 472, 4, 565, 73, 73, 314, 570, 157,
- /* 1520 */ 157, 570, 131, 131, 530, 132, 132, 565, 128, 128,
- /* 1530 */ 570, 158, 158, 570, 31, 291, 570, 449, 334, 525,
- /* 1540 */ 98, 152, 152, 424, 136, 136, 1009, 229, 254, 559,
- /* 1550 */ 449, 483, 340, 135, 135, 164, 133, 133, 137, 134,
- /* 1560 */ 134, 879, 559, 539, 570, 477, 570, 254, 540, 479,
- /* 1570 */ 339, 254, 98, 898, 899, 228, 539, 570, 1027, 570,
- /* 1580 */ 1078, 538, 210, 232, 106, 106, 1356, 75, 75, 77,
- /* 1590 */ 77, 1027, 107, 344, 449, 572, 571, 106, 106, 1017,
- /* 1600 */ 74, 74, 42, 42, 570, 107, 347, 449, 572, 571,
- /* 1610 */ 414, 501, 1017, 251, 363, 308, 561, 1139, 353, 879,
- /* 1620 */ 98, 1074, 349, 251, 362, 1595, 351, 48, 48, 1021,
- /* 1630 */ 1307, 1017, 1017, 1019, 1020, 27, 1293, 1291, 1078, 455,
- /* 1640 */ 965, 929, 254, 110, 1017, 1017, 1019, 1020, 27, 1178,
- /* 1650 */ 451, 974, 975, 281, 108, 562, 1292, 4, 396, 396,
- /* 1660 */ 395, 266, 393, 1347, 1090, 849, 1090, 1089, 862, 1089,
- /* 1670 */ 146, 565, 930, 358, 110, 303, 368, 557, 227, 1368,
- /* 1680 */ 315, 108, 562, 1415, 4, 1343, 496, 1021, 314, 1354,
- /* 1690 */ 1569, 556, 1421, 1272, 449, 204, 1263, 1251, 565, 1250,
- /* 1700 */ 1252, 1588, 269, 1340, 371, 373, 559, 375, 11, 212,
- /* 1710 */ 397, 225, 321, 284, 1402, 460, 287, 331, 229, 332,
- /* 1720 */ 292, 449, 324, 216, 337, 1407, 164, 481, 377, 137,
- /* 1730 */ 1406, 404, 506, 559, 1290, 1027, 361, 1481, 199, 1591,
- /* 1740 */ 211, 106, 106, 936, 1480, 1229, 228, 560, 175, 107,
- /* 1750 */ 200, 449, 572, 571, 258, 391, 1017, 1528, 1526, 223,
- /* 1760 */ 1226, 422, 1027, 83, 208, 79, 82, 184, 106, 106,
- /* 1770 */ 1486, 126, 1397, 550, 169, 320, 107, 1403, 449, 572,
- /* 1780 */ 571, 414, 177, 1017, 1390, 323, 308, 561, 1017, 1017,
- /* 1790 */ 1019, 1020, 27, 465, 35, 235, 100, 562, 499, 4,
- /* 1800 */ 179, 180, 181, 466, 182, 96, 402, 1409, 473, 1408,
- /* 1810 */ 455, 36, 1411, 565, 188, 1017, 1017, 1019, 1020, 27,
- /* 1820 */ 405, 1475, 488, 239, 89, 494, 270, 192, 1497, 342,
- /* 1830 */ 241, 497, 346, 242, 515, 243, 449, 1253, 1310, 1309,
- /* 1840 */ 407, 91, 436, 1308, 883, 217, 438, 439, 559, 524,
- /* 1850 */ 531, 408, 1351, 1606, 1301, 301, 1280, 1605, 360, 1279,
- /* 1860 */ 1278, 1604, 1574, 302, 95, 366, 370, 372, 1300, 1352,
- /* 1870 */ 1350, 374, 256, 257, 442, 10, 1349, 1027, 1461, 385,
- /* 1880 */ 97, 1375, 102, 106, 106, 534, 1560, 34, 1559, 574,
- /* 1890 */ 1184, 107, 265, 449, 572, 571, 267, 268, 1017, 203,
- /* 1900 */ 1333, 383, 389, 1332, 390, 575, 376, 1248, 1243, 1513,
- /* 1910 */ 161, 143, 1374, 1514, 1512, 162, 299, 1511, 163, 213,
- /* 1920 */ 836, 214, 78, 450, 205, 310, 224, 1088, 140, 1086,
- /* 1930 */ 1017, 1017, 1019, 1020, 27, 318, 306, 176, 165, 1210,
- /* 1940 */ 178, 231, 915, 234, 330, 1102, 183, 166, 167, 425,
- /* 1950 */ 427, 185, 85, 86, 87, 168, 88, 415, 1105, 236,
- /* 1960 */ 174, 237, 416, 1101, 154, 18, 238, 341, 1223, 240,
- /* 1970 */ 254, 493, 190, 1094, 37, 189, 851, 498, 362, 244,
- /* 1980 */ 354, 510, 191, 90, 170, 502, 19, 20, 507, 93,
- /* 1990 */ 881, 357, 92, 300, 894, 155, 517, 218, 1172, 156,
- /* 2000 */ 1054, 959, 1141, 94, 39, 1140, 273, 275, 964, 194,
- /* 2010 */ 110, 1158, 1162, 1160, 253, 21, 1166, 7, 1146, 33,
- /* 2020 */ 22, 197, 23, 24, 25, 1165, 546, 26, 98, 1069,
- /* 2030 */ 1055, 1053, 1057, 1111, 1058, 1110, 259, 260, 28, 40,
- /* 2040 */ 1180, 1022, 863, 109, 29, 566, 394, 1179, 139, 172,
- /* 2050 */ 925, 261, 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239,
- /* 2060 */ 262, 1239, 1239, 1239, 1239, 1597, 1239, 1239, 1239, 1239,
- /* 2070 */ 1596,
+ /* 0 */ 572, 208, 572, 118, 115, 229, 572, 118, 115, 229,
+ /* 10 */ 572, 1318, 381, 1297, 412, 566, 566, 566, 572, 413,
+ /* 20 */ 382, 1318, 1280, 41, 41, 41, 41, 208, 1530, 71,
+ /* 30 */ 71, 975, 423, 41, 41, 495, 303, 279, 303, 976,
+ /* 40 */ 401, 71, 71, 125, 126, 80, 1221, 1221, 1054, 1057,
+ /* 50 */ 1044, 1044, 123, 123, 124, 124, 124, 124, 480, 413,
+ /* 60 */ 1245, 1, 1, 579, 2, 1249, 554, 118, 115, 229,
+ /* 70 */ 317, 484, 146, 484, 528, 118, 115, 229, 533, 1331,
+ /* 80 */ 421, 527, 142, 125, 126, 80, 1221, 1221, 1054, 1057,
+ /* 90 */ 1044, 1044, 123, 123, 124, 124, 124, 124, 118, 115,
+ /* 100 */ 229, 327, 122, 122, 122, 122, 121, 121, 120, 120,
+ /* 110 */ 120, 119, 116, 448, 284, 284, 284, 284, 446, 446,
+ /* 120 */ 446, 1571, 380, 1573, 1196, 379, 1167, 569, 1167, 569,
+ /* 130 */ 413, 1571, 541, 259, 226, 448, 101, 145, 453, 316,
+ /* 140 */ 563, 240, 122, 122, 122, 122, 121, 121, 120, 120,
+ /* 150 */ 120, 119, 116, 448, 125, 126, 80, 1221, 1221, 1054,
+ /* 160 */ 1057, 1044, 1044, 123, 123, 124, 124, 124, 124, 142,
+ /* 170 */ 294, 1196, 343, 452, 120, 120, 120, 119, 116, 448,
+ /* 180 */ 127, 1196, 1197, 1198, 148, 445, 444, 572, 119, 116,
+ /* 190 */ 448, 124, 124, 124, 124, 117, 122, 122, 122, 122,
+ /* 200 */ 121, 121, 120, 120, 120, 119, 116, 448, 458, 113,
+ /* 210 */ 13, 13, 550, 122, 122, 122, 122, 121, 121, 120,
+ /* 220 */ 120, 120, 119, 116, 448, 426, 316, 563, 1196, 1197,
+ /* 230 */ 1198, 149, 1228, 413, 1228, 124, 124, 124, 124, 122,
+ /* 240 */ 122, 122, 122, 121, 121, 120, 120, 120, 119, 116,
+ /* 250 */ 448, 469, 346, 1041, 1041, 1055, 1058, 125, 126, 80,
+ /* 260 */ 1221, 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124,
+ /* 270 */ 124, 124, 1283, 526, 222, 1196, 572, 413, 224, 518,
+ /* 280 */ 175, 82, 83, 122, 122, 122, 122, 121, 121, 120,
+ /* 290 */ 120, 120, 119, 116, 448, 1011, 16, 16, 1196, 133,
+ /* 300 */ 133, 125, 126, 80, 1221, 1221, 1054, 1057, 1044, 1044,
+ /* 310 */ 123, 123, 124, 124, 124, 124, 122, 122, 122, 122,
+ /* 320 */ 121, 121, 120, 120, 120, 119, 116, 448, 1045, 550,
+ /* 330 */ 1196, 377, 1196, 1197, 1198, 252, 1438, 403, 508, 505,
+ /* 340 */ 504, 111, 564, 570, 4, 930, 930, 437, 503, 344,
+ /* 350 */ 464, 330, 364, 398, 1241, 1196, 1197, 1198, 567, 572,
+ /* 360 */ 122, 122, 122, 122, 121, 121, 120, 120, 120, 119,
+ /* 370 */ 116, 448, 284, 284, 373, 1584, 1611, 445, 444, 154,
+ /* 380 */ 413, 449, 71, 71, 1290, 569, 1225, 1196, 1197, 1198,
+ /* 390 */ 85, 1227, 271, 561, 547, 519, 1565, 572, 98, 1226,
+ /* 400 */ 6, 1282, 476, 142, 125, 126, 80, 1221, 1221, 1054,
+ /* 410 */ 1057, 1044, 1044, 123, 123, 124, 124, 124, 124, 554,
+ /* 420 */ 13, 13, 1031, 511, 1228, 1196, 1228, 553, 109, 109,
+ /* 430 */ 222, 572, 1242, 175, 572, 431, 110, 197, 449, 574,
+ /* 440 */ 573, 434, 1556, 1021, 325, 555, 1196, 270, 287, 372,
+ /* 450 */ 514, 367, 513, 257, 71, 71, 547, 71, 71, 363,
+ /* 460 */ 316, 563, 1617, 122, 122, 122, 122, 121, 121, 120,
+ /* 470 */ 120, 120, 119, 116, 448, 1021, 1021, 1023, 1024, 27,
+ /* 480 */ 284, 284, 1196, 1197, 1198, 1162, 572, 1616, 413, 905,
+ /* 490 */ 190, 554, 360, 569, 554, 941, 537, 521, 1162, 520,
+ /* 500 */ 417, 1162, 556, 1196, 1197, 1198, 572, 548, 1558, 51,
+ /* 510 */ 51, 214, 125, 126, 80, 1221, 1221, 1054, 1057, 1044,
+ /* 520 */ 1044, 123, 123, 124, 124, 124, 124, 1196, 478, 135,
+ /* 530 */ 135, 413, 284, 284, 1494, 509, 121, 121, 120, 120,
+ /* 540 */ 120, 119, 116, 448, 1011, 569, 522, 217, 545, 1565,
+ /* 550 */ 316, 563, 142, 6, 536, 125, 126, 80, 1221, 1221,
+ /* 560 */ 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, 124,
+ /* 570 */ 1559, 122, 122, 122, 122, 121, 121, 120, 120, 120,
+ /* 580 */ 119, 116, 448, 489, 1196, 1197, 1198, 486, 281, 1271,
+ /* 590 */ 961, 252, 1196, 377, 508, 505, 504, 1196, 344, 575,
+ /* 600 */ 1196, 575, 413, 292, 503, 961, 880, 191, 484, 316,
+ /* 610 */ 563, 388, 290, 384, 122, 122, 122, 122, 121, 121,
+ /* 620 */ 120, 120, 120, 119, 116, 448, 125, 126, 80, 1221,
+ /* 630 */ 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124,
+ /* 640 */ 124, 413, 398, 1140, 1196, 873, 100, 284, 284, 1196,
+ /* 650 */ 1197, 1198, 377, 1097, 1196, 1197, 1198, 1196, 1197, 1198,
+ /* 660 */ 569, 459, 32, 377, 233, 125, 126, 80, 1221, 1221,
+ /* 670 */ 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, 124,
+ /* 680 */ 1437, 963, 572, 228, 962, 122, 122, 122, 122, 121,
+ /* 690 */ 121, 120, 120, 120, 119, 116, 448, 1162, 228, 1196,
+ /* 700 */ 157, 1196, 1197, 1198, 1557, 13, 13, 301, 961, 1236,
+ /* 710 */ 1162, 153, 413, 1162, 377, 1587, 1180, 5, 373, 1584,
+ /* 720 */ 433, 1242, 3, 961, 122, 122, 122, 122, 121, 121,
+ /* 730 */ 120, 120, 120, 119, 116, 448, 125, 126, 80, 1221,
+ /* 740 */ 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124,
+ /* 750 */ 124, 413, 208, 571, 1196, 1032, 1196, 1197, 1198, 1196,
+ /* 760 */ 392, 856, 155, 1556, 286, 406, 1102, 1102, 492, 572,
+ /* 770 */ 469, 346, 1323, 1323, 1556, 125, 126, 80, 1221, 1221,
+ /* 780 */ 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, 124,
+ /* 790 */ 129, 572, 13, 13, 378, 122, 122, 122, 122, 121,
+ /* 800 */ 121, 120, 120, 120, 119, 116, 448, 302, 572, 457,
+ /* 810 */ 532, 1196, 1197, 1198, 13, 13, 1196, 1197, 1198, 1301,
+ /* 820 */ 467, 1271, 413, 1321, 1321, 1556, 1016, 457, 456, 200,
+ /* 830 */ 299, 71, 71, 1269, 122, 122, 122, 122, 121, 121,
+ /* 840 */ 120, 120, 120, 119, 116, 448, 125, 126, 80, 1221,
+ /* 850 */ 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124,
+ /* 860 */ 124, 413, 227, 1077, 1162, 284, 284, 423, 312, 278,
+ /* 870 */ 278, 285, 285, 1423, 410, 409, 386, 1162, 569, 572,
+ /* 880 */ 1162, 1200, 569, 1604, 569, 125, 126, 80, 1221, 1221,
+ /* 890 */ 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124, 124,
+ /* 900 */ 457, 1486, 13, 13, 1540, 122, 122, 122, 122, 121,
+ /* 910 */ 121, 120, 120, 120, 119, 116, 448, 201, 572, 358,
+ /* 920 */ 1590, 579, 2, 1249, 844, 845, 846, 1566, 317, 1216,
+ /* 930 */ 146, 6, 413, 255, 254, 253, 206, 1331, 9, 1200,
+ /* 940 */ 262, 71, 71, 428, 122, 122, 122, 122, 121, 121,
+ /* 950 */ 120, 120, 120, 119, 116, 448, 125, 126, 80, 1221,
+ /* 960 */ 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124,
+ /* 970 */ 124, 572, 284, 284, 572, 1217, 413, 578, 313, 1249,
+ /* 980 */ 353, 1300, 356, 423, 317, 569, 146, 495, 529, 1647,
+ /* 990 */ 399, 375, 495, 1331, 70, 70, 1299, 71, 71, 240,
+ /* 1000 */ 1329, 104, 80, 1221, 1221, 1054, 1057, 1044, 1044, 123,
+ /* 1010 */ 123, 124, 124, 124, 124, 122, 122, 122, 122, 121,
+ /* 1020 */ 121, 120, 120, 120, 119, 116, 448, 1118, 284, 284,
+ /* 1030 */ 432, 452, 1529, 1217, 443, 284, 284, 1493, 1356, 311,
+ /* 1040 */ 478, 569, 1119, 975, 495, 495, 217, 1267, 569, 1542,
+ /* 1050 */ 572, 976, 207, 572, 1031, 240, 387, 1120, 523, 122,
+ /* 1060 */ 122, 122, 122, 121, 121, 120, 120, 120, 119, 116,
+ /* 1070 */ 448, 1022, 107, 71, 71, 1021, 13, 13, 916, 572,
+ /* 1080 */ 1499, 572, 284, 284, 97, 530, 495, 452, 917, 1330,
+ /* 1090 */ 1326, 549, 413, 284, 284, 569, 151, 209, 1499, 1501,
+ /* 1100 */ 262, 454, 55, 55, 56, 56, 569, 1021, 1021, 1023,
+ /* 1110 */ 447, 336, 413, 531, 12, 295, 125, 126, 80, 1221,
+ /* 1120 */ 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124,
+ /* 1130 */ 124, 351, 413, 868, 1538, 1217, 125, 126, 80, 1221,
+ /* 1140 */ 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124,
+ /* 1150 */ 124, 1141, 1645, 478, 1645, 375, 125, 114, 80, 1221,
+ /* 1160 */ 1221, 1054, 1057, 1044, 1044, 123, 123, 124, 124, 124,
+ /* 1170 */ 124, 1499, 333, 478, 335, 122, 122, 122, 122, 121,
+ /* 1180 */ 121, 120, 120, 120, 119, 116, 448, 203, 1423, 572,
+ /* 1190 */ 1298, 868, 468, 1217, 440, 122, 122, 122, 122, 121,
+ /* 1200 */ 121, 120, 120, 120, 119, 116, 448, 557, 1141, 1646,
+ /* 1210 */ 543, 1646, 15, 15, 896, 122, 122, 122, 122, 121,
+ /* 1220 */ 121, 120, 120, 120, 119, 116, 448, 572, 298, 542,
+ /* 1230 */ 1139, 1423, 1563, 1564, 1335, 413, 6, 6, 1173, 1272,
+ /* 1240 */ 419, 320, 284, 284, 1423, 512, 569, 529, 300, 461,
+ /* 1250 */ 43, 43, 572, 897, 12, 569, 334, 482, 429, 411,
+ /* 1260 */ 126, 80, 1221, 1221, 1054, 1057, 1044, 1044, 123, 123,
+ /* 1270 */ 124, 124, 124, 124, 572, 57, 57, 288, 1196, 1423,
+ /* 1280 */ 500, 462, 396, 396, 395, 273, 393, 1139, 1562, 853,
+ /* 1290 */ 1173, 411, 6, 572, 321, 1162, 474, 44, 44, 1561,
+ /* 1300 */ 1118, 430, 234, 6, 323, 256, 544, 256, 1162, 435,
+ /* 1310 */ 572, 1162, 322, 17, 491, 1119, 58, 58, 122, 122,
+ /* 1320 */ 122, 122, 121, 121, 120, 120, 120, 119, 116, 448,
+ /* 1330 */ 1120, 216, 485, 59, 59, 1196, 1197, 1198, 111, 564,
+ /* 1340 */ 324, 4, 236, 460, 530, 572, 237, 460, 572, 441,
+ /* 1350 */ 168, 560, 424, 141, 483, 567, 572, 293, 572, 1099,
+ /* 1360 */ 572, 293, 572, 1099, 535, 572, 876, 8, 60, 60,
+ /* 1370 */ 235, 61, 61, 572, 418, 572, 418, 572, 449, 62,
+ /* 1380 */ 62, 45, 45, 46, 46, 47, 47, 199, 49, 49,
+ /* 1390 */ 561, 572, 363, 572, 100, 490, 50, 50, 63, 63,
+ /* 1400 */ 64, 64, 565, 419, 539, 414, 572, 1031, 572, 538,
+ /* 1410 */ 316, 563, 316, 563, 65, 65, 14, 14, 572, 1031,
+ /* 1420 */ 572, 516, 936, 876, 1022, 109, 109, 935, 1021, 66,
+ /* 1430 */ 66, 131, 131, 110, 455, 449, 574, 573, 420, 177,
+ /* 1440 */ 1021, 132, 132, 67, 67, 572, 471, 572, 936, 475,
+ /* 1450 */ 1368, 283, 226, 935, 315, 1367, 411, 572, 463, 411,
+ /* 1460 */ 1021, 1021, 1023, 239, 411, 86, 213, 1354, 52, 52,
+ /* 1470 */ 68, 68, 1021, 1021, 1023, 1024, 27, 1589, 1184, 451,
+ /* 1480 */ 69, 69, 288, 97, 108, 1545, 106, 396, 396, 395,
+ /* 1490 */ 273, 393, 572, 883, 853, 887, 572, 111, 564, 470,
+ /* 1500 */ 4, 572, 152, 30, 38, 572, 1136, 234, 400, 323,
+ /* 1510 */ 111, 564, 531, 4, 567, 53, 53, 322, 572, 163,
+ /* 1520 */ 163, 572, 341, 472, 164, 164, 337, 567, 76, 76,
+ /* 1530 */ 572, 289, 1518, 572, 31, 1517, 572, 449, 342, 487,
+ /* 1540 */ 100, 54, 54, 348, 72, 72, 296, 236, 1084, 561,
+ /* 1550 */ 449, 883, 1364, 134, 134, 168, 73, 73, 141, 161,
+ /* 1560 */ 161, 1578, 561, 539, 572, 319, 572, 352, 540, 1013,
+ /* 1570 */ 477, 261, 261, 895, 894, 235, 539, 572, 1031, 572,
+ /* 1580 */ 479, 538, 261, 371, 109, 109, 525, 136, 136, 130,
+ /* 1590 */ 130, 1031, 110, 370, 449, 574, 573, 109, 109, 1021,
+ /* 1600 */ 162, 162, 156, 156, 572, 110, 1084, 449, 574, 573,
+ /* 1610 */ 414, 355, 1021, 572, 357, 316, 563, 572, 347, 572,
+ /* 1620 */ 100, 501, 361, 258, 100, 902, 903, 140, 140, 359,
+ /* 1630 */ 1314, 1021, 1021, 1023, 1024, 27, 139, 139, 366, 455,
+ /* 1640 */ 137, 137, 138, 138, 1021, 1021, 1023, 1024, 27, 1184,
+ /* 1650 */ 451, 572, 376, 288, 111, 564, 1025, 4, 396, 396,
+ /* 1660 */ 395, 273, 393, 572, 1145, 853, 572, 1080, 572, 258,
+ /* 1670 */ 496, 567, 572, 211, 75, 75, 559, 966, 234, 261,
+ /* 1680 */ 323, 111, 564, 933, 4, 113, 77, 77, 322, 74,
+ /* 1690 */ 74, 42, 42, 1377, 449, 48, 48, 1422, 567, 978,
+ /* 1700 */ 979, 1096, 1095, 1096, 1095, 866, 561, 150, 934, 1350,
+ /* 1710 */ 113, 1362, 558, 1428, 1025, 1279, 1270, 1258, 236, 1257,
+ /* 1720 */ 1259, 449, 1597, 1347, 308, 276, 168, 309, 11, 141,
+ /* 1730 */ 397, 310, 232, 561, 1409, 1031, 339, 291, 329, 219,
+ /* 1740 */ 340, 109, 109, 940, 297, 1414, 235, 345, 481, 110,
+ /* 1750 */ 506, 449, 574, 573, 332, 1413, 1021, 404, 1297, 369,
+ /* 1760 */ 223, 1490, 1031, 1489, 1359, 1360, 1358, 1357, 109, 109,
+ /* 1770 */ 204, 1600, 1236, 562, 265, 218, 110, 205, 449, 574,
+ /* 1780 */ 573, 414, 391, 1021, 1537, 179, 316, 563, 1021, 1021,
+ /* 1790 */ 1023, 1024, 27, 230, 1535, 1233, 79, 564, 85, 4,
+ /* 1800 */ 422, 215, 552, 81, 84, 188, 1410, 128, 1404, 550,
+ /* 1810 */ 455, 35, 328, 567, 173, 1021, 1021, 1023, 1024, 27,
+ /* 1820 */ 181, 1495, 1397, 331, 465, 183, 184, 185, 186, 466,
+ /* 1830 */ 499, 242, 98, 402, 1416, 1418, 449, 1415, 473, 36,
+ /* 1840 */ 192, 488, 405, 1506, 246, 91, 494, 196, 561, 1484,
+ /* 1850 */ 350, 497, 277, 354, 248, 249, 111, 564, 1260, 4,
+ /* 1860 */ 250, 407, 515, 436, 1317, 1308, 93, 1316, 1315, 887,
+ /* 1870 */ 1307, 224, 1583, 567, 438, 524, 439, 1031, 263, 264,
+ /* 1880 */ 442, 1615, 10, 109, 109, 1287, 408, 1614, 1286, 368,
+ /* 1890 */ 1285, 110, 1613, 449, 574, 573, 449, 306, 1021, 307,
+ /* 1900 */ 374, 1382, 1569, 1470, 1381, 385, 105, 314, 561, 99,
+ /* 1910 */ 1568, 534, 34, 576, 1190, 272, 1340, 551, 383, 274,
+ /* 1920 */ 1339, 210, 389, 390, 275, 577, 1255, 1250, 415, 165,
+ /* 1930 */ 1021, 1021, 1023, 1024, 27, 147, 1522, 1031, 166, 1523,
+ /* 1940 */ 416, 1521, 178, 109, 109, 1520, 304, 167, 840, 450,
+ /* 1950 */ 220, 110, 221, 449, 574, 573, 212, 78, 1021, 318,
+ /* 1960 */ 231, 1094, 1092, 144, 180, 326, 169, 1216, 241, 182,
+ /* 1970 */ 919, 338, 238, 1108, 187, 170, 171, 425, 427, 189,
+ /* 1980 */ 87, 88, 89, 90, 172, 1111, 243, 1107, 244, 158,
+ /* 1990 */ 1021, 1021, 1023, 1024, 27, 18, 245, 1230, 493, 349,
+ /* 2000 */ 1100, 261, 247, 193, 194, 37, 370, 855, 498, 251,
+ /* 2010 */ 195, 510, 92, 19, 174, 362, 502, 20, 507, 885,
+ /* 2020 */ 365, 898, 94, 305, 159, 95, 517, 96, 1178, 160,
+ /* 2030 */ 1060, 1147, 39, 1146, 225, 280, 282, 970, 198, 964,
+ /* 2040 */ 113, 1164, 1168, 260, 1166, 21, 1172, 7, 22, 1152,
+ /* 2050 */ 33, 23, 24, 25, 1171, 546, 26, 202, 100, 102,
+ /* 2060 */ 1075, 103, 1061, 1059, 1063, 1117, 1064, 1116, 266, 267,
+ /* 2070 */ 28, 40, 929, 1026, 867, 112, 29, 568, 394, 143,
+ /* 2080 */ 1186, 268, 176, 1185, 269, 1246, 1246, 1246, 1246, 1246,
+ /* 2090 */ 1246, 1246, 1246, 1246, 1246, 1606, 1246, 1246, 1246, 1246,
+ /* 2100 */ 1605,
};
static const YYCODETYPE yy_lookahead[] = {
- /* 0 */ 193, 223, 193, 225, 193, 193, 193, 274, 275, 276,
- /* 10 */ 193, 233, 219, 193, 274, 275, 276, 206, 206, 19,
- /* 20 */ 193, 219, 216, 216, 217, 216, 217, 193, 295, 216,
- /* 30 */ 217, 31, 205, 216, 217, 193, 216, 217, 213, 39,
- /* 40 */ 228, 193, 230, 43, 44, 45, 46, 47, 48, 49,
+ /* 0 */ 193, 193, 193, 274, 275, 276, 193, 274, 275, 276,
+ /* 10 */ 193, 223, 219, 225, 206, 210, 211, 212, 193, 19,
+ /* 20 */ 219, 233, 216, 216, 217, 216, 217, 193, 295, 216,
+ /* 30 */ 217, 31, 193, 216, 217, 193, 228, 213, 230, 39,
+ /* 40 */ 206, 216, 217, 43, 44, 45, 46, 47, 48, 49,
/* 50 */ 50, 51, 52, 53, 54, 55, 56, 57, 193, 19,
/* 60 */ 185, 186, 187, 188, 189, 190, 253, 274, 275, 276,
- /* 70 */ 195, 193, 197, 253, 216, 262, 274, 275, 276, 204,
- /* 80 */ 238, 204, 262, 43, 44, 45, 46, 47, 48, 49,
- /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 264, 274,
- /* 100 */ 275, 276, 102, 103, 104, 105, 106, 107, 108, 109,
+ /* 70 */ 195, 193, 197, 193, 261, 274, 275, 276, 253, 204,
+ /* 80 */ 238, 204, 81, 43, 44, 45, 46, 47, 48, 49,
+ /* 90 */ 50, 51, 52, 53, 54, 55, 56, 57, 274, 275,
+ /* 100 */ 276, 262, 102, 103, 104, 105, 106, 107, 108, 109,
/* 110 */ 110, 111, 112, 113, 239, 240, 239, 240, 210, 211,
/* 120 */ 212, 314, 315, 314, 59, 316, 86, 252, 88, 252,
- /* 130 */ 19, 314, 315, 256, 257, 309, 25, 72, 296, 313,
- /* 140 */ 193, 266, 102, 103, 104, 105, 106, 107, 108, 109,
+ /* 130 */ 19, 314, 315, 256, 257, 113, 25, 72, 296, 138,
+ /* 140 */ 139, 266, 102, 103, 104, 105, 106, 107, 108, 109,
/* 150 */ 110, 111, 112, 113, 43, 44, 45, 46, 47, 48,
/* 160 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 81,
- /* 170 */ 292, 59, 307, 298, 108, 109, 110, 111, 112, 113,
- /* 180 */ 69, 116, 117, 118, 72, 128, 129, 193, 241, 22,
+ /* 170 */ 292, 59, 292, 298, 108, 109, 110, 111, 112, 113,
+ /* 180 */ 69, 116, 117, 118, 72, 106, 107, 193, 111, 112,
/* 190 */ 113, 54, 55, 56, 57, 58, 102, 103, 104, 105,
- /* 200 */ 106, 107, 108, 109, 110, 111, 112, 113, 120, 193,
- /* 210 */ 216, 217, 25, 102, 103, 104, 105, 106, 107, 108,
+ /* 200 */ 106, 107, 108, 109, 110, 111, 112, 113, 120, 25,
+ /* 210 */ 216, 217, 145, 102, 103, 104, 105, 106, 107, 108,
/* 220 */ 109, 110, 111, 112, 113, 231, 138, 139, 116, 117,
- /* 230 */ 118, 106, 107, 19, 216, 54, 55, 56, 57, 102,
+ /* 230 */ 118, 164, 153, 19, 155, 54, 55, 56, 57, 102,
/* 240 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
- /* 250 */ 113, 304, 25, 46, 47, 48, 49, 43, 44, 45,
+ /* 250 */ 113, 128, 129, 46, 47, 48, 49, 43, 44, 45,
/* 260 */ 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
- /* 270 */ 56, 57, 193, 106, 107, 59, 193, 19, 153, 263,
- /* 280 */ 155, 67, 24, 102, 103, 104, 105, 106, 107, 108,
- /* 290 */ 109, 110, 111, 112, 113, 216, 217, 59, 230, 216,
+ /* 270 */ 56, 57, 216, 193, 25, 59, 193, 19, 165, 166,
+ /* 280 */ 193, 67, 24, 102, 103, 104, 105, 106, 107, 108,
+ /* 290 */ 109, 110, 111, 112, 113, 73, 216, 217, 59, 216,
/* 300 */ 217, 43, 44, 45, 46, 47, 48, 49, 50, 51,
/* 310 */ 52, 53, 54, 55, 56, 57, 102, 103, 104, 105,
- /* 320 */ 106, 107, 108, 109, 110, 111, 112, 113, 121, 142,
- /* 330 */ 59, 193, 116, 117, 118, 119, 253, 204, 122, 123,
- /* 340 */ 124, 19, 20, 81, 22, 262, 108, 19, 132, 165,
- /* 350 */ 166, 193, 24, 126, 116, 117, 118, 278, 36, 193,
+ /* 320 */ 106, 107, 108, 109, 110, 111, 112, 113, 121, 145,
+ /* 330 */ 59, 193, 116, 117, 118, 119, 273, 204, 122, 123,
+ /* 340 */ 124, 19, 20, 134, 22, 136, 137, 19, 132, 127,
+ /* 350 */ 128, 129, 24, 22, 23, 116, 117, 118, 36, 193,
/* 360 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
- /* 370 */ 112, 113, 239, 240, 216, 217, 215, 106, 107, 241,
+ /* 370 */ 112, 113, 239, 240, 311, 312, 215, 106, 107, 241,
/* 380 */ 19, 59, 216, 217, 223, 252, 115, 116, 117, 118,
- /* 390 */ 73, 120, 26, 71, 193, 22, 193, 231, 81, 128,
- /* 400 */ 138, 139, 269, 81, 43, 44, 45, 46, 47, 48,
- /* 410 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 216,
- /* 420 */ 217, 198, 100, 95, 153, 59, 155, 193, 106, 107,
- /* 430 */ 235, 236, 59, 193, 193, 249, 114, 251, 116, 117,
- /* 440 */ 118, 113, 304, 121, 127, 204, 193, 119, 120, 121,
- /* 450 */ 122, 123, 124, 125, 193, 138, 139, 216, 217, 131,
- /* 460 */ 138, 139, 193, 102, 103, 104, 105, 106, 107, 108,
+ /* 390 */ 151, 120, 26, 71, 193, 308, 309, 193, 149, 128,
+ /* 400 */ 313, 216, 269, 81, 43, 44, 45, 46, 47, 48,
+ /* 410 */ 49, 50, 51, 52, 53, 54, 55, 56, 57, 253,
+ /* 420 */ 216, 217, 100, 95, 153, 59, 155, 261, 106, 107,
+ /* 430 */ 25, 193, 101, 193, 193, 231, 114, 25, 116, 117,
+ /* 440 */ 118, 113, 304, 121, 193, 204, 59, 119, 120, 121,
+ /* 450 */ 122, 123, 124, 125, 216, 217, 193, 216, 217, 131,
+ /* 460 */ 138, 139, 230, 102, 103, 104, 105, 106, 107, 108,
/* 470 */ 109, 110, 111, 112, 113, 153, 154, 155, 156, 157,
- /* 480 */ 239, 240, 116, 117, 118, 76, 193, 193, 19, 116,
- /* 490 */ 117, 118, 23, 252, 253, 193, 87, 204, 89, 238,
- /* 500 */ 193, 92, 268, 262, 281, 203, 193, 205, 285, 216,
+ /* 480 */ 239, 240, 116, 117, 118, 76, 193, 23, 19, 25,
+ /* 490 */ 22, 253, 23, 252, 253, 108, 87, 204, 89, 261,
+ /* 500 */ 198, 92, 261, 116, 117, 118, 193, 306, 307, 216,
/* 510 */ 217, 150, 43, 44, 45, 46, 47, 48, 49, 50,
- /* 520 */ 51, 52, 53, 54, 55, 56, 57, 193, 193, 216,
- /* 530 */ 217, 19, 239, 240, 59, 23, 106, 107, 108, 109,
- /* 540 */ 110, 111, 112, 113, 231, 252, 253, 193, 308, 309,
- /* 550 */ 193, 145, 59, 313, 145, 43, 44, 45, 46, 47,
+ /* 520 */ 51, 52, 53, 54, 55, 56, 57, 59, 193, 216,
+ /* 530 */ 217, 19, 239, 240, 283, 23, 106, 107, 108, 109,
+ /* 540 */ 110, 111, 112, 113, 73, 252, 253, 142, 308, 309,
+ /* 550 */ 138, 139, 81, 313, 145, 43, 44, 45, 46, 47,
/* 560 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- /* 570 */ 164, 102, 103, 104, 105, 106, 107, 108, 109, 110,
- /* 580 */ 111, 112, 113, 119, 193, 193, 122, 123, 124, 193,
- /* 590 */ 283, 116, 117, 118, 235, 236, 132, 59, 241, 264,
- /* 600 */ 59, 193, 19, 23, 193, 25, 23, 216, 217, 116,
- /* 610 */ 117, 118, 216, 217, 102, 103, 104, 105, 106, 107,
+ /* 570 */ 307, 102, 103, 104, 105, 106, 107, 108, 109, 110,
+ /* 580 */ 111, 112, 113, 281, 116, 117, 118, 285, 23, 193,
+ /* 590 */ 25, 119, 59, 193, 122, 123, 124, 59, 127, 203,
+ /* 600 */ 59, 205, 19, 268, 132, 25, 23, 22, 193, 138,
+ /* 610 */ 139, 249, 204, 251, 102, 103, 104, 105, 106, 107,
/* 620 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46,
/* 630 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 640 */ 57, 19, 308, 309, 151, 23, 25, 313, 135, 253,
- /* 650 */ 21, 193, 241, 140, 116, 117, 118, 116, 117, 118,
- /* 660 */ 268, 304, 22, 301, 302, 43, 44, 45, 46, 47,
+ /* 640 */ 57, 19, 22, 23, 59, 23, 25, 239, 240, 116,
+ /* 650 */ 117, 118, 193, 11, 116, 117, 118, 116, 117, 118,
+ /* 660 */ 252, 269, 22, 193, 15, 43, 44, 45, 46, 47,
/* 670 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- /* 680 */ 193, 143, 193, 193, 143, 102, 103, 104, 105, 106,
+ /* 680 */ 273, 143, 193, 118, 143, 102, 103, 104, 105, 106,
/* 690 */ 107, 108, 109, 110, 111, 112, 113, 76, 118, 59,
- /* 700 */ 292, 211, 212, 216, 217, 216, 217, 73, 193, 80,
- /* 710 */ 89, 25, 19, 92, 193, 304, 23, 22, 231, 193,
- /* 720 */ 231, 193, 22, 143, 102, 103, 104, 105, 106, 107,
+ /* 700 */ 241, 116, 117, 118, 304, 216, 217, 292, 143, 60,
+ /* 710 */ 89, 241, 19, 92, 193, 193, 23, 22, 311, 312,
+ /* 720 */ 231, 101, 22, 143, 102, 103, 104, 105, 106, 107,
/* 730 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46,
/* 740 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 750 */ 57, 19, 123, 193, 59, 23, 116, 117, 118, 59,
- /* 760 */ 193, 127, 128, 129, 306, 307, 210, 211, 212, 193,
- /* 770 */ 22, 111, 112, 113, 284, 43, 44, 45, 46, 47,
+ /* 750 */ 57, 19, 193, 193, 59, 23, 116, 117, 118, 59,
+ /* 760 */ 201, 21, 241, 304, 22, 206, 127, 128, 129, 193,
+ /* 770 */ 128, 129, 235, 236, 304, 43, 44, 45, 46, 47,
/* 780 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- /* 790 */ 161, 193, 216, 217, 268, 102, 103, 104, 105, 106,
- /* 800 */ 107, 108, 109, 110, 111, 112, 113, 59, 193, 193,
- /* 810 */ 193, 116, 117, 118, 216, 217, 116, 117, 118, 304,
- /* 820 */ 239, 240, 19, 263, 138, 139, 23, 211, 212, 231,
- /* 830 */ 263, 216, 217, 252, 102, 103, 104, 105, 106, 107,
+ /* 790 */ 22, 193, 216, 217, 193, 102, 103, 104, 105, 106,
+ /* 800 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 193,
+ /* 810 */ 193, 116, 117, 118, 216, 217, 116, 117, 118, 226,
+ /* 820 */ 80, 193, 19, 235, 236, 304, 23, 211, 212, 231,
+ /* 830 */ 204, 216, 217, 205, 102, 103, 104, 105, 106, 107,
/* 840 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46,
/* 850 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 860 */ 57, 19, 193, 11, 116, 117, 118, 240, 253, 193,
- /* 870 */ 201, 239, 240, 193, 134, 206, 136, 137, 193, 252,
- /* 880 */ 193, 264, 193, 193, 252, 43, 44, 45, 46, 47,
+ /* 860 */ 57, 19, 193, 123, 76, 239, 240, 193, 253, 239,
+ /* 870 */ 240, 239, 240, 193, 106, 107, 193, 89, 252, 193,
+ /* 880 */ 92, 59, 252, 141, 252, 43, 44, 45, 46, 47,
/* 890 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
- /* 900 */ 284, 216, 217, 216, 217, 102, 103, 104, 105, 106,
- /* 910 */ 107, 108, 109, 110, 111, 112, 113, 193, 231, 193,
- /* 920 */ 187, 188, 189, 190, 127, 128, 129, 238, 195, 193,
- /* 930 */ 197, 16, 19, 7, 8, 9, 193, 204, 253, 193,
- /* 940 */ 216, 217, 216, 217, 102, 103, 104, 105, 106, 107,
+ /* 900 */ 284, 161, 216, 217, 193, 102, 103, 104, 105, 106,
+ /* 910 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 16,
+ /* 920 */ 187, 188, 189, 190, 7, 8, 9, 309, 195, 25,
+ /* 930 */ 197, 313, 19, 127, 128, 129, 262, 204, 22, 117,
+ /* 940 */ 24, 216, 217, 263, 102, 103, 104, 105, 106, 107,
/* 950 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46,
/* 960 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 970 */ 57, 213, 239, 240, 193, 76, 19, 188, 232, 190,
- /* 980 */ 128, 129, 292, 193, 195, 252, 197, 46, 89, 138,
- /* 990 */ 139, 92, 77, 204, 79, 193, 269, 216, 217, 266,
+ /* 970 */ 57, 193, 239, 240, 193, 59, 19, 188, 253, 190,
+ /* 980 */ 77, 226, 79, 193, 195, 252, 197, 193, 19, 301,
+ /* 990 */ 302, 193, 193, 204, 216, 217, 226, 216, 217, 266,
/* 1000 */ 204, 159, 45, 46, 47, 48, 49, 50, 51, 52,
/* 1010 */ 53, 54, 55, 56, 57, 102, 103, 104, 105, 106,
/* 1020 */ 107, 108, 109, 110, 111, 112, 113, 12, 239, 240,
- /* 1030 */ 193, 298, 22, 23, 253, 239, 240, 127, 128, 129,
- /* 1040 */ 238, 252, 27, 193, 286, 204, 193, 204, 252, 291,
- /* 1050 */ 193, 273, 22, 23, 100, 266, 115, 42, 268, 102,
+ /* 1030 */ 232, 298, 238, 117, 253, 239, 240, 238, 259, 260,
+ /* 1040 */ 193, 252, 27, 31, 193, 193, 142, 204, 252, 193,
+ /* 1050 */ 193, 39, 262, 193, 100, 266, 278, 42, 204, 102,
/* 1060 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
- /* 1070 */ 113, 117, 159, 216, 217, 121, 161, 19, 63, 193,
- /* 1080 */ 239, 240, 239, 240, 12, 208, 209, 298, 73, 311,
- /* 1090 */ 312, 238, 19, 252, 25, 252, 22, 24, 24, 27,
- /* 1100 */ 193, 264, 216, 217, 46, 208, 209, 153, 154, 155,
- /* 1110 */ 253, 101, 19, 23, 42, 25, 43, 44, 45, 46,
+ /* 1070 */ 113, 117, 159, 216, 217, 121, 216, 217, 63, 193,
+ /* 1080 */ 193, 193, 239, 240, 115, 116, 193, 298, 73, 238,
+ /* 1090 */ 238, 231, 19, 239, 240, 252, 22, 24, 211, 212,
+ /* 1100 */ 24, 193, 216, 217, 216, 217, 252, 153, 154, 155,
+ /* 1110 */ 253, 16, 19, 144, 213, 268, 43, 44, 45, 46,
/* 1120 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 1130 */ 57, 101, 19, 59, 25, 63, 43, 44, 45, 46,
+ /* 1130 */ 57, 238, 19, 59, 193, 59, 43, 44, 45, 46,
/* 1140 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 1150 */ 57, 22, 23, 115, 25, 24, 43, 44, 45, 46,
+ /* 1150 */ 57, 22, 23, 193, 25, 193, 43, 44, 45, 46,
/* 1160 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
- /* 1170 */ 57, 22, 23, 115, 25, 102, 103, 104, 105, 106,
- /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 118, 150, 131,
- /* 1190 */ 59, 117, 22, 273, 193, 102, 103, 104, 105, 106,
- /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 204, 66, 204,
- /* 1210 */ 35, 204, 143, 213, 193, 102, 103, 104, 105, 106,
- /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 85, 193, 59,
- /* 1230 */ 101, 311, 312, 16, 193, 19, 94, 216, 217, 238,
- /* 1240 */ 193, 66, 239, 240, 239, 240, 239, 240, 117, 74,
- /* 1250 */ 101, 216, 217, 193, 193, 252, 193, 252, 149, 252,
+ /* 1170 */ 57, 284, 77, 193, 79, 102, 103, 104, 105, 106,
+ /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 286, 193, 193,
+ /* 1190 */ 193, 117, 291, 117, 232, 102, 103, 104, 105, 106,
+ /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 204, 22, 23,
+ /* 1210 */ 66, 25, 216, 217, 35, 102, 103, 104, 105, 106,
+ /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 193, 268, 85,
+ /* 1230 */ 101, 193, 309, 309, 240, 19, 313, 313, 94, 208,
+ /* 1240 */ 209, 193, 239, 240, 193, 66, 252, 19, 268, 244,
+ /* 1250 */ 216, 217, 193, 74, 213, 252, 161, 19, 263, 254,
/* 1260 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
- /* 1270 */ 54, 55, 56, 57, 193, 193, 193, 5, 59, 216,
- /* 1280 */ 217, 25, 10, 11, 12, 13, 14, 117, 146, 17,
- /* 1290 */ 193, 291, 193, 232, 77, 76, 79, 216, 217, 216,
- /* 1300 */ 217, 31, 30, 309, 32, 130, 87, 313, 89, 39,
- /* 1310 */ 193, 92, 40, 216, 217, 216, 217, 108, 102, 103,
+ /* 1270 */ 54, 55, 56, 57, 193, 216, 217, 5, 59, 193,
+ /* 1280 */ 19, 244, 10, 11, 12, 13, 14, 101, 309, 17,
+ /* 1290 */ 146, 254, 313, 193, 193, 76, 115, 216, 217, 309,
+ /* 1300 */ 12, 263, 30, 313, 32, 46, 87, 46, 89, 130,
+ /* 1310 */ 193, 92, 40, 22, 263, 27, 216, 217, 102, 103,
/* 1320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
- /* 1330 */ 299, 300, 193, 216, 217, 116, 117, 118, 19, 20,
- /* 1340 */ 193, 22, 70, 309, 135, 193, 264, 313, 193, 140,
- /* 1350 */ 78, 193, 226, 81, 59, 36, 193, 309, 193, 29,
- /* 1360 */ 193, 313, 193, 33, 145, 193, 59, 48, 216, 217,
- /* 1370 */ 98, 216, 217, 193, 216, 217, 193, 244, 59, 216,
- /* 1380 */ 217, 216, 217, 216, 217, 216, 217, 254, 216, 217,
- /* 1390 */ 71, 193, 244, 193, 193, 65, 216, 217, 193, 216,
- /* 1400 */ 217, 145, 254, 244, 85, 133, 15, 100, 193, 90,
- /* 1410 */ 138, 139, 117, 254, 216, 217, 216, 217, 193, 100,
- /* 1420 */ 193, 216, 217, 116, 117, 106, 107, 19, 121, 193,
- /* 1430 */ 193, 216, 217, 114, 162, 116, 117, 118, 244, 244,
- /* 1440 */ 121, 216, 217, 216, 217, 193, 309, 129, 254, 254,
- /* 1450 */ 313, 60, 216, 217, 19, 256, 257, 193, 120, 121,
- /* 1460 */ 153, 154, 155, 149, 150, 25, 24, 99, 216, 217,
- /* 1470 */ 152, 193, 153, 154, 155, 156, 157, 0, 1, 2,
- /* 1480 */ 216, 217, 5, 22, 158, 24, 160, 10, 11, 12,
- /* 1490 */ 13, 14, 193, 23, 17, 25, 193, 19, 20, 193,
- /* 1500 */ 22, 133, 193, 22, 22, 193, 22, 30, 193, 32,
- /* 1510 */ 19, 20, 129, 22, 36, 216, 217, 40, 193, 216,
- /* 1520 */ 217, 193, 216, 217, 116, 216, 217, 36, 216, 217,
- /* 1530 */ 193, 216, 217, 193, 53, 152, 193, 59, 23, 19,
- /* 1540 */ 25, 216, 217, 61, 216, 217, 23, 70, 25, 71,
- /* 1550 */ 59, 116, 193, 216, 217, 78, 216, 217, 81, 216,
- /* 1560 */ 217, 59, 71, 85, 193, 23, 193, 25, 90, 23,
- /* 1570 */ 23, 25, 25, 7, 8, 98, 85, 193, 100, 193,
- /* 1580 */ 59, 90, 142, 141, 106, 107, 193, 216, 217, 216,
- /* 1590 */ 217, 100, 114, 193, 116, 117, 118, 106, 107, 121,
- /* 1600 */ 216, 217, 216, 217, 193, 114, 193, 116, 117, 118,
- /* 1610 */ 133, 23, 121, 25, 121, 138, 139, 97, 23, 117,
- /* 1620 */ 25, 23, 193, 25, 131, 141, 193, 216, 217, 59,
- /* 1630 */ 193, 153, 154, 155, 156, 157, 226, 193, 117, 162,
- /* 1640 */ 23, 23, 25, 25, 153, 154, 155, 156, 157, 1,
- /* 1650 */ 2, 83, 84, 5, 19, 20, 226, 22, 10, 11,
- /* 1660 */ 12, 13, 14, 258, 153, 17, 155, 153, 23, 155,
- /* 1670 */ 25, 36, 23, 193, 25, 255, 193, 236, 30, 193,
- /* 1680 */ 32, 19, 20, 193, 22, 193, 288, 117, 40, 193,
- /* 1690 */ 318, 193, 193, 193, 59, 242, 193, 193, 36, 193,
- /* 1700 */ 193, 193, 287, 255, 255, 255, 71, 255, 243, 214,
- /* 1710 */ 191, 297, 267, 245, 271, 259, 259, 293, 70, 246,
- /* 1720 */ 246, 59, 267, 229, 245, 271, 78, 293, 259, 81,
- /* 1730 */ 271, 271, 220, 71, 225, 100, 219, 219, 249, 196,
- /* 1740 */ 243, 106, 107, 108, 219, 60, 98, 280, 297, 114,
- /* 1750 */ 249, 116, 117, 118, 141, 245, 121, 200, 200, 297,
- /* 1760 */ 38, 200, 100, 151, 150, 294, 294, 22, 106, 107,
- /* 1770 */ 283, 148, 250, 145, 43, 249, 114, 272, 116, 117,
- /* 1780 */ 118, 133, 234, 121, 250, 249, 138, 139, 153, 154,
- /* 1790 */ 155, 156, 157, 18, 270, 199, 19, 20, 18, 22,
- /* 1800 */ 237, 237, 237, 200, 237, 149, 246, 272, 246, 272,
- /* 1810 */ 162, 270, 234, 36, 234, 153, 154, 155, 156, 157,
- /* 1820 */ 246, 246, 200, 199, 158, 62, 200, 22, 290, 289,
- /* 1830 */ 199, 221, 200, 199, 115, 199, 59, 200, 218, 218,
- /* 1840 */ 221, 22, 64, 218, 126, 165, 24, 113, 71, 305,
- /* 1850 */ 144, 221, 261, 224, 227, 282, 218, 224, 218, 220,
- /* 1860 */ 218, 218, 312, 282, 115, 221, 260, 260, 227, 261,
- /* 1870 */ 261, 260, 200, 91, 82, 22, 261, 100, 277, 200,
- /* 1880 */ 147, 265, 158, 106, 107, 146, 317, 25, 317, 202,
- /* 1890 */ 13, 114, 194, 116, 117, 118, 194, 6, 121, 248,
- /* 1900 */ 250, 249, 247, 250, 246, 192, 260, 192, 192, 213,
- /* 1910 */ 207, 222, 265, 213, 213, 207, 222, 213, 207, 214,
- /* 1920 */ 4, 214, 213, 3, 22, 163, 15, 23, 16, 23,
- /* 1930 */ 153, 154, 155, 156, 157, 139, 279, 151, 130, 25,
- /* 1940 */ 142, 24, 20, 144, 16, 1, 142, 130, 130, 61,
- /* 1950 */ 37, 151, 53, 53, 53, 130, 53, 303, 116, 34,
- /* 1960 */ 300, 141, 303, 1, 5, 22, 115, 161, 75, 141,
- /* 1970 */ 25, 41, 115, 68, 24, 68, 20, 19, 131, 125,
- /* 1980 */ 23, 96, 22, 22, 37, 67, 22, 22, 67, 149,
- /* 1990 */ 59, 24, 22, 67, 28, 23, 22, 141, 23, 23,
- /* 2000 */ 23, 143, 23, 25, 22, 97, 23, 23, 116, 22,
- /* 2010 */ 25, 88, 75, 86, 34, 34, 75, 44, 23, 22,
- /* 2020 */ 34, 25, 34, 34, 34, 93, 24, 34, 25, 23,
- /* 2030 */ 23, 23, 23, 23, 11, 23, 25, 22, 22, 22,
- /* 2040 */ 1, 23, 23, 22, 22, 25, 15, 1, 23, 25,
- /* 2050 */ 135, 141, 319, 319, 319, 319, 319, 319, 319, 319,
- /* 2060 */ 141, 319, 319, 319, 319, 141, 319, 319, 319, 319,
- /* 2070 */ 141, 319, 319, 319, 319, 319, 319, 319, 319, 319,
- /* 2080 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
- /* 2090 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
- /* 2100 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
+ /* 1330 */ 42, 150, 291, 216, 217, 116, 117, 118, 19, 20,
+ /* 1340 */ 193, 22, 70, 260, 116, 193, 24, 264, 193, 263,
+ /* 1350 */ 78, 63, 61, 81, 116, 36, 193, 260, 193, 29,
+ /* 1360 */ 193, 264, 193, 33, 145, 193, 59, 48, 216, 217,
+ /* 1370 */ 98, 216, 217, 193, 115, 193, 115, 193, 59, 216,
+ /* 1380 */ 217, 216, 217, 216, 217, 216, 217, 255, 216, 217,
+ /* 1390 */ 71, 193, 131, 193, 25, 65, 216, 217, 216, 217,
+ /* 1400 */ 216, 217, 208, 209, 85, 133, 193, 100, 193, 90,
+ /* 1410 */ 138, 139, 138, 139, 216, 217, 216, 217, 193, 100,
+ /* 1420 */ 193, 108, 135, 116, 117, 106, 107, 140, 121, 216,
+ /* 1430 */ 217, 216, 217, 114, 162, 116, 117, 118, 299, 300,
+ /* 1440 */ 121, 216, 217, 216, 217, 193, 244, 193, 135, 244,
+ /* 1450 */ 193, 256, 257, 140, 244, 193, 254, 193, 193, 254,
+ /* 1460 */ 153, 154, 155, 141, 254, 149, 150, 258, 216, 217,
+ /* 1470 */ 216, 217, 153, 154, 155, 156, 157, 0, 1, 2,
+ /* 1480 */ 216, 217, 5, 115, 158, 193, 160, 10, 11, 12,
+ /* 1490 */ 13, 14, 193, 59, 17, 126, 193, 19, 20, 129,
+ /* 1500 */ 22, 193, 22, 22, 24, 193, 23, 30, 25, 32,
+ /* 1510 */ 19, 20, 144, 22, 36, 216, 217, 40, 193, 216,
+ /* 1520 */ 217, 193, 152, 129, 216, 217, 193, 36, 216, 217,
+ /* 1530 */ 193, 99, 193, 193, 53, 193, 193, 59, 23, 193,
+ /* 1540 */ 25, 216, 217, 193, 216, 217, 152, 70, 59, 71,
+ /* 1550 */ 59, 117, 193, 216, 217, 78, 216, 217, 81, 216,
+ /* 1560 */ 217, 318, 71, 85, 193, 133, 193, 193, 90, 23,
+ /* 1570 */ 23, 25, 25, 120, 121, 98, 85, 193, 100, 193,
+ /* 1580 */ 23, 90, 25, 121, 106, 107, 19, 216, 217, 216,
+ /* 1590 */ 217, 100, 114, 131, 116, 117, 118, 106, 107, 121,
+ /* 1600 */ 216, 217, 216, 217, 193, 114, 117, 116, 117, 118,
+ /* 1610 */ 133, 193, 121, 193, 193, 138, 139, 193, 23, 193,
+ /* 1620 */ 25, 23, 23, 25, 25, 7, 8, 216, 217, 193,
+ /* 1630 */ 193, 153, 154, 155, 156, 157, 216, 217, 193, 162,
+ /* 1640 */ 216, 217, 216, 217, 153, 154, 155, 156, 157, 1,
+ /* 1650 */ 2, 193, 193, 5, 19, 20, 59, 22, 10, 11,
+ /* 1660 */ 12, 13, 14, 193, 97, 17, 193, 23, 193, 25,
+ /* 1670 */ 288, 36, 193, 242, 216, 217, 236, 23, 30, 25,
+ /* 1680 */ 32, 19, 20, 23, 22, 25, 216, 217, 40, 216,
+ /* 1690 */ 217, 216, 217, 193, 59, 216, 217, 193, 36, 83,
+ /* 1700 */ 84, 153, 153, 155, 155, 23, 71, 25, 23, 193,
+ /* 1710 */ 25, 193, 193, 193, 117, 193, 193, 193, 70, 193,
+ /* 1720 */ 193, 59, 193, 255, 255, 287, 78, 255, 243, 81,
+ /* 1730 */ 191, 255, 297, 71, 271, 100, 293, 245, 267, 214,
+ /* 1740 */ 246, 106, 107, 108, 246, 271, 98, 245, 293, 114,
+ /* 1750 */ 220, 116, 117, 118, 267, 271, 121, 271, 225, 219,
+ /* 1760 */ 229, 219, 100, 219, 259, 259, 259, 259, 106, 107,
+ /* 1770 */ 249, 196, 60, 280, 141, 243, 114, 249, 116, 117,
+ /* 1780 */ 118, 133, 245, 121, 200, 297, 138, 139, 153, 154,
+ /* 1790 */ 155, 156, 157, 297, 200, 38, 19, 20, 151, 22,
+ /* 1800 */ 200, 150, 140, 294, 294, 22, 272, 148, 250, 145,
+ /* 1810 */ 162, 270, 249, 36, 43, 153, 154, 155, 156, 157,
+ /* 1820 */ 234, 283, 250, 249, 18, 237, 237, 237, 237, 200,
+ /* 1830 */ 18, 199, 149, 246, 272, 234, 59, 272, 246, 270,
+ /* 1840 */ 234, 200, 246, 290, 199, 158, 62, 22, 71, 246,
+ /* 1850 */ 289, 221, 200, 200, 199, 199, 19, 20, 200, 22,
+ /* 1860 */ 199, 221, 115, 64, 218, 227, 22, 218, 218, 126,
+ /* 1870 */ 227, 165, 312, 36, 24, 305, 113, 100, 200, 91,
+ /* 1880 */ 82, 224, 22, 106, 107, 218, 221, 224, 220, 218,
+ /* 1890 */ 218, 114, 218, 116, 117, 118, 59, 282, 121, 282,
+ /* 1900 */ 221, 265, 317, 277, 265, 200, 158, 279, 71, 147,
+ /* 1910 */ 317, 146, 25, 202, 13, 194, 250, 140, 249, 194,
+ /* 1920 */ 250, 248, 247, 246, 6, 192, 192, 192, 303, 207,
+ /* 1930 */ 153, 154, 155, 156, 157, 222, 213, 100, 207, 213,
+ /* 1940 */ 303, 213, 300, 106, 107, 213, 222, 207, 4, 3,
+ /* 1950 */ 214, 114, 214, 116, 117, 118, 22, 213, 121, 163,
+ /* 1960 */ 15, 23, 23, 16, 151, 139, 130, 25, 144, 142,
+ /* 1970 */ 20, 16, 24, 1, 142, 130, 130, 61, 37, 151,
+ /* 1980 */ 53, 53, 53, 53, 130, 116, 34, 1, 141, 5,
+ /* 1990 */ 153, 154, 155, 156, 157, 22, 115, 75, 41, 161,
+ /* 2000 */ 68, 25, 141, 68, 115, 24, 131, 20, 19, 125,
+ /* 2010 */ 22, 96, 22, 22, 37, 23, 67, 22, 67, 59,
+ /* 2020 */ 24, 28, 22, 67, 23, 149, 22, 25, 23, 23,
+ /* 2030 */ 23, 23, 22, 97, 141, 23, 23, 116, 22, 143,
+ /* 2040 */ 25, 88, 75, 34, 86, 34, 75, 44, 34, 23,
+ /* 2050 */ 22, 34, 34, 34, 93, 24, 34, 25, 25, 142,
+ /* 2060 */ 23, 142, 23, 23, 23, 23, 11, 23, 25, 22,
+ /* 2070 */ 22, 22, 135, 23, 23, 22, 22, 25, 15, 23,
+ /* 2080 */ 1, 141, 25, 1, 141, 319, 319, 319, 319, 319,
+ /* 2090 */ 319, 319, 319, 319, 319, 141, 319, 319, 319, 319,
+ /* 2100 */ 141, 319, 319, 319, 319, 319, 319, 319, 319, 319,
/* 2110 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
/* 2120 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
/* 2130 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
@@ -162433,177 +166348,180 @@ static const YYCODETYPE yy_lookahead[] = {
/* 2220 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
/* 2230 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
/* 2240 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
- /* 2250 */ 319, 319, 319, 319, 319, 319,
+ /* 2250 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
+ /* 2260 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
+ /* 2270 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319,
+ /* 2280 */ 319, 319, 319, 319, 319, 319,
};
-#define YY_SHIFT_COUNT (577)
+#define YY_SHIFT_COUNT (579)
#define YY_SHIFT_MIN (0)
-#define YY_SHIFT_MAX (2046)
+#define YY_SHIFT_MAX (2082)
static const unsigned short int yy_shift_ofst[] = {
- /* 0 */ 1648, 1477, 1272, 322, 322, 262, 1319, 1478, 1491, 1662,
- /* 10 */ 1662, 1662, 317, 0, 0, 214, 1093, 1662, 1662, 1662,
- /* 20 */ 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662,
- /* 30 */ 271, 271, 1219, 1219, 216, 88, 262, 262, 262, 262,
- /* 40 */ 262, 40, 111, 258, 361, 469, 512, 583, 622, 693,
+ /* 0 */ 1648, 1477, 1272, 322, 322, 1, 1319, 1478, 1491, 1837,
+ /* 10 */ 1837, 1837, 471, 0, 0, 214, 1093, 1837, 1837, 1837,
+ /* 20 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
+ /* 30 */ 271, 271, 1219, 1219, 216, 88, 1, 1, 1, 1,
+ /* 40 */ 1, 40, 111, 258, 361, 469, 512, 583, 622, 693,
/* 50 */ 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, 1093,
/* 60 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093,
/* 70 */ 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1635, 1662,
- /* 80 */ 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662,
- /* 90 */ 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662,
- /* 100 */ 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662,
- /* 110 */ 1662, 1662, 1662, 1662, 1777, 1662, 1662, 1662, 1662, 1662,
- /* 120 */ 1662, 1662, 1662, 1662, 1662, 1662, 1662, 1662, 137, 181,
- /* 130 */ 181, 181, 181, 181, 94, 430, 66, 65, 112, 366,
- /* 140 */ 475, 475, 629, 1058, 475, 475, 125, 125, 475, 686,
- /* 150 */ 686, 686, 660, 686, 57, 184, 184, 77, 77, 2071,
- /* 160 */ 2071, 328, 328, 328, 493, 373, 373, 373, 373, 1015,
- /* 170 */ 1015, 409, 366, 1129, 1149, 475, 475, 475, 475, 475,
- /* 180 */ 475, 475, 475, 475, 475, 475, 475, 475, 475, 475,
- /* 190 */ 475, 475, 475, 475, 475, 621, 621, 475, 852, 899,
- /* 200 */ 899, 1295, 1295, 406, 851, 2071, 2071, 2071, 2071, 2071,
- /* 210 */ 2071, 2071, 1307, 954, 954, 640, 464, 695, 238, 700,
- /* 220 */ 538, 541, 748, 475, 475, 475, 475, 475, 475, 475,
- /* 230 */ 475, 475, 475, 634, 475, 475, 475, 475, 475, 475,
- /* 240 */ 475, 475, 475, 475, 475, 475, 1175, 1175, 1175, 475,
- /* 250 */ 475, 475, 580, 475, 475, 475, 1074, 1142, 475, 475,
- /* 260 */ 1072, 475, 475, 475, 475, 475, 475, 475, 475, 797,
- /* 270 */ 1330, 740, 1131, 1131, 1131, 1131, 1069, 740, 740, 1209,
- /* 280 */ 167, 926, 1391, 1038, 1314, 187, 1408, 1314, 1408, 1435,
- /* 290 */ 1109, 1038, 1038, 1109, 1038, 187, 1435, 227, 1090, 941,
- /* 300 */ 1270, 1270, 1270, 1408, 1256, 1256, 1326, 1440, 513, 1461,
- /* 310 */ 1685, 1685, 1613, 1613, 1722, 1722, 1613, 1612, 1614, 1745,
- /* 320 */ 1623, 1628, 1731, 1623, 1628, 1775, 1775, 1775, 1775, 1613,
- /* 330 */ 1780, 1656, 1614, 1614, 1656, 1745, 1731, 1656, 1731, 1656,
- /* 340 */ 1613, 1780, 1666, 1763, 1613, 1780, 1805, 1613, 1780, 1613,
- /* 350 */ 1780, 1805, 1719, 1719, 1719, 1778, 1819, 1819, 1805, 1719,
- /* 360 */ 1718, 1719, 1778, 1719, 1719, 1680, 1822, 1734, 1734, 1805,
- /* 370 */ 1706, 1749, 1706, 1749, 1706, 1749, 1706, 1749, 1613, 1782,
- /* 380 */ 1782, 1792, 1792, 1623, 1628, 1853, 1613, 1724, 1623, 1733,
- /* 390 */ 1739, 1656, 1862, 1877, 1877, 1891, 1891, 1891, 2071, 2071,
- /* 400 */ 2071, 2071, 2071, 2071, 2071, 2071, 2071, 2071, 2071, 2071,
- /* 410 */ 2071, 2071, 2071, 207, 915, 1010, 1030, 1217, 910, 1170,
- /* 420 */ 1470, 1368, 1481, 1442, 1318, 1383, 1515, 1482, 1523, 1542,
- /* 430 */ 1546, 1547, 1588, 1595, 1502, 1338, 1566, 1493, 1520, 1521,
- /* 440 */ 1598, 1617, 1568, 1618, 1511, 1514, 1645, 1649, 1570, 1484,
- /* 450 */ 1916, 1920, 1902, 1762, 1911, 1912, 1904, 1906, 1796, 1786,
- /* 460 */ 1808, 1914, 1914, 1917, 1798, 1922, 1799, 1928, 1944, 1804,
- /* 470 */ 1817, 1914, 1818, 1888, 1913, 1914, 1800, 1899, 1900, 1901,
- /* 480 */ 1903, 1825, 1842, 1925, 1820, 1962, 1959, 1943, 1851, 1806,
- /* 490 */ 1905, 1945, 1907, 1893, 1930, 1828, 1857, 1950, 1956, 1958,
- /* 500 */ 1847, 1854, 1960, 1918, 1961, 1964, 1957, 1965, 1921, 1931,
- /* 510 */ 1967, 1885, 1966, 1970, 1926, 1947, 1972, 1840, 1974, 1975,
- /* 520 */ 1976, 1977, 1978, 1979, 1982, 1908, 1856, 1983, 1984, 1892,
- /* 530 */ 1980, 1987, 1858, 1985, 1981, 1986, 1988, 1989, 1923, 1937,
- /* 540 */ 1927, 1973, 1941, 1932, 1990, 1995, 1997, 2002, 1996, 2003,
- /* 550 */ 1993, 2006, 1985, 2007, 2008, 2009, 2010, 2011, 2012, 2015,
- /* 560 */ 2023, 2016, 2017, 2018, 2019, 2021, 2022, 2020, 1915, 1910,
- /* 570 */ 1919, 1924, 1929, 2024, 2025, 2031, 2039, 2046,
+ /* 80 */ 1777, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
+ /* 90 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
+ /* 100 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
+ /* 110 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
+ /* 120 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837,
+ /* 130 */ 137, 181, 181, 181, 181, 181, 181, 181, 94, 430,
+ /* 140 */ 66, 65, 112, 366, 533, 533, 740, 1261, 533, 533,
+ /* 150 */ 79, 79, 533, 412, 412, 412, 77, 412, 123, 113,
+ /* 160 */ 113, 22, 22, 2101, 2101, 328, 328, 328, 239, 468,
+ /* 170 */ 468, 468, 468, 1015, 1015, 409, 366, 1129, 1186, 533,
+ /* 180 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533,
+ /* 190 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 969,
+ /* 200 */ 621, 621, 533, 642, 788, 788, 1228, 1228, 822, 822,
+ /* 210 */ 67, 1274, 2101, 2101, 2101, 2101, 2101, 2101, 2101, 1307,
+ /* 220 */ 954, 954, 585, 472, 640, 387, 695, 538, 541, 700,
+ /* 230 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533,
+ /* 240 */ 222, 533, 533, 533, 533, 533, 533, 533, 533, 533,
+ /* 250 */ 533, 533, 533, 1179, 1179, 1179, 533, 533, 533, 565,
+ /* 260 */ 533, 533, 533, 916, 1144, 533, 533, 1288, 533, 533,
+ /* 270 */ 533, 533, 533, 533, 533, 533, 639, 1330, 209, 1076,
+ /* 280 */ 1076, 1076, 1076, 580, 209, 209, 1313, 768, 917, 649,
+ /* 290 */ 1181, 1316, 405, 1316, 1238, 249, 1181, 1181, 249, 1181,
+ /* 300 */ 405, 1238, 1369, 464, 1259, 1012, 1012, 1012, 1368, 1368,
+ /* 310 */ 1368, 1368, 184, 184, 1326, 904, 1287, 1480, 1712, 1712,
+ /* 320 */ 1633, 1633, 1757, 1757, 1633, 1647, 1651, 1783, 1659, 1664,
+ /* 330 */ 1771, 1659, 1664, 1806, 1806, 1806, 1806, 1633, 1812, 1683,
+ /* 340 */ 1651, 1651, 1683, 1783, 1771, 1683, 1771, 1683, 1633, 1812,
+ /* 350 */ 1687, 1784, 1633, 1812, 1825, 1633, 1812, 1633, 1812, 1825,
+ /* 360 */ 1747, 1747, 1747, 1799, 1844, 1844, 1825, 1747, 1743, 1747,
+ /* 370 */ 1799, 1747, 1747, 1706, 1850, 1763, 1763, 1825, 1633, 1788,
+ /* 380 */ 1788, 1798, 1798, 1659, 1664, 1860, 1633, 1748, 1659, 1762,
+ /* 390 */ 1765, 1683, 1887, 1901, 1901, 1918, 1918, 1918, 2101, 2101,
+ /* 400 */ 2101, 2101, 2101, 2101, 2101, 2101, 2101, 2101, 2101, 2101,
+ /* 410 */ 2101, 2101, 2101, 207, 1095, 331, 620, 903, 806, 1074,
+ /* 420 */ 1483, 1432, 1481, 1322, 1370, 1394, 1515, 1291, 1546, 1547,
+ /* 430 */ 1557, 1595, 1598, 1599, 1434, 1453, 1618, 1462, 1567, 1489,
+ /* 440 */ 1644, 1654, 1616, 1660, 1548, 1549, 1682, 1685, 1597, 742,
+ /* 450 */ 1944, 1946, 1934, 1796, 1945, 1947, 1938, 1939, 1826, 1813,
+ /* 460 */ 1836, 1942, 1942, 1948, 1827, 1950, 1824, 1955, 1972, 1832,
+ /* 470 */ 1845, 1942, 1846, 1916, 1941, 1942, 1828, 1927, 1928, 1929,
+ /* 480 */ 1930, 1854, 1869, 1952, 1847, 1986, 1984, 1973, 1881, 1838,
+ /* 490 */ 1932, 1976, 1935, 1922, 1957, 1861, 1889, 1981, 1987, 1989,
+ /* 500 */ 1875, 1884, 1988, 1949, 1990, 1991, 1992, 1995, 1951, 1960,
+ /* 510 */ 1996, 1915, 1993, 2000, 1956, 1977, 2001, 1876, 2004, 2005,
+ /* 520 */ 2006, 2007, 2002, 2008, 2010, 1936, 1893, 2012, 2013, 1921,
+ /* 530 */ 2009, 2016, 1896, 2015, 2011, 2014, 2017, 2018, 1953, 1967,
+ /* 540 */ 1958, 2003, 1971, 1961, 2019, 2026, 2028, 2031, 2032, 2033,
+ /* 550 */ 2022, 1917, 1919, 2037, 2015, 2039, 2040, 2041, 2042, 2043,
+ /* 560 */ 2044, 2047, 2055, 2048, 2049, 2050, 2051, 2053, 2054, 2052,
+ /* 570 */ 1937, 1940, 1943, 1954, 1959, 2057, 2056, 2063, 2079, 2082,
};
#define YY_REDUCE_COUNT (412)
-#define YY_REDUCE_MIN (-267)
-#define YY_REDUCE_MAX (1716)
+#define YY_REDUCE_MIN (-271)
+#define YY_REDUCE_MAX (1744)
static const short yy_reduce_ofst[] = {
/* 0 */ -125, 733, 789, 241, 293, -123, -193, -191, -183, -187,
- /* 10 */ -180, 83, 133, -207, -198, -267, -175, -6, 166, 313,
- /* 20 */ 487, 396, 489, 598, 615, 685, 687, 79, 781, 857,
- /* 30 */ 490, 616, 240, 334, -188, 796, 841, 843, 1003, 1005,
- /* 40 */ 1007, -260, -260, -260, -260, -260, -260, -260, -260, -260,
- /* 50 */ -260, -260, -260, -260, -260, -260, -260, -260, -260, -260,
- /* 60 */ -260, -260, -260, -260, -260, -260, -260, -260, -260, -260,
- /* 70 */ -260, -260, -260, -260, -260, -260, -260, -260, 158, 203,
- /* 80 */ 391, 576, 724, 726, 886, 1021, 1035, 1063, 1081, 1083,
- /* 90 */ 1097, 1099, 1117, 1152, 1155, 1158, 1163, 1165, 1167, 1169,
- /* 100 */ 1172, 1180, 1183, 1198, 1200, 1205, 1215, 1225, 1227, 1236,
- /* 110 */ 1252, 1264, 1299, 1303, 1306, 1309, 1312, 1315, 1325, 1328,
- /* 120 */ 1337, 1340, 1343, 1371, 1373, 1384, 1386, 1411, -260, -260,
- /* 130 */ -260, -260, -260, -260, -260, -260, -260, -53, 138, 302,
- /* 140 */ -158, 357, 223, -222, 411, 458, -92, 556, 669, 581,
- /* 150 */ 632, 581, -260, 632, 758, 778, 920, -260, -260, -260,
- /* 160 */ -260, 161, 161, 161, 307, 234, 392, 526, 790, 195,
- /* 170 */ 359, -174, -173, 362, 362, -189, 16, 560, 567, 261,
- /* 180 */ 689, 802, 853, -122, -166, 408, 335, 617, 690, 837,
- /* 190 */ 1001, 746, 1061, 515, 1082, 994, 1034, -135, 1000, 1048,
- /* 200 */ 1137, 877, 897, 186, 627, 1031, 1133, 1148, 1159, 1194,
- /* 210 */ 1199, 1195, -194, -142, 18, -152, 68, 201, 253, 269,
- /* 220 */ 294, 354, 521, 528, 676, 680, 736, 743, 850, 907,
- /* 230 */ 1041, 1047, 1060, 727, 1139, 1147, 1201, 1237, 1278, 1359,
- /* 240 */ 1393, 1400, 1413, 1429, 1433, 1437, 1126, 1410, 1430, 1444,
- /* 250 */ 1480, 1483, 1405, 1486, 1490, 1492, 1420, 1372, 1496, 1498,
- /* 260 */ 1441, 1499, 253, 1500, 1503, 1504, 1506, 1507, 1508, 1398,
- /* 270 */ 1415, 1453, 1448, 1449, 1450, 1452, 1405, 1453, 1453, 1465,
- /* 280 */ 1495, 1519, 1414, 1443, 1445, 1468, 1456, 1455, 1457, 1424,
- /* 290 */ 1473, 1454, 1459, 1474, 1460, 1479, 1434, 1512, 1494, 1509,
- /* 300 */ 1517, 1518, 1525, 1469, 1489, 1501, 1467, 1510, 1497, 1543,
- /* 310 */ 1451, 1462, 1557, 1558, 1471, 1472, 1561, 1487, 1505, 1524,
- /* 320 */ 1522, 1526, 1548, 1534, 1536, 1563, 1564, 1565, 1567, 1603,
- /* 330 */ 1596, 1560, 1535, 1537, 1562, 1541, 1578, 1574, 1580, 1575,
- /* 340 */ 1622, 1624, 1538, 1540, 1626, 1631, 1610, 1632, 1634, 1637,
- /* 350 */ 1636, 1619, 1620, 1621, 1625, 1627, 1629, 1633, 1630, 1638,
- /* 360 */ 1639, 1640, 1641, 1642, 1643, 1550, 1544, 1573, 1581, 1644,
- /* 370 */ 1591, 1606, 1608, 1607, 1609, 1611, 1615, 1646, 1672, 1569,
- /* 380 */ 1571, 1616, 1647, 1650, 1652, 1601, 1679, 1657, 1653, 1651,
- /* 390 */ 1655, 1658, 1687, 1698, 1702, 1713, 1715, 1716, 1654, 1659,
- /* 400 */ 1660, 1703, 1696, 1700, 1701, 1704, 1708, 1689, 1694, 1705,
- /* 410 */ 1707, 1709, 1711,
+ /* 10 */ 166, 238, 133, -207, -199, -267, -176, -6, 204, 489,
+ /* 20 */ 576, -175, 598, 686, 615, 725, 860, 778, 781, 857,
+ /* 30 */ 616, 887, 87, 240, -192, 408, 626, 796, 843, 854,
+ /* 40 */ 1003, -271, -271, -271, -271, -271, -271, -271, -271, -271,
+ /* 50 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271,
+ /* 60 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271,
+ /* 70 */ -271, -271, -271, -271, -271, -271, -271, -271, 80, 83,
+ /* 80 */ 313, 886, 888, 996, 1034, 1059, 1081, 1100, 1117, 1152,
+ /* 90 */ 1155, 1163, 1165, 1167, 1169, 1172, 1180, 1182, 1184, 1198,
+ /* 100 */ 1200, 1213, 1215, 1225, 1227, 1252, 1254, 1264, 1299, 1303,
+ /* 110 */ 1308, 1312, 1325, 1328, 1337, 1340, 1343, 1371, 1373, 1384,
+ /* 120 */ 1386, 1411, 1420, 1424, 1426, 1458, 1470, 1473, 1475, 1479,
+ /* 130 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271,
+ /* 140 */ -271, 138, 459, 396, -158, 470, 302, -212, 521, 201,
+ /* 150 */ -195, -92, 559, 630, 632, 630, -271, 632, 901, 63,
+ /* 160 */ 407, -271, -271, -271, -271, 161, 161, 161, 251, 335,
+ /* 170 */ 847, 960, 980, 537, 588, 618, 628, 688, 688, -166,
+ /* 180 */ -161, 674, 790, 794, 799, 851, 852, -122, 680, -120,
+ /* 190 */ 995, 1038, 415, 1051, 893, 798, 962, 400, 1086, 779,
+ /* 200 */ 923, 924, 263, 1041, 979, 990, 1083, 1097, 1031, 1194,
+ /* 210 */ 362, 994, 1139, 1005, 1037, 1202, 1205, 1195, 1210, -194,
+ /* 220 */ 56, 185, -135, 232, 522, 560, 601, 617, 669, 683,
+ /* 230 */ 711, 856, 908, 941, 1048, 1101, 1147, 1257, 1262, 1265,
+ /* 240 */ 392, 1292, 1333, 1339, 1342, 1346, 1350, 1359, 1374, 1418,
+ /* 250 */ 1421, 1436, 1437, 593, 755, 770, 997, 1445, 1459, 1209,
+ /* 260 */ 1500, 1504, 1516, 1132, 1243, 1518, 1519, 1440, 1520, 560,
+ /* 270 */ 1522, 1523, 1524, 1526, 1527, 1529, 1382, 1438, 1431, 1468,
+ /* 280 */ 1469, 1472, 1476, 1209, 1431, 1431, 1485, 1525, 1539, 1435,
+ /* 290 */ 1463, 1471, 1492, 1487, 1443, 1494, 1474, 1484, 1498, 1486,
+ /* 300 */ 1502, 1455, 1530, 1531, 1533, 1540, 1542, 1544, 1505, 1506,
+ /* 310 */ 1507, 1508, 1521, 1528, 1493, 1537, 1532, 1575, 1488, 1496,
+ /* 320 */ 1584, 1594, 1509, 1510, 1600, 1538, 1534, 1541, 1558, 1563,
+ /* 330 */ 1586, 1572, 1574, 1588, 1589, 1590, 1591, 1629, 1632, 1587,
+ /* 340 */ 1562, 1565, 1592, 1569, 1601, 1596, 1606, 1603, 1641, 1645,
+ /* 350 */ 1553, 1561, 1652, 1655, 1630, 1653, 1656, 1658, 1661, 1640,
+ /* 360 */ 1646, 1649, 1650, 1638, 1657, 1663, 1665, 1667, 1668, 1671,
+ /* 370 */ 1643, 1672, 1674, 1560, 1570, 1615, 1617, 1679, 1678, 1585,
+ /* 380 */ 1593, 1636, 1639, 1666, 1669, 1626, 1705, 1628, 1670, 1673,
+ /* 390 */ 1675, 1677, 1711, 1721, 1725, 1733, 1734, 1735, 1625, 1637,
+ /* 400 */ 1642, 1722, 1723, 1726, 1728, 1732, 1731, 1713, 1724, 1736,
+ /* 410 */ 1738, 1744, 1740,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 1641, 1641, 1641, 1470, 1237, 1348, 1237, 1237, 1237, 1470,
- /* 10 */ 1470, 1470, 1237, 1378, 1378, 1523, 1270, 1237, 1237, 1237,
- /* 20 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1469, 1237, 1237,
- /* 30 */ 1237, 1237, 1558, 1558, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 40 */ 1237, 1237, 1387, 1237, 1394, 1237, 1237, 1237, 1237, 1237,
- /* 50 */ 1471, 1472, 1237, 1237, 1237, 1522, 1524, 1487, 1401, 1400,
- /* 60 */ 1399, 1398, 1505, 1365, 1392, 1385, 1389, 1465, 1466, 1464,
- /* 70 */ 1468, 1472, 1471, 1237, 1388, 1435, 1449, 1434, 1237, 1237,
- /* 80 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 90 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 100 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 110 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 120 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1443, 1448,
- /* 130 */ 1455, 1447, 1444, 1437, 1436, 1438, 1439, 1237, 1237, 1261,
- /* 140 */ 1237, 1237, 1258, 1312, 1237, 1237, 1237, 1237, 1237, 1542,
- /* 150 */ 1541, 1237, 1440, 1237, 1270, 1429, 1428, 1452, 1441, 1451,
- /* 160 */ 1450, 1530, 1594, 1593, 1488, 1237, 1237, 1237, 1237, 1237,
- /* 170 */ 1237, 1558, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 180 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 190 */ 1237, 1237, 1237, 1237, 1237, 1558, 1558, 1237, 1270, 1558,
- /* 200 */ 1558, 1266, 1266, 1372, 1237, 1537, 1339, 1339, 1339, 1339,
- /* 210 */ 1348, 1339, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 220 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1527, 1525, 1237,
- /* 230 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 240 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 250 */ 1237, 1237, 1237, 1237, 1237, 1237, 1344, 1237, 1237, 1237,
- /* 260 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1587, 1237,
- /* 270 */ 1500, 1326, 1344, 1344, 1344, 1344, 1346, 1327, 1325, 1338,
- /* 280 */ 1271, 1244, 1633, 1404, 1393, 1345, 1367, 1393, 1367, 1630,
- /* 290 */ 1391, 1404, 1404, 1391, 1404, 1345, 1630, 1287, 1610, 1282,
- /* 300 */ 1378, 1378, 1378, 1367, 1372, 1372, 1467, 1345, 1338, 1237,
- /* 310 */ 1633, 1633, 1353, 1353, 1632, 1632, 1353, 1488, 1617, 1413,
- /* 320 */ 1386, 1372, 1315, 1386, 1372, 1321, 1321, 1321, 1321, 1353,
- /* 330 */ 1255, 1391, 1617, 1617, 1391, 1413, 1315, 1391, 1315, 1391,
- /* 340 */ 1353, 1255, 1504, 1627, 1353, 1255, 1478, 1353, 1255, 1353,
- /* 350 */ 1255, 1478, 1313, 1313, 1313, 1302, 1237, 1237, 1478, 1313,
- /* 360 */ 1287, 1313, 1302, 1313, 1313, 1576, 1237, 1482, 1482, 1478,
- /* 370 */ 1371, 1366, 1371, 1366, 1371, 1366, 1371, 1366, 1353, 1568,
- /* 380 */ 1568, 1381, 1381, 1386, 1372, 1473, 1353, 1237, 1386, 1384,
- /* 390 */ 1382, 1391, 1305, 1590, 1590, 1586, 1586, 1586, 1638, 1638,
- /* 400 */ 1537, 1603, 1270, 1270, 1270, 1270, 1603, 1289, 1289, 1271,
- /* 410 */ 1271, 1270, 1603, 1237, 1237, 1237, 1237, 1237, 1237, 1598,
- /* 420 */ 1237, 1532, 1489, 1357, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 430 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1543, 1237,
- /* 440 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1418,
- /* 450 */ 1237, 1240, 1534, 1237, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 460 */ 1237, 1395, 1396, 1358, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 470 */ 1237, 1410, 1237, 1237, 1237, 1405, 1237, 1237, 1237, 1237,
- /* 480 */ 1237, 1237, 1237, 1237, 1629, 1237, 1237, 1237, 1237, 1237,
- /* 490 */ 1237, 1503, 1502, 1237, 1237, 1355, 1237, 1237, 1237, 1237,
- /* 500 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1285,
- /* 510 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 520 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 530 */ 1237, 1237, 1237, 1383, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 540 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1573, 1373,
- /* 550 */ 1237, 1237, 1620, 1237, 1237, 1237, 1237, 1237, 1237, 1237,
- /* 560 */ 1237, 1237, 1237, 1237, 1237, 1237, 1237, 1614, 1329, 1420,
- /* 570 */ 1237, 1419, 1423, 1259, 1237, 1249, 1237, 1237,
+ /* 0 */ 1651, 1651, 1651, 1479, 1244, 1355, 1244, 1244, 1244, 1479,
+ /* 10 */ 1479, 1479, 1244, 1385, 1385, 1532, 1277, 1244, 1244, 1244,
+ /* 20 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1478, 1244, 1244,
+ /* 30 */ 1244, 1244, 1567, 1567, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 40 */ 1244, 1244, 1394, 1244, 1401, 1244, 1244, 1244, 1244, 1244,
+ /* 50 */ 1480, 1481, 1244, 1244, 1244, 1531, 1533, 1496, 1408, 1407,
+ /* 60 */ 1406, 1405, 1514, 1373, 1399, 1392, 1396, 1474, 1475, 1473,
+ /* 70 */ 1477, 1481, 1480, 1244, 1395, 1442, 1458, 1441, 1244, 1244,
+ /* 80 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 90 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 100 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 110 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 120 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 130 */ 1450, 1457, 1456, 1455, 1464, 1454, 1451, 1444, 1443, 1445,
+ /* 140 */ 1446, 1244, 1244, 1268, 1244, 1244, 1265, 1319, 1244, 1244,
+ /* 150 */ 1244, 1244, 1244, 1551, 1550, 1244, 1447, 1244, 1277, 1436,
+ /* 160 */ 1435, 1461, 1448, 1460, 1459, 1539, 1603, 1602, 1497, 1244,
+ /* 170 */ 1244, 1244, 1244, 1244, 1244, 1567, 1244, 1244, 1244, 1244,
+ /* 180 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 190 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1375,
+ /* 200 */ 1567, 1567, 1244, 1277, 1567, 1567, 1376, 1376, 1273, 1273,
+ /* 210 */ 1379, 1244, 1546, 1346, 1346, 1346, 1346, 1355, 1346, 1244,
+ /* 220 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 230 */ 1244, 1244, 1244, 1244, 1536, 1534, 1244, 1244, 1244, 1244,
+ /* 240 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 250 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 260 */ 1244, 1244, 1244, 1351, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 270 */ 1244, 1244, 1244, 1244, 1244, 1596, 1244, 1509, 1333, 1351,
+ /* 280 */ 1351, 1351, 1351, 1353, 1334, 1332, 1345, 1278, 1251, 1643,
+ /* 290 */ 1411, 1400, 1352, 1400, 1640, 1398, 1411, 1411, 1398, 1411,
+ /* 300 */ 1352, 1640, 1294, 1619, 1289, 1385, 1385, 1385, 1375, 1375,
+ /* 310 */ 1375, 1375, 1379, 1379, 1476, 1352, 1345, 1244, 1643, 1643,
+ /* 320 */ 1361, 1361, 1642, 1642, 1361, 1497, 1627, 1420, 1393, 1379,
+ /* 330 */ 1322, 1393, 1379, 1328, 1328, 1328, 1328, 1361, 1262, 1398,
+ /* 340 */ 1627, 1627, 1398, 1420, 1322, 1398, 1322, 1398, 1361, 1262,
+ /* 350 */ 1513, 1637, 1361, 1262, 1487, 1361, 1262, 1361, 1262, 1487,
+ /* 360 */ 1320, 1320, 1320, 1309, 1244, 1244, 1487, 1320, 1294, 1320,
+ /* 370 */ 1309, 1320, 1320, 1585, 1244, 1491, 1491, 1487, 1361, 1577,
+ /* 380 */ 1577, 1388, 1388, 1393, 1379, 1482, 1361, 1244, 1393, 1391,
+ /* 390 */ 1389, 1398, 1312, 1599, 1599, 1595, 1595, 1595, 1648, 1648,
+ /* 400 */ 1546, 1612, 1277, 1277, 1277, 1277, 1612, 1296, 1296, 1278,
+ /* 410 */ 1278, 1277, 1612, 1244, 1244, 1244, 1244, 1244, 1244, 1607,
+ /* 420 */ 1244, 1541, 1498, 1365, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 430 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1552, 1244,
+ /* 440 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1425,
+ /* 450 */ 1244, 1247, 1543, 1244, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 460 */ 1244, 1402, 1403, 1366, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 470 */ 1244, 1417, 1244, 1244, 1244, 1412, 1244, 1244, 1244, 1244,
+ /* 480 */ 1244, 1244, 1244, 1244, 1639, 1244, 1244, 1244, 1244, 1244,
+ /* 490 */ 1244, 1512, 1511, 1244, 1244, 1363, 1244, 1244, 1244, 1244,
+ /* 500 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1292,
+ /* 510 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 520 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 530 */ 1244, 1244, 1244, 1390, 1244, 1244, 1244, 1244, 1244, 1244,
+ /* 540 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1582, 1380,
+ /* 550 */ 1244, 1244, 1244, 1244, 1630, 1244, 1244, 1244, 1244, 1244,
+ /* 560 */ 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1244, 1623,
+ /* 570 */ 1336, 1427, 1244, 1426, 1430, 1266, 1244, 1256, 1244, 1244,
};
/********** End of lemon-generated parsing tables *****************************/
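/*
** Illustrative sketch (not part of the generated tables): the arrays above
** use a displacement ("comb") encoding.  Per-state offset tables such as
** yy_reduce_ofst[] index into shared action/lookahead arrays, and
** yy_default[] supplies the action a state falls back on when a probe does
** not apply.  The demo_* names and data below are hypothetical placeholders
** chosen only to show the probe-then-fallback lookup shape; they are not
** amalgamation identifiers.
*/
#define DEMO_NSTATE  2
#define DEMO_NACTION 5
static const int demo_ofst[DEMO_NSTATE]       = { 0, 2 };   /* per-state offsets  */
static const int demo_lookahead[DEMO_NACTION] = { 1, 3, 2, 3, 4 };
static const int demo_action[DEMO_NACTION]    = { 10, 11, 12, 13, 14 };
static const int demo_default[DEMO_NSTATE]    = { 99, 98 }; /* fallback per state */

/* Probe the comb table for (state, lookahead); fall back to the default
** action when the slot is out of range or was packed for a different token. */
static int demo_find_action(int state, int lookahead){
  int i = demo_ofst[state] + lookahead;
  if( i<0 || i>=DEMO_NACTION || demo_lookahead[i]!=lookahead ){
    return demo_default[state];
  }
  return demo_action[i];
}
/* Example: demo_find_action(0,3)==13 (hit); demo_find_action(1,1)==98 (miss). */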
@@ -163155,12 +167073,12 @@ static const char *const yyTokenName[] = {
/* 256 */ "seltablist",
/* 257 */ "stl_prefix",
/* 258 */ "joinop",
- /* 259 */ "indexed_opt",
- /* 260 */ "on_opt",
- /* 261 */ "using_opt",
- /* 262 */ "exprlist",
- /* 263 */ "xfullname",
- /* 264 */ "idlist",
+ /* 259 */ "on_using",
+ /* 260 */ "indexed_by",
+ /* 261 */ "exprlist",
+ /* 262 */ "xfullname",
+ /* 263 */ "idlist",
+ /* 264 */ "indexed_opt",
/* 265 */ "nulls",
/* 266 */ "with",
/* 267 */ "where_opt_ret",
@@ -163331,29 +167249,29 @@ static const char *const yyRuleName[] = {
/* 106 */ "from ::= FROM seltablist",
/* 107 */ "stl_prefix ::= seltablist joinop",
/* 108 */ "stl_prefix ::=",
- /* 109 */ "seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt",
- /* 110 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt",
- /* 111 */ "seltablist ::= stl_prefix LP select RP as on_opt using_opt",
- /* 112 */ "seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt",
- /* 113 */ "dbnm ::=",
- /* 114 */ "dbnm ::= DOT nm",
- /* 115 */ "fullname ::= nm",
- /* 116 */ "fullname ::= nm DOT nm",
- /* 117 */ "xfullname ::= nm",
- /* 118 */ "xfullname ::= nm DOT nm",
- /* 119 */ "xfullname ::= nm DOT nm AS nm",
- /* 120 */ "xfullname ::= nm AS nm",
- /* 121 */ "joinop ::= COMMA|JOIN",
- /* 122 */ "joinop ::= JOIN_KW JOIN",
- /* 123 */ "joinop ::= JOIN_KW nm JOIN",
- /* 124 */ "joinop ::= JOIN_KW nm nm JOIN",
- /* 125 */ "on_opt ::= ON expr",
- /* 126 */ "on_opt ::=",
- /* 127 */ "indexed_opt ::=",
- /* 128 */ "indexed_opt ::= INDEXED BY nm",
- /* 129 */ "indexed_opt ::= NOT INDEXED",
- /* 130 */ "using_opt ::= USING LP idlist RP",
- /* 131 */ "using_opt ::=",
+ /* 109 */ "seltablist ::= stl_prefix nm dbnm as on_using",
+ /* 110 */ "seltablist ::= stl_prefix nm dbnm as indexed_by on_using",
+ /* 111 */ "seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using",
+ /* 112 */ "seltablist ::= stl_prefix LP select RP as on_using",
+ /* 113 */ "seltablist ::= stl_prefix LP seltablist RP as on_using",
+ /* 114 */ "dbnm ::=",
+ /* 115 */ "dbnm ::= DOT nm",
+ /* 116 */ "fullname ::= nm",
+ /* 117 */ "fullname ::= nm DOT nm",
+ /* 118 */ "xfullname ::= nm",
+ /* 119 */ "xfullname ::= nm DOT nm",
+ /* 120 */ "xfullname ::= nm DOT nm AS nm",
+ /* 121 */ "xfullname ::= nm AS nm",
+ /* 122 */ "joinop ::= COMMA|JOIN",
+ /* 123 */ "joinop ::= JOIN_KW JOIN",
+ /* 124 */ "joinop ::= JOIN_KW nm JOIN",
+ /* 125 */ "joinop ::= JOIN_KW nm nm JOIN",
+ /* 126 */ "on_using ::= ON expr",
+ /* 127 */ "on_using ::= USING LP idlist RP",
+ /* 128 */ "on_using ::=",
+ /* 129 */ "indexed_opt ::=",
+ /* 130 */ "indexed_by ::= INDEXED BY nm",
+ /* 131 */ "indexed_by ::= NOT INDEXED",
/* 132 */ "orderby_opt ::=",
/* 133 */ "orderby_opt ::= ORDER BY sortlist",
/* 134 */ "sortlist ::= sortlist COMMA expr sortorder nulls",
@@ -163431,199 +167349,202 @@ static const char *const yyRuleName[] = {
/* 206 */ "expr ::= expr NOT NULL",
/* 207 */ "expr ::= expr IS expr",
/* 208 */ "expr ::= expr IS NOT expr",
- /* 209 */ "expr ::= NOT expr",
- /* 210 */ "expr ::= BITNOT expr",
- /* 211 */ "expr ::= PLUS|MINUS expr",
- /* 212 */ "expr ::= expr PTR expr",
- /* 213 */ "between_op ::= BETWEEN",
- /* 214 */ "between_op ::= NOT BETWEEN",
- /* 215 */ "expr ::= expr between_op expr AND expr",
- /* 216 */ "in_op ::= IN",
- /* 217 */ "in_op ::= NOT IN",
- /* 218 */ "expr ::= expr in_op LP exprlist RP",
- /* 219 */ "expr ::= LP select RP",
- /* 220 */ "expr ::= expr in_op LP select RP",
- /* 221 */ "expr ::= expr in_op nm dbnm paren_exprlist",
- /* 222 */ "expr ::= EXISTS LP select RP",
- /* 223 */ "expr ::= CASE case_operand case_exprlist case_else END",
- /* 224 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr",
- /* 225 */ "case_exprlist ::= WHEN expr THEN expr",
- /* 226 */ "case_else ::= ELSE expr",
- /* 227 */ "case_else ::=",
- /* 228 */ "case_operand ::= expr",
- /* 229 */ "case_operand ::=",
- /* 230 */ "exprlist ::=",
- /* 231 */ "nexprlist ::= nexprlist COMMA expr",
- /* 232 */ "nexprlist ::= expr",
- /* 233 */ "paren_exprlist ::=",
- /* 234 */ "paren_exprlist ::= LP exprlist RP",
- /* 235 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt",
- /* 236 */ "uniqueflag ::= UNIQUE",
- /* 237 */ "uniqueflag ::=",
- /* 238 */ "eidlist_opt ::=",
- /* 239 */ "eidlist_opt ::= LP eidlist RP",
- /* 240 */ "eidlist ::= eidlist COMMA nm collate sortorder",
- /* 241 */ "eidlist ::= nm collate sortorder",
- /* 242 */ "collate ::=",
- /* 243 */ "collate ::= COLLATE ID|STRING",
- /* 244 */ "cmd ::= DROP INDEX ifexists fullname",
- /* 245 */ "cmd ::= VACUUM vinto",
- /* 246 */ "cmd ::= VACUUM nm vinto",
- /* 247 */ "vinto ::= INTO expr",
- /* 248 */ "vinto ::=",
- /* 249 */ "cmd ::= PRAGMA nm dbnm",
- /* 250 */ "cmd ::= PRAGMA nm dbnm EQ nmnum",
- /* 251 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP",
- /* 252 */ "cmd ::= PRAGMA nm dbnm EQ minus_num",
- /* 253 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP",
- /* 254 */ "plus_num ::= PLUS INTEGER|FLOAT",
- /* 255 */ "minus_num ::= MINUS INTEGER|FLOAT",
- /* 256 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END",
- /* 257 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause",
- /* 258 */ "trigger_time ::= BEFORE|AFTER",
- /* 259 */ "trigger_time ::= INSTEAD OF",
- /* 260 */ "trigger_time ::=",
- /* 261 */ "trigger_event ::= DELETE|INSERT",
- /* 262 */ "trigger_event ::= UPDATE",
- /* 263 */ "trigger_event ::= UPDATE OF idlist",
- /* 264 */ "when_clause ::=",
- /* 265 */ "when_clause ::= WHEN expr",
- /* 266 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI",
- /* 267 */ "trigger_cmd_list ::= trigger_cmd SEMI",
- /* 268 */ "trnm ::= nm DOT nm",
- /* 269 */ "tridxby ::= INDEXED BY nm",
- /* 270 */ "tridxby ::= NOT INDEXED",
- /* 271 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt",
- /* 272 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt",
- /* 273 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt",
- /* 274 */ "trigger_cmd ::= scanpt select scanpt",
- /* 275 */ "expr ::= RAISE LP IGNORE RP",
- /* 276 */ "expr ::= RAISE LP raisetype COMMA nm RP",
- /* 277 */ "raisetype ::= ROLLBACK",
- /* 278 */ "raisetype ::= ABORT",
- /* 279 */ "raisetype ::= FAIL",
- /* 280 */ "cmd ::= DROP TRIGGER ifexists fullname",
- /* 281 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt",
- /* 282 */ "cmd ::= DETACH database_kw_opt expr",
- /* 283 */ "key_opt ::=",
- /* 284 */ "key_opt ::= KEY expr",
- /* 285 */ "cmd ::= REINDEX",
- /* 286 */ "cmd ::= REINDEX nm dbnm",
- /* 287 */ "cmd ::= ANALYZE",
- /* 288 */ "cmd ::= ANALYZE nm dbnm",
- /* 289 */ "cmd ::= ALTER TABLE fullname RENAME TO nm",
- /* 290 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist",
- /* 291 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm",
- /* 292 */ "add_column_fullname ::= fullname",
- /* 293 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm",
- /* 294 */ "cmd ::= create_vtab",
- /* 295 */ "cmd ::= create_vtab LP vtabarglist RP",
- /* 296 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm",
- /* 297 */ "vtabarg ::=",
- /* 298 */ "vtabargtoken ::= ANY",
- /* 299 */ "vtabargtoken ::= lp anylist RP",
- /* 300 */ "lp ::= LP",
- /* 301 */ "with ::= WITH wqlist",
- /* 302 */ "with ::= WITH RECURSIVE wqlist",
- /* 303 */ "wqas ::= AS",
- /* 304 */ "wqas ::= AS MATERIALIZED",
- /* 305 */ "wqas ::= AS NOT MATERIALIZED",
- /* 306 */ "wqitem ::= nm eidlist_opt wqas LP select RP",
- /* 307 */ "wqlist ::= wqitem",
- /* 308 */ "wqlist ::= wqlist COMMA wqitem",
- /* 309 */ "windowdefn_list ::= windowdefn",
- /* 310 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn",
- /* 311 */ "windowdefn ::= nm AS LP window RP",
- /* 312 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt",
- /* 313 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt",
- /* 314 */ "window ::= ORDER BY sortlist frame_opt",
- /* 315 */ "window ::= nm ORDER BY sortlist frame_opt",
- /* 316 */ "window ::= frame_opt",
- /* 317 */ "window ::= nm frame_opt",
- /* 318 */ "frame_opt ::=",
- /* 319 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt",
- /* 320 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt",
- /* 321 */ "range_or_rows ::= RANGE|ROWS|GROUPS",
- /* 322 */ "frame_bound_s ::= frame_bound",
- /* 323 */ "frame_bound_s ::= UNBOUNDED PRECEDING",
- /* 324 */ "frame_bound_e ::= frame_bound",
- /* 325 */ "frame_bound_e ::= UNBOUNDED FOLLOWING",
- /* 326 */ "frame_bound ::= expr PRECEDING|FOLLOWING",
- /* 327 */ "frame_bound ::= CURRENT ROW",
- /* 328 */ "frame_exclude_opt ::=",
- /* 329 */ "frame_exclude_opt ::= EXCLUDE frame_exclude",
- /* 330 */ "frame_exclude ::= NO OTHERS",
- /* 331 */ "frame_exclude ::= CURRENT ROW",
- /* 332 */ "frame_exclude ::= GROUP|TIES",
- /* 333 */ "window_clause ::= WINDOW windowdefn_list",
- /* 334 */ "filter_over ::= filter_clause over_clause",
- /* 335 */ "filter_over ::= over_clause",
- /* 336 */ "filter_over ::= filter_clause",
- /* 337 */ "over_clause ::= OVER LP window RP",
- /* 338 */ "over_clause ::= OVER nm",
- /* 339 */ "filter_clause ::= FILTER LP WHERE expr RP",
- /* 340 */ "input ::= cmdlist",
- /* 341 */ "cmdlist ::= cmdlist ecmd",
- /* 342 */ "cmdlist ::= ecmd",
- /* 343 */ "ecmd ::= SEMI",
- /* 344 */ "ecmd ::= cmdx SEMI",
- /* 345 */ "ecmd ::= explain cmdx SEMI",
- /* 346 */ "trans_opt ::=",
- /* 347 */ "trans_opt ::= TRANSACTION",
- /* 348 */ "trans_opt ::= TRANSACTION nm",
- /* 349 */ "savepoint_opt ::= SAVEPOINT",
- /* 350 */ "savepoint_opt ::=",
- /* 351 */ "cmd ::= create_table create_table_args",
- /* 352 */ "table_option_set ::= table_option",
- /* 353 */ "columnlist ::= columnlist COMMA columnname carglist",
- /* 354 */ "columnlist ::= columnname carglist",
- /* 355 */ "nm ::= ID|INDEXED",
- /* 356 */ "nm ::= STRING",
- /* 357 */ "nm ::= JOIN_KW",
- /* 358 */ "typetoken ::= typename",
- /* 359 */ "typename ::= ID|STRING",
- /* 360 */ "signed ::= plus_num",
- /* 361 */ "signed ::= minus_num",
- /* 362 */ "carglist ::= carglist ccons",
- /* 363 */ "carglist ::=",
- /* 364 */ "ccons ::= NULL onconf",
- /* 365 */ "ccons ::= GENERATED ALWAYS AS generated",
- /* 366 */ "ccons ::= AS generated",
- /* 367 */ "conslist_opt ::= COMMA conslist",
- /* 368 */ "conslist ::= conslist tconscomma tcons",
- /* 369 */ "conslist ::= tcons",
- /* 370 */ "tconscomma ::=",
- /* 371 */ "defer_subclause_opt ::= defer_subclause",
- /* 372 */ "resolvetype ::= raisetype",
- /* 373 */ "selectnowith ::= oneselect",
- /* 374 */ "oneselect ::= values",
- /* 375 */ "sclp ::= selcollist COMMA",
- /* 376 */ "as ::= ID|STRING",
- /* 377 */ "returning ::=",
- /* 378 */ "expr ::= term",
- /* 379 */ "likeop ::= LIKE_KW|MATCH",
- /* 380 */ "exprlist ::= nexprlist",
- /* 381 */ "nmnum ::= plus_num",
- /* 382 */ "nmnum ::= nm",
- /* 383 */ "nmnum ::= ON",
- /* 384 */ "nmnum ::= DELETE",
- /* 385 */ "nmnum ::= DEFAULT",
- /* 386 */ "plus_num ::= INTEGER|FLOAT",
- /* 387 */ "foreach_clause ::=",
- /* 388 */ "foreach_clause ::= FOR EACH ROW",
- /* 389 */ "trnm ::= nm",
- /* 390 */ "tridxby ::=",
- /* 391 */ "database_kw_opt ::= DATABASE",
- /* 392 */ "database_kw_opt ::=",
- /* 393 */ "kwcolumn_opt ::=",
- /* 394 */ "kwcolumn_opt ::= COLUMNKW",
- /* 395 */ "vtabarglist ::= vtabarg",
- /* 396 */ "vtabarglist ::= vtabarglist COMMA vtabarg",
- /* 397 */ "vtabarg ::= vtabarg vtabargtoken",
- /* 398 */ "anylist ::=",
- /* 399 */ "anylist ::= anylist LP anylist RP",
- /* 400 */ "anylist ::= anylist ANY",
- /* 401 */ "with ::=",
+ /* 209 */ "expr ::= expr IS NOT DISTINCT FROM expr",
+ /* 210 */ "expr ::= expr IS DISTINCT FROM expr",
+ /* 211 */ "expr ::= NOT expr",
+ /* 212 */ "expr ::= BITNOT expr",
+ /* 213 */ "expr ::= PLUS|MINUS expr",
+ /* 214 */ "expr ::= expr PTR expr",
+ /* 215 */ "between_op ::= BETWEEN",
+ /* 216 */ "between_op ::= NOT BETWEEN",
+ /* 217 */ "expr ::= expr between_op expr AND expr",
+ /* 218 */ "in_op ::= IN",
+ /* 219 */ "in_op ::= NOT IN",
+ /* 220 */ "expr ::= expr in_op LP exprlist RP",
+ /* 221 */ "expr ::= LP select RP",
+ /* 222 */ "expr ::= expr in_op LP select RP",
+ /* 223 */ "expr ::= expr in_op nm dbnm paren_exprlist",
+ /* 224 */ "expr ::= EXISTS LP select RP",
+ /* 225 */ "expr ::= CASE case_operand case_exprlist case_else END",
+ /* 226 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr",
+ /* 227 */ "case_exprlist ::= WHEN expr THEN expr",
+ /* 228 */ "case_else ::= ELSE expr",
+ /* 229 */ "case_else ::=",
+ /* 230 */ "case_operand ::= expr",
+ /* 231 */ "case_operand ::=",
+ /* 232 */ "exprlist ::=",
+ /* 233 */ "nexprlist ::= nexprlist COMMA expr",
+ /* 234 */ "nexprlist ::= expr",
+ /* 235 */ "paren_exprlist ::=",
+ /* 236 */ "paren_exprlist ::= LP exprlist RP",
+ /* 237 */ "cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt",
+ /* 238 */ "uniqueflag ::= UNIQUE",
+ /* 239 */ "uniqueflag ::=",
+ /* 240 */ "eidlist_opt ::=",
+ /* 241 */ "eidlist_opt ::= LP eidlist RP",
+ /* 242 */ "eidlist ::= eidlist COMMA nm collate sortorder",
+ /* 243 */ "eidlist ::= nm collate sortorder",
+ /* 244 */ "collate ::=",
+ /* 245 */ "collate ::= COLLATE ID|STRING",
+ /* 246 */ "cmd ::= DROP INDEX ifexists fullname",
+ /* 247 */ "cmd ::= VACUUM vinto",
+ /* 248 */ "cmd ::= VACUUM nm vinto",
+ /* 249 */ "vinto ::= INTO expr",
+ /* 250 */ "vinto ::=",
+ /* 251 */ "cmd ::= PRAGMA nm dbnm",
+ /* 252 */ "cmd ::= PRAGMA nm dbnm EQ nmnum",
+ /* 253 */ "cmd ::= PRAGMA nm dbnm LP nmnum RP",
+ /* 254 */ "cmd ::= PRAGMA nm dbnm EQ minus_num",
+ /* 255 */ "cmd ::= PRAGMA nm dbnm LP minus_num RP",
+ /* 256 */ "plus_num ::= PLUS INTEGER|FLOAT",
+ /* 257 */ "minus_num ::= MINUS INTEGER|FLOAT",
+ /* 258 */ "cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END",
+ /* 259 */ "trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause",
+ /* 260 */ "trigger_time ::= BEFORE|AFTER",
+ /* 261 */ "trigger_time ::= INSTEAD OF",
+ /* 262 */ "trigger_time ::=",
+ /* 263 */ "trigger_event ::= DELETE|INSERT",
+ /* 264 */ "trigger_event ::= UPDATE",
+ /* 265 */ "trigger_event ::= UPDATE OF idlist",
+ /* 266 */ "when_clause ::=",
+ /* 267 */ "when_clause ::= WHEN expr",
+ /* 268 */ "trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI",
+ /* 269 */ "trigger_cmd_list ::= trigger_cmd SEMI",
+ /* 270 */ "trnm ::= nm DOT nm",
+ /* 271 */ "tridxby ::= INDEXED BY nm",
+ /* 272 */ "tridxby ::= NOT INDEXED",
+ /* 273 */ "trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt",
+ /* 274 */ "trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt",
+ /* 275 */ "trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt",
+ /* 276 */ "trigger_cmd ::= scanpt select scanpt",
+ /* 277 */ "expr ::= RAISE LP IGNORE RP",
+ /* 278 */ "expr ::= RAISE LP raisetype COMMA nm RP",
+ /* 279 */ "raisetype ::= ROLLBACK",
+ /* 280 */ "raisetype ::= ABORT",
+ /* 281 */ "raisetype ::= FAIL",
+ /* 282 */ "cmd ::= DROP TRIGGER ifexists fullname",
+ /* 283 */ "cmd ::= ATTACH database_kw_opt expr AS expr key_opt",
+ /* 284 */ "cmd ::= DETACH database_kw_opt expr",
+ /* 285 */ "key_opt ::=",
+ /* 286 */ "key_opt ::= KEY expr",
+ /* 287 */ "cmd ::= REINDEX",
+ /* 288 */ "cmd ::= REINDEX nm dbnm",
+ /* 289 */ "cmd ::= ANALYZE",
+ /* 290 */ "cmd ::= ANALYZE nm dbnm",
+ /* 291 */ "cmd ::= ALTER TABLE fullname RENAME TO nm",
+ /* 292 */ "cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist",
+ /* 293 */ "cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm",
+ /* 294 */ "add_column_fullname ::= fullname",
+ /* 295 */ "cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm",
+ /* 296 */ "cmd ::= create_vtab",
+ /* 297 */ "cmd ::= create_vtab LP vtabarglist RP",
+ /* 298 */ "create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm",
+ /* 299 */ "vtabarg ::=",
+ /* 300 */ "vtabargtoken ::= ANY",
+ /* 301 */ "vtabargtoken ::= lp anylist RP",
+ /* 302 */ "lp ::= LP",
+ /* 303 */ "with ::= WITH wqlist",
+ /* 304 */ "with ::= WITH RECURSIVE wqlist",
+ /* 305 */ "wqas ::= AS",
+ /* 306 */ "wqas ::= AS MATERIALIZED",
+ /* 307 */ "wqas ::= AS NOT MATERIALIZED",
+ /* 308 */ "wqitem ::= nm eidlist_opt wqas LP select RP",
+ /* 309 */ "wqlist ::= wqitem",
+ /* 310 */ "wqlist ::= wqlist COMMA wqitem",
+ /* 311 */ "windowdefn_list ::= windowdefn",
+ /* 312 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn",
+ /* 313 */ "windowdefn ::= nm AS LP window RP",
+ /* 314 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt",
+ /* 315 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt",
+ /* 316 */ "window ::= ORDER BY sortlist frame_opt",
+ /* 317 */ "window ::= nm ORDER BY sortlist frame_opt",
+ /* 318 */ "window ::= frame_opt",
+ /* 319 */ "window ::= nm frame_opt",
+ /* 320 */ "frame_opt ::=",
+ /* 321 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt",
+ /* 322 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt",
+ /* 323 */ "range_or_rows ::= RANGE|ROWS|GROUPS",
+ /* 324 */ "frame_bound_s ::= frame_bound",
+ /* 325 */ "frame_bound_s ::= UNBOUNDED PRECEDING",
+ /* 326 */ "frame_bound_e ::= frame_bound",
+ /* 327 */ "frame_bound_e ::= UNBOUNDED FOLLOWING",
+ /* 328 */ "frame_bound ::= expr PRECEDING|FOLLOWING",
+ /* 329 */ "frame_bound ::= CURRENT ROW",
+ /* 330 */ "frame_exclude_opt ::=",
+ /* 331 */ "frame_exclude_opt ::= EXCLUDE frame_exclude",
+ /* 332 */ "frame_exclude ::= NO OTHERS",
+ /* 333 */ "frame_exclude ::= CURRENT ROW",
+ /* 334 */ "frame_exclude ::= GROUP|TIES",
+ /* 335 */ "window_clause ::= WINDOW windowdefn_list",
+ /* 336 */ "filter_over ::= filter_clause over_clause",
+ /* 337 */ "filter_over ::= over_clause",
+ /* 338 */ "filter_over ::= filter_clause",
+ /* 339 */ "over_clause ::= OVER LP window RP",
+ /* 340 */ "over_clause ::= OVER nm",
+ /* 341 */ "filter_clause ::= FILTER LP WHERE expr RP",
+ /* 342 */ "input ::= cmdlist",
+ /* 343 */ "cmdlist ::= cmdlist ecmd",
+ /* 344 */ "cmdlist ::= ecmd",
+ /* 345 */ "ecmd ::= SEMI",
+ /* 346 */ "ecmd ::= cmdx SEMI",
+ /* 347 */ "ecmd ::= explain cmdx SEMI",
+ /* 348 */ "trans_opt ::=",
+ /* 349 */ "trans_opt ::= TRANSACTION",
+ /* 350 */ "trans_opt ::= TRANSACTION nm",
+ /* 351 */ "savepoint_opt ::= SAVEPOINT",
+ /* 352 */ "savepoint_opt ::=",
+ /* 353 */ "cmd ::= create_table create_table_args",
+ /* 354 */ "table_option_set ::= table_option",
+ /* 355 */ "columnlist ::= columnlist COMMA columnname carglist",
+ /* 356 */ "columnlist ::= columnname carglist",
+ /* 357 */ "nm ::= ID|INDEXED",
+ /* 358 */ "nm ::= STRING",
+ /* 359 */ "nm ::= JOIN_KW",
+ /* 360 */ "typetoken ::= typename",
+ /* 361 */ "typename ::= ID|STRING",
+ /* 362 */ "signed ::= plus_num",
+ /* 363 */ "signed ::= minus_num",
+ /* 364 */ "carglist ::= carglist ccons",
+ /* 365 */ "carglist ::=",
+ /* 366 */ "ccons ::= NULL onconf",
+ /* 367 */ "ccons ::= GENERATED ALWAYS AS generated",
+ /* 368 */ "ccons ::= AS generated",
+ /* 369 */ "conslist_opt ::= COMMA conslist",
+ /* 370 */ "conslist ::= conslist tconscomma tcons",
+ /* 371 */ "conslist ::= tcons",
+ /* 372 */ "tconscomma ::=",
+ /* 373 */ "defer_subclause_opt ::= defer_subclause",
+ /* 374 */ "resolvetype ::= raisetype",
+ /* 375 */ "selectnowith ::= oneselect",
+ /* 376 */ "oneselect ::= values",
+ /* 377 */ "sclp ::= selcollist COMMA",
+ /* 378 */ "as ::= ID|STRING",
+ /* 379 */ "indexed_opt ::= indexed_by",
+ /* 380 */ "returning ::=",
+ /* 381 */ "expr ::= term",
+ /* 382 */ "likeop ::= LIKE_KW|MATCH",
+ /* 383 */ "exprlist ::= nexprlist",
+ /* 384 */ "nmnum ::= plus_num",
+ /* 385 */ "nmnum ::= nm",
+ /* 386 */ "nmnum ::= ON",
+ /* 387 */ "nmnum ::= DELETE",
+ /* 388 */ "nmnum ::= DEFAULT",
+ /* 389 */ "plus_num ::= INTEGER|FLOAT",
+ /* 390 */ "foreach_clause ::=",
+ /* 391 */ "foreach_clause ::= FOR EACH ROW",
+ /* 392 */ "trnm ::= nm",
+ /* 393 */ "tridxby ::=",
+ /* 394 */ "database_kw_opt ::= DATABASE",
+ /* 395 */ "database_kw_opt ::=",
+ /* 396 */ "kwcolumn_opt ::=",
+ /* 397 */ "kwcolumn_opt ::= COLUMNKW",
+ /* 398 */ "vtabarglist ::= vtabarg",
+ /* 399 */ "vtabarglist ::= vtabarglist COMMA vtabarg",
+ /* 400 */ "vtabarg ::= vtabarg vtabargtoken",
+ /* 401 */ "anylist ::=",
+ /* 402 */ "anylist ::= anylist LP anylist RP",
+ /* 403 */ "anylist ::= anylist ANY",
+ /* 404 */ "with ::=",
};
#endif /* NDEBUG */
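/*
** Illustrative note: the FROM-clause productions listed above route the join
** constraint through the single on_using nonterminal and the INDEXED BY
** clause through indexed_by.  With hypothetical table, index, and column
** names t1, t2, idx2, and a, the statement
**
**     SELECT * FROM t1 JOIN t2 INDEXED BY idx2 ON t1.a = t2.a;
**
** reduces the "t2 INDEXED BY idx2 ON ..." reference with
**
**     seltablist ::= stl_prefix nm dbnm as indexed_by on_using
**     indexed_by ::= INDEXED BY nm
**     on_using   ::= ON expr
**
** while "... JOIN t2 USING (a)" would instead use
**
**     seltablist ::= stl_prefix nm dbnm as on_using
**     on_using   ::= USING LP idlist RP
*/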
@@ -163761,7 +167682,6 @@ sqlite3SelectDelete(pParse->db, (yypminor->yy47));
case 217: /* expr */
case 246: /* where_opt */
case 248: /* having_opt */
- case 260: /* on_opt */
case 267: /* where_opt_ret */
case 278: /* case_operand */
case 280: /* case_else */
@@ -163781,7 +167701,7 @@ sqlite3ExprDelete(pParse->db, (yypminor->yy528));
case 249: /* orderby_opt */
case 253: /* nexprlist */
case 254: /* sclp */
- case 262: /* exprlist */
+ case 261: /* exprlist */
case 268: /* setlist */
case 277: /* paren_exprlist */
case 279: /* case_exprlist */
@@ -163794,7 +167714,7 @@ sqlite3ExprListDelete(pParse->db, (yypminor->yy322));
case 245: /* from */
case 256: /* seltablist */
case 257: /* stl_prefix */
- case 263: /* xfullname */
+ case 262: /* xfullname */
{
sqlite3SrcListDelete(pParse->db, (yypminor->yy131));
}
@@ -163810,8 +167730,7 @@ sqlite3WithDelete(pParse->db, (yypminor->yy521));
sqlite3WindowListDelete(pParse->db, (yypminor->yy41));
}
break;
- case 261: /* using_opt */
- case 264: /* idlist */
+ case 263: /* idlist */
case 270: /* idlist_opt */
{
sqlite3IdListDelete(pParse->db, (yypminor->yy254));
@@ -164241,29 +168160,29 @@ static const YYCODETYPE yyRuleInfoLhs[] = {
245, /* (106) from ::= FROM seltablist */
257, /* (107) stl_prefix ::= seltablist joinop */
257, /* (108) stl_prefix ::= */
- 256, /* (109) seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */
- 256, /* (110) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */
- 256, /* (111) seltablist ::= stl_prefix LP select RP as on_opt using_opt */
- 256, /* (112) seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */
- 200, /* (113) dbnm ::= */
- 200, /* (114) dbnm ::= DOT nm */
- 238, /* (115) fullname ::= nm */
- 238, /* (116) fullname ::= nm DOT nm */
- 263, /* (117) xfullname ::= nm */
- 263, /* (118) xfullname ::= nm DOT nm */
- 263, /* (119) xfullname ::= nm DOT nm AS nm */
- 263, /* (120) xfullname ::= nm AS nm */
- 258, /* (121) joinop ::= COMMA|JOIN */
- 258, /* (122) joinop ::= JOIN_KW JOIN */
- 258, /* (123) joinop ::= JOIN_KW nm JOIN */
- 258, /* (124) joinop ::= JOIN_KW nm nm JOIN */
- 260, /* (125) on_opt ::= ON expr */
- 260, /* (126) on_opt ::= */
- 259, /* (127) indexed_opt ::= */
- 259, /* (128) indexed_opt ::= INDEXED BY nm */
- 259, /* (129) indexed_opt ::= NOT INDEXED */
- 261, /* (130) using_opt ::= USING LP idlist RP */
- 261, /* (131) using_opt ::= */
+ 256, /* (109) seltablist ::= stl_prefix nm dbnm as on_using */
+ 256, /* (110) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */
+ 256, /* (111) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */
+ 256, /* (112) seltablist ::= stl_prefix LP select RP as on_using */
+ 256, /* (113) seltablist ::= stl_prefix LP seltablist RP as on_using */
+ 200, /* (114) dbnm ::= */
+ 200, /* (115) dbnm ::= DOT nm */
+ 238, /* (116) fullname ::= nm */
+ 238, /* (117) fullname ::= nm DOT nm */
+ 262, /* (118) xfullname ::= nm */
+ 262, /* (119) xfullname ::= nm DOT nm */
+ 262, /* (120) xfullname ::= nm DOT nm AS nm */
+ 262, /* (121) xfullname ::= nm AS nm */
+ 258, /* (122) joinop ::= COMMA|JOIN */
+ 258, /* (123) joinop ::= JOIN_KW JOIN */
+ 258, /* (124) joinop ::= JOIN_KW nm JOIN */
+ 258, /* (125) joinop ::= JOIN_KW nm nm JOIN */
+ 259, /* (126) on_using ::= ON expr */
+ 259, /* (127) on_using ::= USING LP idlist RP */
+ 259, /* (128) on_using ::= */
+ 264, /* (129) indexed_opt ::= */
+ 260, /* (130) indexed_by ::= INDEXED BY nm */
+ 260, /* (131) indexed_by ::= NOT INDEXED */
249, /* (132) orderby_opt ::= */
249, /* (133) orderby_opt ::= ORDER BY sortlist */
231, /* (134) sortlist ::= sortlist COMMA expr sortorder nulls */
@@ -164307,8 +168226,8 @@ static const YYCODETYPE yyRuleInfoLhs[] = {
269, /* (172) insert_cmd ::= REPLACE */
270, /* (173) idlist_opt ::= */
270, /* (174) idlist_opt ::= LP idlist RP */
- 264, /* (175) idlist ::= idlist COMMA nm */
- 264, /* (176) idlist ::= nm */
+ 263, /* (175) idlist ::= idlist COMMA nm */
+ 263, /* (176) idlist ::= nm */
217, /* (177) expr ::= LP expr RP */
217, /* (178) expr ::= ID|INDEXED */
217, /* (179) expr ::= JOIN_KW */
@@ -164341,199 +168260,202 @@ static const YYCODETYPE yyRuleInfoLhs[] = {
217, /* (206) expr ::= expr NOT NULL */
217, /* (207) expr ::= expr IS expr */
217, /* (208) expr ::= expr IS NOT expr */
- 217, /* (209) expr ::= NOT expr */
- 217, /* (210) expr ::= BITNOT expr */
- 217, /* (211) expr ::= PLUS|MINUS expr */
- 217, /* (212) expr ::= expr PTR expr */
- 275, /* (213) between_op ::= BETWEEN */
- 275, /* (214) between_op ::= NOT BETWEEN */
- 217, /* (215) expr ::= expr between_op expr AND expr */
- 276, /* (216) in_op ::= IN */
- 276, /* (217) in_op ::= NOT IN */
- 217, /* (218) expr ::= expr in_op LP exprlist RP */
- 217, /* (219) expr ::= LP select RP */
- 217, /* (220) expr ::= expr in_op LP select RP */
- 217, /* (221) expr ::= expr in_op nm dbnm paren_exprlist */
- 217, /* (222) expr ::= EXISTS LP select RP */
- 217, /* (223) expr ::= CASE case_operand case_exprlist case_else END */
- 279, /* (224) case_exprlist ::= case_exprlist WHEN expr THEN expr */
- 279, /* (225) case_exprlist ::= WHEN expr THEN expr */
- 280, /* (226) case_else ::= ELSE expr */
- 280, /* (227) case_else ::= */
- 278, /* (228) case_operand ::= expr */
- 278, /* (229) case_operand ::= */
- 262, /* (230) exprlist ::= */
- 253, /* (231) nexprlist ::= nexprlist COMMA expr */
- 253, /* (232) nexprlist ::= expr */
- 277, /* (233) paren_exprlist ::= */
- 277, /* (234) paren_exprlist ::= LP exprlist RP */
- 190, /* (235) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
- 281, /* (236) uniqueflag ::= UNIQUE */
- 281, /* (237) uniqueflag ::= */
- 221, /* (238) eidlist_opt ::= */
- 221, /* (239) eidlist_opt ::= LP eidlist RP */
- 232, /* (240) eidlist ::= eidlist COMMA nm collate sortorder */
- 232, /* (241) eidlist ::= nm collate sortorder */
- 282, /* (242) collate ::= */
- 282, /* (243) collate ::= COLLATE ID|STRING */
- 190, /* (244) cmd ::= DROP INDEX ifexists fullname */
- 190, /* (245) cmd ::= VACUUM vinto */
- 190, /* (246) cmd ::= VACUUM nm vinto */
- 283, /* (247) vinto ::= INTO expr */
- 283, /* (248) vinto ::= */
- 190, /* (249) cmd ::= PRAGMA nm dbnm */
- 190, /* (250) cmd ::= PRAGMA nm dbnm EQ nmnum */
- 190, /* (251) cmd ::= PRAGMA nm dbnm LP nmnum RP */
- 190, /* (252) cmd ::= PRAGMA nm dbnm EQ minus_num */
- 190, /* (253) cmd ::= PRAGMA nm dbnm LP minus_num RP */
- 211, /* (254) plus_num ::= PLUS INTEGER|FLOAT */
- 212, /* (255) minus_num ::= MINUS INTEGER|FLOAT */
- 190, /* (256) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
- 285, /* (257) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
- 287, /* (258) trigger_time ::= BEFORE|AFTER */
- 287, /* (259) trigger_time ::= INSTEAD OF */
- 287, /* (260) trigger_time ::= */
- 288, /* (261) trigger_event ::= DELETE|INSERT */
- 288, /* (262) trigger_event ::= UPDATE */
- 288, /* (263) trigger_event ::= UPDATE OF idlist */
- 290, /* (264) when_clause ::= */
- 290, /* (265) when_clause ::= WHEN expr */
- 286, /* (266) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
- 286, /* (267) trigger_cmd_list ::= trigger_cmd SEMI */
- 292, /* (268) trnm ::= nm DOT nm */
- 293, /* (269) tridxby ::= INDEXED BY nm */
- 293, /* (270) tridxby ::= NOT INDEXED */
- 291, /* (271) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
- 291, /* (272) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
- 291, /* (273) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
- 291, /* (274) trigger_cmd ::= scanpt select scanpt */
- 217, /* (275) expr ::= RAISE LP IGNORE RP */
- 217, /* (276) expr ::= RAISE LP raisetype COMMA nm RP */
- 236, /* (277) raisetype ::= ROLLBACK */
- 236, /* (278) raisetype ::= ABORT */
- 236, /* (279) raisetype ::= FAIL */
- 190, /* (280) cmd ::= DROP TRIGGER ifexists fullname */
- 190, /* (281) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
- 190, /* (282) cmd ::= DETACH database_kw_opt expr */
- 295, /* (283) key_opt ::= */
- 295, /* (284) key_opt ::= KEY expr */
- 190, /* (285) cmd ::= REINDEX */
- 190, /* (286) cmd ::= REINDEX nm dbnm */
- 190, /* (287) cmd ::= ANALYZE */
- 190, /* (288) cmd ::= ANALYZE nm dbnm */
- 190, /* (289) cmd ::= ALTER TABLE fullname RENAME TO nm */
- 190, /* (290) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
- 190, /* (291) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
- 296, /* (292) add_column_fullname ::= fullname */
- 190, /* (293) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
- 190, /* (294) cmd ::= create_vtab */
- 190, /* (295) cmd ::= create_vtab LP vtabarglist RP */
- 298, /* (296) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
- 300, /* (297) vtabarg ::= */
- 301, /* (298) vtabargtoken ::= ANY */
- 301, /* (299) vtabargtoken ::= lp anylist RP */
- 302, /* (300) lp ::= LP */
- 266, /* (301) with ::= WITH wqlist */
- 266, /* (302) with ::= WITH RECURSIVE wqlist */
- 305, /* (303) wqas ::= AS */
- 305, /* (304) wqas ::= AS MATERIALIZED */
- 305, /* (305) wqas ::= AS NOT MATERIALIZED */
- 304, /* (306) wqitem ::= nm eidlist_opt wqas LP select RP */
- 241, /* (307) wqlist ::= wqitem */
- 241, /* (308) wqlist ::= wqlist COMMA wqitem */
- 306, /* (309) windowdefn_list ::= windowdefn */
- 306, /* (310) windowdefn_list ::= windowdefn_list COMMA windowdefn */
- 307, /* (311) windowdefn ::= nm AS LP window RP */
- 308, /* (312) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
- 308, /* (313) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
- 308, /* (314) window ::= ORDER BY sortlist frame_opt */
- 308, /* (315) window ::= nm ORDER BY sortlist frame_opt */
- 308, /* (316) window ::= frame_opt */
- 308, /* (317) window ::= nm frame_opt */
- 309, /* (318) frame_opt ::= */
- 309, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
- 309, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */
- 313, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */
- 315, /* (322) frame_bound_s ::= frame_bound */
- 315, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */
- 316, /* (324) frame_bound_e ::= frame_bound */
- 316, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */
- 314, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */
- 314, /* (327) frame_bound ::= CURRENT ROW */
- 317, /* (328) frame_exclude_opt ::= */
- 317, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */
- 318, /* (330) frame_exclude ::= NO OTHERS */
- 318, /* (331) frame_exclude ::= CURRENT ROW */
- 318, /* (332) frame_exclude ::= GROUP|TIES */
- 251, /* (333) window_clause ::= WINDOW windowdefn_list */
- 273, /* (334) filter_over ::= filter_clause over_clause */
- 273, /* (335) filter_over ::= over_clause */
- 273, /* (336) filter_over ::= filter_clause */
- 312, /* (337) over_clause ::= OVER LP window RP */
- 312, /* (338) over_clause ::= OVER nm */
- 311, /* (339) filter_clause ::= FILTER LP WHERE expr RP */
- 185, /* (340) input ::= cmdlist */
- 186, /* (341) cmdlist ::= cmdlist ecmd */
- 186, /* (342) cmdlist ::= ecmd */
- 187, /* (343) ecmd ::= SEMI */
- 187, /* (344) ecmd ::= cmdx SEMI */
- 187, /* (345) ecmd ::= explain cmdx SEMI */
- 192, /* (346) trans_opt ::= */
- 192, /* (347) trans_opt ::= TRANSACTION */
- 192, /* (348) trans_opt ::= TRANSACTION nm */
- 194, /* (349) savepoint_opt ::= SAVEPOINT */
- 194, /* (350) savepoint_opt ::= */
- 190, /* (351) cmd ::= create_table create_table_args */
- 203, /* (352) table_option_set ::= table_option */
- 201, /* (353) columnlist ::= columnlist COMMA columnname carglist */
- 201, /* (354) columnlist ::= columnname carglist */
- 193, /* (355) nm ::= ID|INDEXED */
- 193, /* (356) nm ::= STRING */
- 193, /* (357) nm ::= JOIN_KW */
- 208, /* (358) typetoken ::= typename */
- 209, /* (359) typename ::= ID|STRING */
- 210, /* (360) signed ::= plus_num */
- 210, /* (361) signed ::= minus_num */
- 207, /* (362) carglist ::= carglist ccons */
- 207, /* (363) carglist ::= */
- 215, /* (364) ccons ::= NULL onconf */
- 215, /* (365) ccons ::= GENERATED ALWAYS AS generated */
- 215, /* (366) ccons ::= AS generated */
- 202, /* (367) conslist_opt ::= COMMA conslist */
- 228, /* (368) conslist ::= conslist tconscomma tcons */
- 228, /* (369) conslist ::= tcons */
- 229, /* (370) tconscomma ::= */
- 233, /* (371) defer_subclause_opt ::= defer_subclause */
- 235, /* (372) resolvetype ::= raisetype */
- 239, /* (373) selectnowith ::= oneselect */
- 240, /* (374) oneselect ::= values */
- 254, /* (375) sclp ::= selcollist COMMA */
- 255, /* (376) as ::= ID|STRING */
- 272, /* (377) returning ::= */
- 217, /* (378) expr ::= term */
- 274, /* (379) likeop ::= LIKE_KW|MATCH */
- 262, /* (380) exprlist ::= nexprlist */
- 284, /* (381) nmnum ::= plus_num */
- 284, /* (382) nmnum ::= nm */
- 284, /* (383) nmnum ::= ON */
- 284, /* (384) nmnum ::= DELETE */
- 284, /* (385) nmnum ::= DEFAULT */
- 211, /* (386) plus_num ::= INTEGER|FLOAT */
- 289, /* (387) foreach_clause ::= */
- 289, /* (388) foreach_clause ::= FOR EACH ROW */
- 292, /* (389) trnm ::= nm */
- 293, /* (390) tridxby ::= */
- 294, /* (391) database_kw_opt ::= DATABASE */
- 294, /* (392) database_kw_opt ::= */
- 297, /* (393) kwcolumn_opt ::= */
- 297, /* (394) kwcolumn_opt ::= COLUMNKW */
- 299, /* (395) vtabarglist ::= vtabarg */
- 299, /* (396) vtabarglist ::= vtabarglist COMMA vtabarg */
- 300, /* (397) vtabarg ::= vtabarg vtabargtoken */
- 303, /* (398) anylist ::= */
- 303, /* (399) anylist ::= anylist LP anylist RP */
- 303, /* (400) anylist ::= anylist ANY */
- 266, /* (401) with ::= */
+ 217, /* (209) expr ::= expr IS NOT DISTINCT FROM expr */
+ 217, /* (210) expr ::= expr IS DISTINCT FROM expr */
+ 217, /* (211) expr ::= NOT expr */
+ 217, /* (212) expr ::= BITNOT expr */
+ 217, /* (213) expr ::= PLUS|MINUS expr */
+ 217, /* (214) expr ::= expr PTR expr */
+ 275, /* (215) between_op ::= BETWEEN */
+ 275, /* (216) between_op ::= NOT BETWEEN */
+ 217, /* (217) expr ::= expr between_op expr AND expr */
+ 276, /* (218) in_op ::= IN */
+ 276, /* (219) in_op ::= NOT IN */
+ 217, /* (220) expr ::= expr in_op LP exprlist RP */
+ 217, /* (221) expr ::= LP select RP */
+ 217, /* (222) expr ::= expr in_op LP select RP */
+ 217, /* (223) expr ::= expr in_op nm dbnm paren_exprlist */
+ 217, /* (224) expr ::= EXISTS LP select RP */
+ 217, /* (225) expr ::= CASE case_operand case_exprlist case_else END */
+ 279, /* (226) case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ 279, /* (227) case_exprlist ::= WHEN expr THEN expr */
+ 280, /* (228) case_else ::= ELSE expr */
+ 280, /* (229) case_else ::= */
+ 278, /* (230) case_operand ::= expr */
+ 278, /* (231) case_operand ::= */
+ 261, /* (232) exprlist ::= */
+ 253, /* (233) nexprlist ::= nexprlist COMMA expr */
+ 253, /* (234) nexprlist ::= expr */
+ 277, /* (235) paren_exprlist ::= */
+ 277, /* (236) paren_exprlist ::= LP exprlist RP */
+ 190, /* (237) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ 281, /* (238) uniqueflag ::= UNIQUE */
+ 281, /* (239) uniqueflag ::= */
+ 221, /* (240) eidlist_opt ::= */
+ 221, /* (241) eidlist_opt ::= LP eidlist RP */
+ 232, /* (242) eidlist ::= eidlist COMMA nm collate sortorder */
+ 232, /* (243) eidlist ::= nm collate sortorder */
+ 282, /* (244) collate ::= */
+ 282, /* (245) collate ::= COLLATE ID|STRING */
+ 190, /* (246) cmd ::= DROP INDEX ifexists fullname */
+ 190, /* (247) cmd ::= VACUUM vinto */
+ 190, /* (248) cmd ::= VACUUM nm vinto */
+ 283, /* (249) vinto ::= INTO expr */
+ 283, /* (250) vinto ::= */
+ 190, /* (251) cmd ::= PRAGMA nm dbnm */
+ 190, /* (252) cmd ::= PRAGMA nm dbnm EQ nmnum */
+ 190, /* (253) cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ 190, /* (254) cmd ::= PRAGMA nm dbnm EQ minus_num */
+ 190, /* (255) cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ 211, /* (256) plus_num ::= PLUS INTEGER|FLOAT */
+ 212, /* (257) minus_num ::= MINUS INTEGER|FLOAT */
+ 190, /* (258) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ 285, /* (259) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ 287, /* (260) trigger_time ::= BEFORE|AFTER */
+ 287, /* (261) trigger_time ::= INSTEAD OF */
+ 287, /* (262) trigger_time ::= */
+ 288, /* (263) trigger_event ::= DELETE|INSERT */
+ 288, /* (264) trigger_event ::= UPDATE */
+ 288, /* (265) trigger_event ::= UPDATE OF idlist */
+ 290, /* (266) when_clause ::= */
+ 290, /* (267) when_clause ::= WHEN expr */
+ 286, /* (268) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ 286, /* (269) trigger_cmd_list ::= trigger_cmd SEMI */
+ 292, /* (270) trnm ::= nm DOT nm */
+ 293, /* (271) tridxby ::= INDEXED BY nm */
+ 293, /* (272) tridxby ::= NOT INDEXED */
+ 291, /* (273) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+ 291, /* (274) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+ 291, /* (275) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+ 291, /* (276) trigger_cmd ::= scanpt select scanpt */
+ 217, /* (277) expr ::= RAISE LP IGNORE RP */
+ 217, /* (278) expr ::= RAISE LP raisetype COMMA nm RP */
+ 236, /* (279) raisetype ::= ROLLBACK */
+ 236, /* (280) raisetype ::= ABORT */
+ 236, /* (281) raisetype ::= FAIL */
+ 190, /* (282) cmd ::= DROP TRIGGER ifexists fullname */
+ 190, /* (283) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ 190, /* (284) cmd ::= DETACH database_kw_opt expr */
+ 295, /* (285) key_opt ::= */
+ 295, /* (286) key_opt ::= KEY expr */
+ 190, /* (287) cmd ::= REINDEX */
+ 190, /* (288) cmd ::= REINDEX nm dbnm */
+ 190, /* (289) cmd ::= ANALYZE */
+ 190, /* (290) cmd ::= ANALYZE nm dbnm */
+ 190, /* (291) cmd ::= ALTER TABLE fullname RENAME TO nm */
+ 190, /* (292) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ 190, /* (293) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
+ 296, /* (294) add_column_fullname ::= fullname */
+ 190, /* (295) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
+ 190, /* (296) cmd ::= create_vtab */
+ 190, /* (297) cmd ::= create_vtab LP vtabarglist RP */
+ 298, /* (298) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ 300, /* (299) vtabarg ::= */
+ 301, /* (300) vtabargtoken ::= ANY */
+ 301, /* (301) vtabargtoken ::= lp anylist RP */
+ 302, /* (302) lp ::= LP */
+ 266, /* (303) with ::= WITH wqlist */
+ 266, /* (304) with ::= WITH RECURSIVE wqlist */
+ 305, /* (305) wqas ::= AS */
+ 305, /* (306) wqas ::= AS MATERIALIZED */
+ 305, /* (307) wqas ::= AS NOT MATERIALIZED */
+ 304, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */
+ 241, /* (309) wqlist ::= wqitem */
+ 241, /* (310) wqlist ::= wqlist COMMA wqitem */
+ 306, /* (311) windowdefn_list ::= windowdefn */
+ 306, /* (312) windowdefn_list ::= windowdefn_list COMMA windowdefn */
+ 307, /* (313) windowdefn ::= nm AS LP window RP */
+ 308, /* (314) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
+ 308, /* (315) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
+ 308, /* (316) window ::= ORDER BY sortlist frame_opt */
+ 308, /* (317) window ::= nm ORDER BY sortlist frame_opt */
+ 308, /* (318) window ::= frame_opt */
+ 308, /* (319) window ::= nm frame_opt */
+ 309, /* (320) frame_opt ::= */
+ 309, /* (321) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
+ 309, /* (322) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */
+ 313, /* (323) range_or_rows ::= RANGE|ROWS|GROUPS */
+ 315, /* (324) frame_bound_s ::= frame_bound */
+ 315, /* (325) frame_bound_s ::= UNBOUNDED PRECEDING */
+ 316, /* (326) frame_bound_e ::= frame_bound */
+ 316, /* (327) frame_bound_e ::= UNBOUNDED FOLLOWING */
+ 314, /* (328) frame_bound ::= expr PRECEDING|FOLLOWING */
+ 314, /* (329) frame_bound ::= CURRENT ROW */
+ 317, /* (330) frame_exclude_opt ::= */
+ 317, /* (331) frame_exclude_opt ::= EXCLUDE frame_exclude */
+ 318, /* (332) frame_exclude ::= NO OTHERS */
+ 318, /* (333) frame_exclude ::= CURRENT ROW */
+ 318, /* (334) frame_exclude ::= GROUP|TIES */
+ 251, /* (335) window_clause ::= WINDOW windowdefn_list */
+ 273, /* (336) filter_over ::= filter_clause over_clause */
+ 273, /* (337) filter_over ::= over_clause */
+ 273, /* (338) filter_over ::= filter_clause */
+ 312, /* (339) over_clause ::= OVER LP window RP */
+ 312, /* (340) over_clause ::= OVER nm */
+ 311, /* (341) filter_clause ::= FILTER LP WHERE expr RP */
+ 185, /* (342) input ::= cmdlist */
+ 186, /* (343) cmdlist ::= cmdlist ecmd */
+ 186, /* (344) cmdlist ::= ecmd */
+ 187, /* (345) ecmd ::= SEMI */
+ 187, /* (346) ecmd ::= cmdx SEMI */
+ 187, /* (347) ecmd ::= explain cmdx SEMI */
+ 192, /* (348) trans_opt ::= */
+ 192, /* (349) trans_opt ::= TRANSACTION */
+ 192, /* (350) trans_opt ::= TRANSACTION nm */
+ 194, /* (351) savepoint_opt ::= SAVEPOINT */
+ 194, /* (352) savepoint_opt ::= */
+ 190, /* (353) cmd ::= create_table create_table_args */
+ 203, /* (354) table_option_set ::= table_option */
+ 201, /* (355) columnlist ::= columnlist COMMA columnname carglist */
+ 201, /* (356) columnlist ::= columnname carglist */
+ 193, /* (357) nm ::= ID|INDEXED */
+ 193, /* (358) nm ::= STRING */
+ 193, /* (359) nm ::= JOIN_KW */
+ 208, /* (360) typetoken ::= typename */
+ 209, /* (361) typename ::= ID|STRING */
+ 210, /* (362) signed ::= plus_num */
+ 210, /* (363) signed ::= minus_num */
+ 207, /* (364) carglist ::= carglist ccons */
+ 207, /* (365) carglist ::= */
+ 215, /* (366) ccons ::= NULL onconf */
+ 215, /* (367) ccons ::= GENERATED ALWAYS AS generated */
+ 215, /* (368) ccons ::= AS generated */
+ 202, /* (369) conslist_opt ::= COMMA conslist */
+ 228, /* (370) conslist ::= conslist tconscomma tcons */
+ 228, /* (371) conslist ::= tcons */
+ 229, /* (372) tconscomma ::= */
+ 233, /* (373) defer_subclause_opt ::= defer_subclause */
+ 235, /* (374) resolvetype ::= raisetype */
+ 239, /* (375) selectnowith ::= oneselect */
+ 240, /* (376) oneselect ::= values */
+ 254, /* (377) sclp ::= selcollist COMMA */
+ 255, /* (378) as ::= ID|STRING */
+ 264, /* (379) indexed_opt ::= indexed_by */
+ 272, /* (380) returning ::= */
+ 217, /* (381) expr ::= term */
+ 274, /* (382) likeop ::= LIKE_KW|MATCH */
+ 261, /* (383) exprlist ::= nexprlist */
+ 284, /* (384) nmnum ::= plus_num */
+ 284, /* (385) nmnum ::= nm */
+ 284, /* (386) nmnum ::= ON */
+ 284, /* (387) nmnum ::= DELETE */
+ 284, /* (388) nmnum ::= DEFAULT */
+ 211, /* (389) plus_num ::= INTEGER|FLOAT */
+ 289, /* (390) foreach_clause ::= */
+ 289, /* (391) foreach_clause ::= FOR EACH ROW */
+ 292, /* (392) trnm ::= nm */
+ 293, /* (393) tridxby ::= */
+ 294, /* (394) database_kw_opt ::= DATABASE */
+ 294, /* (395) database_kw_opt ::= */
+ 297, /* (396) kwcolumn_opt ::= */
+ 297, /* (397) kwcolumn_opt ::= COLUMNKW */
+ 299, /* (398) vtabarglist ::= vtabarg */
+ 299, /* (399) vtabarglist ::= vtabarglist COMMA vtabarg */
+ 300, /* (400) vtabarg ::= vtabarg vtabargtoken */
+ 303, /* (401) anylist ::= */
+ 303, /* (402) anylist ::= anylist LP anylist RP */
+ 303, /* (403) anylist ::= anylist ANY */
+ 266, /* (404) with ::= */
};
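/*
** Illustrative sketch: on a reduce of rule J the parser pops the right-hand
** side off its stack and then performs a goto on the left-hand-side symbol.
** yyRuleInfoLhs[J] above gives that symbol, and yyRuleInfoNRhs[J] below
** stores the negated RHS length.  For example, for rule 126,
** "on_using ::= ON expr", yyRuleInfoLhs[126] is 259 (the symbol number of
** "on_using" in yyTokenName[]) and yyRuleInfoNRhs[126] is -2, so two stack
** entries are popped before the goto.  The two locals below are a
** hypothetical illustration of reading those tables, not amalgamation code:
**
**     int nPop = -yyRuleInfoNRhs[ruleno];   (stack entries to pop)
**     int lhs  =  yyRuleInfoLhs[ruleno];    (nonterminal for the goto)
*/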
/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number
@@ -164648,29 +168570,29 @@ static const signed char yyRuleInfoNRhs[] = {
-2, /* (106) from ::= FROM seltablist */
-2, /* (107) stl_prefix ::= seltablist joinop */
0, /* (108) stl_prefix ::= */
- -7, /* (109) seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */
- -9, /* (110) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */
- -7, /* (111) seltablist ::= stl_prefix LP select RP as on_opt using_opt */
- -7, /* (112) seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */
- 0, /* (113) dbnm ::= */
- -2, /* (114) dbnm ::= DOT nm */
- -1, /* (115) fullname ::= nm */
- -3, /* (116) fullname ::= nm DOT nm */
- -1, /* (117) xfullname ::= nm */
- -3, /* (118) xfullname ::= nm DOT nm */
- -5, /* (119) xfullname ::= nm DOT nm AS nm */
- -3, /* (120) xfullname ::= nm AS nm */
- -1, /* (121) joinop ::= COMMA|JOIN */
- -2, /* (122) joinop ::= JOIN_KW JOIN */
- -3, /* (123) joinop ::= JOIN_KW nm JOIN */
- -4, /* (124) joinop ::= JOIN_KW nm nm JOIN */
- -2, /* (125) on_opt ::= ON expr */
- 0, /* (126) on_opt ::= */
- 0, /* (127) indexed_opt ::= */
- -3, /* (128) indexed_opt ::= INDEXED BY nm */
- -2, /* (129) indexed_opt ::= NOT INDEXED */
- -4, /* (130) using_opt ::= USING LP idlist RP */
- 0, /* (131) using_opt ::= */
+ -5, /* (109) seltablist ::= stl_prefix nm dbnm as on_using */
+ -6, /* (110) seltablist ::= stl_prefix nm dbnm as indexed_by on_using */
+ -8, /* (111) seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */
+ -6, /* (112) seltablist ::= stl_prefix LP select RP as on_using */
+ -6, /* (113) seltablist ::= stl_prefix LP seltablist RP as on_using */
+ 0, /* (114) dbnm ::= */
+ -2, /* (115) dbnm ::= DOT nm */
+ -1, /* (116) fullname ::= nm */
+ -3, /* (117) fullname ::= nm DOT nm */
+ -1, /* (118) xfullname ::= nm */
+ -3, /* (119) xfullname ::= nm DOT nm */
+ -5, /* (120) xfullname ::= nm DOT nm AS nm */
+ -3, /* (121) xfullname ::= nm AS nm */
+ -1, /* (122) joinop ::= COMMA|JOIN */
+ -2, /* (123) joinop ::= JOIN_KW JOIN */
+ -3, /* (124) joinop ::= JOIN_KW nm JOIN */
+ -4, /* (125) joinop ::= JOIN_KW nm nm JOIN */
+ -2, /* (126) on_using ::= ON expr */
+ -4, /* (127) on_using ::= USING LP idlist RP */
+ 0, /* (128) on_using ::= */
+ 0, /* (129) indexed_opt ::= */
+ -3, /* (130) indexed_by ::= INDEXED BY nm */
+ -2, /* (131) indexed_by ::= NOT INDEXED */
0, /* (132) orderby_opt ::= */
-3, /* (133) orderby_opt ::= ORDER BY sortlist */
-5, /* (134) sortlist ::= sortlist COMMA expr sortorder nulls */
@@ -164748,199 +168670,202 @@ static const signed char yyRuleInfoNRhs[] = {
-3, /* (206) expr ::= expr NOT NULL */
-3, /* (207) expr ::= expr IS expr */
-4, /* (208) expr ::= expr IS NOT expr */
- -2, /* (209) expr ::= NOT expr */
- -2, /* (210) expr ::= BITNOT expr */
- -2, /* (211) expr ::= PLUS|MINUS expr */
- -3, /* (212) expr ::= expr PTR expr */
- -1, /* (213) between_op ::= BETWEEN */
- -2, /* (214) between_op ::= NOT BETWEEN */
- -5, /* (215) expr ::= expr between_op expr AND expr */
- -1, /* (216) in_op ::= IN */
- -2, /* (217) in_op ::= NOT IN */
- -5, /* (218) expr ::= expr in_op LP exprlist RP */
- -3, /* (219) expr ::= LP select RP */
- -5, /* (220) expr ::= expr in_op LP select RP */
- -5, /* (221) expr ::= expr in_op nm dbnm paren_exprlist */
- -4, /* (222) expr ::= EXISTS LP select RP */
- -5, /* (223) expr ::= CASE case_operand case_exprlist case_else END */
- -5, /* (224) case_exprlist ::= case_exprlist WHEN expr THEN expr */
- -4, /* (225) case_exprlist ::= WHEN expr THEN expr */
- -2, /* (226) case_else ::= ELSE expr */
- 0, /* (227) case_else ::= */
- -1, /* (228) case_operand ::= expr */
- 0, /* (229) case_operand ::= */
- 0, /* (230) exprlist ::= */
- -3, /* (231) nexprlist ::= nexprlist COMMA expr */
- -1, /* (232) nexprlist ::= expr */
- 0, /* (233) paren_exprlist ::= */
- -3, /* (234) paren_exprlist ::= LP exprlist RP */
- -12, /* (235) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
- -1, /* (236) uniqueflag ::= UNIQUE */
- 0, /* (237) uniqueflag ::= */
- 0, /* (238) eidlist_opt ::= */
- -3, /* (239) eidlist_opt ::= LP eidlist RP */
- -5, /* (240) eidlist ::= eidlist COMMA nm collate sortorder */
- -3, /* (241) eidlist ::= nm collate sortorder */
- 0, /* (242) collate ::= */
- -2, /* (243) collate ::= COLLATE ID|STRING */
- -4, /* (244) cmd ::= DROP INDEX ifexists fullname */
- -2, /* (245) cmd ::= VACUUM vinto */
- -3, /* (246) cmd ::= VACUUM nm vinto */
- -2, /* (247) vinto ::= INTO expr */
- 0, /* (248) vinto ::= */
- -3, /* (249) cmd ::= PRAGMA nm dbnm */
- -5, /* (250) cmd ::= PRAGMA nm dbnm EQ nmnum */
- -6, /* (251) cmd ::= PRAGMA nm dbnm LP nmnum RP */
- -5, /* (252) cmd ::= PRAGMA nm dbnm EQ minus_num */
- -6, /* (253) cmd ::= PRAGMA nm dbnm LP minus_num RP */
- -2, /* (254) plus_num ::= PLUS INTEGER|FLOAT */
- -2, /* (255) minus_num ::= MINUS INTEGER|FLOAT */
- -5, /* (256) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
- -11, /* (257) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
- -1, /* (258) trigger_time ::= BEFORE|AFTER */
- -2, /* (259) trigger_time ::= INSTEAD OF */
- 0, /* (260) trigger_time ::= */
- -1, /* (261) trigger_event ::= DELETE|INSERT */
- -1, /* (262) trigger_event ::= UPDATE */
- -3, /* (263) trigger_event ::= UPDATE OF idlist */
- 0, /* (264) when_clause ::= */
- -2, /* (265) when_clause ::= WHEN expr */
- -3, /* (266) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
- -2, /* (267) trigger_cmd_list ::= trigger_cmd SEMI */
- -3, /* (268) trnm ::= nm DOT nm */
- -3, /* (269) tridxby ::= INDEXED BY nm */
- -2, /* (270) tridxby ::= NOT INDEXED */
- -9, /* (271) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
- -8, /* (272) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
- -6, /* (273) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
- -3, /* (274) trigger_cmd ::= scanpt select scanpt */
- -4, /* (275) expr ::= RAISE LP IGNORE RP */
- -6, /* (276) expr ::= RAISE LP raisetype COMMA nm RP */
- -1, /* (277) raisetype ::= ROLLBACK */
- -1, /* (278) raisetype ::= ABORT */
- -1, /* (279) raisetype ::= FAIL */
- -4, /* (280) cmd ::= DROP TRIGGER ifexists fullname */
- -6, /* (281) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
- -3, /* (282) cmd ::= DETACH database_kw_opt expr */
- 0, /* (283) key_opt ::= */
- -2, /* (284) key_opt ::= KEY expr */
- -1, /* (285) cmd ::= REINDEX */
- -3, /* (286) cmd ::= REINDEX nm dbnm */
- -1, /* (287) cmd ::= ANALYZE */
- -3, /* (288) cmd ::= ANALYZE nm dbnm */
- -6, /* (289) cmd ::= ALTER TABLE fullname RENAME TO nm */
- -7, /* (290) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
- -6, /* (291) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
- -1, /* (292) add_column_fullname ::= fullname */
- -8, /* (293) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
- -1, /* (294) cmd ::= create_vtab */
- -4, /* (295) cmd ::= create_vtab LP vtabarglist RP */
- -8, /* (296) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
- 0, /* (297) vtabarg ::= */
- -1, /* (298) vtabargtoken ::= ANY */
- -3, /* (299) vtabargtoken ::= lp anylist RP */
- -1, /* (300) lp ::= LP */
- -2, /* (301) with ::= WITH wqlist */
- -3, /* (302) with ::= WITH RECURSIVE wqlist */
- -1, /* (303) wqas ::= AS */
- -2, /* (304) wqas ::= AS MATERIALIZED */
- -3, /* (305) wqas ::= AS NOT MATERIALIZED */
- -6, /* (306) wqitem ::= nm eidlist_opt wqas LP select RP */
- -1, /* (307) wqlist ::= wqitem */
- -3, /* (308) wqlist ::= wqlist COMMA wqitem */
- -1, /* (309) windowdefn_list ::= windowdefn */
- -3, /* (310) windowdefn_list ::= windowdefn_list COMMA windowdefn */
- -5, /* (311) windowdefn ::= nm AS LP window RP */
- -5, /* (312) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
- -6, /* (313) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
- -4, /* (314) window ::= ORDER BY sortlist frame_opt */
- -5, /* (315) window ::= nm ORDER BY sortlist frame_opt */
- -1, /* (316) window ::= frame_opt */
- -2, /* (317) window ::= nm frame_opt */
- 0, /* (318) frame_opt ::= */
- -3, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
- -6, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */
- -1, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */
- -1, /* (322) frame_bound_s ::= frame_bound */
- -2, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */
- -1, /* (324) frame_bound_e ::= frame_bound */
- -2, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */
- -2, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */
- -2, /* (327) frame_bound ::= CURRENT ROW */
- 0, /* (328) frame_exclude_opt ::= */
- -2, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */
- -2, /* (330) frame_exclude ::= NO OTHERS */
- -2, /* (331) frame_exclude ::= CURRENT ROW */
- -1, /* (332) frame_exclude ::= GROUP|TIES */
- -2, /* (333) window_clause ::= WINDOW windowdefn_list */
- -2, /* (334) filter_over ::= filter_clause over_clause */
- -1, /* (335) filter_over ::= over_clause */
- -1, /* (336) filter_over ::= filter_clause */
- -4, /* (337) over_clause ::= OVER LP window RP */
- -2, /* (338) over_clause ::= OVER nm */
- -5, /* (339) filter_clause ::= FILTER LP WHERE expr RP */
- -1, /* (340) input ::= cmdlist */
- -2, /* (341) cmdlist ::= cmdlist ecmd */
- -1, /* (342) cmdlist ::= ecmd */
- -1, /* (343) ecmd ::= SEMI */
- -2, /* (344) ecmd ::= cmdx SEMI */
- -3, /* (345) ecmd ::= explain cmdx SEMI */
- 0, /* (346) trans_opt ::= */
- -1, /* (347) trans_opt ::= TRANSACTION */
- -2, /* (348) trans_opt ::= TRANSACTION nm */
- -1, /* (349) savepoint_opt ::= SAVEPOINT */
- 0, /* (350) savepoint_opt ::= */
- -2, /* (351) cmd ::= create_table create_table_args */
- -1, /* (352) table_option_set ::= table_option */
- -4, /* (353) columnlist ::= columnlist COMMA columnname carglist */
- -2, /* (354) columnlist ::= columnname carglist */
- -1, /* (355) nm ::= ID|INDEXED */
- -1, /* (356) nm ::= STRING */
- -1, /* (357) nm ::= JOIN_KW */
- -1, /* (358) typetoken ::= typename */
- -1, /* (359) typename ::= ID|STRING */
- -1, /* (360) signed ::= plus_num */
- -1, /* (361) signed ::= minus_num */
- -2, /* (362) carglist ::= carglist ccons */
- 0, /* (363) carglist ::= */
- -2, /* (364) ccons ::= NULL onconf */
- -4, /* (365) ccons ::= GENERATED ALWAYS AS generated */
- -2, /* (366) ccons ::= AS generated */
- -2, /* (367) conslist_opt ::= COMMA conslist */
- -3, /* (368) conslist ::= conslist tconscomma tcons */
- -1, /* (369) conslist ::= tcons */
- 0, /* (370) tconscomma ::= */
- -1, /* (371) defer_subclause_opt ::= defer_subclause */
- -1, /* (372) resolvetype ::= raisetype */
- -1, /* (373) selectnowith ::= oneselect */
- -1, /* (374) oneselect ::= values */
- -2, /* (375) sclp ::= selcollist COMMA */
- -1, /* (376) as ::= ID|STRING */
- 0, /* (377) returning ::= */
- -1, /* (378) expr ::= term */
- -1, /* (379) likeop ::= LIKE_KW|MATCH */
- -1, /* (380) exprlist ::= nexprlist */
- -1, /* (381) nmnum ::= plus_num */
- -1, /* (382) nmnum ::= nm */
- -1, /* (383) nmnum ::= ON */
- -1, /* (384) nmnum ::= DELETE */
- -1, /* (385) nmnum ::= DEFAULT */
- -1, /* (386) plus_num ::= INTEGER|FLOAT */
- 0, /* (387) foreach_clause ::= */
- -3, /* (388) foreach_clause ::= FOR EACH ROW */
- -1, /* (389) trnm ::= nm */
- 0, /* (390) tridxby ::= */
- -1, /* (391) database_kw_opt ::= DATABASE */
- 0, /* (392) database_kw_opt ::= */
- 0, /* (393) kwcolumn_opt ::= */
- -1, /* (394) kwcolumn_opt ::= COLUMNKW */
- -1, /* (395) vtabarglist ::= vtabarg */
- -3, /* (396) vtabarglist ::= vtabarglist COMMA vtabarg */
- -2, /* (397) vtabarg ::= vtabarg vtabargtoken */
- 0, /* (398) anylist ::= */
- -4, /* (399) anylist ::= anylist LP anylist RP */
- -2, /* (400) anylist ::= anylist ANY */
- 0, /* (401) with ::= */
+ -6, /* (209) expr ::= expr IS NOT DISTINCT FROM expr */
+ -5, /* (210) expr ::= expr IS DISTINCT FROM expr */
+ -2, /* (211) expr ::= NOT expr */
+ -2, /* (212) expr ::= BITNOT expr */
+ -2, /* (213) expr ::= PLUS|MINUS expr */
+ -3, /* (214) expr ::= expr PTR expr */
+ -1, /* (215) between_op ::= BETWEEN */
+ -2, /* (216) between_op ::= NOT BETWEEN */
+ -5, /* (217) expr ::= expr between_op expr AND expr */
+ -1, /* (218) in_op ::= IN */
+ -2, /* (219) in_op ::= NOT IN */
+ -5, /* (220) expr ::= expr in_op LP exprlist RP */
+ -3, /* (221) expr ::= LP select RP */
+ -5, /* (222) expr ::= expr in_op LP select RP */
+ -5, /* (223) expr ::= expr in_op nm dbnm paren_exprlist */
+ -4, /* (224) expr ::= EXISTS LP select RP */
+ -5, /* (225) expr ::= CASE case_operand case_exprlist case_else END */
+ -5, /* (226) case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ -4, /* (227) case_exprlist ::= WHEN expr THEN expr */
+ -2, /* (228) case_else ::= ELSE expr */
+ 0, /* (229) case_else ::= */
+ -1, /* (230) case_operand ::= expr */
+ 0, /* (231) case_operand ::= */
+ 0, /* (232) exprlist ::= */
+ -3, /* (233) nexprlist ::= nexprlist COMMA expr */
+ -1, /* (234) nexprlist ::= expr */
+ 0, /* (235) paren_exprlist ::= */
+ -3, /* (236) paren_exprlist ::= LP exprlist RP */
+ -12, /* (237) cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ -1, /* (238) uniqueflag ::= UNIQUE */
+ 0, /* (239) uniqueflag ::= */
+ 0, /* (240) eidlist_opt ::= */
+ -3, /* (241) eidlist_opt ::= LP eidlist RP */
+ -5, /* (242) eidlist ::= eidlist COMMA nm collate sortorder */
+ -3, /* (243) eidlist ::= nm collate sortorder */
+ 0, /* (244) collate ::= */
+ -2, /* (245) collate ::= COLLATE ID|STRING */
+ -4, /* (246) cmd ::= DROP INDEX ifexists fullname */
+ -2, /* (247) cmd ::= VACUUM vinto */
+ -3, /* (248) cmd ::= VACUUM nm vinto */
+ -2, /* (249) vinto ::= INTO expr */
+ 0, /* (250) vinto ::= */
+ -3, /* (251) cmd ::= PRAGMA nm dbnm */
+ -5, /* (252) cmd ::= PRAGMA nm dbnm EQ nmnum */
+ -6, /* (253) cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ -5, /* (254) cmd ::= PRAGMA nm dbnm EQ minus_num */
+ -6, /* (255) cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ -2, /* (256) plus_num ::= PLUS INTEGER|FLOAT */
+ -2, /* (257) minus_num ::= MINUS INTEGER|FLOAT */
+ -5, /* (258) cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ -11, /* (259) trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ -1, /* (260) trigger_time ::= BEFORE|AFTER */
+ -2, /* (261) trigger_time ::= INSTEAD OF */
+ 0, /* (262) trigger_time ::= */
+ -1, /* (263) trigger_event ::= DELETE|INSERT */
+ -1, /* (264) trigger_event ::= UPDATE */
+ -3, /* (265) trigger_event ::= UPDATE OF idlist */
+ 0, /* (266) when_clause ::= */
+ -2, /* (267) when_clause ::= WHEN expr */
+ -3, /* (268) trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ -2, /* (269) trigger_cmd_list ::= trigger_cmd SEMI */
+ -3, /* (270) trnm ::= nm DOT nm */
+ -3, /* (271) tridxby ::= INDEXED BY nm */
+ -2, /* (272) tridxby ::= NOT INDEXED */
+ -9, /* (273) trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+ -8, /* (274) trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+ -6, /* (275) trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+ -3, /* (276) trigger_cmd ::= scanpt select scanpt */
+ -4, /* (277) expr ::= RAISE LP IGNORE RP */
+ -6, /* (278) expr ::= RAISE LP raisetype COMMA nm RP */
+ -1, /* (279) raisetype ::= ROLLBACK */
+ -1, /* (280) raisetype ::= ABORT */
+ -1, /* (281) raisetype ::= FAIL */
+ -4, /* (282) cmd ::= DROP TRIGGER ifexists fullname */
+ -6, /* (283) cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ -3, /* (284) cmd ::= DETACH database_kw_opt expr */
+ 0, /* (285) key_opt ::= */
+ -2, /* (286) key_opt ::= KEY expr */
+ -1, /* (287) cmd ::= REINDEX */
+ -3, /* (288) cmd ::= REINDEX nm dbnm */
+ -1, /* (289) cmd ::= ANALYZE */
+ -3, /* (290) cmd ::= ANALYZE nm dbnm */
+ -6, /* (291) cmd ::= ALTER TABLE fullname RENAME TO nm */
+ -7, /* (292) cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ -6, /* (293) cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
+ -1, /* (294) add_column_fullname ::= fullname */
+ -8, /* (295) cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
+ -1, /* (296) cmd ::= create_vtab */
+ -4, /* (297) cmd ::= create_vtab LP vtabarglist RP */
+ -8, /* (298) create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ 0, /* (299) vtabarg ::= */
+ -1, /* (300) vtabargtoken ::= ANY */
+ -3, /* (301) vtabargtoken ::= lp anylist RP */
+ -1, /* (302) lp ::= LP */
+ -2, /* (303) with ::= WITH wqlist */
+ -3, /* (304) with ::= WITH RECURSIVE wqlist */
+ -1, /* (305) wqas ::= AS */
+ -2, /* (306) wqas ::= AS MATERIALIZED */
+ -3, /* (307) wqas ::= AS NOT MATERIALIZED */
+ -6, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */
+ -1, /* (309) wqlist ::= wqitem */
+ -3, /* (310) wqlist ::= wqlist COMMA wqitem */
+ -1, /* (311) windowdefn_list ::= windowdefn */
+ -3, /* (312) windowdefn_list ::= windowdefn_list COMMA windowdefn */
+ -5, /* (313) windowdefn ::= nm AS LP window RP */
+ -5, /* (314) window ::= PARTITION BY nexprlist orderby_opt frame_opt */
+ -6, /* (315) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
+ -4, /* (316) window ::= ORDER BY sortlist frame_opt */
+ -5, /* (317) window ::= nm ORDER BY sortlist frame_opt */
+ -1, /* (318) window ::= frame_opt */
+ -2, /* (319) window ::= nm frame_opt */
+ 0, /* (320) frame_opt ::= */
+ -3, /* (321) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
+ -6, /* (322) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */
+ -1, /* (323) range_or_rows ::= RANGE|ROWS|GROUPS */
+ -1, /* (324) frame_bound_s ::= frame_bound */
+ -2, /* (325) frame_bound_s ::= UNBOUNDED PRECEDING */
+ -1, /* (326) frame_bound_e ::= frame_bound */
+ -2, /* (327) frame_bound_e ::= UNBOUNDED FOLLOWING */
+ -2, /* (328) frame_bound ::= expr PRECEDING|FOLLOWING */
+ -2, /* (329) frame_bound ::= CURRENT ROW */
+ 0, /* (330) frame_exclude_opt ::= */
+ -2, /* (331) frame_exclude_opt ::= EXCLUDE frame_exclude */
+ -2, /* (332) frame_exclude ::= NO OTHERS */
+ -2, /* (333) frame_exclude ::= CURRENT ROW */
+ -1, /* (334) frame_exclude ::= GROUP|TIES */
+ -2, /* (335) window_clause ::= WINDOW windowdefn_list */
+ -2, /* (336) filter_over ::= filter_clause over_clause */
+ -1, /* (337) filter_over ::= over_clause */
+ -1, /* (338) filter_over ::= filter_clause */
+ -4, /* (339) over_clause ::= OVER LP window RP */
+ -2, /* (340) over_clause ::= OVER nm */
+ -5, /* (341) filter_clause ::= FILTER LP WHERE expr RP */
+ -1, /* (342) input ::= cmdlist */
+ -2, /* (343) cmdlist ::= cmdlist ecmd */
+ -1, /* (344) cmdlist ::= ecmd */
+ -1, /* (345) ecmd ::= SEMI */
+ -2, /* (346) ecmd ::= cmdx SEMI */
+ -3, /* (347) ecmd ::= explain cmdx SEMI */
+ 0, /* (348) trans_opt ::= */
+ -1, /* (349) trans_opt ::= TRANSACTION */
+ -2, /* (350) trans_opt ::= TRANSACTION nm */
+ -1, /* (351) savepoint_opt ::= SAVEPOINT */
+ 0, /* (352) savepoint_opt ::= */
+ -2, /* (353) cmd ::= create_table create_table_args */
+ -1, /* (354) table_option_set ::= table_option */
+ -4, /* (355) columnlist ::= columnlist COMMA columnname carglist */
+ -2, /* (356) columnlist ::= columnname carglist */
+ -1, /* (357) nm ::= ID|INDEXED */
+ -1, /* (358) nm ::= STRING */
+ -1, /* (359) nm ::= JOIN_KW */
+ -1, /* (360) typetoken ::= typename */
+ -1, /* (361) typename ::= ID|STRING */
+ -1, /* (362) signed ::= plus_num */
+ -1, /* (363) signed ::= minus_num */
+ -2, /* (364) carglist ::= carglist ccons */
+ 0, /* (365) carglist ::= */
+ -2, /* (366) ccons ::= NULL onconf */
+ -4, /* (367) ccons ::= GENERATED ALWAYS AS generated */
+ -2, /* (368) ccons ::= AS generated */
+ -2, /* (369) conslist_opt ::= COMMA conslist */
+ -3, /* (370) conslist ::= conslist tconscomma tcons */
+ -1, /* (371) conslist ::= tcons */
+ 0, /* (372) tconscomma ::= */
+ -1, /* (373) defer_subclause_opt ::= defer_subclause */
+ -1, /* (374) resolvetype ::= raisetype */
+ -1, /* (375) selectnowith ::= oneselect */
+ -1, /* (376) oneselect ::= values */
+ -2, /* (377) sclp ::= selcollist COMMA */
+ -1, /* (378) as ::= ID|STRING */
+ -1, /* (379) indexed_opt ::= indexed_by */
+ 0, /* (380) returning ::= */
+ -1, /* (381) expr ::= term */
+ -1, /* (382) likeop ::= LIKE_KW|MATCH */
+ -1, /* (383) exprlist ::= nexprlist */
+ -1, /* (384) nmnum ::= plus_num */
+ -1, /* (385) nmnum ::= nm */
+ -1, /* (386) nmnum ::= ON */
+ -1, /* (387) nmnum ::= DELETE */
+ -1, /* (388) nmnum ::= DEFAULT */
+ -1, /* (389) plus_num ::= INTEGER|FLOAT */
+ 0, /* (390) foreach_clause ::= */
+ -3, /* (391) foreach_clause ::= FOR EACH ROW */
+ -1, /* (392) trnm ::= nm */
+ 0, /* (393) tridxby ::= */
+ -1, /* (394) database_kw_opt ::= DATABASE */
+ 0, /* (395) database_kw_opt ::= */
+ 0, /* (396) kwcolumn_opt ::= */
+ -1, /* (397) kwcolumn_opt ::= COLUMNKW */
+ -1, /* (398) vtabarglist ::= vtabarg */
+ -3, /* (399) vtabarglist ::= vtabarglist COMMA vtabarg */
+ -2, /* (400) vtabarg ::= vtabarg vtabargtoken */
+ 0, /* (401) anylist ::= */
+ -4, /* (402) anylist ::= anylist LP anylist RP */
+ -2, /* (403) anylist ::= anylist ANY */
+ 0, /* (404) with ::= */
};
static void yy_accept(yyParser*); /* Forward Declaration */
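/*
** Editor's note (illustration only; not part of the diff above): for each
** grammar rule J, yyRuleInfoNRhs[J] holds the negative of the number of
** symbols on that rule's right-hand side.  The generated reducer adds that
** negative value to the parser stack pointer, which pops the rule's RHS
** before the single LHS symbol is pushed.  The standalone sketch below
** demonstrates the convention with a made-up two-rule table; toyRuleInfoNRhs
** and toy_reduce are hypothetical names that do not appear in SQLite.
*/
#include <stdio.h>

static const signed char toyRuleInfoNRhs[] = {
  -3,   /* (0) expr ::= expr PLUS expr   -- three RHS symbols */
   0,   /* (1) opt ::=                   -- empty production  */
};

/* Return the stack depth after reducing by rule J: pop |NRhs| symbols,
** then push the one LHS symbol. */
static int toy_reduce(int stackDepth, int J){
  return stackDepth + toyRuleInfoNRhs[J] + 1;
}

int main(void){
  printf("depth after rule 0: %d\n", toy_reduce(5, 0));  /* 5-3+1 = 3 */
  printf("depth after rule 1: %d\n", toy_reduce(5, 1));  /* 5-0+1 = 6 */
  return 0;
}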
@@ -165000,7 +168925,7 @@ static YYACTIONTYPE yy_reduce(
case 5: /* transtype ::= DEFERRED */
case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6);
case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7);
- case 321: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==321);
+ case 323: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==323);
{yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/}
break;
case 8: /* cmd ::= COMMIT|END trans_opt */
@@ -165037,7 +168962,7 @@ static YYACTIONTYPE yy_reduce(
case 72: /* defer_subclause_opt ::= */ yytestcase(yyruleno==72);
case 81: /* ifexists ::= */ yytestcase(yyruleno==81);
case 98: /* distinct ::= */ yytestcase(yyruleno==98);
- case 242: /* collate ::= */ yytestcase(yyruleno==242);
+ case 244: /* collate ::= */ yytestcase(yyruleno==244);
{yymsp[1].minor.yy394 = 0;}
break;
case 16: /* ifnotexists ::= IF NOT EXISTS */
@@ -165221,9 +169146,9 @@ static YYACTIONTYPE yy_reduce(
break;
case 63: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */
case 80: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==80);
- case 214: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==214);
- case 217: /* in_op ::= NOT IN */ yytestcase(yyruleno==217);
- case 243: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==243);
+ case 216: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==216);
+ case 219: /* in_op ::= NOT IN */ yytestcase(yyruleno==219);
+ case 245: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==245);
{yymsp[-1].minor.yy394 = 1;}
break;
case 64: /* init_deferred_pred_opt ::= INITIALLY IMMEDIATE */
@@ -165308,7 +169233,7 @@ static YYACTIONTYPE yy_reduce(
Token x;
x.n = 0;
parserDoubleLinkSelect(pParse, pRhs);
- pFrom = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&x,pRhs,0,0);
+ pFrom = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&x,pRhs,0);
pRhs = sqlite3SelectNew(pParse,0,pFrom,0,0,0,0,0,0);
}
if( pRhs ){
@@ -165373,9 +169298,9 @@ static YYACTIONTYPE yy_reduce(
case 99: /* sclp ::= */
case 132: /* orderby_opt ::= */ yytestcase(yyruleno==132);
case 142: /* groupby_opt ::= */ yytestcase(yyruleno==142);
- case 230: /* exprlist ::= */ yytestcase(yyruleno==230);
- case 233: /* paren_exprlist ::= */ yytestcase(yyruleno==233);
- case 238: /* eidlist_opt ::= */ yytestcase(yyruleno==238);
+ case 232: /* exprlist ::= */ yytestcase(yyruleno==232);
+ case 235: /* paren_exprlist ::= */ yytestcase(yyruleno==235);
+ case 240: /* eidlist_opt ::= */ yytestcase(yyruleno==240);
{yymsp[1].minor.yy322 = 0;}
break;
case 100: /* selcollist ::= sclp scanpt expr scanpt as */
@@ -165400,9 +169325,9 @@ static YYACTIONTYPE yy_reduce(
}
break;
case 103: /* as ::= AS nm */
- case 114: /* dbnm ::= DOT nm */ yytestcase(yyruleno==114);
- case 254: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==254);
- case 255: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==255);
+ case 115: /* dbnm ::= DOT nm */ yytestcase(yyruleno==115);
+ case 256: /* plus_num ::= PLUS INTEGER|FLOAT */ yytestcase(yyruleno==256);
+ case 257: /* minus_num ::= MINUS INTEGER|FLOAT */ yytestcase(yyruleno==257);
{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0;}
break;
case 105: /* from ::= */
@@ -165412,7 +169337,7 @@ static YYACTIONTYPE yy_reduce(
case 106: /* from ::= FROM seltablist */
{
yymsp[-1].minor.yy131 = yymsp[0].minor.yy131;
- sqlite3SrcListShiftJoinType(yymsp[-1].minor.yy131);
+ sqlite3SrcListShiftJoinType(pParse,yymsp[-1].minor.yy131);
}
break;
case 107: /* stl_prefix ::= seltablist joinop */
@@ -165420,35 +169345,43 @@ static YYACTIONTYPE yy_reduce(
if( ALWAYS(yymsp[-1].minor.yy131 && yymsp[-1].minor.yy131->nSrc>0) ) yymsp[-1].minor.yy131->a[yymsp[-1].minor.yy131->nSrc-1].fg.jointype = (u8)yymsp[0].minor.yy394;
}
break;
- case 109: /* seltablist ::= stl_prefix nm dbnm as indexed_opt on_opt using_opt */
+ case 109: /* seltablist ::= stl_prefix nm dbnm as on_using */
+{
+ yymsp[-4].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-4].minor.yy131,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561);
+}
+ break;
+ case 110: /* seltablist ::= stl_prefix nm dbnm as indexed_by on_using */
{
- yymsp[-6].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy131,&yymsp[-5].minor.yy0,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,0,yymsp[-1].minor.yy528,yymsp[0].minor.yy254);
- sqlite3SrcListIndexedBy(pParse, yymsp[-6].minor.yy131, &yymsp[-2].minor.yy0);
+ yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,0,&yymsp[0].minor.yy561);
+ sqlite3SrcListIndexedBy(pParse, yymsp[-5].minor.yy131, &yymsp[-1].minor.yy0);
}
break;
- case 110: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_opt using_opt */
+ case 111: /* seltablist ::= stl_prefix nm dbnm LP exprlist RP as on_using */
{
- yymsp[-8].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-8].minor.yy131,&yymsp[-7].minor.yy0,&yymsp[-6].minor.yy0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy528,yymsp[0].minor.yy254);
- sqlite3SrcListFuncArgs(pParse, yymsp[-8].minor.yy131, yymsp[-4].minor.yy322);
+ yymsp[-7].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-7].minor.yy131,&yymsp[-6].minor.yy0,&yymsp[-5].minor.yy0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561);
+ sqlite3SrcListFuncArgs(pParse, yymsp[-7].minor.yy131, yymsp[-3].minor.yy322);
}
break;
- case 111: /* seltablist ::= stl_prefix LP select RP as on_opt using_opt */
+ case 112: /* seltablist ::= stl_prefix LP select RP as on_using */
{
- yymsp[-6].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy131,0,0,&yymsp[-2].minor.yy0,yymsp[-4].minor.yy47,yymsp[-1].minor.yy528,yymsp[0].minor.yy254);
+ yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,yymsp[-3].minor.yy47,&yymsp[0].minor.yy561);
}
break;
- case 112: /* seltablist ::= stl_prefix LP seltablist RP as on_opt using_opt */
+ case 113: /* seltablist ::= stl_prefix LP seltablist RP as on_using */
{
- if( yymsp[-6].minor.yy131==0 && yymsp[-2].minor.yy0.n==0 && yymsp[-1].minor.yy528==0 && yymsp[0].minor.yy254==0 ){
- yymsp[-6].minor.yy131 = yymsp[-4].minor.yy131;
- }else if( yymsp[-4].minor.yy131->nSrc==1 ){
- yymsp[-6].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy131,0,0,&yymsp[-2].minor.yy0,0,yymsp[-1].minor.yy528,yymsp[0].minor.yy254);
- if( yymsp[-6].minor.yy131 ){
- SrcItem *pNew = &yymsp[-6].minor.yy131->a[yymsp[-6].minor.yy131->nSrc-1];
- SrcItem *pOld = yymsp[-4].minor.yy131->a;
+ if( yymsp[-5].minor.yy131==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy561.pOn==0 && yymsp[0].minor.yy561.pUsing==0 ){
+ yymsp[-5].minor.yy131 = yymsp[-3].minor.yy131;
+ }else if( yymsp[-3].minor.yy131->nSrc==1 ){
+ yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561);
+ if( yymsp[-5].minor.yy131 ){
+ SrcItem *pNew = &yymsp[-5].minor.yy131->a[yymsp[-5].minor.yy131->nSrc-1];
+ SrcItem *pOld = yymsp[-3].minor.yy131->a;
pNew->zName = pOld->zName;
pNew->zDatabase = pOld->zDatabase;
pNew->pSelect = pOld->pSelect;
+ if( pNew->pSelect && (pNew->pSelect->selFlags & SF_NestedFrom)!=0 ){
+ pNew->fg.isNestedFrom = 1;
+ }
if( pOld->fg.isTabFunc ){
pNew->u1.pFuncArg = pOld->u1.pFuncArg;
pOld->u1.pFuncArg = 0;
@@ -165458,94 +169391,78 @@ static YYACTIONTYPE yy_reduce(
pOld->zName = pOld->zDatabase = 0;
pOld->pSelect = 0;
}
- sqlite3SrcListDelete(pParse->db, yymsp[-4].minor.yy131);
+ sqlite3SrcListDelete(pParse->db, yymsp[-3].minor.yy131);
}else{
Select *pSubquery;
- sqlite3SrcListShiftJoinType(yymsp[-4].minor.yy131);
- pSubquery = sqlite3SelectNew(pParse,0,yymsp[-4].minor.yy131,0,0,0,0,SF_NestedFrom,0);
- yymsp[-6].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-6].minor.yy131,0,0,&yymsp[-2].minor.yy0,pSubquery,yymsp[-1].minor.yy528,yymsp[0].minor.yy254);
+ sqlite3SrcListShiftJoinType(pParse,yymsp[-3].minor.yy131);
+ pSubquery = sqlite3SelectNew(pParse,0,yymsp[-3].minor.yy131,0,0,0,0,SF_NestedFrom,0);
+ yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,pSubquery,&yymsp[0].minor.yy561);
}
}
break;
- case 113: /* dbnm ::= */
- case 127: /* indexed_opt ::= */ yytestcase(yyruleno==127);
+ case 114: /* dbnm ::= */
+ case 129: /* indexed_opt ::= */ yytestcase(yyruleno==129);
{yymsp[1].minor.yy0.z=0; yymsp[1].minor.yy0.n=0;}
break;
- case 115: /* fullname ::= nm */
+ case 116: /* fullname ::= nm */
{
yylhsminor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0);
if( IN_RENAME_OBJECT && yylhsminor.yy131 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy131->a[0].zName, &yymsp[0].minor.yy0);
}
yymsp[0].minor.yy131 = yylhsminor.yy131;
break;
- case 116: /* fullname ::= nm DOT nm */
+ case 117: /* fullname ::= nm DOT nm */
{
yylhsminor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0);
if( IN_RENAME_OBJECT && yylhsminor.yy131 ) sqlite3RenameTokenMap(pParse, yylhsminor.yy131->a[0].zName, &yymsp[0].minor.yy0);
}
yymsp[-2].minor.yy131 = yylhsminor.yy131;
break;
- case 117: /* xfullname ::= nm */
+ case 118: /* xfullname ::= nm */
{yymsp[0].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[0].minor.yy0,0); /*A-overwrites-X*/}
break;
- case 118: /* xfullname ::= nm DOT nm */
+ case 119: /* xfullname ::= nm DOT nm */
{yymsp[-2].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/}
break;
- case 119: /* xfullname ::= nm DOT nm AS nm */
+ case 120: /* xfullname ::= nm DOT nm AS nm */
{
yymsp[-4].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,&yymsp[-2].minor.yy0); /*A-overwrites-X*/
if( yymsp[-4].minor.yy131 ) yymsp[-4].minor.yy131->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0);
}
break;
- case 120: /* xfullname ::= nm AS nm */
+ case 121: /* xfullname ::= nm AS nm */
{
yymsp[-2].minor.yy131 = sqlite3SrcListAppend(pParse,0,&yymsp[-2].minor.yy0,0); /*A-overwrites-X*/
if( yymsp[-2].minor.yy131 ) yymsp[-2].minor.yy131->a[0].zAlias = sqlite3NameFromToken(pParse->db, &yymsp[0].minor.yy0);
}
break;
- case 121: /* joinop ::= COMMA|JOIN */
+ case 122: /* joinop ::= COMMA|JOIN */
{ yymsp[0].minor.yy394 = JT_INNER; }
break;
- case 122: /* joinop ::= JOIN_KW JOIN */
+ case 123: /* joinop ::= JOIN_KW JOIN */
{yymsp[-1].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-1].minor.yy0,0,0); /*X-overwrites-A*/}
break;
- case 123: /* joinop ::= JOIN_KW nm JOIN */
+ case 124: /* joinop ::= JOIN_KW nm JOIN */
{yymsp[-2].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0,0); /*X-overwrites-A*/}
break;
- case 124: /* joinop ::= JOIN_KW nm nm JOIN */
+ case 125: /* joinop ::= JOIN_KW nm nm JOIN */
{yymsp[-3].minor.yy394 = sqlite3JoinType(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);/*X-overwrites-A*/}
break;
- case 125: /* on_opt ::= ON expr */
- case 145: /* having_opt ::= HAVING expr */ yytestcase(yyruleno==145);
- case 152: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==152);
- case 154: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==154);
- case 226: /* case_else ::= ELSE expr */ yytestcase(yyruleno==226);
- case 247: /* vinto ::= INTO expr */ yytestcase(yyruleno==247);
-{yymsp[-1].minor.yy528 = yymsp[0].minor.yy528;}
+ case 126: /* on_using ::= ON expr */
+{yymsp[-1].minor.yy561.pOn = yymsp[0].minor.yy528; yymsp[-1].minor.yy561.pUsing = 0;}
break;
- case 126: /* on_opt ::= */
- case 144: /* having_opt ::= */ yytestcase(yyruleno==144);
- case 146: /* limit_opt ::= */ yytestcase(yyruleno==146);
- case 151: /* where_opt ::= */ yytestcase(yyruleno==151);
- case 153: /* where_opt_ret ::= */ yytestcase(yyruleno==153);
- case 227: /* case_else ::= */ yytestcase(yyruleno==227);
- case 229: /* case_operand ::= */ yytestcase(yyruleno==229);
- case 248: /* vinto ::= */ yytestcase(yyruleno==248);
-{yymsp[1].minor.yy528 = 0;}
+ case 127: /* on_using ::= USING LP idlist RP */
+{yymsp[-3].minor.yy561.pOn = 0; yymsp[-3].minor.yy561.pUsing = yymsp[-1].minor.yy254;}
+ break;
+ case 128: /* on_using ::= */
+{yymsp[1].minor.yy561.pOn = 0; yymsp[1].minor.yy561.pUsing = 0;}
break;
- case 128: /* indexed_opt ::= INDEXED BY nm */
+ case 130: /* indexed_by ::= INDEXED BY nm */
{yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;}
break;
- case 129: /* indexed_opt ::= NOT INDEXED */
+ case 131: /* indexed_by ::= NOT INDEXED */
{yymsp[-1].minor.yy0.z=0; yymsp[-1].minor.yy0.n=1;}
break;
- case 130: /* using_opt ::= USING LP idlist RP */
-{yymsp[-3].minor.yy254 = yymsp[-1].minor.yy254;}
- break;
- case 131: /* using_opt ::= */
- case 173: /* idlist_opt ::= */ yytestcase(yyruleno==173);
-{yymsp[1].minor.yy254 = 0;}
- break;
case 133: /* orderby_opt ::= ORDER BY sortlist */
case 143: /* groupby_opt ::= GROUP BY nexprlist */ yytestcase(yyruleno==143);
{yymsp[-2].minor.yy322 = yymsp[0].minor.yy322;}
@@ -165578,6 +169495,22 @@ static YYACTIONTYPE yy_reduce(
case 140: /* nulls ::= NULLS LAST */
{yymsp[-1].minor.yy394 = SQLITE_SO_DESC;}
break;
+ case 144: /* having_opt ::= */
+ case 146: /* limit_opt ::= */ yytestcase(yyruleno==146);
+ case 151: /* where_opt ::= */ yytestcase(yyruleno==151);
+ case 153: /* where_opt_ret ::= */ yytestcase(yyruleno==153);
+ case 229: /* case_else ::= */ yytestcase(yyruleno==229);
+ case 231: /* case_operand ::= */ yytestcase(yyruleno==231);
+ case 250: /* vinto ::= */ yytestcase(yyruleno==250);
+{yymsp[1].minor.yy528 = 0;}
+ break;
+ case 145: /* having_opt ::= HAVING expr */
+ case 152: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==152);
+ case 154: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==154);
+ case 228: /* case_else ::= ELSE expr */ yytestcase(yyruleno==228);
+ case 249: /* vinto ::= INTO expr */ yytestcase(yyruleno==249);
+{yymsp[-1].minor.yy528 = yymsp[0].minor.yy528;}
+ break;
case 147: /* limit_opt ::= LIMIT expr */
{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse,TK_LIMIT,yymsp[0].minor.yy528,0);}
break;
@@ -165609,7 +169542,18 @@ static YYACTIONTYPE yy_reduce(
case 157: /* cmd ::= with UPDATE orconf xfullname indexed_opt SET setlist from where_opt_ret orderby_opt limit_opt */
{
sqlite3SrcListIndexedBy(pParse, yymsp[-7].minor.yy131, &yymsp[-6].minor.yy0);
- yymsp[-7].minor.yy131 = sqlite3SrcListAppendList(pParse, yymsp[-7].minor.yy131, yymsp[-3].minor.yy131);
+ if( yymsp[-3].minor.yy131 ){
+ SrcList *pFromClause = yymsp[-3].minor.yy131;
+ if( pFromClause->nSrc>1 ){
+ Select *pSubquery;
+ Token as;
+ pSubquery = sqlite3SelectNew(pParse,0,pFromClause,0,0,0,0,SF_NestedFrom,0);
+ as.n = 0;
+ as.z = 0;
+ pFromClause = sqlite3SrcListAppendFromTerm(pParse,0,0,0,&as,pSubquery,0);
+ }
+ yymsp[-7].minor.yy131 = sqlite3SrcListAppendList(pParse, yymsp[-7].minor.yy131, pFromClause);
+ }
sqlite3ExprListCheckLength(pParse,yymsp[-4].minor.yy322,"set list");
#ifndef SQLITE_ENABLE_UPDATE_DELETE_LIMIT
if( yymsp[-1].minor.yy322 || yymsp[0].minor.yy528 ){
@@ -165675,6 +169619,9 @@ static YYACTIONTYPE yy_reduce(
case 170: /* returning ::= RETURNING selcollist */
{sqlite3AddReturning(pParse,yymsp[0].minor.yy322);}
break;
+ case 173: /* idlist_opt ::= */
+{yymsp[1].minor.yy254 = 0;}
+ break;
case 174: /* idlist_opt ::= LP idlist RP */
{yymsp[-2].minor.yy254 = yymsp[-1].minor.yy254;}
break;
@@ -165860,17 +169807,29 @@ static YYACTIONTYPE yy_reduce(
binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-3].minor.yy528, TK_NOTNULL);
}
break;
- case 209: /* expr ::= NOT expr */
- case 210: /* expr ::= BITNOT expr */ yytestcase(yyruleno==210);
+ case 209: /* expr ::= expr IS NOT DISTINCT FROM expr */
+{
+ yymsp[-5].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy528,yymsp[0].minor.yy528);
+ binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-5].minor.yy528, TK_ISNULL);
+}
+ break;
+ case 210: /* expr ::= expr IS DISTINCT FROM expr */
+{
+ yymsp[-4].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy528,yymsp[0].minor.yy528);
+ binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-4].minor.yy528, TK_NOTNULL);
+}
+ break;
+ case 211: /* expr ::= NOT expr */
+ case 212: /* expr ::= BITNOT expr */ yytestcase(yyruleno==212);
{yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy528, 0);/*A-overwrites-B*/}
break;
- case 211: /* expr ::= PLUS|MINUS expr */
+ case 213: /* expr ::= PLUS|MINUS expr */
{
yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy528, 0);
/*A-overwrites-B*/
}
break;
- case 212: /* expr ::= expr PTR expr */
+ case 214: /* expr ::= expr PTR expr */
{
ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy528);
pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy528);
@@ -165878,11 +169837,11 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-2].minor.yy528 = yylhsminor.yy528;
break;
- case 213: /* between_op ::= BETWEEN */
- case 216: /* in_op ::= IN */ yytestcase(yyruleno==216);
+ case 215: /* between_op ::= BETWEEN */
+ case 218: /* in_op ::= IN */ yytestcase(yyruleno==218);
{yymsp[0].minor.yy394 = 0;}
break;
- case 215: /* expr ::= expr between_op expr AND expr */
+ case 217: /* expr ::= expr between_op expr AND expr */
{
ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528);
pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy528);
@@ -165895,7 +169854,7 @@ static YYACTIONTYPE yy_reduce(
if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);
}
break;
- case 218: /* expr ::= expr in_op LP exprlist RP */
+ case 220: /* expr ::= expr in_op LP exprlist RP */
{
if( yymsp[-1].minor.yy322==0 ){
/* Expressions of the form
@@ -165907,7 +169866,8 @@ static YYACTIONTYPE yy_reduce(
** regardless of the value of expr1.
*/
sqlite3ExprUnmapAndDelete(pParse, yymsp[-4].minor.yy528);
- yymsp[-4].minor.yy528 = sqlite3Expr(pParse->db, TK_INTEGER, yymsp[-3].minor.yy394 ? "1" : "0");
+ yymsp[-4].minor.yy528 = sqlite3Expr(pParse->db, TK_STRING, yymsp[-3].minor.yy394 ? "true" : "false");
+ if( yymsp[-4].minor.yy528 ) sqlite3ExprIdToTrueFalse(yymsp[-4].minor.yy528);
}else{
Expr *pRHS = yymsp[-1].minor.yy322->a[0].pExpr;
if( yymsp[-1].minor.yy322->nExpr==1 && sqlite3ExprIsConstant(pRHS) && yymsp[-4].minor.yy528->op!=TK_VECTOR ){
@@ -165935,20 +169895,20 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 219: /* expr ::= LP select RP */
+ case 221: /* expr ::= LP select RP */
{
yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy528, yymsp[-1].minor.yy47);
}
break;
- case 220: /* expr ::= expr in_op LP select RP */
+ case 222: /* expr ::= expr in_op LP select RP */
{
yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0);
sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, yymsp[-1].minor.yy47);
if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);
}
break;
- case 221: /* expr ::= expr in_op nm dbnm paren_exprlist */
+ case 223: /* expr ::= expr in_op nm dbnm paren_exprlist */
{
SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);
Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0);
@@ -165958,14 +169918,14 @@ static YYACTIONTYPE yy_reduce(
if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0);
}
break;
- case 222: /* expr ::= EXISTS LP select RP */
+ case 224: /* expr ::= EXISTS LP select RP */
{
Expr *p;
p = yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0);
sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy47);
}
break;
- case 223: /* expr ::= CASE case_operand case_exprlist case_else END */
+ case 225: /* expr ::= CASE case_operand case_exprlist case_else END */
{
yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy528, 0);
if( yymsp[-4].minor.yy528 ){
@@ -165977,32 +169937,32 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 224: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
+ case 226: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */
{
yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[-2].minor.yy528);
yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[0].minor.yy528);
}
break;
- case 225: /* case_exprlist ::= WHEN expr THEN expr */
+ case 227: /* case_exprlist ::= WHEN expr THEN expr */
{
yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528);
yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy322, yymsp[0].minor.yy528);
}
break;
- case 228: /* case_operand ::= expr */
+ case 230: /* case_operand ::= expr */
{yymsp[0].minor.yy528 = yymsp[0].minor.yy528; /*A-overwrites-X*/}
break;
- case 231: /* nexprlist ::= nexprlist COMMA expr */
+ case 233: /* nexprlist ::= nexprlist COMMA expr */
{yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[0].minor.yy528);}
break;
- case 232: /* nexprlist ::= expr */
+ case 234: /* nexprlist ::= expr */
{yymsp[0].minor.yy322 = sqlite3ExprListAppend(pParse,0,yymsp[0].minor.yy528); /*A-overwrites-Y*/}
break;
- case 234: /* paren_exprlist ::= LP exprlist RP */
- case 239: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==239);
+ case 236: /* paren_exprlist ::= LP exprlist RP */
+ case 241: /* eidlist_opt ::= LP eidlist RP */ yytestcase(yyruleno==241);
{yymsp[-2].minor.yy322 = yymsp[-1].minor.yy322;}
break;
- case 235: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
+ case 237: /* cmd ::= createkw uniqueflag INDEX ifnotexists nm dbnm ON nm LP sortlist RP where_opt */
{
sqlite3CreateIndex(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0,
sqlite3SrcListAppend(pParse,0,&yymsp[-4].minor.yy0,0), yymsp[-2].minor.yy322, yymsp[-10].minor.yy394,
@@ -166012,48 +169972,48 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 236: /* uniqueflag ::= UNIQUE */
- case 278: /* raisetype ::= ABORT */ yytestcase(yyruleno==278);
+ case 238: /* uniqueflag ::= UNIQUE */
+ case 280: /* raisetype ::= ABORT */ yytestcase(yyruleno==280);
{yymsp[0].minor.yy394 = OE_Abort;}
break;
- case 237: /* uniqueflag ::= */
+ case 239: /* uniqueflag ::= */
{yymsp[1].minor.yy394 = OE_None;}
break;
- case 240: /* eidlist ::= eidlist COMMA nm collate sortorder */
+ case 242: /* eidlist ::= eidlist COMMA nm collate sortorder */
{
yymsp[-4].minor.yy322 = parserAddExprIdListTerm(pParse, yymsp[-4].minor.yy322, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394);
}
break;
- case 241: /* eidlist ::= nm collate sortorder */
+ case 243: /* eidlist ::= nm collate sortorder */
{
yymsp[-2].minor.yy322 = parserAddExprIdListTerm(pParse, 0, &yymsp[-2].minor.yy0, yymsp[-1].minor.yy394, yymsp[0].minor.yy394); /*A-overwrites-Y*/
}
break;
- case 244: /* cmd ::= DROP INDEX ifexists fullname */
+ case 246: /* cmd ::= DROP INDEX ifexists fullname */
{sqlite3DropIndex(pParse, yymsp[0].minor.yy131, yymsp[-1].minor.yy394);}
break;
- case 245: /* cmd ::= VACUUM vinto */
+ case 247: /* cmd ::= VACUUM vinto */
{sqlite3Vacuum(pParse,0,yymsp[0].minor.yy528);}
break;
- case 246: /* cmd ::= VACUUM nm vinto */
+ case 248: /* cmd ::= VACUUM nm vinto */
{sqlite3Vacuum(pParse,&yymsp[-1].minor.yy0,yymsp[0].minor.yy528);}
break;
- case 249: /* cmd ::= PRAGMA nm dbnm */
+ case 251: /* cmd ::= PRAGMA nm dbnm */
{sqlite3Pragma(pParse,&yymsp[-1].minor.yy0,&yymsp[0].minor.yy0,0,0);}
break;
- case 250: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
+ case 252: /* cmd ::= PRAGMA nm dbnm EQ nmnum */
{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,0);}
break;
- case 251: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
+ case 253: /* cmd ::= PRAGMA nm dbnm LP nmnum RP */
{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,0);}
break;
- case 252: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
+ case 254: /* cmd ::= PRAGMA nm dbnm EQ minus_num */
{sqlite3Pragma(pParse,&yymsp[-3].minor.yy0,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0,1);}
break;
- case 253: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
+ case 255: /* cmd ::= PRAGMA nm dbnm LP minus_num RP */
{sqlite3Pragma(pParse,&yymsp[-4].minor.yy0,&yymsp[-3].minor.yy0,&yymsp[-1].minor.yy0,1);}
break;
- case 256: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
+ case 258: /* cmd ::= createkw trigger_decl BEGIN trigger_cmd_list END */
{
Token all;
all.z = yymsp[-3].minor.yy0.z;
@@ -166061,50 +170021,50 @@ static YYACTIONTYPE yy_reduce(
sqlite3FinishTrigger(pParse, yymsp[-1].minor.yy33, &all);
}
break;
- case 257: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
+ case 259: /* trigger_decl ::= temp TRIGGER ifnotexists nm dbnm trigger_time trigger_event ON fullname foreach_clause when_clause */
{
sqlite3BeginTrigger(pParse, &yymsp[-7].minor.yy0, &yymsp[-6].minor.yy0, yymsp[-5].minor.yy394, yymsp[-4].minor.yy180.a, yymsp[-4].minor.yy180.b, yymsp[-2].minor.yy131, yymsp[0].minor.yy528, yymsp[-10].minor.yy394, yymsp[-8].minor.yy394);
yymsp[-10].minor.yy0 = (yymsp[-6].minor.yy0.n==0?yymsp[-7].minor.yy0:yymsp[-6].minor.yy0); /*A-overwrites-T*/
}
break;
- case 258: /* trigger_time ::= BEFORE|AFTER */
+ case 260: /* trigger_time ::= BEFORE|AFTER */
{ yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/ }
break;
- case 259: /* trigger_time ::= INSTEAD OF */
+ case 261: /* trigger_time ::= INSTEAD OF */
{ yymsp[-1].minor.yy394 = TK_INSTEAD;}
break;
- case 260: /* trigger_time ::= */
+ case 262: /* trigger_time ::= */
{ yymsp[1].minor.yy394 = TK_BEFORE; }
break;
- case 261: /* trigger_event ::= DELETE|INSERT */
- case 262: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==262);
+ case 263: /* trigger_event ::= DELETE|INSERT */
+ case 264: /* trigger_event ::= UPDATE */ yytestcase(yyruleno==264);
{yymsp[0].minor.yy180.a = yymsp[0].major; /*A-overwrites-X*/ yymsp[0].minor.yy180.b = 0;}
break;
- case 263: /* trigger_event ::= UPDATE OF idlist */
+ case 265: /* trigger_event ::= UPDATE OF idlist */
{yymsp[-2].minor.yy180.a = TK_UPDATE; yymsp[-2].minor.yy180.b = yymsp[0].minor.yy254;}
break;
- case 264: /* when_clause ::= */
- case 283: /* key_opt ::= */ yytestcase(yyruleno==283);
+ case 266: /* when_clause ::= */
+ case 285: /* key_opt ::= */ yytestcase(yyruleno==285);
{ yymsp[1].minor.yy528 = 0; }
break;
- case 265: /* when_clause ::= WHEN expr */
- case 284: /* key_opt ::= KEY expr */ yytestcase(yyruleno==284);
+ case 267: /* when_clause ::= WHEN expr */
+ case 286: /* key_opt ::= KEY expr */ yytestcase(yyruleno==286);
{ yymsp[-1].minor.yy528 = yymsp[0].minor.yy528; }
break;
- case 266: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
+ case 268: /* trigger_cmd_list ::= trigger_cmd_list trigger_cmd SEMI */
{
assert( yymsp[-2].minor.yy33!=0 );
yymsp[-2].minor.yy33->pLast->pNext = yymsp[-1].minor.yy33;
yymsp[-2].minor.yy33->pLast = yymsp[-1].minor.yy33;
}
break;
- case 267: /* trigger_cmd_list ::= trigger_cmd SEMI */
+ case 269: /* trigger_cmd_list ::= trigger_cmd SEMI */
{
assert( yymsp[-1].minor.yy33!=0 );
yymsp[-1].minor.yy33->pLast = yymsp[-1].minor.yy33;
}
break;
- case 268: /* trnm ::= nm DOT nm */
+ case 270: /* trnm ::= nm DOT nm */
{
yymsp[-2].minor.yy0 = yymsp[0].minor.yy0;
sqlite3ErrorMsg(pParse,
@@ -166112,39 +170072,39 @@ static YYACTIONTYPE yy_reduce(
"statements within triggers");
}
break;
- case 269: /* tridxby ::= INDEXED BY nm */
+ case 271: /* tridxby ::= INDEXED BY nm */
{
sqlite3ErrorMsg(pParse,
"the INDEXED BY clause is not allowed on UPDATE or DELETE statements "
"within triggers");
}
break;
- case 270: /* tridxby ::= NOT INDEXED */
+ case 272: /* tridxby ::= NOT INDEXED */
{
sqlite3ErrorMsg(pParse,
"the NOT INDEXED clause is not allowed on UPDATE or DELETE statements "
"within triggers");
}
break;
- case 271: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
+ case 273: /* trigger_cmd ::= UPDATE orconf trnm tridxby SET setlist from where_opt scanpt */
{yylhsminor.yy33 = sqlite3TriggerUpdateStep(pParse, &yymsp[-6].minor.yy0, yymsp[-2].minor.yy131, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528, yymsp[-7].minor.yy394, yymsp[-8].minor.yy0.z, yymsp[0].minor.yy522);}
yymsp[-8].minor.yy33 = yylhsminor.yy33;
break;
- case 272: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
+ case 274: /* trigger_cmd ::= scanpt insert_cmd INTO trnm idlist_opt select upsert scanpt */
{
yylhsminor.yy33 = sqlite3TriggerInsertStep(pParse,&yymsp[-4].minor.yy0,yymsp[-3].minor.yy254,yymsp[-2].minor.yy47,yymsp[-6].minor.yy394,yymsp[-1].minor.yy444,yymsp[-7].minor.yy522,yymsp[0].minor.yy522);/*yylhsminor.yy33-overwrites-yymsp[-6].minor.yy394*/
}
yymsp[-7].minor.yy33 = yylhsminor.yy33;
break;
- case 273: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
+ case 275: /* trigger_cmd ::= DELETE FROM trnm tridxby where_opt scanpt */
{yylhsminor.yy33 = sqlite3TriggerDeleteStep(pParse, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy528, yymsp[-5].minor.yy0.z, yymsp[0].minor.yy522);}
yymsp[-5].minor.yy33 = yylhsminor.yy33;
break;
- case 274: /* trigger_cmd ::= scanpt select scanpt */
+ case 276: /* trigger_cmd ::= scanpt select scanpt */
{yylhsminor.yy33 = sqlite3TriggerSelectStep(pParse->db, yymsp[-1].minor.yy47, yymsp[-2].minor.yy522, yymsp[0].minor.yy522); /*yylhsminor.yy33-overwrites-yymsp[-1].minor.yy47*/}
yymsp[-2].minor.yy33 = yylhsminor.yy33;
break;
- case 275: /* expr ::= RAISE LP IGNORE RP */
+ case 277: /* expr ::= RAISE LP IGNORE RP */
{
yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_RAISE, 0, 0);
if( yymsp[-3].minor.yy528 ){
@@ -166152,7 +170112,7 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 276: /* expr ::= RAISE LP raisetype COMMA nm RP */
+ case 278: /* expr ::= RAISE LP raisetype COMMA nm RP */
{
yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1);
if( yymsp[-5].minor.yy528 ) {
@@ -166160,118 +170120,118 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 277: /* raisetype ::= ROLLBACK */
+ case 279: /* raisetype ::= ROLLBACK */
{yymsp[0].minor.yy394 = OE_Rollback;}
break;
- case 279: /* raisetype ::= FAIL */
+ case 281: /* raisetype ::= FAIL */
{yymsp[0].minor.yy394 = OE_Fail;}
break;
- case 280: /* cmd ::= DROP TRIGGER ifexists fullname */
+ case 282: /* cmd ::= DROP TRIGGER ifexists fullname */
{
sqlite3DropTrigger(pParse,yymsp[0].minor.yy131,yymsp[-1].minor.yy394);
}
break;
- case 281: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
+ case 283: /* cmd ::= ATTACH database_kw_opt expr AS expr key_opt */
{
sqlite3Attach(pParse, yymsp[-3].minor.yy528, yymsp[-1].minor.yy528, yymsp[0].minor.yy528);
}
break;
- case 282: /* cmd ::= DETACH database_kw_opt expr */
+ case 284: /* cmd ::= DETACH database_kw_opt expr */
{
sqlite3Detach(pParse, yymsp[0].minor.yy528);
}
break;
- case 285: /* cmd ::= REINDEX */
+ case 287: /* cmd ::= REINDEX */
{sqlite3Reindex(pParse, 0, 0);}
break;
- case 286: /* cmd ::= REINDEX nm dbnm */
+ case 288: /* cmd ::= REINDEX nm dbnm */
{sqlite3Reindex(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
break;
- case 287: /* cmd ::= ANALYZE */
+ case 289: /* cmd ::= ANALYZE */
{sqlite3Analyze(pParse, 0, 0);}
break;
- case 288: /* cmd ::= ANALYZE nm dbnm */
+ case 290: /* cmd ::= ANALYZE nm dbnm */
{sqlite3Analyze(pParse, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0);}
break;
- case 289: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
+ case 291: /* cmd ::= ALTER TABLE fullname RENAME TO nm */
{
sqlite3AlterRenameTable(pParse,yymsp[-3].minor.yy131,&yymsp[0].minor.yy0);
}
break;
- case 290: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
+ case 292: /* cmd ::= ALTER TABLE add_column_fullname ADD kwcolumn_opt columnname carglist */
{
yymsp[-1].minor.yy0.n = (int)(pParse->sLastToken.z-yymsp[-1].minor.yy0.z) + pParse->sLastToken.n;
sqlite3AlterFinishAddColumn(pParse, &yymsp[-1].minor.yy0);
}
break;
- case 291: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
+ case 293: /* cmd ::= ALTER TABLE fullname DROP kwcolumn_opt nm */
{
sqlite3AlterDropColumn(pParse, yymsp[-3].minor.yy131, &yymsp[0].minor.yy0);
}
break;
- case 292: /* add_column_fullname ::= fullname */
+ case 294: /* add_column_fullname ::= fullname */
{
disableLookaside(pParse);
sqlite3AlterBeginAddColumn(pParse, yymsp[0].minor.yy131);
}
break;
- case 293: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
+ case 295: /* cmd ::= ALTER TABLE fullname RENAME kwcolumn_opt nm TO nm */
{
sqlite3AlterRenameColumn(pParse, yymsp[-5].minor.yy131, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);
}
break;
- case 294: /* cmd ::= create_vtab */
+ case 296: /* cmd ::= create_vtab */
{sqlite3VtabFinishParse(pParse,0);}
break;
- case 295: /* cmd ::= create_vtab LP vtabarglist RP */
+ case 297: /* cmd ::= create_vtab LP vtabarglist RP */
{sqlite3VtabFinishParse(pParse,&yymsp[0].minor.yy0);}
break;
- case 296: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
+ case 298: /* create_vtab ::= createkw VIRTUAL TABLE ifnotexists nm dbnm USING nm */
{
sqlite3VtabBeginParse(pParse, &yymsp[-3].minor.yy0, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-4].minor.yy394);
}
break;
- case 297: /* vtabarg ::= */
+ case 299: /* vtabarg ::= */
{sqlite3VtabArgInit(pParse);}
break;
- case 298: /* vtabargtoken ::= ANY */
- case 299: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==299);
- case 300: /* lp ::= LP */ yytestcase(yyruleno==300);
+ case 300: /* vtabargtoken ::= ANY */
+ case 301: /* vtabargtoken ::= lp anylist RP */ yytestcase(yyruleno==301);
+ case 302: /* lp ::= LP */ yytestcase(yyruleno==302);
{sqlite3VtabArgExtend(pParse,&yymsp[0].minor.yy0);}
break;
- case 301: /* with ::= WITH wqlist */
- case 302: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==302);
+ case 303: /* with ::= WITH wqlist */
+ case 304: /* with ::= WITH RECURSIVE wqlist */ yytestcase(yyruleno==304);
{ sqlite3WithPush(pParse, yymsp[0].minor.yy521, 1); }
break;
- case 303: /* wqas ::= AS */
+ case 305: /* wqas ::= AS */
{yymsp[0].minor.yy516 = M10d_Any;}
break;
- case 304: /* wqas ::= AS MATERIALIZED */
+ case 306: /* wqas ::= AS MATERIALIZED */
{yymsp[-1].minor.yy516 = M10d_Yes;}
break;
- case 305: /* wqas ::= AS NOT MATERIALIZED */
+ case 307: /* wqas ::= AS NOT MATERIALIZED */
{yymsp[-2].minor.yy516 = M10d_No;}
break;
- case 306: /* wqitem ::= nm eidlist_opt wqas LP select RP */
+ case 308: /* wqitem ::= nm eidlist_opt wqas LP select RP */
{
yymsp[-5].minor.yy385 = sqlite3CteNew(pParse, &yymsp[-5].minor.yy0, yymsp[-4].minor.yy322, yymsp[-1].minor.yy47, yymsp[-3].minor.yy516); /*A-overwrites-X*/
}
break;
- case 307: /* wqlist ::= wqitem */
+ case 309: /* wqlist ::= wqitem */
{
yymsp[0].minor.yy521 = sqlite3WithAdd(pParse, 0, yymsp[0].minor.yy385); /*A-overwrites-X*/
}
break;
- case 308: /* wqlist ::= wqlist COMMA wqitem */
+ case 310: /* wqlist ::= wqlist COMMA wqitem */
{
yymsp[-2].minor.yy521 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy521, yymsp[0].minor.yy385);
}
break;
- case 309: /* windowdefn_list ::= windowdefn */
+ case 311: /* windowdefn_list ::= windowdefn */
{ yylhsminor.yy41 = yymsp[0].minor.yy41; }
yymsp[0].minor.yy41 = yylhsminor.yy41;
break;
- case 310: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */
+ case 312: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */
{
assert( yymsp[0].minor.yy41!=0 );
sqlite3WindowChain(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy41);
@@ -166280,7 +170240,7 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-2].minor.yy41 = yylhsminor.yy41;
break;
- case 311: /* windowdefn ::= nm AS LP window RP */
+ case 313: /* windowdefn ::= nm AS LP window RP */
{
if( ALWAYS(yymsp[-1].minor.yy41) ){
yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n);
@@ -166289,90 +170249,90 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-4].minor.yy41 = yylhsminor.yy41;
break;
- case 312: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */
+ case 314: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */
{
yymsp[-4].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, 0);
}
break;
- case 313: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
+ case 315: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */
{
yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, &yymsp[-5].minor.yy0);
}
yymsp[-5].minor.yy41 = yylhsminor.yy41;
break;
- case 314: /* window ::= ORDER BY sortlist frame_opt */
+ case 316: /* window ::= ORDER BY sortlist frame_opt */
{
yymsp[-3].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, 0);
}
break;
- case 315: /* window ::= nm ORDER BY sortlist frame_opt */
+ case 317: /* window ::= nm ORDER BY sortlist frame_opt */
{
yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0);
}
yymsp[-4].minor.yy41 = yylhsminor.yy41;
break;
- case 316: /* window ::= frame_opt */
- case 335: /* filter_over ::= over_clause */ yytestcase(yyruleno==335);
+ case 318: /* window ::= frame_opt */
+ case 337: /* filter_over ::= over_clause */ yytestcase(yyruleno==337);
{
yylhsminor.yy41 = yymsp[0].minor.yy41;
}
yymsp[0].minor.yy41 = yylhsminor.yy41;
break;
- case 317: /* window ::= nm frame_opt */
+ case 319: /* window ::= nm frame_opt */
{
yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, 0, &yymsp[-1].minor.yy0);
}
yymsp[-1].minor.yy41 = yylhsminor.yy41;
break;
- case 318: /* frame_opt ::= */
+ case 320: /* frame_opt ::= */
{
yymsp[1].minor.yy41 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0);
}
break;
- case 319: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
+ case 321: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */
{
yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy394, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy516);
}
yymsp[-2].minor.yy41 = yylhsminor.yy41;
break;
- case 320: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */
+ case 322: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */
{
yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy394, yymsp[-3].minor.yy595.eType, yymsp[-3].minor.yy595.pExpr, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, yymsp[0].minor.yy516);
}
yymsp[-5].minor.yy41 = yylhsminor.yy41;
break;
- case 322: /* frame_bound_s ::= frame_bound */
- case 324: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==324);
+ case 324: /* frame_bound_s ::= frame_bound */
+ case 326: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==326);
{yylhsminor.yy595 = yymsp[0].minor.yy595;}
yymsp[0].minor.yy595 = yylhsminor.yy595;
break;
- case 323: /* frame_bound_s ::= UNBOUNDED PRECEDING */
- case 325: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==325);
- case 327: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==327);
+ case 325: /* frame_bound_s ::= UNBOUNDED PRECEDING */
+ case 327: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==327);
+ case 329: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==329);
{yylhsminor.yy595.eType = yymsp[-1].major; yylhsminor.yy595.pExpr = 0;}
yymsp[-1].minor.yy595 = yylhsminor.yy595;
break;
- case 326: /* frame_bound ::= expr PRECEDING|FOLLOWING */
+ case 328: /* frame_bound ::= expr PRECEDING|FOLLOWING */
{yylhsminor.yy595.eType = yymsp[0].major; yylhsminor.yy595.pExpr = yymsp[-1].minor.yy528;}
yymsp[-1].minor.yy595 = yylhsminor.yy595;
break;
- case 328: /* frame_exclude_opt ::= */
+ case 330: /* frame_exclude_opt ::= */
{yymsp[1].minor.yy516 = 0;}
break;
- case 329: /* frame_exclude_opt ::= EXCLUDE frame_exclude */
+ case 331: /* frame_exclude_opt ::= EXCLUDE frame_exclude */
{yymsp[-1].minor.yy516 = yymsp[0].minor.yy516;}
break;
- case 330: /* frame_exclude ::= NO OTHERS */
- case 331: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==331);
+ case 332: /* frame_exclude ::= NO OTHERS */
+ case 333: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==333);
{yymsp[-1].minor.yy516 = yymsp[-1].major; /*A-overwrites-X*/}
break;
- case 332: /* frame_exclude ::= GROUP|TIES */
+ case 334: /* frame_exclude ::= GROUP|TIES */
{yymsp[0].minor.yy516 = yymsp[0].major; /*A-overwrites-X*/}
break;
- case 333: /* window_clause ::= WINDOW windowdefn_list */
+ case 335: /* window_clause ::= WINDOW windowdefn_list */
{ yymsp[-1].minor.yy41 = yymsp[0].minor.yy41; }
break;
- case 334: /* filter_over ::= filter_clause over_clause */
+ case 336: /* filter_over ::= filter_clause over_clause */
{
if( yymsp[0].minor.yy41 ){
yymsp[0].minor.yy41->pFilter = yymsp[-1].minor.yy528;
@@ -166383,7 +170343,7 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-1].minor.yy41 = yylhsminor.yy41;
break;
- case 336: /* filter_over ::= filter_clause */
+ case 338: /* filter_over ::= filter_clause */
{
yylhsminor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window));
if( yylhsminor.yy41 ){
@@ -166395,13 +170355,13 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[0].minor.yy41 = yylhsminor.yy41;
break;
- case 337: /* over_clause ::= OVER LP window RP */
+ case 339: /* over_clause ::= OVER LP window RP */
{
yymsp[-3].minor.yy41 = yymsp[-1].minor.yy41;
assert( yymsp[-3].minor.yy41!=0 );
}
break;
- case 338: /* over_clause ::= OVER nm */
+ case 340: /* over_clause ::= OVER nm */
{
yymsp[-1].minor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window));
if( yymsp[-1].minor.yy41 ){
@@ -166409,72 +170369,73 @@ static YYACTIONTYPE yy_reduce(
}
}
break;
- case 339: /* filter_clause ::= FILTER LP WHERE expr RP */
+ case 341: /* filter_clause ::= FILTER LP WHERE expr RP */
{ yymsp[-4].minor.yy528 = yymsp[-1].minor.yy528; }
break;
default:
- /* (340) input ::= cmdlist */ yytestcase(yyruleno==340);
- /* (341) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==341);
- /* (342) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=342);
- /* (343) ecmd ::= SEMI */ yytestcase(yyruleno==343);
- /* (344) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==344);
- /* (345) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=345);
- /* (346) trans_opt ::= */ yytestcase(yyruleno==346);
- /* (347) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==347);
- /* (348) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==348);
- /* (349) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==349);
- /* (350) savepoint_opt ::= */ yytestcase(yyruleno==350);
- /* (351) cmd ::= create_table create_table_args */ yytestcase(yyruleno==351);
- /* (352) table_option_set ::= table_option (OPTIMIZED OUT) */ assert(yyruleno!=352);
- /* (353) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==353);
- /* (354) columnlist ::= columnname carglist */ yytestcase(yyruleno==354);
- /* (355) nm ::= ID|INDEXED */ yytestcase(yyruleno==355);
- /* (356) nm ::= STRING */ yytestcase(yyruleno==356);
- /* (357) nm ::= JOIN_KW */ yytestcase(yyruleno==357);
- /* (358) typetoken ::= typename */ yytestcase(yyruleno==358);
- /* (359) typename ::= ID|STRING */ yytestcase(yyruleno==359);
- /* (360) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=360);
- /* (361) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=361);
- /* (362) carglist ::= carglist ccons */ yytestcase(yyruleno==362);
- /* (363) carglist ::= */ yytestcase(yyruleno==363);
- /* (364) ccons ::= NULL onconf */ yytestcase(yyruleno==364);
- /* (365) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==365);
- /* (366) ccons ::= AS generated */ yytestcase(yyruleno==366);
- /* (367) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==367);
- /* (368) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==368);
- /* (369) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=369);
- /* (370) tconscomma ::= */ yytestcase(yyruleno==370);
- /* (371) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=371);
- /* (372) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=372);
- /* (373) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=373);
- /* (374) oneselect ::= values */ yytestcase(yyruleno==374);
- /* (375) sclp ::= selcollist COMMA */ yytestcase(yyruleno==375);
- /* (376) as ::= ID|STRING */ yytestcase(yyruleno==376);
- /* (377) returning ::= */ yytestcase(yyruleno==377);
- /* (378) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=378);
- /* (379) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==379);
- /* (380) exprlist ::= nexprlist */ yytestcase(yyruleno==380);
- /* (381) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=381);
- /* (382) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=382);
- /* (383) nmnum ::= ON */ yytestcase(yyruleno==383);
- /* (384) nmnum ::= DELETE */ yytestcase(yyruleno==384);
- /* (385) nmnum ::= DEFAULT */ yytestcase(yyruleno==385);
- /* (386) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==386);
- /* (387) foreach_clause ::= */ yytestcase(yyruleno==387);
- /* (388) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==388);
- /* (389) trnm ::= nm */ yytestcase(yyruleno==389);
- /* (390) tridxby ::= */ yytestcase(yyruleno==390);
- /* (391) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==391);
- /* (392) database_kw_opt ::= */ yytestcase(yyruleno==392);
- /* (393) kwcolumn_opt ::= */ yytestcase(yyruleno==393);
- /* (394) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==394);
- /* (395) vtabarglist ::= vtabarg */ yytestcase(yyruleno==395);
- /* (396) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==396);
- /* (397) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==397);
- /* (398) anylist ::= */ yytestcase(yyruleno==398);
- /* (399) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==399);
- /* (400) anylist ::= anylist ANY */ yytestcase(yyruleno==400);
- /* (401) with ::= */ yytestcase(yyruleno==401);
+ /* (342) input ::= cmdlist */ yytestcase(yyruleno==342);
+ /* (343) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==343);
+ /* (344) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=344);
+ /* (345) ecmd ::= SEMI */ yytestcase(yyruleno==345);
+ /* (346) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==346);
+ /* (347) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=347);
+ /* (348) trans_opt ::= */ yytestcase(yyruleno==348);
+ /* (349) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==349);
+ /* (350) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==350);
+ /* (351) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==351);
+ /* (352) savepoint_opt ::= */ yytestcase(yyruleno==352);
+ /* (353) cmd ::= create_table create_table_args */ yytestcase(yyruleno==353);
+ /* (354) table_option_set ::= table_option (OPTIMIZED OUT) */ assert(yyruleno!=354);
+ /* (355) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==355);
+ /* (356) columnlist ::= columnname carglist */ yytestcase(yyruleno==356);
+ /* (357) nm ::= ID|INDEXED */ yytestcase(yyruleno==357);
+ /* (358) nm ::= STRING */ yytestcase(yyruleno==358);
+ /* (359) nm ::= JOIN_KW */ yytestcase(yyruleno==359);
+ /* (360) typetoken ::= typename */ yytestcase(yyruleno==360);
+ /* (361) typename ::= ID|STRING */ yytestcase(yyruleno==361);
+ /* (362) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=362);
+ /* (363) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=363);
+ /* (364) carglist ::= carglist ccons */ yytestcase(yyruleno==364);
+ /* (365) carglist ::= */ yytestcase(yyruleno==365);
+ /* (366) ccons ::= NULL onconf */ yytestcase(yyruleno==366);
+ /* (367) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==367);
+ /* (368) ccons ::= AS generated */ yytestcase(yyruleno==368);
+ /* (369) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==369);
+ /* (370) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==370);
+ /* (371) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=371);
+ /* (372) tconscomma ::= */ yytestcase(yyruleno==372);
+ /* (373) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=373);
+ /* (374) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=374);
+ /* (375) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=375);
+ /* (376) oneselect ::= values */ yytestcase(yyruleno==376);
+ /* (377) sclp ::= selcollist COMMA */ yytestcase(yyruleno==377);
+ /* (378) as ::= ID|STRING */ yytestcase(yyruleno==378);
+ /* (379) indexed_opt ::= indexed_by (OPTIMIZED OUT) */ assert(yyruleno!=379);
+ /* (380) returning ::= */ yytestcase(yyruleno==380);
+ /* (381) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=381);
+ /* (382) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==382);
+ /* (383) exprlist ::= nexprlist */ yytestcase(yyruleno==383);
+ /* (384) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=384);
+ /* (385) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=385);
+ /* (386) nmnum ::= ON */ yytestcase(yyruleno==386);
+ /* (387) nmnum ::= DELETE */ yytestcase(yyruleno==387);
+ /* (388) nmnum ::= DEFAULT */ yytestcase(yyruleno==388);
+ /* (389) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==389);
+ /* (390) foreach_clause ::= */ yytestcase(yyruleno==390);
+ /* (391) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==391);
+ /* (392) trnm ::= nm */ yytestcase(yyruleno==392);
+ /* (393) tridxby ::= */ yytestcase(yyruleno==393);
+ /* (394) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==394);
+ /* (395) database_kw_opt ::= */ yytestcase(yyruleno==395);
+ /* (396) kwcolumn_opt ::= */ yytestcase(yyruleno==396);
+ /* (397) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==397);
+ /* (398) vtabarglist ::= vtabarg */ yytestcase(yyruleno==398);
+ /* (399) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==399);
+ /* (400) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==400);
+ /* (401) anylist ::= */ yytestcase(yyruleno==401);
+ /* (402) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==402);
+ /* (403) anylist ::= anylist ANY */ yytestcase(yyruleno==403);
+ /* (404) with ::= */ yytestcase(yyruleno==404);
break;
/********** End reduce actions ************************************************/
};
@@ -167921,6 +171882,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){
mxSqlLen -= n;
if( mxSqlLen<0 ){
pParse->rc = SQLITE_TOOBIG;
+ pParse->nErr++;
break;
}
#ifndef SQLITE_OMIT_WINDOWFUNC
@@ -168017,7 +171979,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){
if( pParse->pNewTrigger && !IN_RENAME_OBJECT ){
sqlite3DeleteTrigger(db, pParse->pNewTrigger);
}
- sqlite3DbFree(db, pParse->pVList);
+ if( pParse->pVList ) sqlite3DbNNFreeNN(db, pParse->pVList);
db->pParse = pParentParse;
assert( nErr==0 || pParse->rc!=SQLITE_OK );
return nErr;
@@ -169373,18 +173335,19 @@ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){
db->lookaside.bMalloced = pBuf==0 ?1:0;
db->lookaside.nSlot = nBig+nSm;
}else{
- db->lookaside.pStart = db;
+ db->lookaside.pStart = 0;
#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE
db->lookaside.pSmallInit = 0;
db->lookaside.pSmallFree = 0;
- db->lookaside.pMiddle = db;
+ db->lookaside.pMiddle = 0;
#endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */
- db->lookaside.pEnd = db;
+ db->lookaside.pEnd = 0;
db->lookaside.bDisable = 1;
db->lookaside.sz = 0;
db->lookaside.bMalloced = 0;
db->lookaside.nSlot = 0;
}
+ db->lookaside.pTrueEnd = db->lookaside.pEnd;
assert( sqlite3LookasideUsed(db,0)==0 );
#endif /* SQLITE_OMIT_LOOKASIDE */
return SQLITE_OK;
@@ -169463,6 +173426,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3 *db){
SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){
va_list ap;
int rc;
+ sqlite3_mutex_enter(db->mutex);
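  /* Editorial note: holding db->mutex for the duration of this routine
  ** appears intended to serialize sqlite3_db_config() against concurrent use
  ** of the same connection, matching the convention of the other interfaces
  ** that take the connection mutex before touching the sqlite3 object. */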
va_start(ap, op);
switch( op ){
case SQLITE_DBCONFIG_MAINDBNAME: {
@@ -169528,6 +173492,7 @@ SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){
}
}
va_end(ap);
+ sqlite3_mutex_leave(db->mutex);
return rc;
}
@@ -170664,7 +174629,7 @@ SQLITE_API int sqlite3_overload_function(
rc = sqlite3FindFunction(db, zName, nArg, SQLITE_UTF8, 0)!=0;
sqlite3_mutex_leave(db->mutex);
if( rc ) return SQLITE_OK;
- zCopy = sqlite3_mprintf(zName);
+ zCopy = sqlite3_mprintf("%s", zName);
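  /* Editorial note: formatting zName through "%s", instead of passing it as
  ** the format string itself, keeps any '%' characters in the function name
  ** from being interpreted as printf-style conversions. */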
if( zCopy==0 ) return SQLITE_NOMEM;
return sqlite3_create_function_v2(db, zName, nArg, SQLITE_UTF8,
zCopy, sqlite3InvalidFunction, 0, 0, sqlite3_free);
@@ -171898,6 +175863,19 @@ static int openDatabase(
goto opendb_out;
}
+#if SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL)
+ /* Process magic filenames ":localStorage:" and ":sessionStorage:" */
+ if( zFilename && zFilename[0]==':' ){
+ if( strcmp(zFilename, ":localStorage:")==0 ){
+ zFilename = "file:local?vfs=kvvfs";
+ flags |= SQLITE_OPEN_URI;
+ }else if( strcmp(zFilename, ":sessionStorage:")==0 ){
+ zFilename = "file:session?vfs=kvvfs";
+ flags |= SQLITE_OPEN_URI;
+ }
+ }
+#endif /* SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL) */
+
/* Parse the filename/URI argument
**
** Only allow sensible combinations of bits in the flags argument.
@@ -171928,6 +175906,12 @@ static int openDatabase(
sqlite3_free(zErrMsg);
goto opendb_out;
}
+ assert( db->pVfs!=0 );
+#if SQLITE_OS_KV || defined(SQLITE_OS_KV_OPTIONAL)
+ if( sqlite3_stricmp(db->pVfs->zName, "kvvfs")==0 ){
+ db->temp_store = 2;
+ }
+#endif
/* Open the backend database driver */
rc = sqlite3BtreeOpen(db->pVfs, zOpen, db, &db->aDb[0].pBt, 0,
@@ -172477,6 +176461,9 @@ SQLITE_API int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, vo
sqlite3BtreeSetPageSize(pBtree, 0, iNew, 0);
}
rc = SQLITE_OK;
+ }else if( op==SQLITE_FCNTL_RESET_CACHE ){
+ sqlite3BtreeClearCache(pBtree);
+ rc = SQLITE_OK;
}else{
int nSave = db->busyHandler.nBusy;
rc = sqlite3OsFileControl(fd, op, pArg);
@@ -172646,6 +176633,28 @@ SQLITE_API int sqlite3_test_control(int op, ...){
volatile int x = 0;
assert( /*side-effects-ok*/ (x = va_arg(ap,int))!=0 );
rc = x;
+#if defined(SQLITE_DEBUG)
+ /* Invoke these debugging routines so that the compiler does not
+ ** issue "defined but not used" warnings. */
+ if( x==9999 ){
+ sqlite3ShowExpr(0);
+ sqlite3ShowExpr(0);
+ sqlite3ShowExprList(0);
+ sqlite3ShowIdList(0);
+ sqlite3ShowSrcList(0);
+ sqlite3ShowWith(0);
+ sqlite3ShowUpsert(0);
+ sqlite3ShowTriggerStep(0);
+ sqlite3ShowTriggerStepList(0);
+ sqlite3ShowTrigger(0);
+ sqlite3ShowTriggerList(0);
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ sqlite3ShowWindow(0);
+ sqlite3ShowWinFunc(0);
+#endif
+ sqlite3ShowSelect(0);
+ }
+#endif
break;
}
@@ -172907,8 +176916,8 @@ SQLITE_API int sqlite3_test_control(int op, ...){
**
** "ptr" is a pointer to a u32.
**
- ** op==0 Store the current sqlite3SelectTrace in *ptr
- ** op==1 Set sqlite3SelectTrace to the value *ptr
+ ** op==0 Store the current sqlite3TreeTrace in *ptr
+ ** op==1 Set sqlite3TreeTrace to the value *ptr
** op==3 Store the current sqlite3WhereTrace in *ptr
** op==3 Set sqlite3WhereTrace to the value *ptr
*/
@@ -172916,10 +176925,10 @@ SQLITE_API int sqlite3_test_control(int op, ...){
int opTrace = va_arg(ap, int);
u32 *ptr = va_arg(ap, u32*);
switch( opTrace ){
- case 0: *ptr = sqlite3SelectTrace; break;
- case 1: sqlite3SelectTrace = *ptr; break;
- case 2: *ptr = sqlite3WhereTrace; break;
- case 3: sqlite3WhereTrace = *ptr; break;
+ case 0: *ptr = sqlite3TreeTrace; break;
+ case 1: sqlite3TreeTrace = *ptr; break;
+ case 2: *ptr = sqlite3WhereTrace; break;
+ case 3: sqlite3WhereTrace = *ptr; break;
}
break;
}
@@ -172936,10 +176945,12 @@ SQLITE_API int sqlite3_test_control(int op, ...){
case SQLITE_TESTCTRL_LOGEST: {
double rIn = va_arg(ap, double);
LogEst rLogEst = sqlite3LogEstFromDouble(rIn);
- u64 iInt = sqlite3LogEstToInt(rLogEst);
- va_arg(ap, int*)[0] = rLogEst;
- va_arg(ap, u64*)[0] = iInt;
- va_arg(ap, int*)[0] = sqlite3LogEst(iInt);
+ int *pI1 = va_arg(ap,int*);
+ u64 *pU64 = va_arg(ap,u64*);
+ int *pI2 = va_arg(ap,int*);
+ *pI1 = rLogEst;
+ *pU64 = sqlite3LogEstToInt(rLogEst);
+ *pI2 = sqlite3LogEst(*pU64);
break;
}
@@ -173013,7 +177024,7 @@ static char *appendText(char *p, const char *z){
** Memory layout must be compatible with that generated by the pager
** and expected by sqlite3_uri_parameter() and databaseName().
*/
-SQLITE_API char *sqlite3_create_filename(
+SQLITE_API const char *sqlite3_create_filename(
const char *zDatabase,
const char *zJournal,
const char *zWal,
@@ -173049,10 +177060,10 @@ SQLITE_API char *sqlite3_create_filename(
** error to call this routine with any parameter other than a pointer
** previously obtained from sqlite3_create_filename() or a NULL pointer.
*/
-SQLITE_API void sqlite3_free_filename(char *p){
+SQLITE_API void sqlite3_free_filename(const char *p){
if( p==0 ) return;
- p = (char*)databaseName(p);
- sqlite3_free(p - 4);
+ p = databaseName(p);
+ sqlite3_free((char*)p - 4);
}
@@ -173154,6 +177165,24 @@ SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3 *db, const char *zDbName){
return iDb<0 ? 0 : db->aDb[iDb].pBt;
}
+/*
+** Return the name of the N-th database schema. Return NULL if N is out
+** of range.
+*/
+SQLITE_API const char *sqlite3_db_name(sqlite3 *db, int N){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ if( N<0 || N>=db->nDb ){
+ return 0;
+ }else{
+ return db->aDb[N].zDbSName;
+ }
+}
+
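/* Editorial note: a minimal usage sketch for sqlite3_db_name() above, kept
** under "#if 0" because it is illustration only.  It relies on the behaviour
** shown in that function (out-of-range indexes return NULL) plus the usual
** schema layout (index 0 is "main", index 1 is "temp", higher indexes are
** ATTACH-ed databases); the helper name printSchemaNames is an assumption. */
#if 0
#include <stdio.h>
static void printSchemaNames(sqlite3 *db){
  const char *zName;
  int i;
  for(i=0; (zName = sqlite3_db_name(db, i))!=0; i++){
    printf("schema %d: %s\n", i, zName);
  }
}
#endif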
/*
** Return the filename of the database associated with a database
** connection.
@@ -173285,8 +177314,8 @@ SQLITE_API int sqlite3_snapshot_open(
*/
SQLITE_API int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb){
int rc = SQLITE_ERROR;
- int iDb;
#ifndef SQLITE_OMIT_WAL
+ int iDb;
#ifdef SQLITE_ENABLE_API_ARMOR
if( !sqlite3SafetyCheckOk(db) ){
@@ -174841,7 +178870,7 @@ struct Fts3MultiSegReader {
int nAdvance; /* How many seg-readers to advance */
Fts3SegFilter *pFilter; /* Pointer to filter object */
char *aBuffer; /* Buffer to merge doclists in */
- int nBuffer; /* Allocated size of aBuffer[] in bytes */
+ i64 nBuffer; /* Allocated size of aBuffer[] in bytes */
int iColFilter; /* If >=0, filter for this column */
int bRestart;
@@ -177537,7 +181566,7 @@ static int fts3TermSelectMerge(
**
** Similar padding is added in the fts3DoclistOrMerge() function.
*/
- pTS->aaOutput[0] = sqlite3_malloc(nDoclist + FTS3_VARINT_MAX + 1);
+ pTS->aaOutput[0] = sqlite3_malloc64((i64)nDoclist + FTS3_VARINT_MAX + 1);
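    /* Editorial note: sqlite3_malloc64() with an i64 size, here and in the
    ** similar replacements below, keeps the size computation from wrapping a
    ** 32-bit int when nDoclist is very large. */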
pTS->anOutput[0] = nDoclist;
if( pTS->aaOutput[0] ){
memcpy(pTS->aaOutput[0], aDoclist, nDoclist);
@@ -178957,8 +182986,7 @@ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){
char *aPoslist = 0; /* Position list for deferred tokens */
int nPoslist = 0; /* Number of bytes in aPoslist */
int iPrev = -1; /* Token number of previous deferred token */
-
- assert( pPhrase->doclist.bFreeList==0 );
+ char *aFree = (pPhrase->doclist.bFreeList ? pPhrase->doclist.pList : 0);
  for(iToken=0; iToken<pPhrase->nToken; iToken++){
Fts3PhraseToken *pToken = &pPhrase->aToken[iToken];
@@ -178972,6 +183000,7 @@ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){
if( pList==0 ){
sqlite3_free(aPoslist);
+ sqlite3_free(aFree);
pPhrase->doclist.pList = 0;
pPhrase->doclist.nList = 0;
return SQLITE_OK;
@@ -178992,6 +183021,7 @@ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){
nPoslist = (int)(aOut - aPoslist);
if( nPoslist==0 ){
sqlite3_free(aPoslist);
+ sqlite3_free(aFree);
pPhrase->doclist.pList = 0;
pPhrase->doclist.nList = 0;
return SQLITE_OK;
@@ -179024,13 +183054,14 @@ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){
nDistance = iPrev - nMaxUndeferred;
}
- aOut = (char *)sqlite3_malloc(nPoslist+8);
+ aOut = (char *)sqlite3Fts3MallocZero(nPoslist+FTS3_BUFFER_PADDING);
if( !aOut ){
sqlite3_free(aPoslist);
return SQLITE_NOMEM;
}
pPhrase->doclist.pList = aOut;
+ assert( p1 && p2 );
if( fts3PoslistPhraseMerge(&aOut, nDistance, 0, 1, &p1, &p2) ){
pPhrase->doclist.bFreeList = 1;
pPhrase->doclist.nList = (int)(aOut - pPhrase->doclist.pList);
@@ -179043,6 +183074,7 @@ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){
}
}
+ if( pPhrase->doclist.pList!=aFree ) sqlite3_free(aFree);
return SQLITE_OK;
}
#endif /* SQLITE_DISABLE_FTS4_DEFERRED */
@@ -179391,7 +183423,7 @@ static int fts3EvalIncrPhraseNext(
if( bEof==0 ){
int nList = 0;
int nByte = a[p->nToken-1].nList;
- char *aDoclist = sqlite3_malloc(nByte+FTS3_BUFFER_PADDING);
+ char *aDoclist = sqlite3_malloc64((i64)nByte+FTS3_BUFFER_PADDING);
if( !aDoclist ) return SQLITE_NOMEM;
memcpy(aDoclist, a[p->nToken-1].pList, nByte+1);
memset(&aDoclist[nByte], 0, FTS3_BUFFER_PADDING);
@@ -180217,11 +184249,10 @@ static int fts3EvalTestExpr(
default: {
#ifndef SQLITE_DISABLE_FTS4_DEFERRED
- if( pCsr->pDeferred
- && (pExpr->iDocid==pCsr->iPrevId || pExpr->bDeferred)
- ){
+ if( pCsr->pDeferred && (pExpr->bDeferred || (
+ pExpr->iDocid==pCsr->iPrevId && pExpr->pPhrase->doclist.pList
+ ))){
Fts3Phrase *pPhrase = pExpr->pPhrase;
- assert( pExpr->bDeferred || pPhrase->doclist.bFreeList==0 );
if( pExpr->bDeferred ){
fts3EvalInvalidatePoslist(pPhrase);
}
@@ -183628,7 +187659,7 @@ static int porterNext(
if( n>c->nAllocated ){
char *pNew;
c->nAllocated = n+20;
- pNew = sqlite3_realloc(c->zToken, c->nAllocated);
+ pNew = sqlite3_realloc64(c->zToken, c->nAllocated);
if( !pNew ) return SQLITE_NOMEM;
c->zToken = pNew;
}
@@ -184380,7 +188411,7 @@ static int simpleNext(
if( n>c->nTokenAllocated ){
char *pNew;
c->nTokenAllocated = n+20;
- pNew = sqlite3_realloc(c->pToken, c->nTokenAllocated);
+ pNew = sqlite3_realloc64(c->pToken, c->nTokenAllocated);
if( !pNew ) return SQLITE_NOMEM;
c->pToken = pNew;
}
@@ -185542,7 +189573,7 @@ static int fts3PendingListAppendVarint(
/* Allocate or grow the PendingList as required. */
if( !p ){
- p = sqlite3_malloc(sizeof(*p) + 100);
+ p = sqlite3_malloc64(sizeof(*p) + 100);
if( !p ){
return SQLITE_NOMEM;
}
@@ -185551,14 +189582,14 @@ static int fts3PendingListAppendVarint(
p->nData = 0;
}
else if( p->nData+FTS3_VARINT_MAX+1>p->nSpace ){
- int nNew = p->nSpace * 2;
- p = sqlite3_realloc(p, sizeof(*p) + nNew);
+ i64 nNew = p->nSpace * 2;
+ p = sqlite3_realloc64(p, sizeof(*p) + nNew);
if( !p ){
sqlite3_free(*pp);
*pp = 0;
return SQLITE_NOMEM;
}
- p->nSpace = nNew;
+ p->nSpace = (int)nNew;
p->aData = (char *)&p[1];
}
@@ -186115,7 +190146,7 @@ SQLITE_PRIVATE int sqlite3Fts3ReadBlock(
int nByte = sqlite3_blob_bytes(p->pSegments);
*pnBlob = nByte;
if( paBlob ){
- char *aByte = sqlite3_malloc(nByte + FTS3_NODE_PADDING);
+ char *aByte = sqlite3_malloc64((i64)nByte + FTS3_NODE_PADDING);
if( !aByte ){
rc = SQLITE_NOMEM;
}else{
@@ -186232,7 +190263,7 @@ static int fts3SegReaderNext(
int nTerm = fts3HashKeysize(pElem);
if( (nTerm+1)>pReader->nTermAlloc ){
sqlite3_free(pReader->zTerm);
- pReader->zTerm = (char*)sqlite3_malloc((nTerm+1)*2);
+ pReader->zTerm = (char*)sqlite3_malloc64(((i64)nTerm+1)*2);
if( !pReader->zTerm ) return SQLITE_NOMEM;
pReader->nTermAlloc = (nTerm+1)*2;
}
@@ -186240,7 +190271,7 @@ static int fts3SegReaderNext(
pReader->zTerm[nTerm] = '\0';
pReader->nTerm = nTerm;
- aCopy = (char*)sqlite3_malloc(nCopy);
+ aCopy = (char*)sqlite3_malloc64(nCopy);
if( !aCopy ) return SQLITE_NOMEM;
memcpy(aCopy, pList->aData, nCopy);
pReader->nNode = pReader->nDoclist = nCopy;
@@ -186527,7 +190558,7 @@ SQLITE_PRIVATE int sqlite3Fts3SegReaderNew(
nExtra = nRoot + FTS3_NODE_PADDING;
}
- pReader = (Fts3SegReader *)sqlite3_malloc(sizeof(Fts3SegReader) + nExtra);
+ pReader = (Fts3SegReader *)sqlite3_malloc64(sizeof(Fts3SegReader) + nExtra);
if( !pReader ){
return SQLITE_NOMEM;
}
@@ -186619,7 +190650,7 @@ SQLITE_PRIVATE int sqlite3Fts3SegReaderPending(
if( nElem==nAlloc ){
Fts3HashElem **aElem2;
nAlloc += 16;
- aElem2 = (Fts3HashElem **)sqlite3_realloc(
+ aElem2 = (Fts3HashElem **)sqlite3_realloc64(
aElem, nAlloc*sizeof(Fts3HashElem *)
);
if( !aElem2 ){
@@ -186953,7 +190984,7 @@ static int fts3NodeAddTerm(
** this is not expected to be a serious problem.
*/
assert( pTree->aData==(char *)&pTree[1] );
- pTree->aData = (char *)sqlite3_malloc(nReq);
+ pTree->aData = (char *)sqlite3_malloc64(nReq);
if( !pTree->aData ){
return SQLITE_NOMEM;
}
@@ -186971,7 +191002,7 @@ static int fts3NodeAddTerm(
if( isCopyTerm ){
    if( pTree->nMalloc<nTerm ){
-      char *zNew = sqlite3_realloc(pTree->zMalloc, nTerm*2);
+ char *zNew = sqlite3_realloc64(pTree->zMalloc, (i64)nTerm*2);
if( !zNew ){
return SQLITE_NOMEM;
}
@@ -186997,7 +191028,7 @@ static int fts3NodeAddTerm(
** now. Instead, the term is inserted into the parent of pTree. If pTree
** has no parent, one is created here.
*/
- pNew = (SegmentNode *)sqlite3_malloc(sizeof(SegmentNode) + p->nNodeSize);
+ pNew = (SegmentNode *)sqlite3_malloc64(sizeof(SegmentNode) + p->nNodeSize);
if( !pNew ){
return SQLITE_NOMEM;
}
@@ -187135,7 +191166,7 @@ static int fts3SegWriterAdd(
){
int nPrefix; /* Size of term prefix in bytes */
int nSuffix; /* Size of term suffix in bytes */
- int nReq; /* Number of bytes required on leaf page */
+ i64 nReq; /* Number of bytes required on leaf page */
int nData;
SegmentWriter *pWriter = *ppWriter;
@@ -187144,13 +191175,13 @@ static int fts3SegWriterAdd(
sqlite3_stmt *pStmt;
/* Allocate the SegmentWriter structure */
- pWriter = (SegmentWriter *)sqlite3_malloc(sizeof(SegmentWriter));
+ pWriter = (SegmentWriter *)sqlite3_malloc64(sizeof(SegmentWriter));
if( !pWriter ) return SQLITE_NOMEM;
memset(pWriter, 0, sizeof(SegmentWriter));
*ppWriter = pWriter;
/* Allocate a buffer in which to accumulate data */
- pWriter->aData = (char *)sqlite3_malloc(p->nNodeSize);
+ pWriter->aData = (char *)sqlite3_malloc64(p->nNodeSize);
if( !pWriter->aData ) return SQLITE_NOMEM;
pWriter->nSize = p->nNodeSize;
@@ -187225,7 +191256,7 @@ static int fts3SegWriterAdd(
** the buffer to make it large enough.
*/
if( nReq>pWriter->nSize ){
- char *aNew = sqlite3_realloc(pWriter->aData, nReq);
+ char *aNew = sqlite3_realloc64(pWriter->aData, nReq);
if( !aNew ) return SQLITE_NOMEM;
pWriter->aData = aNew;
pWriter->nSize = nReq;
@@ -187250,7 +191281,7 @@ static int fts3SegWriterAdd(
*/
if( isCopyTerm ){
if( nTerm>pWriter->nMalloc ){
- char *zNew = sqlite3_realloc(pWriter->zMalloc, nTerm*2);
+ char *zNew = sqlite3_realloc64(pWriter->zMalloc, (i64)nTerm*2);
if( !zNew ){
return SQLITE_NOMEM;
}
@@ -187558,12 +191589,12 @@ static void fts3ColumnFilter(
static int fts3MsrBufferData(
Fts3MultiSegReader *pMsr, /* Multi-segment-reader handle */
char *pList,
- int nList
+ i64 nList
){
if( nList>pMsr->nBuffer ){
char *pNew;
pMsr->nBuffer = nList*2;
- pNew = (char *)sqlite3_realloc(pMsr->aBuffer, pMsr->nBuffer);
+ pNew = (char *)sqlite3_realloc64(pMsr->aBuffer, pMsr->nBuffer);
if( !pNew ) return SQLITE_NOMEM;
pMsr->aBuffer = pNew;
}
@@ -187619,7 +191650,7 @@ SQLITE_PRIVATE int sqlite3Fts3MsrIncrNext(
fts3SegReaderSort(pMsr->apSegment, nMerge, j, xCmp);
if( nList>0 && fts3SegReaderIsPending(apSegment[0]) ){
- rc = fts3MsrBufferData(pMsr, pList, nList+1);
+ rc = fts3MsrBufferData(pMsr, pList, (i64)nList+1);
if( rc!=SQLITE_OK ) return rc;
assert( (pMsr->aBuffer[nList] & 0xFE)==0x00 );
pList = pMsr->aBuffer;
@@ -187756,11 +191787,11 @@ SQLITE_PRIVATE int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr){
return SQLITE_OK;
}
-static int fts3GrowSegReaderBuffer(Fts3MultiSegReader *pCsr, int nReq){
+static int fts3GrowSegReaderBuffer(Fts3MultiSegReader *pCsr, i64 nReq){
if( nReq>pCsr->nBuffer ){
char *aNew;
pCsr->nBuffer = nReq*2;
- aNew = sqlite3_realloc(pCsr->aBuffer, pCsr->nBuffer);
+ aNew = sqlite3_realloc64(pCsr->aBuffer, pCsr->nBuffer);
if( !aNew ){
return SQLITE_NOMEM;
}
@@ -187851,7 +191882,8 @@ SQLITE_PRIVATE int sqlite3Fts3SegReaderStep(
){
pCsr->nDoclist = apSegment[0]->nDoclist;
if( fts3SegReaderIsPending(apSegment[0]) ){
- rc = fts3MsrBufferData(pCsr, apSegment[0]->aDoclist, pCsr->nDoclist);
+ rc = fts3MsrBufferData(pCsr, apSegment[0]->aDoclist,
+ (i64)pCsr->nDoclist);
pCsr->aDoclist = pCsr->aBuffer;
}else{
pCsr->aDoclist = apSegment[0]->aDoclist;
@@ -187904,7 +191936,8 @@ SQLITE_PRIVATE int sqlite3Fts3SegReaderStep(
nByte = sqlite3Fts3VarintLen(iDelta) + (isRequirePos?nList+1:0);
- rc = fts3GrowSegReaderBuffer(pCsr, nByte+nDoclist+FTS3_NODE_PADDING);
+ rc = fts3GrowSegReaderBuffer(pCsr,
+ (i64)nByte+nDoclist+FTS3_NODE_PADDING);
if( rc ) return rc;
if( isFirst ){
@@ -187930,7 +191963,7 @@ SQLITE_PRIVATE int sqlite3Fts3SegReaderStep(
fts3SegReaderSort(apSegment, nMerge, j, xCmp);
}
if( nDoclist>0 ){
- rc = fts3GrowSegReaderBuffer(pCsr, nDoclist+FTS3_NODE_PADDING);
+ rc = fts3GrowSegReaderBuffer(pCsr, (i64)nDoclist+FTS3_NODE_PADDING);
if( rc ) return rc;
memset(&pCsr->aBuffer[nDoclist], 0, FTS3_NODE_PADDING);
pCsr->aDoclist = pCsr->aBuffer;
@@ -188643,7 +192676,7 @@ struct NodeReader {
static void blobGrowBuffer(Blob *pBlob, int nMin, int *pRc){
if( *pRc==SQLITE_OK && nMin>pBlob->nAlloc ){
int nAlloc = nMin;
- char *a = (char *)sqlite3_realloc(pBlob->a, nAlloc);
+ char *a = (char *)sqlite3_realloc64(pBlob->a, nAlloc);
if( a ){
pBlob->nAlloc = nAlloc;
pBlob->a = a;
@@ -188792,6 +192825,8 @@ static int fts3IncrmergePush(
pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nPrefix);
}
pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nSuffix);
+ assert( nPrefix+nSuffix<=nTerm );
+ assert( nPrefix>=0 );
memcpy(&pBlk->a[pBlk->n], &zTerm[nPrefix], nSuffix);
pBlk->n += nSuffix;
@@ -188914,6 +192949,7 @@ static int fts3IncrmergeAppend(
pLeaf = &pWriter->aNodeWriter[0];
nPrefix = fts3PrefixCompress(pLeaf->key.a, pLeaf->key.n, zTerm, nTerm);
nSuffix = nTerm - nPrefix;
+ if(nSuffix<=0 ) return FTS_CORRUPT_VTAB;
nSpace = sqlite3Fts3VarintLen(nPrefix);
nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix;
@@ -189437,7 +193473,7 @@ static int fts3RepackSegdirLevel(
if( nIdx>=nAlloc ){
int *aNew;
nAlloc += 16;
- aNew = sqlite3_realloc(aIdx, nAlloc*sizeof(int));
+ aNew = sqlite3_realloc64(aIdx, nAlloc*sizeof(int));
if( !aNew ){
rc = SQLITE_NOMEM;
break;
@@ -189811,7 +193847,7 @@ SQLITE_PRIVATE int sqlite3Fts3Incrmerge(Fts3Table *p, int nMerge, int nMin){
/* Allocate space for the cursor, filter and writer objects */
const int nAlloc = sizeof(*pCsr) + sizeof(*pFilter) + sizeof(*pWriter);
- pWriter = (IncrmergeWriter *)sqlite3_malloc(nAlloc);
+ pWriter = (IncrmergeWriter *)sqlite3_malloc64(nAlloc);
if( !pWriter ) return SQLITE_NOMEM;
pFilter = (Fts3SegFilter *)&pWriter[1];
pCsr = (Fts3MultiSegReader *)&pFilter[1];
@@ -190447,7 +194483,7 @@ SQLITE_PRIVATE int sqlite3Fts3DeferredTokenList(
return SQLITE_OK;
}
- pRet = (char *)sqlite3_malloc(p->pList->nData);
+ pRet = (char *)sqlite3_malloc64(p->pList->nData);
if( !pRet ) return SQLITE_NOMEM;
nSkip = sqlite3Fts3GetVarint(p->pList->aData, &dummy);
@@ -190467,7 +194503,7 @@ SQLITE_PRIVATE int sqlite3Fts3DeferToken(
int iCol /* Column that token must appear in (or -1) */
){
Fts3DeferredToken *pDeferred;
- pDeferred = sqlite3_malloc(sizeof(*pDeferred));
+ pDeferred = sqlite3_malloc64(sizeof(*pDeferred));
if( !pDeferred ){
return SQLITE_NOMEM;
}
@@ -194871,7 +198907,7 @@ static JsonNode *jsonMergePatch(
if( pPatch->eType!=JSON_OBJECT ){
return pPatch;
  }
-  assert( iTarget>=0 && iTarget<pParse->nNode );
+  assert( iTarget<pParse->nNode );
pTarget = &pParse->aNode[iTarget];
assert( (pPatch->jnFlags & JNODE_APPEND)==0 );
if( pTarget->eType!=JSON_OBJECT ){
@@ -199182,7 +203218,7 @@ static int rtreeUpdate(
rtreeReference(pRtree);
assert(nData>=1);
- cell.iRowid = 0; /* Used only to suppress a compiler warning */
+ memset(&cell, 0, sizeof(cell));
/* Constraint handling. A write operation on an r-tree table may return
** SQLITE_CONSTRAINT for two reasons:
@@ -202046,7 +206082,7 @@ static int geopolyUpdate(
sqlite3_free(p);
nChange = 1;
    }
-    for(jj=1; jj<pRtree->nAux; jj++){
+ for(jj=1; jjdbRbu, "main", SQLITE_FCNTL_RBUCNT, (void*)p);
if( p->zState==0 ){
const char *zFile = sqlite3_db_filename(p->dbRbu, "main");
- p->zState = rbuMPrintf(p, "file://%s-vacuum?modeof=%s", zFile, zFile);
+ p->zState = rbuMPrintf(p, "file:///%s-vacuum?modeof=%s", zFile, zFile);
}
}
@@ -207022,32 +211089,7 @@ static void rbuMoveOalFile(sqlite3rbu *p){
}
if( p->rc==SQLITE_OK ){
-#if defined(_WIN32_WCE)
- {
- LPWSTR zWideOal;
- LPWSTR zWideWal;
-
- zWideOal = rbuWinUtf8ToUnicode(zOal);
- if( zWideOal ){
- zWideWal = rbuWinUtf8ToUnicode(zWal);
- if( zWideWal ){
- if( MoveFileW(zWideOal, zWideWal) ){
- p->rc = SQLITE_OK;
- }else{
- p->rc = SQLITE_IOERR;
- }
- sqlite3_free(zWideWal);
- }else{
- p->rc = SQLITE_IOERR_NOMEM;
- }
- sqlite3_free(zWideOal);
- }else{
- p->rc = SQLITE_IOERR_NOMEM;
- }
- }
-#else
- p->rc = rename(zOal, zWal) ? SQLITE_IOERR : SQLITE_OK;
-#endif
+ p->rc = p->xRename(p->pRenameArg, zOal, zWal);
}
if( p->rc!=SQLITE_OK
@@ -207786,6 +211828,7 @@ static sqlite3rbu *openRbuHandle(
/* Create the custom VFS. */
memset(p, 0, sizeof(sqlite3rbu));
+ sqlite3rbu_rename_handler(p, 0, 0);
rbuCreateVfs(p);
/* Open the target, RBU and state databases */
@@ -208177,6 +212220,54 @@ SQLITE_API int sqlite3rbu_savestate(sqlite3rbu *p){
return rc;
}
+/*
+** Default xRename callback for RBU.
+*/
+static int xDefaultRename(void *pArg, const char *zOld, const char *zNew){
+ int rc = SQLITE_OK;
+#if defined(_WIN32_WCE)
+ {
+ LPWSTR zWideOld;
+ LPWSTR zWideNew;
+
+ zWideOld = rbuWinUtf8ToUnicode(zOld);
+ if( zWideOld ){
+ zWideNew = rbuWinUtf8ToUnicode(zNew);
+ if( zWideNew ){
+ if( MoveFileW(zWideOld, zWideNew) ){
+ rc = SQLITE_OK;
+ }else{
+ rc = SQLITE_IOERR;
+ }
+ sqlite3_free(zWideNew);
+ }else{
+ rc = SQLITE_IOERR_NOMEM;
+ }
+ sqlite3_free(zWideOld);
+ }else{
+ rc = SQLITE_IOERR_NOMEM;
+ }
+ }
+#else
+ rc = rename(zOld, zNew) ? SQLITE_IOERR : SQLITE_OK;
+#endif
+ return rc;
+}
+
+SQLITE_API void sqlite3rbu_rename_handler(
+ sqlite3rbu *pRbu,
+ void *pArg,
+ int (*xRename)(void *pArg, const char *zOld, const char *zNew)
+){
+ if( xRename ){
+ pRbu->xRename = xRename;
+ pRbu->pRenameArg = pArg;
+ }else{
+ pRbu->xRename = xDefaultRename;
+ pRbu->pRenameArg = 0;
+ }
+}
+
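/*
** Editorial note: a hedged sketch of registering a custom rename callback via
** the sqlite3rbu_rename_handler() API defined above.  The callback name
** xLoggingRename and the FILE* log argument are assumptions of this sketch;
** like xDefaultRename it ultimately relies on rename(2).
*/
#if 0
#include <stdio.h>
static int xLoggingRename(void *pArg, const char *zOld, const char *zNew){
  fprintf((FILE*)pArg, "rbu: rename %s -> %s\n", zOld, zNew);
  return rename(zOld, zNew) ? SQLITE_IOERR : SQLITE_OK;
}
/* After sqlite3rbu_open() or sqlite3rbu_vacuum():
**   sqlite3rbu_rename_handler(pRbu, (void*)stderr, xLoggingRename);
** Passing a NULL callback restores the default handler. */
#endif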
/**************************************************************************
** Beginning of RBU VFS shim methods. The VFS shim modifies the behaviour
** of a standard VFS in the following ways:
@@ -209285,7 +213376,7 @@ struct StatTable {
*/
static int statConnect(
sqlite3 *db,
- void *pAux __maybe_unused,
+ void *pAux __maybe_unused,
int argc, const char *const*argv,
sqlite3_vtab **ppVtab,
char **pzErr
@@ -210189,7 +214280,7 @@ static int dbpageBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
){
pIdxInfo->orderByConsumed = 1;
}
- sqlite3VtabWriteAll(pIdxInfo);
+ sqlite3VtabUsesAllSchemas(pIdxInfo);
return SQLITE_OK;
}
@@ -210307,12 +214398,18 @@ static int dbpageColumn(
}
case 1: { /* data */
DbPage *pDbPage = 0;
- rc = sqlite3PagerGet(pCsr->pPager, pCsr->pgno, (DbPage**)&pDbPage, 0);
- if( rc==SQLITE_OK ){
- sqlite3_result_blob(ctx, sqlite3PagerGetData(pDbPage), pCsr->szPage,
- SQLITE_TRANSIENT);
+ if( pCsr->pgno==((PENDING_BYTE/pCsr->szPage)+1) ){
+ /* The pending byte page. Assume it is zeroed out. Attempting to
+      ** request this page from the pager is an SQLITE_CORRUPT error. */
+ sqlite3_result_zeroblob(ctx, pCsr->szPage);
+ }else{
+ rc = sqlite3PagerGet(pCsr->pPager, pCsr->pgno, (DbPage**)&pDbPage, 0);
+ if( rc==SQLITE_OK ){
+ sqlite3_result_blob(ctx, sqlite3PagerGetData(pDbPage), pCsr->szPage,
+ SQLITE_TRANSIENT);
+ }
+ sqlite3PagerUnref(pDbPage);
}
- sqlite3PagerUnref(pDbPage);
break;
}
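      /* Editorial note: PENDING_BYTE defaults to 0x40000000 (1,073,741,824),
      ** so with the default 4096-byte page size the locking page handled
      ** above is (1073741824/4096)+1 == 262145.  That page never carries
      ** database content, which is why it is reported as a zero blob rather
      ** than fetched from the pager. */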
default: { /* schema */
@@ -210321,7 +214418,7 @@ static int dbpageColumn(
break;
}
}
- return SQLITE_OK;
+ return rc;
}
static int dbpageRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){
@@ -210367,7 +214464,7 @@ static int dbpageUpdate(
goto update_fail;
}
pBt = pTab->db->aDb[iDb].pBt;
- if( pgno<1 || pBt==0 || pgno>(int)sqlite3BtreeLastPage(pBt) ){
+ if( pgno<1 || pBt==0 || pgno>sqlite3BtreeLastPage(pBt) ){
zErr = "bad page number";
goto update_fail;
}
@@ -210381,11 +214478,12 @@ static int dbpageUpdate(
pPager = sqlite3BtreePager(pBt);
rc = sqlite3PagerGet(pPager, pgno, (DbPage**)&pDbPage, 0);
if( rc==SQLITE_OK ){
- rc = sqlite3PagerWrite(pDbPage);
- if( rc==SQLITE_OK ){
- memcpy(sqlite3PagerGetData(pDbPage),
- sqlite3_value_blob(argv[3]),
- szPage);
+ const void *pData = sqlite3_value_blob(argv[3]);
+ assert( pData!=0 || pTab->db->mallocFailed );
+ if( pData
+ && (rc = sqlite3PagerWrite(pDbPage))==SQLITE_OK
+ ){
+ memcpy(sqlite3PagerGetData(pDbPage), pData, szPage);
}
}
sqlite3PagerUnref(pDbPage);
@@ -210405,11 +214503,12 @@ static int dbpageBegin(sqlite3_vtab *pVtab){
DbpageTable *pTab = (DbpageTable *)pVtab;
sqlite3 *db = pTab->db;
  int i;
-  for(i=0; i<db->nDb; i++){
+  int rc = SQLITE_OK;
+  for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
Btree *pBt = db->aDb[i].pBt;
- if( pBt ) sqlite3BtreeBeginTrans(pBt, 1, 0);
+ if( pBt ) rc = sqlite3BtreeBeginTrans(pBt, 1, 0);
}
- return SQLITE_OK;
+ return rc;
}
@@ -213780,6 +217879,22 @@ static int sessionChangesetNextOne(
if( p->op==SQLITE_INSERT ) p->op = SQLITE_DELETE;
else if( p->op==SQLITE_DELETE ) p->op = SQLITE_INSERT;
}
+
+ /* If this is an UPDATE that is part of a changeset, then check that
+ ** there are no fields in the old.* record that are not (a) PK fields,
+ ** or (b) also present in the new.* record.
+ **
+ ** Such records are technically corrupt, but the rebaser was at one
+ ** point generating them. Under most circumstances this is benign, but
+ ** can cause spurious SQLITE_RANGE errors when applying the changeset. */
+  if( p->bPatchset==0 && p->op==SQLITE_UPDATE ){
+    for(i=0; i<p->nCol; i++){
+ if( p->abPK[i]==0 && p->apValue[i+p->nCol]==0 ){
+ sqlite3ValueFree(p->apValue[i]);
+ p->apValue[i] = 0;
+ }
+ }
+ }
}
return SQLITE_ROW;
@@ -215976,7 +220091,7 @@ static void sessionAppendPartialUpdate(
if( !pIter->abPK[i] && a1[0] ) bData = 1;
memcpy(pOut, a1, n1);
pOut += n1;
- }else if( a2[0]!=0xFF ){
+ }else if( a2[0]!=0xFF && a1[0] ){
bData = 1;
memcpy(pOut, a2, n2);
pOut += n2;
@@ -217133,7 +221248,7 @@ static void sqlite3Fts5BufferAppendPrintf(int *, Fts5Buffer*, char *zFmt, ...);
static char *sqlite3Fts5Mprintf(int *pRc, const char *zFmt, ...);
#define fts5BufferZero(x) sqlite3Fts5BufferZero(x)
-#define fts5BufferAppendVarint(a,b,c) sqlite3Fts5BufferAppendVarint(a,b,c)
+#define fts5BufferAppendVarint(a,b,c) sqlite3Fts5BufferAppendVarint(a,b,(i64)c)
#define fts5BufferFree(a) sqlite3Fts5BufferFree(a)
#define fts5BufferAppendBlob(a,b,c,d) sqlite3Fts5BufferAppendBlob(a,b,c,d)
#define fts5BufferSet(a,b,c,d) sqlite3Fts5BufferSet(a,b,c,d)
@@ -222917,6 +227032,9 @@ static Fts5ExprNearset *sqlite3Fts5ParseNearset(
}else{
if( pRet->nPhrase>0 ){
Fts5ExprPhrase *pLast = pRet->apPhrase[pRet->nPhrase-1];
+ assert( pParse!=0 );
+ assert( pParse->apPhrase!=0 );
+ assert( pParse->nPhrase>=2 );
assert( pLast==pParse->apPhrase[pParse->nPhrase-2] );
if( pPhrase->nTerm==0 ){
fts5ExprPhraseFree(pPhrase);
@@ -225216,7 +229334,7 @@ struct Fts5Index {
sqlite3_stmt *pWriter; /* "INSERT ... %_data VALUES(?,?)" */
sqlite3_stmt *pDeleter; /* "DELETE FROM %_data ... id>=? AND id<=?" */
sqlite3_stmt *pIdxWriter; /* "INSERT ... %_idx VALUES(?,?,?,?)" */
- sqlite3_stmt *pIdxDeleter; /* "DELETE FROM %_idx WHERE segid=? */
+ sqlite3_stmt *pIdxDeleter; /* "DELETE FROM %_idx WHERE segid=?" */
sqlite3_stmt *pIdxSelect;
int nRead; /* Total number of blocks read */
@@ -229007,7 +233125,9 @@ static void fts5WriteAppendRowid(
fts5BufferAppendVarint(&p->rc, &pPage->buf, iRowid);
}else{
assert_nc( p->rc || iRowid>pWriter->iPrevRowid );
- fts5BufferAppendVarint(&p->rc, &pPage->buf, iRowid - pWriter->iPrevRowid);
+ fts5BufferAppendVarint(&p->rc, &pPage->buf,
+ (u64)iRowid - (u64)pWriter->iPrevRowid
+ );
}
pWriter->iPrevRowid = iRowid;
pWriter->bFirstRowidInDoclist = 0;
@@ -229771,7 +233891,7 @@ static int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){
static void fts5AppendRowid(
Fts5Index *p,
- i64 iDelta,
+ u64 iDelta,
Fts5Iter *pUnused,
Fts5Buffer *pBuf
){
@@ -229781,7 +233901,7 @@ static void fts5AppendRowid(
static void fts5AppendPoslist(
Fts5Index *p,
- i64 iDelta,
+ u64 iDelta,
Fts5Iter *pMulti,
Fts5Buffer *pBuf
){
@@ -229856,10 +233976,10 @@ static void fts5MergeAppendDocid(
}
#endif
-#define fts5MergeAppendDocid(pBuf, iLastRowid, iRowid) { \
- assert( (pBuf)->n!=0 || (iLastRowid)==0 ); \
- fts5BufferSafeAppendVarint((pBuf), (iRowid) - (iLastRowid)); \
- (iLastRowid) = (iRowid); \
+#define fts5MergeAppendDocid(pBuf, iLastRowid, iRowid) { \
+ assert( (pBuf)->n!=0 || (iLastRowid)==0 ); \
+ fts5BufferSafeAppendVarint((pBuf), (u64)(iRowid) - (u64)(iLastRowid)); \
+ (iLastRowid) = (iRowid); \
}
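/* Editorial note: the (u64) casts in the macro above make the docid-delta
** subtraction use well-defined unsigned wraparound instead of risking signed
** 64-bit overflow when the two rowids are far apart; the varint encoder then
** stores the same 64-bit bit pattern either way. */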
/*
@@ -230130,7 +234250,7 @@ static void fts5SetupPrefixIter(
int nMerge = 1;
void (*xMerge)(Fts5Index*, Fts5Buffer*, int, Fts5Buffer*);
- void (*xAppend)(Fts5Index*, i64, Fts5Iter*, Fts5Buffer*);
+ void (*xAppend)(Fts5Index*, u64, Fts5Iter*, Fts5Buffer*);
if( p->pConfig->eDetail==FTS5_DETAIL_NONE ){
xMerge = fts5MergeRowidLists;
xAppend = fts5AppendRowid;
@@ -230169,7 +234289,7 @@ static void fts5SetupPrefixIter(
Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ];
p1->xSetOutputs(p1, pSeg);
if( p1->base.nData ){
- xAppend(p, p1->base.iRowid-iLastRowid, p1, &doclist);
+ xAppend(p, (u64)p1->base.iRowid-(u64)iLastRowid, p1, &doclist);
iLastRowid = p1->base.iRowid;
}
}
@@ -230217,7 +234337,7 @@ static void fts5SetupPrefixIter(
iLastRowid = 0;
}
- xAppend(p, p1->base.iRowid-iLastRowid, p1, &doclist);
+ xAppend(p, (u64)p1->base.iRowid-(u64)iLastRowid, p1, &doclist);
iLastRowid = p1->base.iRowid;
}
@@ -231196,6 +235316,7 @@ static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum, int bUseCksum
/* If this is a new term, query for it. Update cksum3 with the results. */
fts5TestTerm(p, &term, z, n, cksum2, &cksum3);
+ if( p->rc ) break;
if( eDetail==FTS5_DETAIL_NONE ){
if( 0==fts5MultiIterIsEmpty(p, pIter) ){
@@ -232000,7 +236121,7 @@ static void fts5CheckTransactionState(Fts5FullTable *p, int op, int iSavepoint){
break;
case FTS5_SYNC:
- assert( p->ts.eState==1 );
+ assert( p->ts.eState==1 || p->ts.eState==2 );
p->ts.eState = 2;
break;
@@ -232015,21 +236136,21 @@ static void fts5CheckTransactionState(Fts5FullTable *p, int op, int iSavepoint){
break;
case FTS5_SAVEPOINT:
- assert( p->ts.eState==1 );
+ assert( p->ts.eState>=1 );
assert( iSavepoint>=0 );
assert( iSavepoint>=p->ts.iSavepoint );
p->ts.iSavepoint = iSavepoint;
break;
case FTS5_RELEASE:
- assert( p->ts.eState==1 );
+ assert( p->ts.eState>=1 );
assert( iSavepoint>=0 );
assert( iSavepoint<=p->ts.iSavepoint );
p->ts.iSavepoint = iSavepoint-1;
break;
case FTS5_ROLLBACKTO:
- assert( p->ts.eState==1 );
+ assert( p->ts.eState>=1 );
assert( iSavepoint>=-1 );
/* The following assert() can fail if another vtab strikes an error
** within an xSavepoint() call then SQLite calls xRollbackTo() - without
@@ -233365,7 +237486,7 @@ static int fts5UpdateMethod(
int rc = SQLITE_OK; /* Return code */
/* A transaction must be open when this is called. */
- assert( pTab->ts.eState==1 );
+ assert( pTab->ts.eState==1 || pTab->ts.eState==2 );
assert( pVtab->zErrMsg==0 );
assert( nArg==1 || nArg==(2+pConfig->nCol+2) );
@@ -234533,7 +238654,7 @@ static void fts5SourceIdFunc(
){
assert( nArg==0 );
UNUSED_PARAM2(nArg, apUnused);
- sqlite3_result_text(pCtx, "fts5: 2022-05-06 15:25:27 78d9c993d404cdfaa7fdd2973fa1052e3da9f66215cff9c5540ebe55c407d9fe", -1, SQLITE_TRANSIENT);
+ sqlite3_result_text(pCtx, "fts5: 2022-12-28 14:03:47 df5c253c0b3dd24916e4ec7cf77d3db5294cc9fd45ae7b9c5e82ad8197f38a24", -1, SQLITE_TRANSIENT);
}
/*
@@ -239204,6 +243325,16 @@ SQLITE_EXTENSION_INIT1
#ifndef SQLITE_OMIT_VIRTUALTABLE
+
+#define STMT_NUM_INTEGER_COLUMN 10
+typedef struct StmtRow StmtRow;
+struct StmtRow {
+ sqlite3_int64 iRowid; /* Rowid value */
+ char *zSql; /* column "sql" */
+ int aCol[STMT_NUM_INTEGER_COLUMN+1]; /* all other column values */
+ StmtRow *pNext; /* Next row to return */
+};
+
/* stmt_vtab is a subclass of sqlite3_vtab which will
** serve as the underlying representation of a stmt virtual table
*/
@@ -239221,8 +243352,7 @@ typedef struct stmt_cursor stmt_cursor;
struct stmt_cursor {
sqlite3_vtab_cursor base; /* Base class - must be first */
sqlite3 *db; /* Database connection for this cursor */
- sqlite3_stmt *pStmt; /* Statement cursor is currently pointing at */
- sqlite3_int64 iRowid; /* The rowid */
+ StmtRow *pRow; /* Current row */
};
/*
@@ -239266,7 +243396,7 @@ static int stmtConnect(
"CREATE TABLE x(sql,ncol,ro,busy,nscan,nsort,naidx,nstep,"
"reprep,run,mem)");
if( rc==SQLITE_OK ){
- pNew = sqlite3_malloc( sizeof(*pNew) );
+ pNew = sqlite3_malloc64( sizeof(*pNew) );
*ppVtab = (sqlite3_vtab*)pNew;
if( pNew==0 ) return SQLITE_NOMEM;
memset(pNew, 0, sizeof(*pNew));
@@ -239288,7 +243418,7 @@ static int stmtDisconnect(sqlite3_vtab *pVtab){
*/
static int stmtOpen(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
stmt_cursor *pCur;
- pCur = sqlite3_malloc( sizeof(*pCur) );
+ pCur = sqlite3_malloc64( sizeof(*pCur) );
if( pCur==0 ) return SQLITE_NOMEM;
memset(pCur, 0, sizeof(*pCur));
pCur->db = ((stmt_vtab*)p)->db;
@@ -239296,10 +243426,21 @@ static int stmtOpen(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){
return SQLITE_OK;
}
+static void stmtCsrReset(stmt_cursor *pCur){
+ StmtRow *pRow = 0;
+ StmtRow *pNext = 0;
+ for(pRow=pCur->pRow; pRow; pRow=pNext){
+ pNext = pRow->pNext;
+ sqlite3_free(pRow);
+ }
+ pCur->pRow = 0;
+}
+
/*
** Destructor for a stmt_cursor.
*/
static int stmtClose(sqlite3_vtab_cursor *cur){
+ stmtCsrReset((stmt_cursor*)cur);
sqlite3_free(cur);
return SQLITE_OK;
}
@@ -239310,8 +243451,9 @@ static int stmtClose(sqlite3_vtab_cursor *cur){
*/
static int stmtNext(sqlite3_vtab_cursor *cur){
stmt_cursor *pCur = (stmt_cursor*)cur;
- pCur->iRowid++;
- pCur->pStmt = sqlite3_next_stmt(pCur->db, pCur->pStmt);
+ StmtRow *pNext = pCur->pRow->pNext;
+ sqlite3_free(pCur->pRow);
+ pCur->pRow = pNext;
return SQLITE_OK;
}
@@ -239325,39 +243467,11 @@ static int stmtColumn(
int i /* Which column to return */
){
stmt_cursor *pCur = (stmt_cursor*)cur;
- switch( i ){
- case STMT_COLUMN_SQL: {
- sqlite3_result_text(ctx, sqlite3_sql(pCur->pStmt), -1, SQLITE_TRANSIENT);
- break;
- }
- case STMT_COLUMN_NCOL: {
- sqlite3_result_int(ctx, sqlite3_column_count(pCur->pStmt));
- break;
- }
- case STMT_COLUMN_RO: {
- sqlite3_result_int(ctx, sqlite3_stmt_readonly(pCur->pStmt));
- break;
- }
- case STMT_COLUMN_BUSY: {
- sqlite3_result_int(ctx, sqlite3_stmt_busy(pCur->pStmt));
- break;
- }
- default: {
- assert( i==STMT_COLUMN_MEM );
- i = SQLITE_STMTSTATUS_MEMUSED +
- STMT_COLUMN_NSCAN - SQLITE_STMTSTATUS_FULLSCAN_STEP;
- /* Fall thru */
- }
- case STMT_COLUMN_NSCAN:
- case STMT_COLUMN_NSORT:
- case STMT_COLUMN_NAIDX:
- case STMT_COLUMN_NSTEP:
- case STMT_COLUMN_REPREP:
- case STMT_COLUMN_RUN: {
- sqlite3_result_int(ctx, sqlite3_stmt_status(pCur->pStmt,
- i-STMT_COLUMN_NSCAN+SQLITE_STMTSTATUS_FULLSCAN_STEP, 0));
- break;
- }
+ StmtRow *pRow = pCur->pRow;
+ if( i==STMT_COLUMN_SQL ){
+ sqlite3_result_text(ctx, pRow->zSql, -1, SQLITE_TRANSIENT);
+ }else{
+ sqlite3_result_int(ctx, pRow->aCol[i]);
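  /* Editorial illustration, not part of openDatabase(): in a build that
  ** defines SQLITE_OS_KV_OPTIONAL (for example the WASM/JS build, where the
  ** kvvfs backend is available) the mapping above makes
  **
  **   sqlite3_open(":localStorage:", &db);
  **
  ** equivalent to opening "file:local?vfs=kvvfs" with SQLITE_OPEN_URI set,
  ** and ":sessionStorage:" likewise maps to "file:session?vfs=kvvfs".
  ** All other filenames are unaffected. */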
}
return SQLITE_OK;
}
@@ -239368,7 +243482,7 @@ static int stmtColumn(
*/
static int stmtRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){
stmt_cursor *pCur = (stmt_cursor*)cur;
- *pRowid = pCur->iRowid;
+ *pRowid = pCur->pRow->iRowid;
return SQLITE_OK;
}
@@ -239378,7 +243492,7 @@ static int stmtRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){
*/
static int stmtEof(sqlite3_vtab_cursor *cur){
stmt_cursor *pCur = (stmt_cursor*)cur;
- return pCur->pStmt==0;
+ return pCur->pRow==0;
}
/*
@@ -239393,9 +243507,53 @@ static int stmtFilter(
int argc, sqlite3_value **argv
){
stmt_cursor *pCur = (stmt_cursor *)pVtabCursor;
- pCur->pStmt = 0;
- pCur->iRowid = 0;
- return stmtNext(pVtabCursor);
+ sqlite3_stmt *p = 0;
+ sqlite3_int64 iRowid = 1;
+ StmtRow **ppRow = 0;
+
+ stmtCsrReset(pCur);
+ ppRow = &pCur->pRow;
+ for(p=sqlite3_next_stmt(pCur->db, 0); p; p=sqlite3_next_stmt(pCur->db, p)){
+ const char *zSql = sqlite3_sql(p);
+ sqlite3_int64 nSql = zSql ? strlen(zSql)+1 : 0;
+ StmtRow *pNew = (StmtRow*)sqlite3_malloc64(sizeof(StmtRow) + nSql);
+
+ if( pNew==0 ) return SQLITE_NOMEM;
+ memset(pNew, 0, sizeof(StmtRow));
+ if( zSql ){
+ pNew->zSql = (char*)&pNew[1];
+ memcpy(pNew->zSql, zSql, nSql);
+ }
+ pNew->aCol[STMT_COLUMN_NCOL] = sqlite3_column_count(p);
+ pNew->aCol[STMT_COLUMN_RO] = sqlite3_stmt_readonly(p);
+ pNew->aCol[STMT_COLUMN_BUSY] = sqlite3_stmt_busy(p);
+ pNew->aCol[STMT_COLUMN_NSCAN] = sqlite3_stmt_status(
+ p, SQLITE_STMTSTATUS_FULLSCAN_STEP, 0
+ );
+ pNew->aCol[STMT_COLUMN_NSORT] = sqlite3_stmt_status(
+ p, SQLITE_STMTSTATUS_SORT, 0
+ );
+ pNew->aCol[STMT_COLUMN_NAIDX] = sqlite3_stmt_status(
+ p, SQLITE_STMTSTATUS_AUTOINDEX, 0
+ );
+ pNew->aCol[STMT_COLUMN_NSTEP] = sqlite3_stmt_status(
+ p, SQLITE_STMTSTATUS_VM_STEP, 0
+ );
+ pNew->aCol[STMT_COLUMN_REPREP] = sqlite3_stmt_status(
+ p, SQLITE_STMTSTATUS_REPREPARE, 0
+ );
+ pNew->aCol[STMT_COLUMN_RUN] = sqlite3_stmt_status(
+ p, SQLITE_STMTSTATUS_RUN, 0
+ );
+ pNew->aCol[STMT_COLUMN_MEM] = sqlite3_stmt_status(
+ p, SQLITE_STMTSTATUS_MEMUSED, 0
+ );
+ pNew->iRowid = iRowid++;
+ *ppRow = pNew;
+ ppRow = &pNew->pNext;
+ }
+
+ return SQLITE_OK;
}
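/* Editorial note: with the snapshot built in stmtFilter() above, a query such
** as
**
**   SELECT sql, run, mem FROM sqlite_stmt;
**
** scans a stable list of rows captured when the filter ran, instead of walking
** the live sqlite3_next_stmt() list while rows are being returned.  (The
** module is conventionally registered under the name "sqlite_stmt"; that name
** comes from the extension's registration code, not from this excerpt.) */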
/*
diff --git a/database/sqlite/sqlite3.h b/database/sqlite/sqlite3.h
index de393da9d..24b916750 100644
--- a/database/sqlite/sqlite3.h
+++ b/database/sqlite/sqlite3.h
@@ -146,9 +146,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.38.5"
-#define SQLITE_VERSION_NUMBER 3038005
-#define SQLITE_SOURCE_ID "2022-05-06 15:25:27 78d9c993d404cdfaa7fdd2973fa1052e3da9f66215cff9c5540ebe55c407d9fe"
+#define SQLITE_VERSION "3.40.1"
+#define SQLITE_VERSION_NUMBER 3040001
+#define SQLITE_SOURCE_ID "2022-12-28 14:03:47 df5c253c0b3dd24916e4ec7cf77d3db5294cc9fd45ae7b9c5e82ad8197f38a24"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -670,13 +670,17 @@ SQLITE_API int sqlite3_exec(
**
** SQLite uses one of these integer values as the second
** argument to calls it makes to the xLock() and xUnlock() methods
-** of an [sqlite3_io_methods] object.
+** of an [sqlite3_io_methods] object. These values are ordered from
+** least restrictive to most restrictive.
+**
+** The argument to xLock() is always SHARED or higher. The argument to
+** xUnlock() is either SHARED or NONE.
*/
-#define SQLITE_LOCK_NONE 0
-#define SQLITE_LOCK_SHARED 1
-#define SQLITE_LOCK_RESERVED 2
-#define SQLITE_LOCK_PENDING 3
-#define SQLITE_LOCK_EXCLUSIVE 4
+#define SQLITE_LOCK_NONE 0 /* xUnlock() only */
+#define SQLITE_LOCK_SHARED 1 /* xLock() or xUnlock() */
+#define SQLITE_LOCK_RESERVED 2 /* xLock() only */
+#define SQLITE_LOCK_PENDING 3 /* xLock() only */
+#define SQLITE_LOCK_EXCLUSIVE 4 /* xLock() only */
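/* Editorial note: a hedged sketch of a VFS honoring the argument constraints
** annotated above together with the xLock()/xUnlock() upgrade and downgrade
** rules described later in this header.  The demo_file structure, its eLock
** field, and the demoLock/demoUnlock names are assumptions of this sketch. */
#if 0
#include <assert.h>
typedef struct demo_file demo_file;
struct demo_file {
  sqlite3_file base;   /* Base class - must be first */
  int eLock;           /* Currently held SQLITE_LOCK_* level */
};
static int demoLock(sqlite3_file *pFile, int eLock){
  demo_file *p = (demo_file*)pFile;
  assert( eLock>=SQLITE_LOCK_SHARED );      /* xLock() never receives NONE */
  if( p->eLock>=eLock ) return SQLITE_OK;   /* already at or above: no-op */
  /* ... acquire the stronger lock here ... */
  p->eLock = eLock;
  return SQLITE_OK;
}
static int demoUnlock(sqlite3_file *pFile, int eLock){
  demo_file *p = (demo_file*)pFile;
  assert( eLock==SQLITE_LOCK_NONE || eLock==SQLITE_LOCK_SHARED );
  if( p->eLock<=eLock ) return SQLITE_OK;   /* already at or below: no-op */
  /* ... release down to eLock here ... */
  p->eLock = eLock;
  return SQLITE_OK;
}
#endif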
/*
** CAPI3REF: Synchronization Type Flags
@@ -754,7 +758,14 @@ struct sqlite3_file {
** [SQLITE_LOCK_PENDING], or
** [SQLITE_LOCK_EXCLUSIVE].
**
-** xLock() increases the lock. xUnlock() decreases the lock.
+** xLock() upgrades the database file lock. In other words, xLock() moves the
+** database file lock in the direction from NONE toward EXCLUSIVE. The argument to
+** xLock() is always one of SHARED, RESERVED, PENDING, or EXCLUSIVE, never
+** SQLITE_LOCK_NONE. If the database file lock is already at or above the
+** requested lock, then the call to xLock() is a no-op.
+** xUnlock() downgrades the database file lock to either SHARED or NONE.
+** If the lock is already at or below the requested lock state, then the call
+** to xUnlock() is a no-op.
** The xCheckReservedLock() method checks whether any database connection,
** either in this process or in some other process, is holding a RESERVED,
** PENDING, or EXCLUSIVE lock on the file. It returns true
@@ -859,9 +870,8 @@ struct sqlite3_io_methods {
** opcode causes the xFileControl method to write the current state of
** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED],
** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE])
-** into an integer that the pArg argument points to. This capability
-** is used during testing and is only available when the SQLITE_TEST
-** compile-time option is used.
+** into an integer that the pArg argument points to.
+** This capability is only available if SQLite is compiled with [SQLITE_DEBUG].
**
** <li>[[SQLITE_FCNTL_SIZE_HINT]]
** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS
@@ -1182,6 +1192,12 @@ struct sqlite3_io_methods {
**
** <li>[[SQLITE_FCNTL_CKSM_FILE]]
** Used by the cksmvfs VFS module only.
+**
+** <li>[[SQLITE_FCNTL_RESET_CACHE]]
+** If there is currently no transaction open on the database, and the
+** database is not a temp db, then this file-control purges the contents
+** of the in-memory page cache. If there is an open transaction, or if
+** the db is a temp-db, it is a no-op, not an error.
**
*/
#define SQLITE_FCNTL_LOCKSTATE 1
@@ -1224,6 +1240,7 @@ struct sqlite3_io_methods {
#define SQLITE_FCNTL_CKPT_START 39
#define SQLITE_FCNTL_EXTERNAL_READER 40
#define SQLITE_FCNTL_CKSM_FILE 41
+#define SQLITE_FCNTL_RESET_CACHE 42
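/* Editorial note: a hedged sketch of invoking SQLITE_FCNTL_RESET_CACHE from
** application code.  sqlite3_file_control() is the normal entry point for
** file-control opcodes; the schema name "main" and the unused NULL argument
** are assumptions of this illustration. */
#if 0
static int resetMainPageCache(sqlite3 *db){
  /* Per the documentation above, this is a harmless no-op (still SQLITE_OK)
  ** when a transaction is open or the database is a temp db. */
  return sqlite3_file_control(db, "main", SQLITE_FCNTL_RESET_CACHE, 0);
}
#endif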
/* deprecated names */
#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
@@ -1253,6 +1270,26 @@ typedef struct sqlite3_mutex sqlite3_mutex;
*/
typedef struct sqlite3_api_routines sqlite3_api_routines;
+/*
+** CAPI3REF: File Name
+**
+** Type [sqlite3_filename] is used by SQLite to pass filenames to the
+** xOpen method of a [VFS]. It may be cast to (const char*) and treated
+** as a normal, nul-terminated, UTF-8 buffer containing the filename, but
+** may also be passed to special APIs such as:
+**
+** <ul>
+** <li>  sqlite3_filename_database()
+** <li>  sqlite3_filename_journal()
+** <li>  sqlite3_filename_wal()
+** <li>  sqlite3_uri_parameter()
+** <li>  sqlite3_uri_boolean()
+** <li>  sqlite3_uri_int64()
+** <li>  sqlite3_uri_key()
+** </ul>
+*/
+typedef const char *sqlite3_filename;
+
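/* Editorial note: a hedged sketch of the intended use of sqlite3_filename in
** a custom VFS xOpen() implementation.  The function name demoOpen and the
** URI parameter name "cachesize" are assumptions of this illustration. */
#if 0
static int demoOpen(sqlite3_vfs *pVfs, sqlite3_filename zName,
                    sqlite3_file *pFile, int flags, int *pOutFlags){
  /* zName may be used directly as an ordinary nul-terminated UTF-8 path... */
  const char *zPath = (const char*)zName;
  /* ...and may also be handed to the URI query interfaces listed above: */
  sqlite3_int64 szCache = sqlite3_uri_int64(zName, "cachesize", -1);
  (void)pVfs; (void)pFile; (void)flags; (void)pOutFlags;
  (void)zPath; (void)szCache;
  return SQLITE_CANTOPEN;   /* placeholder body */
}
#endif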
/*
** CAPI3REF: OS Interface Object
**
@@ -1431,7 +1468,7 @@ struct sqlite3_vfs {
sqlite3_vfs *pNext; /* Next registered VFS */
const char *zName; /* Name of this virtual file system */
void *pAppData; /* Pointer to application-specific data */
- int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*,
+ int (*xOpen)(sqlite3_vfs*, sqlite3_filename zName, sqlite3_file*,
int flags, int *pOutFlags);
int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir);
int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut);
@@ -2309,6 +2346,7 @@ struct sqlite3_mem_methods {
** <ul>
** <li> The [PRAGMA writable_schema=ON] statement.
** <li> The [PRAGMA journal_mode=OFF] statement.
+** <li> The [PRAGMA schema_version=N] statement.
** <li> Writes to the [sqlite_dbpage] virtual table.
** <li> Direct writes to [shadow tables].
** </ul>
@@ -3424,6 +3462,9 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
** <dd>The database is opened [shared cache] enabled, overriding
** the default shared cache setting provided by
** [sqlite3_enable_shared_cache()].)^
+** The [use of shared cache mode is discouraged] and hence shared cache
+** capabilities may be omitted from many builds of SQLite. In such cases,
+** this option is a no-op.
**
** ^( [SQLITE_OPEN_PRIVATECACHE]
** The database is opened [shared cache] disabled, overriding
@@ -3439,7 +3480,7 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
** to return an extended result code.
**
** [[OPEN_NOFOLLOW]] ^([SQLITE_OPEN_NOFOLLOW]
-** The database filename is not allowed to be a symbolic link
+** The database filename is not allowed to contain a symbolic link
** )^
**
** If the 3rd parameter to sqlite3_open_v2() is not one of the
@@ -3698,10 +3739,10 @@ SQLITE_API int sqlite3_open_v2(
**
** See the [URI filename] documentation for additional information.
*/
-SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam);
-SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault);
-SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64);
-SQLITE_API const char *sqlite3_uri_key(const char *zFilename, int N);
+SQLITE_API const char *sqlite3_uri_parameter(sqlite3_filename z, const char *zParam);
+SQLITE_API int sqlite3_uri_boolean(sqlite3_filename z, const char *zParam, int bDefault);
+SQLITE_API sqlite3_int64 sqlite3_uri_int64(sqlite3_filename, const char*, sqlite3_int64);
+SQLITE_API const char *sqlite3_uri_key(sqlite3_filename z, int N);
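For example, inside an xOpen implementation the sqlite3_filename received from SQLite (zName below) can be queried for URI parameters with fallback defaults; the parameter names here are made up:

    int useTrace = sqlite3_uri_boolean(zName, "demo_trace", 0);          /* default: off */
    sqlite3_int64 szKb = sqlite3_uri_int64(zName, "demo_cache_kb", 2048);
    const char *zFirstKey = sqlite3_uri_key(zName, 0);  /* first query parameter, or NULL */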
/*
** CAPI3REF: Translate filenames
@@ -3730,9 +3771,9 @@ SQLITE_API const char *sqlite3_uri_key(const char *zFilename, int N);
** return value from [sqlite3_db_filename()], then the result is
** undefined and is likely a memory access violation.
*/
-SQLITE_API const char *sqlite3_filename_database(const char*);
-SQLITE_API const char *sqlite3_filename_journal(const char*);
-SQLITE_API const char *sqlite3_filename_wal(const char*);
+SQLITE_API const char *sqlite3_filename_database(sqlite3_filename);
+SQLITE_API const char *sqlite3_filename_journal(sqlite3_filename);
+SQLITE_API const char *sqlite3_filename_wal(sqlite3_filename);
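Assuming zName is the main-database sqlite3_filename passed to xOpen(), the companion file names can be derived without re-implementing SQLite's naming rules:

    const char *zDb   = sqlite3_filename_database(zName);  /* the database file itself */
    const char *zJrnl = sqlite3_filename_journal(zName);   /* rollback journal name */
    const char *zWal  = sqlite3_filename_wal(zName);       /* write-ahead log name */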
/*
** CAPI3REF: Database File Corresponding To A Journal
@@ -3798,14 +3839,14 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*);
** then the corresponding [sqlite3_module.xClose() method should also be
** invoked prior to calling sqlite3_free_filename(Y).
*/
-SQLITE_API char *sqlite3_create_filename(
+SQLITE_API sqlite3_filename sqlite3_create_filename(
const char *zDatabase,
const char *zJournal,
const char *zWal,
int nParam,
const char **azParam
);
-SQLITE_API void sqlite3_free_filename(char*);
+SQLITE_API void sqlite3_free_filename(sqlite3_filename);
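A sketch of how a shim VFS might build a filename to hand to a lower-level xOpen(); the paths and the "vfs_trace" parameter are illustrative only:

    const char *azParam[] = { "vfs_trace", "1" };           /* one key/value pair */
    sqlite3_filename zNew = sqlite3_create_filename(
        "/tmp/demo.db", "/tmp/demo.db-journal", "/tmp/demo.db-wal", 1, azParam);
    if( zNew ){
      /* pass zNew to the underlying VFS xOpen(), then release it */
      sqlite3_free_filename(zNew);
    }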
/*
** CAPI3REF: Error Codes And Messages
@@ -5508,6 +5549,16 @@ SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int6
** then the conversion is performed. Otherwise no conversion occurs.
** The [SQLITE_INTEGER | datatype] after conversion is returned.)^
**
+** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8],
+** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current encoding
+** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X)
+** returns something other than SQLITE_TEXT, then the return value from
+** sqlite3_value_encoding(X) is meaningless. ^Calls to
+** sqlite3_value_text(X), sqlite3_value_text16(X), sqlite3_value_text16be(X),
+** sqlite3_value_text16le(X), sqlite3_value_bytes(X), or
+** sqlite3_value_bytes16(X) might change the encoding of the value X and
+** thus change the return from subsequent calls to sqlite3_value_encoding(X).
+**
** ^Within the [xUpdate] method of a [virtual table], the
** sqlite3_value_nochange(X) interface returns true if and only if
** the column corresponding to X is unchanged by the UPDATE operation
@@ -5572,6 +5623,7 @@ SQLITE_API int sqlite3_value_type(sqlite3_value*);
SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*);
SQLITE_API int sqlite3_value_nochange(sqlite3_value*);
SQLITE_API int sqlite3_value_frombind(sqlite3_value*);
+SQLITE_API int sqlite3_value_encoding(sqlite3_value*);
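A hypothetical SQL function showing the intended use: report the encoding only when the argument really is TEXT:

    static void encodingFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      (void)argc;
      if( sqlite3_value_type(argv[0])==SQLITE_TEXT ){
        sqlite3_result_int(ctx, sqlite3_value_encoding(argv[0]));  /* e.g. SQLITE_UTF8 */
      }else{
        sqlite3_result_null(ctx);     /* encoding is meaningless for non-TEXT values */
      }
    }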
/*
** CAPI3REF: Finding The Subtype Of SQL Values
@@ -5593,7 +5645,8 @@ SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*);
** object D and returns a pointer to that copy. ^The [sqlite3_value] returned
** is a [protected sqlite3_value] object even if the input is not.
** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a
-** memory allocation fails.
+** memory allocation fails. ^If V is a [pointer value], then the result
+** of sqlite3_value_dup(V) is a NULL value.
**
** ^The sqlite3_value_free(V) interface frees an [sqlite3_value] object
** previously obtained from [sqlite3_value_dup()]. ^If V is a NULL pointer
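In practice the pair is used to keep a value alive beyond the lifetime of its source, e.g.:

    sqlite3_value *pCopy = sqlite3_value_dup(argv[0]);  /* NULL on OOM or NULL input */
    if( pCopy ){
      /* ... use the protected copy after argv[0] has gone out of scope ... */
      sqlite3_value_free(pCopy);
    }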
@@ -5624,7 +5677,7 @@ SQLITE_API void sqlite3_value_free(sqlite3_value*);
**
** ^The sqlite3_aggregate_context(C,N) routine returns a NULL pointer
** when first called if N is less than or equal to zero or if a memory
-** allocate error occurs.
+** allocation error occurs.
**
** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is
** determined by the N parameter on first successful call. Changing the
@@ -5829,9 +5882,10 @@ typedef void (*sqlite3_destructor_type)(void*);
** of [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE].
** ^SQLite takes the text result from the application from
** the 2nd parameter of the sqlite3_result_text* interfaces.
-** ^If the 3rd parameter to the sqlite3_result_text* interfaces
-** is negative, then SQLite takes result text from the 2nd parameter
-** through the first zero character.
+** ^If the 3rd parameter to any of the sqlite3_result_text* interfaces
+** other than sqlite3_result_text64() is negative, then SQLite computes
+** the string length itself by searching the 2nd parameter for the first
+** zero character.
** ^If the 3rd parameter to the sqlite3_result_text* interfaces
** is non-negative, then as many bytes (not characters) of the text
** pointed to by the 2nd parameter are taken as the application-defined
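The two length conventions side by side, as independent illustrative calls (sqlite3_result_text64() being the one interface that must always receive an explicit, non-negative length):

    sqlite3_result_text(ctx, "hello", -1, SQLITE_TRANSIENT);      /* length found via the zero byte */
    sqlite3_result_text(ctx, "hello world", 5, SQLITE_TRANSIENT); /* exactly 5 bytes are taken */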
@@ -6275,6 +6329,28 @@ SQLITE_API int sqlite3_get_autocommit(sqlite3*);
*/
SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*);
+/*
+** CAPI3REF: Return The Schema Name For A Database Connection
+** METHOD: sqlite3
+**
+** ^The sqlite3_db_name(D,N) interface returns a pointer to the schema name
+** for the N-th database on database connection D, or a NULL pointer of N is
+** out of range. An N value of 0 means the main database file. An N of 1 is
+** the "temp" schema. Larger values of N correspond to various ATTACH-ed
+** databases.
+**
+** Space to hold the string that is returned by sqlite3_db_name() is managed
+** by SQLite itself. The string might be deallocated by any operation that
+** changes the schema, including [ATTACH] or [DETACH] or calls to
+** [sqlite3_serialize()] or [sqlite3_deserialize()], even operations that
+** occur on a different thread. Applications that need to
+** remember the string long-term should make their own copy. Applications that
+** are accessing the same database connection simultaneously on multiple
+** threads should mutex-protect calls to this API and should make their own
+** private copy of the result prior to releasing the mutex.
+*/
+SQLITE_API const char *sqlite3_db_name(sqlite3 *db, int N);
+
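A sketch of enumerating every schema on a connection with the new interface:

    for(int i=0; ; i++){
      const char *zSchema = sqlite3_db_name(db, i);  /* "main", "temp", ATTACH-ed, ... */
      if( zSchema==0 ) break;                        /* past the last schema */
      const char *zFile = sqlite3_db_filename(db, zSchema);
      /* copy zSchema/zFile here if they must outlive later schema changes */
      (void)zFile;
    }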
/*
** CAPI3REF: Return The Filename For A Database Connection
** METHOD: sqlite3
@@ -6305,7 +6381,7 @@ SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*);
** [sqlite3_filename_wal()]
**
*/
-SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName);
+SQLITE_API sqlite3_filename sqlite3_db_filename(sqlite3 *db, const char *zDbName);
/*
** CAPI3REF: Determine if a database is read-only
@@ -6442,7 +6518,7 @@ SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
** function C that is invoked prior to each autovacuum of the database
** file. ^The callback is passed a copy of the generic data pointer (P),
** the schema-name of the attached database that is being autovacuumed,
-** the the size of the database file in pages, the number of free pages,
+** the size of the database file in pages, the number of free pages,
** and the number of bytes per page, respectively. The callback should
** return the number of free pages that should be removed by the
** autovacuum. ^If the callback returns zero, then no autovacuum happens.
@@ -6563,6 +6639,11 @@ SQLITE_API void *sqlite3_update_hook(
** to the same database. Sharing is enabled if the argument is true
** and disabled if the argument is false.)^
**
+** This interface is omitted if SQLite is compiled with
+** [-DSQLITE_OMIT_SHARED_CACHE]. The [-DSQLITE_OMIT_SHARED_CACHE]
+** compile-time option is recommended because the
+** [use of shared cache mode is discouraged].
+**
** ^Cache sharing is enabled and disabled for an entire process.
** This is a change as of SQLite [version 3.5.0] ([dateof:3.5.0]).
** In prior versions of SQLite,
@@ -6661,7 +6742,7 @@ SQLITE_API int sqlite3_db_release_memory(sqlite3*);
** ^The soft heap limit may not be greater than the hard heap limit.
** ^If the hard heap limit is enabled and if sqlite3_soft_heap_limit(N)
** is invoked with a value of N that is greater than the hard heap limit,
-** the the soft heap limit is set to the value of the hard heap limit.
+** the soft heap limit is set to the value of the hard heap limit.
** ^The soft heap limit is automatically enabled whenever the hard heap
** limit is enabled. ^When sqlite3_hard_heap_limit64(N) is invoked and
** the soft heap limit is outside the range of 1..N, then the soft heap
@@ -8956,7 +9037,7 @@ typedef struct sqlite3_backup sqlite3_backup;
** if the application incorrectly accesses the destination [database connection]
** and so no error code is reported, but the operations may malfunction
** nevertheless. Use of the destination database connection while a
-** backup is in progress might also also cause a mutex deadlock.
+** backup is in progress might also cause a mutex deadlock.
**
** If running in [shared cache mode], the application must
** guarantee that the shared cache used by the destination database
@@ -9384,7 +9465,7 @@ SQLITE_API int sqlite3_wal_checkpoint_v2(
*/
#define SQLITE_CHECKPOINT_PASSIVE 0 /* Do as much as possible w/o blocking */
#define SQLITE_CHECKPOINT_FULL 1 /* Wait for writers, then checkpoint */
-#define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for for readers */
+#define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for readers */
#define SQLITE_CHECKPOINT_TRUNCATE 3 /* Like RESTART but also truncate WAL */
/*
@@ -9554,8 +9635,8 @@ SQLITE_API SQLITE_EXPERIMENTAL const char *sqlite3_vtab_collation(sqlite3_index_
** of a [virtual table] implementation. The result of calling this
** interface from outside of xBestIndex() is undefined and probably harmful.
**
-** ^The sqlite3_vtab_distinct() interface returns an integer that is
-** either 0, 1, or 2. The integer returned by sqlite3_vtab_distinct()
+** ^The sqlite3_vtab_distinct() interface returns an integer between 0 and
+** 3. The integer returned by sqlite3_vtab_distinct()
** gives the virtual table additional information about how the query
** planner wants the output to be ordered. As long as the virtual table
** can meet the ordering requirements of the query planner, it may set
@@ -9587,6 +9668,13 @@ SQLITE_API SQLITE_EXPERIMENTAL const char *sqlite3_vtab_collation(sqlite3_index_
** that have the same value for all columns identified by "aOrderBy".
** ^However omitting the extra rows is optional.
** This mode is used for a DISTINCT query.
+**
+** ^(If the sqlite3_vtab_distinct() interface returns 3, that means
+** that the query planner needs only distinct rows but it does need the
+** rows to be sorted.)^ ^The virtual table implementation is free to omit
+** rows that are identical in all aOrderBy columns, if it wants to, but
+** it is not required to omit any rows. This mode is used for queries
+** that have both DISTINCT and ORDER BY clauses.
**
**
** ^For the purposes of comparing virtual table output values to see if the
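Inside a virtual table's xBestIndex, the four modes might be handled as below; demoBestIndex is a hypothetical implementation:

    static int demoBestIndex(sqlite3_vtab *pTab, sqlite3_index_info *pInfo){
      switch( sqlite3_vtab_distinct(pInfo) ){
        case 0: /* full ORDER BY must be honored before setting orderByConsumed */ break;
        case 1: /* GROUP BY: equal aOrderBy rows only need to be adjacent        */ break;
        case 2: /* DISTINCT: duplicate aOrderBy rows may be dropped, any order   */ break;
        case 3: /* DISTINCT + ORDER BY: sorted output, duplicates may be dropped */ break;
      }
      (void)pTab;
      return SQLITE_OK;
    }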
diff --git a/database/sqlite/sqlite_aclk.c b/database/sqlite/sqlite_aclk.c
index 7e3a9b2eb..3b0c40522 100644
--- a/database/sqlite/sqlite_aclk.c
+++ b/database/sqlite/sqlite_aclk.c
@@ -10,10 +10,140 @@ void sanity_check(void) {
BUILD_BUG_ON(WORKER_UTILIZATION_MAX_JOB_TYPES < ACLK_MAX_ENUMERATIONS_DEFINED);
}
-const char *aclk_sync_config[] = {
- NULL,
-};
+static int sql_check_aclk_table(void *data, int argc, char **argv, char **column)
+{
+ struct aclk_database_worker_config *wc = data;
+ UNUSED(argc);
+ UNUSED(column);
+ debug(D_ACLK_SYNC,"Scheduling aclk sync table check for node %s", (char *) argv[0]);
+ struct aclk_database_cmd cmd;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = ACLK_DATABASE_DELETE_HOST;
+ cmd.data = strdupz((char *) argv[0]);
+ aclk_database_enq_cmd_noblock(wc, &cmd);
+ return 0;
+}
+
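+// Table names have the form "aclk_chart_latest_<uuid with '_' separators>": SUBSTR(name,19)
+// drops the 18-character prefix and REPLACE() restores the dashes, recovering the node GUID
+// (inferred from the uuid_unparse_lower_fix() convention used elsewhere in this file).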
+#define SQL_SELECT_ACLK_ACTIVE_LIST "SELECT REPLACE(SUBSTR(name,19),'_','-') FROM sqlite_schema " \
+ "WHERE name LIKE 'aclk_chart_latest_%' AND type IN ('table');"
+
+static void sql_check_aclk_table_list(struct aclk_database_worker_config *wc)
+{
+ char *err_msg = NULL;
+ debug(D_ACLK_SYNC,"Cleaning tables for nodes that do not exist");
+ int rc = sqlite3_exec_monitored(db_meta, SQL_SELECT_ACLK_ACTIVE_LIST, sql_check_aclk_table, (void *) wc, &err_msg);
+ if (rc != SQLITE_OK) {
+ error_report("Query failed when trying to check for obsolete ACLK sync tables, %s", err_msg);
+ sqlite3_free(err_msg);
+ }
+}
+
+static void sql_maint_aclk_sync_database(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd)
+{
+ UNUSED(cmd);
+
+ debug(D_ACLK, "Checking database for %s", wc->host_guid);
+
+ BUFFER *sql = buffer_create(ACLK_SYNC_QUERY_SIZE, &netdata_buffers_statistics.buffers_sqlite);
+
+ buffer_sprintf(sql,"DELETE FROM aclk_alert_%s WHERE date_submitted IS NOT NULL AND "
+ "CAST(date_cloud_ack AS INT) < unixepoch()-%d;", wc->uuid_str, ACLK_DELETE_ACK_ALERTS_INTERNAL);
+ db_execute(buffer_tostring(sql));
+
+ buffer_free(sql);
+}
+
+
+#define SQL_SELECT_HOST_BY_UUID "SELECT host_id FROM host WHERE host_id = @host_id;"
+
+static int is_host_available(uuid_t *host_id)
+{
+ sqlite3_stmt *res = NULL;
+ int rc;
+
+ if (unlikely(!db_meta)) {
+ if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE)
+ error_report("Database has not been initialized");
+ return 1;
+ }
+
+ rc = sqlite3_prepare_v2(db_meta, SQL_SELECT_HOST_BY_UUID, -1, &res, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to prepare statement to select node instance information for a node");
+ return 1;
+ }
+
+ rc = sqlite3_bind_blob(res, 1, host_id, sizeof(*host_id), SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to bind host_id parameter to select node instance information");
+ goto failed;
+ }
+ rc = sqlite3_step_monitored(res);
+
+failed:
+ if (unlikely(sqlite3_finalize(res) != SQLITE_OK))
+ error_report("Failed to finalize the prepared statement when checking host existence");
+
+ return (rc == SQLITE_ROW);
+}
+
+// OPCODE: ACLK_DATABASE_DELETE_HOST
+void sql_delete_aclk_table_list(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd)
+{
+ UNUSED(wc);
+ char uuid_str[GUID_LEN + 1];
+ char host_str[GUID_LEN + 1];
+
+ int rc;
+ uuid_t host_uuid;
+ char *host_guid = (char *)cmd.data;
+
+ if (unlikely(!host_guid))
+ return;
+
+ rc = uuid_parse(host_guid, host_uuid);
+ freez(host_guid);
+ if (rc)
+ return;
+
+ uuid_unparse_lower(host_uuid, host_str);
+ uuid_unparse_lower_fix(&host_uuid, uuid_str);
+
+ debug(D_ACLK_SYNC, "Checking if I should delete aclk tables for node %s", host_str);
+
+ if (is_host_available(&host_uuid)) {
+ debug(D_ACLK_SYNC, "Host %s exists, not deleting aclk sync tables", host_str);
+ return;
+ }
+
+ debug(D_ACLK_SYNC, "Host %s does NOT exist, can delete aclk sync tables", host_str);
+
+ sqlite3_stmt *res = NULL;
+ BUFFER *sql = buffer_create(ACLK_SYNC_QUERY_SIZE, &netdata_buffers_statistics.buffers_sqlite);
+
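+ // Each selected row is a complete "drop <type> IF EXISTS <name>;" statement; the rows are
+ // concatenated into the flushed buffer below and executed with a single db_execute() call.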
+ buffer_sprintf(sql,"SELECT 'drop '||type||' IF EXISTS '||name||';' FROM sqlite_schema " \
+ "WHERE name LIKE 'aclk_%%_%s' AND type IN ('table', 'trigger', 'index');", uuid_str);
+
+ rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0);
+ if (rc != SQLITE_OK) {
+ error_report("Failed to prepare statement to clean up aclk tables");
+ goto fail;
+ }
+ buffer_flush(sql);
+
+ while (sqlite3_step_monitored(res) == SQLITE_ROW)
+ buffer_strcat(sql, (char *) sqlite3_column_text(res, 0));
+
+ rc = sqlite3_finalize(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to finalize statement to clean up aclk tables, rc = %d", rc);
+
+ db_execute(buffer_tostring(sql));
+
+fail:
+ buffer_free(sql);
+}
uv_mutex_t aclk_async_lock;
struct aclk_database_worker_config *aclk_thread_head = NULL;
@@ -38,7 +168,6 @@ void aclk_add_worker_thread(struct aclk_database_worker_config *wc)
aclk_thread_head = wc;
}
uv_mutex_unlock(&aclk_async_lock);
- return;
}
void aclk_del_worker_thread(struct aclk_database_worker_config *wc)
@@ -53,7 +182,6 @@ void aclk_del_worker_thread(struct aclk_database_worker_config *wc)
if (*tmp)
*tmp = wc->next;
uv_mutex_unlock(&aclk_async_lock);
- return;
}
int aclk_worker_thread_exists(char *guid)
@@ -199,7 +327,6 @@ void aclk_sync_exit_all()
uv_mutex_unlock(&aclk_async_lock);
}
-#ifdef ENABLE_ACLK
enum {
IDX_HOST_ID,
IDX_HOSTNAME,
@@ -228,6 +355,8 @@ static int create_host_callback(void *data, int argc, char **argv, char **column
uuid_unparse_lower(*(uuid_t *)argv[IDX_HOST_ID], guid);
struct rrdhost_system_info *system_info = callocz(1, sizeof(struct rrdhost_system_info));
+ __atomic_sub_fetch(&netdata_buffers_statistics.rrdhost_allocations_size, sizeof(struct rrdhost_system_info), __ATOMIC_RELAXED);
+
system_info->hops = str2i((const char *) argv[IDX_HOPS]);
sql_build_host_system_info((uuid_t *)argv[IDX_HOST_ID], system_info);
@@ -268,9 +397,9 @@ static int create_host_callback(void *data, int argc, char **argv, char **column
#endif
return 0;
}
-#endif
-int aclk_start_sync_thread(void *data, int argc, char **argv, char **column)
+#ifdef ENABLE_ACLK
+static int aclk_start_sync_thread(void *data, int argc, char **argv, char **column)
{
char uuid_str[GUID_LEN + 1];
UNUSED(data);
@@ -286,10 +415,9 @@ int aclk_start_sync_thread(void *data, int argc, char **argv, char **column)
sql_create_aclk_table(host, (uuid_t *) argv[0], (uuid_t *) argv[1]);
return 0;
}
-
+#endif
void sql_aclk_sync_init(void)
{
-#ifdef ENABLE_ACLK
char *err_msg = NULL;
int rc;
@@ -301,21 +429,7 @@ void sql_aclk_sync_init(void)
return;
}
- info("SQLite aclk sync initialization");
-
- for (int i = 0; aclk_sync_config[i]; i++) {
- debug(D_ACLK_SYNC, "Executing %s", aclk_sync_config[i]);
- rc = sqlite3_exec_monitored(db_meta, aclk_sync_config[i], 0, 0, &err_msg);
- if (rc != SQLITE_OK) {
- error_report("SQLite error aclk sync initialization setup, rc = %d (%s)", rc, err_msg);
- error_report("SQLite failed statement %s", aclk_sync_config[i]);
- sqlite3_free(err_msg);
- return;
- }
- }
- info("SQLite aclk sync initialization completed");
- fatal_assert(0 == uv_mutex_init(&aclk_async_lock));
-
+ info("Creating archived hosts");
rc = sqlite3_exec_monitored(db_meta, "SELECT host_id, hostname, registry_hostname, update_every, os, "
"timezone, tags, hops, memory_mode, abbrev_timezone, utc_offset, program_name, "
"program_version, entries, health_enabled FROM host WHERE hops >0;",
@@ -325,14 +439,16 @@ void sql_aclk_sync_init(void)
sqlite3_free(err_msg);
}
+#ifdef ENABLE_ACLK
+ fatal_assert(0 == uv_mutex_init(&aclk_async_lock));
rc = sqlite3_exec_monitored(db_meta, "SELECT ni.host_id, ni.node_id FROM host h, node_instance ni WHERE "
"h.host_id = ni.host_id AND ni.node_id IS NOT NULL;", aclk_start_sync_thread, NULL, &err_msg);
if (rc != SQLITE_OK) {
error_report("SQLite error when starting ACLK sync threads, rc = %d (%s)", rc, err_msg);
sqlite3_free(err_msg);
}
+ info("ACLK sync initialization completed");
#endif
- return;
}
static void async_cb(uv_async_t *handle)
@@ -374,10 +490,9 @@ static void timer_cb(uv_timer_t* handle)
#endif
}
-#define MAX_CMD_BATCH_SIZE (256)
-
-void aclk_database_worker(void *arg)
+static void aclk_database_worker(void *arg)
{
+ service_register(SERVICE_THREAD_TYPE_EVENT_LOOP, NULL, NULL, NULL, true);
worker_register("ACLKSYNC");
worker_register_job_name(ACLK_DATABASE_NOOP, "noop");
worker_register_job_name(ACLK_DATABASE_ORPHAN_HOST, "node orphan");
@@ -398,15 +513,12 @@ void aclk_database_worker(void *arg)
enum aclk_database_opcode opcode;
uv_timer_t timer_req;
struct aclk_database_cmd cmd;
- unsigned cmd_batch_size;
-
- //aclk_database_init_cmd_queue(wc);
char threadname[NETDATA_THREAD_NAME_MAX+1];
if (wc->host)
- snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "AS_%s", rrdhost_hostname(wc->host));
+ snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "ACLK[%s]", rrdhost_hostname(wc->host));
else {
- snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "AS_%s", wc->uuid_str);
+ snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "ACLK[%s]", wc->uuid_str);
threadname[11] = '\0';
}
uv_thread_set_name_np(wc->thread, threadname);
@@ -449,17 +561,13 @@ void aclk_database_worker(void *arg)
uv_run(loop, UV_RUN_DEFAULT);
/* wait for commands */
- cmd_batch_size = 0;
do {
- if (unlikely(cmd_batch_size >= MAX_CMD_BATCH_SIZE))
- break;
cmd = aclk_database_deq_cmd(wc);
if (netdata_exit)
break;
opcode = cmd.opcode;
- ++cmd_batch_size;
if(likely(opcode != ACLK_DATABASE_NOOP))
worker_is_busy(opcode);
@@ -535,7 +643,7 @@ void aclk_database_worker(void *arg)
wc->host = rrdhost_find_by_guid(wc->host_guid);
if (wc->host) {
info("HOST %s (%s) detected as active", rrdhost_hostname(wc->host), wc->host_guid);
- snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "AS_%s", rrdhost_hostname(wc->host));
+ snprintfz(threadname, NETDATA_THREAD_NAME_MAX, "ACLK[%s]", rrdhost_hostname(wc->host));
uv_thread_set_name_np(wc->thread, threadname);
wc->host->dbsync_worker = wc;
if (unlikely(!wc->hostname))
@@ -584,10 +692,8 @@ void aclk_database_worker(void *arg)
info("Shutting down ACLK sync event loop complete for host %s", wc->host_guid);
/* TODO: don't let the API block by waiting to enqueue commands */
uv_cond_destroy(&wc->cmd_cond);
-/* uv_mutex_destroy(&wc->cmd_mutex); */
- //fatal_assert(0 == uv_loop_close(loop));
- int rc;
+ int rc;
do {
rc = uv_loop_close(loop);
} while (rc != UV_EBUSY);
@@ -628,7 +734,7 @@ void sql_create_aclk_table(RRDHOST *host, uuid_t *host_uuid, uuid_t *node_id)
uuid_unparse_lower(*host_uuid, host_guid);
- BUFFER *sql = buffer_create(ACLK_SYNC_QUERY_SIZE);
+ BUFFER *sql = buffer_create(ACLK_SYNC_QUERY_SIZE, &netdata_buffers_statistics.buffers_sqlite);
buffer_sprintf(sql, TABLE_ACLK_ALERT, uuid_str);
db_execute(buffer_tostring(sql));
@@ -648,6 +754,10 @@ void sql_create_aclk_table(RRDHOST *host, uuid_t *host_uuid, uuid_t *node_id)
if (likely(host)) {
host->dbsync_worker = (void *)wc;
wc->hostname = strdupz(rrdhost_hostname(host));
+ if (node_id && !host->node_id) {
+ host->node_id = mallocz(sizeof(*host->node_id));
+ uuid_copy(*host->node_id, *node_id);
+ }
}
else
wc->hostname = get_hostname_by_node_id(wc->node_id);
@@ -663,142 +773,4 @@ void sql_create_aclk_table(RRDHOST *host, uuid_t *host_uuid, uuid_t *node_id)
UNUSED(host_uuid);
UNUSED(node_id);
#endif
- return;
-}
-
-void sql_maint_aclk_sync_database(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd)
-{
- UNUSED(cmd);
-
- debug(D_ACLK, "Checking database for %s", wc->host_guid);
-
- BUFFER *sql = buffer_create(ACLK_SYNC_QUERY_SIZE);
-
- buffer_sprintf(sql,"DELETE FROM aclk_alert_%s WHERE date_submitted IS NOT NULL AND "
- "CAST(date_cloud_ack AS INT) < unixepoch()-%d;", wc->uuid_str, ACLK_DELETE_ACK_ALERTS_INTERNAL);
- db_execute(buffer_tostring(sql));
-
- buffer_free(sql);
- return;
-}
-
-#define SQL_SELECT_HOST_BY_UUID "SELECT host_id FROM host WHERE host_id = @host_id;"
-
-static int is_host_available(uuid_t *host_id)
-{
- sqlite3_stmt *res = NULL;
- int rc;
-
- if (unlikely(!db_meta)) {
- if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE)
- error_report("Database has not been initialized");
- return 1;
- }
-
- rc = sqlite3_prepare_v2(db_meta, SQL_SELECT_HOST_BY_UUID, -1, &res, 0);
- if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to prepare statement to select node instance information for a node");
- return 1;
- }
-
- rc = sqlite3_bind_blob(res, 1, host_id, sizeof(*host_id), SQLITE_STATIC);
- if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to bind host_id parameter to select node instance information");
- goto failed;
- }
- rc = sqlite3_step_monitored(res);
-
- failed:
- if (unlikely(sqlite3_finalize(res) != SQLITE_OK))
- error_report("Failed to finalize the prepared statement when checking host existence");
-
- return (rc == SQLITE_ROW);
-}
-
-// OPCODE: ACLK_DATABASE_DELETE_HOST
-void sql_delete_aclk_table_list(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd)
-{
- UNUSED(wc);
- char uuid_str[GUID_LEN + 1];
- char host_str[GUID_LEN + 1];
-
- int rc;
- uuid_t host_uuid;
- char *host_guid = (char *)cmd.data;
-
- if (unlikely(!host_guid))
- return;
-
- rc = uuid_parse(host_guid, host_uuid);
- freez(host_guid);
- if (rc)
- return;
-
- uuid_unparse_lower(host_uuid, host_str);
- uuid_unparse_lower_fix(&host_uuid, uuid_str);
-
- debug(D_ACLK_SYNC, "Checking if I should delete aclk tables for node %s", host_str);
-
- if (is_host_available(&host_uuid)) {
- debug(D_ACLK_SYNC, "Host %s exists, not deleting aclk sync tables", host_str);
- return;
- }
-
- debug(D_ACLK_SYNC, "Host %s does NOT exist, can delete aclk sync tables", host_str);
-
- sqlite3_stmt *res = NULL;
- BUFFER *sql = buffer_create(ACLK_SYNC_QUERY_SIZE);
-
- buffer_sprintf(sql,"SELECT 'drop '||type||' IF EXISTS '||name||';' FROM sqlite_schema " \
- "WHERE name LIKE 'aclk_%%_%s' AND type IN ('table', 'trigger', 'index');", uuid_str);
-
- rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0);
- if (rc != SQLITE_OK) {
- error_report("Failed to prepare statement to clean up aclk tables");
- goto fail;
- }
- buffer_flush(sql);
-
- while (sqlite3_step_monitored(res) == SQLITE_ROW)
- buffer_strcat(sql, (char *) sqlite3_column_text(res, 0));
-
- rc = sqlite3_finalize(res);
- if (unlikely(rc != SQLITE_OK))
- error_report("Failed to finalize statement to clean up aclk tables, rc = %d", rc);
-
- db_execute(buffer_tostring(sql));
-
-fail:
- buffer_free(sql);
- return;
-}
-
-static int sql_check_aclk_table(void *data, int argc, char **argv, char **column)
-{
- struct aclk_database_worker_config *wc = data;
- UNUSED(argc);
- UNUSED(column);
-
- debug(D_ACLK_SYNC,"Scheduling aclk sync table check for node %s", (char *) argv[0]);
- struct aclk_database_cmd cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = ACLK_DATABASE_DELETE_HOST;
- cmd.data = strdupz((char *) argv[0]);
- aclk_database_enq_cmd_noblock(wc, &cmd);
- return 0;
-}
-
-#define SQL_SELECT_ACLK_ACTIVE_LIST "SELECT REPLACE(SUBSTR(name,19),'_','-') FROM sqlite_schema " \
- "WHERE name LIKE 'aclk_chart_latest_%' AND type IN ('table');"
-
-void sql_check_aclk_table_list(struct aclk_database_worker_config *wc)
-{
- char *err_msg = NULL;
- debug(D_ACLK_SYNC,"Cleaning tables for nodes that do not exist");
- int rc = sqlite3_exec_monitored(db_meta, SQL_SELECT_ACLK_ACTIVE_LIST, sql_check_aclk_table, (void *) wc, &err_msg);
- if (rc != SQLITE_OK) {
- error_report("Query failed when trying to check for obsolete ACLK sync tables, %s", err_msg);
- sqlite3_free(err_msg);
- }
- return;
-}
+}
\ No newline at end of file
diff --git a/database/sqlite/sqlite_aclk.h b/database/sqlite/sqlite_aclk.h
index 06d5d0270..208177e45 100644
--- a/database/sqlite/sqlite_aclk.h
+++ b/database/sqlite/sqlite_aclk.h
@@ -99,7 +99,7 @@ struct aclk_database_cmd {
struct aclk_completion *completion;
};
-#define ACLK_DATABASE_CMD_Q_MAX_SIZE (16384)
+#define ACLK_DATABASE_CMD_Q_MAX_SIZE (1024)
struct aclk_database_cmdqueue {
unsigned head, tail;
@@ -166,9 +166,6 @@ int aclk_database_enq_cmd_noblock(struct aclk_database_worker_config *wc, struct
void aclk_database_enq_cmd(struct aclk_database_worker_config *wc, struct aclk_database_cmd *cmd);
void sql_create_aclk_table(RRDHOST *host, uuid_t *host_uuid, uuid_t *node_id);
void sql_aclk_sync_init(void);
-void sql_check_aclk_table_list(struct aclk_database_worker_config *wc);
-void sql_delete_aclk_table_list(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd);
-void sql_maint_aclk_sync_database(struct aclk_database_worker_config *wc, struct aclk_database_cmd cmd);
int claimed();
void aclk_sync_exit_all();
struct aclk_database_worker_config *find_inactive_wc_by_node_id(char *node_id);
diff --git a/database/sqlite/sqlite_aclk_alert.c b/database/sqlite/sqlite_aclk_alert.c
index 47663a8d1..ce284ebc3 100644
--- a/database/sqlite/sqlite_aclk_alert.c
+++ b/database/sqlite/sqlite_aclk_alert.c
@@ -43,6 +43,34 @@ void update_filtered(ALARM_ENTRY *ae, uint32_t unique_id, char *uuid_str) {
ae->flags |= HEALTH_ENTRY_FLAG_ACLK_QUEUED;
}
+static inline bool is_event_from_alert_variable_config(uint32_t unique_id, char *uuid_str) {
+ sqlite3_stmt *res = NULL;
+ int rc = 0;
+ bool ret = false;
+
+ char sql[ACLK_SYNC_QUERY_SIZE];
+ snprintfz(sql,ACLK_SYNC_QUERY_SIZE-1, "select hl.unique_id from health_log_%s hl, alert_hash ah where hl.unique_id = %u " \
+ "and hl.config_hash_id = ah.hash_id " \
+ "and ah.warn is null and ah.crit is null;", uuid_str, unique_id);
+
+ rc = sqlite3_prepare_v2(db_meta, sql, -1, &res, 0);
+ if (rc != SQLITE_OK) {
+ error_report("Failed to prepare statement when trying to check for alert variables.");
+ return false;
+ }
+
+ rc = sqlite3_step_monitored(res);
+ if (likely(rc == SQLITE_ROW)) {
+ ret = true;
+ }
+
+ rc = sqlite3_finalize(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to finalize statement when trying to check for alert variables, rc = %d", rc);
+
+ return ret;
+}
+
#define MAX_REMOVED_PERIOD 86400
//decide if some events should be sent or not
int should_send_to_cloud(RRDHOST *host, ALARM_ENTRY *ae)
@@ -59,6 +87,9 @@ int should_send_to_cloud(RRDHOST *host, ALARM_ENTRY *ae)
if (unlikely(uuid_is_null(ae->config_hash_id)))
return 0;
+ if (is_event_from_alert_variable_config(ae->unique_id, uuid_str))
+ return 0;
+
char sql[ACLK_SYNC_QUERY_SIZE];
uuid_t config_hash_id;
RRDCALC_STATUS status;
@@ -133,6 +164,9 @@ done:
// and handle both cases
int sql_queue_alarm_to_aclk(RRDHOST *host, ALARM_ENTRY *ae, int skip_filter)
{
+ if(!service_running(SERVICE_ACLK))
+ return 0;
+
if (!claimed())
return 0;
@@ -153,7 +187,7 @@ int sql_queue_alarm_to_aclk(RRDHOST *host, ALARM_ENTRY *ae, int skip_filter)
char uuid_str[GUID_LEN + 1];
uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
- BUFFER *sql = buffer_create(1024);
+ BUFFER *sql = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite);
buffer_sprintf(
sql,
@@ -242,7 +276,7 @@ void aclk_push_alert_event(struct aclk_database_worker_config *wc, struct aclk_d
return;
}
- BUFFER *sql = buffer_create(1024);
+ BUFFER *sql = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite);
if (wc->alerts_start_seq_id != 0) {
buffer_sprintf(
@@ -267,20 +301,38 @@ void aclk_push_alert_event(struct aclk_database_worker_config *wc, struct aclk_d
sqlite3_stmt *res = NULL;
- buffer_sprintf(sql, "select aa.sequence_id, hl.unique_id, hl.alarm_id, hl.config_hash_id, hl.updated_by_id, hl.when_key, \
- hl.duration, hl.non_clear_duration, hl.flags, hl.exec_run_timestamp, hl.delay_up_to_timestamp, hl.name, \
- hl.chart, hl.family, hl.exec, hl.recipient, hl.source, hl.units, hl.info, hl.exec_code, hl.new_status, \
- hl.old_status, hl.delay, hl.new_value, hl.old_value, hl.last_repeat, hl.chart_context \
- from health_log_%s hl, aclk_alert_%s aa \
- where hl.unique_id = aa.alert_unique_id and aa.date_submitted is null \
- order by aa.sequence_id asc limit %d;", wc->uuid_str, wc->uuid_str, limit);
+ buffer_sprintf(sql, "select aa.sequence_id, hl.unique_id, hl.alarm_id, hl.config_hash_id, hl.updated_by_id, hl.when_key, " \
+ " hl.duration, hl.non_clear_duration, hl.flags, hl.exec_run_timestamp, hl.delay_up_to_timestamp, hl.name, " \
+ " hl.chart, hl.family, hl.exec, hl.recipient, hl.source, hl.units, hl.info, hl.exec_code, hl.new_status, " \
+ " hl.old_status, hl.delay, hl.new_value, hl.old_value, hl.last_repeat, hl.chart_context " \
+ " from health_log_%s hl, aclk_alert_%s aa " \
+ " where hl.unique_id = aa.alert_unique_id and aa.date_submitted is null " \
+ " order by aa.sequence_id asc limit %d;", wc->uuid_str, wc->uuid_str, limit);
rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0);
if (rc != SQLITE_OK) {
- error_report("Failed to prepare statement when trying to send an alert update via ACLK");
- buffer_free(sql);
- freez(claim_id);
- return;
+
+ // Try to create tables
+ if (wc->host)
+ sql_create_health_log_table(wc->host);
+
+ BUFFER *sql_fix = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite);
+ buffer_sprintf(sql_fix, TABLE_ACLK_ALERT, wc->uuid_str);
+ db_execute(buffer_tostring(sql_fix));
+ buffer_flush(sql_fix);
+ buffer_sprintf(sql_fix, INDEX_ACLK_ALERT, wc->uuid_str, wc->uuid_str);
+ db_execute(buffer_tostring(sql_fix));
+ buffer_free(sql_fix);
+
+ // Try again
+ rc = sqlite3_prepare_v2(db_meta, buffer_tostring(sql), -1, &res, 0);
+ if (rc != SQLITE_OK) {
+ error_report("Failed to prepare statement when trying to send an alert update via ACLK");
+
+ buffer_free(sql);
+ freez(claim_id);
+ return;
+ }
}
char uuid_str[GUID_LEN + 1];
@@ -311,7 +363,7 @@ void aclk_push_alert_event(struct aclk_database_worker_config *wc, struct aclk_d
alarm_log.utc_offset = wc->host->utc_offset;
alarm_log.timezone = strdupz(rrdhost_abbrev_timezone(wc->host));
alarm_log.exec_path = sqlite3_column_bytes(res, 14) > 0 ? strdupz((char *)sqlite3_column_text(res, 14)) :
- strdupz((char *)string2str(wc->host->health_default_exec));
+ strdupz((char *)string2str(wc->host->health.health_default_exec));
alarm_log.conf_source = strdupz((char *)sqlite3_column_text(res, 16));
char *edit_command = sqlite3_column_bytes(res, 16) > 0 ?
@@ -407,7 +459,7 @@ void sql_queue_existing_alerts_to_aclk(RRDHOST *host)
{
char uuid_str[GUID_LEN + 1];
uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
- BUFFER *sql = buffer_create(1024);
+ BUFFER *sql = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite);
buffer_sprintf(sql,"delete from aclk_alert_%s; " \
"insert into aclk_alert_%s (alert_unique_id, date_created, filtered_alert_unique_id) " \
@@ -484,7 +536,7 @@ void aclk_push_alarm_health_log(struct aclk_database_worker_config *wc, struct a
struct timeval first_timestamp;
struct timeval last_timestamp;
- BUFFER *sql = buffer_create(1024);
+ BUFFER *sql = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite);
sqlite3_stmt *res = NULL;
@@ -528,7 +580,7 @@ void aclk_push_alarm_health_log(struct aclk_database_worker_config *wc, struct a
alarm_log.node_id = wc->node_id;
alarm_log.log_entries = log_entries;
alarm_log.status = wc->alert_updates == 0 ? 2 : 1;
- alarm_log.enabled = (int)host->health_enabled;
+ alarm_log.enabled = (int)host->health.health_enabled;
wc->alert_sequence_id = last_sequence;
@@ -541,6 +593,8 @@ void aclk_push_alarm_health_log(struct aclk_database_worker_config *wc, struct a
freez(claim_id);
buffer_free(sql);
+
+ aclk_alert_reloaded = 1;
#endif
return;
@@ -651,7 +705,7 @@ int aclk_push_alert_config_event(struct aclk_database_worker_config *wc, struct
alarm_config.p_db_lookup_dimensions = sqlite3_column_bytes(res, 27) > 0 ? strdupz((char *)sqlite3_column_text(res, 27)) : NULL;
alarm_config.p_db_lookup_method = sqlite3_column_bytes(res, 28) > 0 ? strdupz((char *)sqlite3_column_text(res, 28)) : NULL;
- BUFFER *tmp_buf = buffer_create(1024);
+ BUFFER *tmp_buf = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite);
buffer_data_options2string(tmp_buf, sqlite3_column_int(res, 29));
alarm_config.p_db_lookup_options = strdupz((char *)buffer_tostring(tmp_buf));
buffer_free(tmp_buf);
@@ -706,7 +760,7 @@ void aclk_start_alert_streaming(char *node_id, uint64_t batch_id, uint64_t start
(struct aclk_database_worker_config *)host->dbsync_worker :
(struct aclk_database_worker_config *)find_inactive_wc_by_node_id(node_id);
- if (unlikely(!host->health_enabled)) {
+ if (unlikely(!host->health.health_enabled)) {
log_access("ACLK STA [%s (N/A)]: Ignoring request to stream alert state changes, health is disabled.", node_id);
return;
}
@@ -735,7 +789,7 @@ void sql_process_queue_removed_alerts_to_aclk(struct aclk_database_worker_config
{
UNUSED(cmd);
- BUFFER *sql = buffer_create(1024);
+ BUFFER *sql = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite);
buffer_sprintf(sql,"insert into aclk_alert_%s (alert_unique_id, date_created, filtered_alert_unique_id) " \
"select unique_id alert_unique_id, unixepoch(), unique_id alert_unique_id from health_log_%s " \
@@ -813,7 +867,7 @@ void aclk_process_send_alarm_snapshot(char *node_id, char *claim_id, uint64_t sn
void aclk_mark_alert_cloud_ack(char *uuid_str, uint64_t alerts_ack_sequence_id)
{
- BUFFER *sql = buffer_create(1024);
+ BUFFER *sql = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite);
if (alerts_ack_sequence_id != 0) {
buffer_sprintf(
@@ -846,7 +900,7 @@ void health_alarm_entry2proto_nolock(struct alarm_log_entry *alarm_log, ALARM_EN
alarm_log->utc_offset = host->utc_offset;
alarm_log->timezone = strdupz(rrdhost_abbrev_timezone(host));
- alarm_log->exec_path = ae->exec ? strdupz(ae_exec(ae)) : strdupz((char *)string2str(host->health_default_exec));
+ alarm_log->exec_path = ae->exec ? strdupz(ae_exec(ae)) : strdupz((char *)string2str(host->health.health_default_exec));
alarm_log->conf_source = ae->source ? strdupz(ae_source(ae)) : strdupz((char *)"");
alarm_log->command = strdupz((char *)edit_command);
@@ -1022,7 +1076,7 @@ void sql_aclk_alert_clean_dead_entries(RRDHOST *host)
char uuid_str[GUID_LEN + 1];
uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
- BUFFER *sql = buffer_create(1024);
+ BUFFER *sql = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite);
buffer_sprintf(sql,"delete from aclk_alert_%s where filtered_alert_unique_id not in "
" (select unique_id from health_log_%s); ", uuid_str, uuid_str);
@@ -1048,7 +1102,7 @@ int get_proto_alert_status(RRDHOST *host, struct proto_alert_status *proto_alert
proto_alert_status->alert_updates = wc->alert_updates;
proto_alert_status->alerts_batch_id = wc->alerts_batch_id;
- BUFFER *sql = buffer_create(1024);
+ BUFFER *sql = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite);
sqlite3_stmt *res = NULL;
buffer_sprintf(sql, "SELECT MIN(sequence_id), MAX(sequence_id), " \
diff --git a/database/sqlite/sqlite_context.c b/database/sqlite/sqlite_context.c
index deca84584..892292cc7 100644
--- a/database/sqlite/sqlite_context.c
+++ b/database/sqlite/sqlite_context.c
@@ -283,8 +283,8 @@ void ctx_get_context_list(uuid_t *host_uuid, void (*dict_cb)(VERSIONED_CONTEXT_D
context_data.chart_type = (char *) sqlite3_column_text(res, 3);
context_data.units = (char *) sqlite3_column_text(res, 4);
context_data.priority = sqlite3_column_int64(res, 5);
- context_data.first_time_t = sqlite3_column_int64(res, 6);
- context_data.last_time_t = sqlite3_column_int64(res, 7);
+ context_data.first_time_s = sqlite3_column_int64(res, 6);
+ context_data.last_time_s = sqlite3_column_int64(res, 7);
context_data.deleted = sqlite3_column_int(res, 8);
context_data.family = (char *) sqlite3_column_text(res, 9);
dict_cb(&context_data, data);
@@ -360,13 +360,13 @@ int ctx_store_context(uuid_t *host_uuid, VERSIONED_CONTEXT_DATA *context_data)
goto skip_store;
}
- rc = sqlite3_bind_int64(res, 8, (time_t) context_data->first_time_t);
+ rc = sqlite3_bind_int64(res, 8, (time_t) context_data->first_time_s);
if (unlikely(rc != SQLITE_OK)) {
error_report("Failed to bind first_time_t to store context details");
goto skip_store;
}
- rc = sqlite3_bind_int64(res, 9, (time_t) context_data->last_time_t);
+ rc = sqlite3_bind_int64(res, 9, (time_t) context_data->last_time_s);
if (unlikely(rc != SQLITE_OK)) {
error_report("Failed to bind last_time_t to store context details");
goto skip_store;
@@ -478,8 +478,8 @@ static void dict_ctx_get_context_list_cb(VERSIONED_CONTEXT_DATA *context_data, v
context_data->chart_type,
context_data->units,
context_data->priority,
- context_data->first_time_t,
- context_data->last_time_t,
+ context_data->first_time_s,
+ context_data->last_time_s,
context_data->deleted,
context_data->family);
}
@@ -504,8 +504,8 @@ int ctx_unittest(void)
context_data.family = strdupz("TestContextFamily");
context_data.priority = 50000;
context_data.deleted = 0;
- context_data.first_time_t = 1657781000;
- context_data.last_time_t = 1657781100;
+ context_data.first_time_s = 1657781000;
+ context_data.last_time_s = 1657781100;
context_data.version = now_realtime_usec();
if (likely(!ctx_store_context(&host_uuid, &context_data)))
@@ -519,8 +519,8 @@ int ctx_unittest(void)
info("Entry %s not inserted", context_data.id);
// This will change end time
- context_data.first_time_t = 1657781000;
- context_data.last_time_t = 1657782001;
+ context_data.first_time_s = 1657781000;
+ context_data.last_time_s = 1657782001;
if (likely(!ctx_update_context(&host_uuid, &context_data)))
info("Entry %s updated", context_data.id);
else
@@ -530,8 +530,8 @@ int ctx_unittest(void)
info("List context end after insert");
// This will change start time
- context_data.first_time_t = 1657782000;
- context_data.last_time_t = 1657782001;
+ context_data.first_time_s = 1657782000;
+ context_data.last_time_s = 1657782001;
if (likely(!ctx_update_context(&host_uuid, &context_data)))
info("Entry %s updated", context_data.id);
else
diff --git a/database/sqlite/sqlite_context.h b/database/sqlite/sqlite_context.h
index 2e52b9bf8..2586916ea 100644
--- a/database/sqlite/sqlite_context.h
+++ b/database/sqlite/sqlite_context.h
@@ -45,8 +45,8 @@ typedef struct versioned_context_data {
uint64_t priority; // the chart priority of the context
- uint64_t first_time_t; // the first entry in the database, in seconds
- uint64_t last_time_t; // the last point in the database, in seconds
+ uint64_t first_time_s; // the first entry in the database, in seconds
+ uint64_t last_time_s; // the last point in the database, in seconds
bool deleted; // true when this is deleted
diff --git a/database/sqlite/sqlite_functions.c b/database/sqlite/sqlite_functions.c
index ce5487fbf..1d03cfc2a 100644
--- a/database/sqlite/sqlite_functions.c
+++ b/database/sqlite/sqlite_functions.c
@@ -22,9 +22,8 @@ const char *database_config[] = {
"multiplier int, divisor int , algorithm int, options text);",
"CREATE TABLE IF NOT EXISTS metadata_migration(filename text, file_size, date_created int);",
- "CREATE INDEX IF NOT EXISTS ind_d1 on dimension (chart_id, id, name);",
- "CREATE INDEX IF NOT EXISTS ind_c1 on chart (host_id, id, type, name);",
- "CREATE INDEX IF NOT EXISTS ind_c2 on chart (host_id, context);",
+ "CREATE INDEX IF NOT EXISTS ind_d2 on dimension (chart_id);",
+ "CREATE INDEX IF NOT EXISTS ind_c3 on chart (host_id);",
"CREATE TABLE IF NOT EXISTS chart_label(chart_id blob, source_type int, label_key text, "
"label_value text, date_created int, PRIMARY KEY (chart_id, label_key));",
"CREATE TABLE IF NOT EXISTS node_instance (host_id blob PRIMARY KEY, claim_id, node_id, date_created);",
@@ -55,6 +54,9 @@ const char *database_cleanup[] = {
"DELETE FROM host_info WHERE host_id NOT IN (SELECT host_id FROM host);",
"DELETE FROM host_label WHERE host_id NOT IN (SELECT host_id FROM host);",
"DROP TRIGGER IF EXISTS tr_dim_del;",
+ "DROP INDEX IF EXISTS ind_d1;",
+ "DROP INDEX IF EXISTS ind_c1;",
+ "DROP INDEX IF EXISTS ind_c2;",
NULL
};
@@ -504,211 +506,6 @@ skip:
return result;
}
-
-
-//
-// Support for archived charts (TO BE REMOVED)
-//
-#define SELECT_DIMENSION "select d.id, d.name from dimension d where d.chart_id = @chart_uuid;"
-
-static void sql_rrdim2json(sqlite3_stmt *res_dim, uuid_t *chart_uuid, BUFFER *wb, size_t *dimensions_count)
-{
- int rc;
-
- rc = sqlite3_bind_blob(res_dim, 1, chart_uuid, sizeof(*chart_uuid), SQLITE_STATIC);
- if (rc != SQLITE_OK)
- return;
-
- int dimensions = 0;
- buffer_sprintf(wb, "\t\t\t\"dimensions\": {\n");
-
- while (sqlite3_step_monitored(res_dim) == SQLITE_ROW) {
- if (dimensions)
- buffer_strcat(wb, ",\n\t\t\t\t\"");
- else
- buffer_strcat(wb, "\t\t\t\t\"");
- buffer_strcat_jsonescape(wb, (const char *) sqlite3_column_text(res_dim, 0));
- buffer_strcat(wb, "\": { \"name\": \"");
- buffer_strcat_jsonescape(wb, (const char *) sqlite3_column_text(res_dim, 1));
- buffer_strcat(wb, "\" }");
- dimensions++;
- }
- *dimensions_count += dimensions;
- buffer_sprintf(wb, "\n\t\t\t}");
-}
-
-#define SELECT_CHART "select chart_id, id, name, type, family, context, title, priority, plugin, " \
- "module, unit, chart_type, update_every from chart " \
- "where host_id = @host_uuid and chart_id not in (select chart_id from chart_active) order by chart_id asc;"
-
-void sql_rrdset2json(RRDHOST *host, BUFFER *wb)
-{
- // time_t first_entry_t = 0; //= rrdset_first_entry_t(st);
- // time_t last_entry_t = 0; //rrdset_last_entry_t(st);
- static char *custom_dashboard_info_js_filename = NULL;
- int rc;
-
- sqlite3_stmt *res_chart = NULL;
- sqlite3_stmt *res_dim = NULL;
- time_t now = now_realtime_sec();
-
- rc = sqlite3_prepare_v2(db_meta, SELECT_CHART, -1, &res_chart, 0);
- if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to prepare statement to fetch host archived charts");
- return;
- }
-
- rc = sqlite3_bind_blob(res_chart, 1, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC);
- if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to bind host parameter to fetch archived charts");
- goto failed;
- }
-
- rc = sqlite3_prepare_v2(db_meta, SELECT_DIMENSION, -1, &res_dim, 0);
- if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to prepare statement to fetch chart archived dimensions");
- goto failed;
- };
-
- if(unlikely(!custom_dashboard_info_js_filename))
- custom_dashboard_info_js_filename = config_get(CONFIG_SECTION_WEB, "custom dashboard_info.js", "");
-
- buffer_sprintf(wb, "{\n"
- "\t\"hostname\": \"%s\""
- ",\n\t\"version\": \"%s\""
- ",\n\t\"release_channel\": \"%s\""
- ",\n\t\"os\": \"%s\""
- ",\n\t\"timezone\": \"%s\""
- ",\n\t\"update_every\": %d"
- ",\n\t\"history\": %ld"
- ",\n\t\"memory_mode\": \"%s\""
- ",\n\t\"custom_info\": \"%s\""
- ",\n\t\"charts\": {"
- , rrdhost_hostname(host)
- , rrdhost_program_version(host)
- , get_release_channel()
- , rrdhost_os(host)
- , rrdhost_timezone(host)
- , host->rrd_update_every
- , host->rrd_history_entries
- , rrd_memory_mode_name(host->rrd_memory_mode)
- , custom_dashboard_info_js_filename
- );
-
- size_t c = 0;
- size_t dimensions = 0;
-
- while (sqlite3_step_monitored(res_chart) == SQLITE_ROW) {
- char id[512];
- sprintf(id, "%s.%s", sqlite3_column_text(res_chart, 3), sqlite3_column_text(res_chart, 1));
- RRDSET *st = rrdset_find(host, id);
- if (st && !rrdset_flag_check(st, RRDSET_FLAG_ARCHIVED))
- continue;
-
- if (c)
- buffer_strcat(wb, ",\n\t\t\"");
- else
- buffer_strcat(wb, "\n\t\t\"");
- c++;
-
- buffer_strcat(wb, id);
- buffer_strcat(wb, "\": ");
-
- buffer_sprintf(
- wb,
- "\t\t{\n"
- "\t\t\t\"id\": \"%s\",\n"
- "\t\t\t\"name\": \"%s\",\n"
- "\t\t\t\"type\": \"%s\",\n"
- "\t\t\t\"family\": \"%s\",\n"
- "\t\t\t\"context\": \"%s\",\n"
- "\t\t\t\"title\": \"%s (%s)\",\n"
- "\t\t\t\"priority\": %ld,\n"
- "\t\t\t\"plugin\": \"%s\",\n"
- "\t\t\t\"module\": \"%s\",\n"
- "\t\t\t\"enabled\": %s,\n"
- "\t\t\t\"units\": \"%s\",\n"
- "\t\t\t\"data_url\": \"/api/v1/data?chart=%s\",\n"
- "\t\t\t\"chart_type\": \"%s\",\n",
- id //sqlite3_column_text(res_chart, 1)
- ,
- id // sqlite3_column_text(res_chart, 2)
- ,
- sqlite3_column_text(res_chart, 3), sqlite3_column_text(res_chart, 4), sqlite3_column_text(res_chart, 5),
- sqlite3_column_text(res_chart, 6), id //sqlite3_column_text(res_chart, 2)
- ,
- (long ) sqlite3_column_int(res_chart, 7),
- (const char *) sqlite3_column_text(res_chart, 8) ? (const char *) sqlite3_column_text(res_chart, 8) : (char *) "",
- (const char *) sqlite3_column_text(res_chart, 9) ? (const char *) sqlite3_column_text(res_chart, 9) : (char *) "", (char *) "false",
- (const char *) sqlite3_column_text(res_chart, 10), id //sqlite3_column_text(res_chart, 2)
- ,
- rrdset_type_name(sqlite3_column_int(res_chart, 11)));
-
- sql_rrdim2json(res_dim, (uuid_t *) sqlite3_column_blob(res_chart, 0), wb, &dimensions);
-
- rc = sqlite3_reset(res_dim);
- if (unlikely(rc != SQLITE_OK))
- error_report("Failed to reset the prepared statement when reading archived chart dimensions");
- buffer_strcat(wb, "\n\t\t}");
- }
-
- buffer_sprintf(wb
- , "\n\t}"
- ",\n\t\"charts_count\": %zu"
- ",\n\t\"dimensions_count\": %zu"
- ",\n\t\"alarms_count\": %zu"
- ",\n\t\"rrd_memory_bytes\": %zu"
- ",\n\t\"hosts_count\": %zu"
- ",\n\t\"hosts\": ["
- , c
- , dimensions
- , (size_t) 0
- , (size_t) 0
- , rrd_hosts_available
- );
-
- if(unlikely(rrd_hosts_available > 1)) {
- rrd_rdlock();
-
- size_t found = 0;
- RRDHOST *h;
- rrdhost_foreach_read(h) {
- if(!rrdhost_should_be_removed(h, host, now) && !rrdhost_flag_check(h, RRDHOST_FLAG_ARCHIVED)) {
- buffer_sprintf(wb
- , "%s\n\t\t{"
- "\n\t\t\t\"hostname\": \"%s\""
- "\n\t\t}"
- , (found > 0) ? "," : ""
- , rrdhost_hostname(h)
- );
-
- found++;
- }
- }
-
- rrd_unlock();
- }
- else {
- buffer_sprintf(wb
- , "\n\t\t{"
- "\n\t\t\t\"hostname\": \"%s\""
- "\n\t\t}"
- , rrdhost_hostname(host)
- );
- }
-
- buffer_sprintf(wb, "\n\t]\n}\n");
-
- rc = sqlite3_finalize(res_dim);
- if (unlikely(rc != SQLITE_OK))
- error_report("Failed to finalize the prepared statement when reading archived chart dimensions");
-
-failed:
- rc = sqlite3_finalize(res_chart);
- if (unlikely(rc != SQLITE_OK))
- error_report("Failed to finalize the prepared statement when reading archived charts");
-}
-
void db_execute(const char *cmd)
{
int rc;
@@ -732,116 +529,6 @@ void db_execute(const char *cmd)
}
}
-#define SELECT_MIGRATED_FILE "select 1 from metadata_migration where filename = @path;"
-
-int file_is_migrated(char *path)
-{
- sqlite3_stmt *res = NULL;
- int rc;
-
- rc = sqlite3_prepare_v2(db_meta, SELECT_MIGRATED_FILE, -1, &res, 0);
- if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to prepare statement to fetch host");
- return 0;
- }
-
- rc = sqlite3_bind_text(res, 1, path, -1, SQLITE_STATIC);
- if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to bind filename parameter to check migration");
- return 0;
- }
-
- rc = sqlite3_step_monitored(res);
-
- if (unlikely(sqlite3_finalize(res) != SQLITE_OK))
- error_report("Failed to finalize the prepared statement when checking if metadata file is migrated");
-
- return (rc == SQLITE_ROW);
-}
-
-#define STORE_MIGRATED_FILE "insert or replace into metadata_migration (filename, file_size, date_created) " \
- "values (@file, @size, unixepoch());"
-
-void add_migrated_file(char *path, uint64_t file_size)
-{
- sqlite3_stmt *res = NULL;
- int rc;
-
- rc = sqlite3_prepare_v2(db_meta, STORE_MIGRATED_FILE, -1, &res, 0);
- if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to prepare statement to fetch host");
- return;
- }
-
- rc = sqlite3_bind_text(res, 1, path, -1, SQLITE_STATIC);
- if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to bind filename parameter to store migration information");
- return;
- }
-
- rc = sqlite3_bind_int64(res, 2, (sqlite_int64) file_size);
- if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to bind size parameter to store migration information");
- return;
- }
-
- rc = execute_insert(res);
- if (unlikely(rc != SQLITE_DONE))
- error_report("Failed to store migrated file, rc = %d", rc);
-
- if (unlikely(sqlite3_finalize(res) != SQLITE_OK))
- error_report("Failed to finalize the prepared statement when checking if metadata file is migrated");
-}
-
-
-
-#define SQL_STORE_CLAIM_ID "insert into node_instance " \
- "(host_id, claim_id, date_created) values (@host_id, @claim_id, unixepoch()) " \
- "on conflict(host_id) do update set claim_id = excluded.claim_id;"
-
-void store_claim_id(uuid_t *host_id, uuid_t *claim_id)
-{
- sqlite3_stmt *res = NULL;
- int rc;
-
- if (unlikely(!db_meta)) {
- if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE)
- error_report("Database has not been initialized");
- return;
- }
-
- rc = sqlite3_prepare_v2(db_meta, SQL_STORE_CLAIM_ID, -1, &res, 0);
- if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to prepare statement store chart labels");
- return;
- }
-
- rc = sqlite3_bind_blob(res, 1, host_id, sizeof(*host_id), SQLITE_STATIC);
- if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to bind host_id parameter to store node instance information");
- goto failed;
- }
-
- if (claim_id)
- rc = sqlite3_bind_blob(res, 2, claim_id, sizeof(*claim_id), SQLITE_STATIC);
- else
- rc = sqlite3_bind_null(res, 2);
- if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to bind claim_id parameter to store node instance information");
- goto failed;
- }
-
- rc = execute_insert(res);
- if (unlikely(rc != SQLITE_DONE))
- error_report("Failed to store node instance information, rc = %d", rc);
-
-failed:
- if (unlikely(sqlite3_finalize(res) != SQLITE_OK))
- error_report("Failed to finalize the prepared statement when storing node instance information");
-
- return;
-}
-
static inline void set_host_node_id(RRDHOST *host, uuid_t *node_id)
{
if (unlikely(!host))
@@ -1268,112 +955,3 @@ int sql_metadata_cache_stats(int op)
netdata_thread_enable_cancelability();
return count;
}
-
-#define SQL_FIND_CHART_UUID \
- "SELECT chart_id FROM chart WHERE host_id = @host AND type=@type AND id=@id AND (name IS NULL OR name=@name) AND chart_id IS NOT NULL;"
-
-#define SQL_FIND_DIMENSION_UUID \
- "SELECT dim_id FROM dimension WHERE chart_id=@chart AND id=@id AND name=@name AND LENGTH(dim_id)=16;"
-
-
-//Do a database lookup to find the UUID of a chart
-//If found store it in store_uuid and return 0
-int sql_find_chart_uuid(RRDHOST *host, RRDSET *st, uuid_t *store_uuid)
-{
- static __thread sqlite3_stmt *res = NULL;
- int rc;
-
- const char *name = string2str(st->parts.name);
-
- if (unlikely(!db_meta) && default_rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE)
- return 1;
-
- if (unlikely(!res)) {
- rc = prepare_statement(db_meta, SQL_FIND_CHART_UUID, &res);
- if (rc != SQLITE_OK) {
- error_report("Failed to prepare statement to lookup chart UUID in the database");
- return 1;
- }
- }
-
- rc = sqlite3_bind_blob(res, 1, &host->host_uuid, sizeof(host->host_uuid), SQLITE_STATIC);
- if (unlikely(rc != SQLITE_OK))
- goto bind_fail;
-
- rc = sqlite3_bind_text(res, 2, string2str(st->parts.type), -1, SQLITE_STATIC);
- if (unlikely(rc != SQLITE_OK))
- goto bind_fail;
-
- rc = sqlite3_bind_text(res, 3, string2str(st->parts.id), -1, SQLITE_STATIC);
- if (unlikely(rc != SQLITE_OK))
- goto bind_fail;
-
- rc = sqlite3_bind_text(res, 4, name && *name ? name : string2str(st->parts.id), -1, SQLITE_STATIC);
- if (unlikely(rc != SQLITE_OK))
- goto bind_fail;
-
- int status = 1;
- rc = sqlite3_step_monitored(res);
- if (likely(rc == SQLITE_ROW)) {
- uuid_copy(*store_uuid, sqlite3_column_blob(res, 0));
- status = 0;
- }
-
- rc = sqlite3_reset(res);
- if (unlikely(rc != SQLITE_OK))
- error_report("Failed to reset statement when searching for a chart UUID, rc = %d", rc);
-
- return status;
-
-bind_fail:
- error_report("Failed to bind input parameter to perform chart UUID database lookup, rc = %d", rc);
- rc = sqlite3_reset(res);
- if (unlikely(rc != SQLITE_OK))
- error_report("Failed to reset statement when searching for a chart UUID, rc = %d", rc);
- return 1;
-}
-
-int sql_find_dimension_uuid(RRDSET *st, RRDDIM *rd, uuid_t *store_uuid)
-{
- static __thread sqlite3_stmt *res = NULL;
- int rc;
- int status = 1;
-
- if (unlikely(!db_meta) && default_rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE)
- return 1;
-
- if (unlikely(!res)) {
- rc = prepare_statement(db_meta, SQL_FIND_DIMENSION_UUID, &res);
- if (rc != SQLITE_OK) {
- error_report("Failed to bind prepare statement to lookup dimension UUID in the database");
- return 1;
- }
- }
-
- rc = sqlite3_bind_blob(res, 1, st->chart_uuid, sizeof(*st->chart_uuid), SQLITE_STATIC);
- if (unlikely(rc != SQLITE_OK))
- goto bind_fail;
-
- rc = sqlite3_bind_text(res, 2, rrddim_id(rd), -1, SQLITE_STATIC);
- if (unlikely(rc != SQLITE_OK))
- goto bind_fail;
-
- rc = sqlite3_bind_text(res, 3, rrddim_name(rd), -1, SQLITE_STATIC);
- if (unlikely(rc != SQLITE_OK))
- goto bind_fail;
-
- rc = sqlite3_step_monitored(res);
- if (likely(rc == SQLITE_ROW)) {
- uuid_copy(*store_uuid, *((uuid_t *) sqlite3_column_blob(res, 0)));
- status = 0;
- }
-
- rc = sqlite3_reset(res);
- if (unlikely(rc != SQLITE_OK))
- error_report("Failed to reset statement find dimension uuid, rc = %d", rc);
- return status;
-
-bind_fail:
- error_report("Failed to bind input parameter to perform dimension UUID database lookup, rc = %d", rc);
- return 1;
-}
diff --git a/database/sqlite/sqlite_functions.h b/database/sqlite/sqlite_functions.h
index 5731d5c9e..40abd010d 100644
--- a/database/sqlite/sqlite_functions.h
+++ b/database/sqlite/sqlite_functions.h
@@ -54,9 +54,7 @@ void sql_close_database(void);
int bind_text_null(sqlite3_stmt *res, int position, const char *text, bool can_be_null);
int prepare_statement(sqlite3 *database, const char *query, sqlite3_stmt **statement);
int execute_insert(sqlite3_stmt *res);
-int file_is_migrated(char *path);
int exec_statement_with_uuid(const char *sql, uuid_t *uuid);
-void add_migrated_file(char *path, uint64_t file_size);
void db_execute(const char *cmd);
// Look up functions
@@ -65,16 +63,11 @@ int get_host_id(uuid_t *node_id, uuid_t *host_id);
struct node_instance_list *get_node_list(void);
void sql_load_node_id(RRDHOST *host);
char *get_hostname_by_node_id(char *node_id);
-int sql_find_chart_uuid(RRDHOST *host, RRDSET *st, uuid_t *store_uuid);
-int sql_find_dimension_uuid(RRDSET *st, RRDDIM *rd, uuid_t *store_uuid);
// Help build archived hosts in memory when agent starts
void sql_build_host_system_info(uuid_t *host_id, struct rrdhost_system_info *system_info);
DICTIONARY *sql_load_host_labels(uuid_t *host_id);
-// For queries: To be removed when context queries are implemented
-void sql_rrdset2json(RRDHOST *host, BUFFER *wb);
-
// TODO: move to metadata
int update_node_id(uuid_t *host_id, uuid_t *node_id);
diff --git a/database/sqlite/sqlite_health.c b/database/sqlite/sqlite_health.c
index c189305b8..471fa3add 100644
--- a/database/sqlite/sqlite_health.c
+++ b/database/sqlite/sqlite_health.c
@@ -61,8 +61,12 @@ void sql_health_alarm_log_update(RRDHOST *host, ALARM_ENTRY *ae) {
rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0);
if (unlikely(rc != SQLITE_OK)) {
- error_report("HEALTH [%s]: Failed to prepare statement for SQL_UPDATE_HEALTH_LOG", rrdhost_hostname(host));
- return;
+ sql_create_health_log_table(host);
+ rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+            error_report("HEALTH [%s]: Failed to prepare statement for SQL_UPDATE_HEALTH_LOG", rrdhost_hostname(host));
+ return;
+ }
}
rc = sqlite3_bind_int64(res, 1, (sqlite3_int64) ae->updated_by_id);
@@ -103,8 +107,6 @@ void sql_health_alarm_log_update(RRDHOST *host, ALARM_ENTRY *ae) {
failed:
if (unlikely(sqlite3_finalize(res) != SQLITE_OK))
error_report("HEALTH [%s]: Failed to finalize the prepared statement for updating health log.", rrdhost_hostname(host));
-
- return;
}
/* Health related SQL queries
@@ -134,8 +136,12 @@ void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae) {
rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0);
if (unlikely(rc != SQLITE_OK)) {
- error_report("HEALTH [%s]: Failed to prepare statement for SQL_INSERT_HEALTH_LOG", rrdhost_hostname(host));
- return;
+ sql_create_health_log_table(host);
+ rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("HEALTH [%s]: Failed to prepare statement for SQL_INSERT_HEALTH_LOG", rrdhost_hostname(host));
+ return;
+ }
}
rc = sqlite3_bind_text(res, 1, rrdhost_hostname(host), -1, SQLITE_STATIC);
@@ -337,13 +343,11 @@ void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae) {
}
ae->flags |= HEALTH_ENTRY_FLAG_SAVED;
- host->health_log_entries_written++;
+ host->health.health_log_entries_written++;
failed:
if (unlikely(sqlite3_finalize(res) != SQLITE_OK))
error_report("HEALTH [%s]: Failed to finalize the prepared statement for inserting to health log.", rrdhost_hostname(host));
-
- return;
}
void sql_health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae)
@@ -369,7 +373,7 @@ void sql_health_alarm_log_cleanup(RRDHOST *host) {
if(rotate_every < 100) rotate_every = 100;
}
- if(likely(host->health_log_entries_written < rotate_every)) {
+ if(likely(host->health.health_log_entries_written < rotate_every)) {
return;
}
@@ -382,7 +386,7 @@ void sql_health_alarm_log_cleanup(RRDHOST *host) {
char uuid_str[GUID_LEN + 1];
uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
- snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_CLEANUP_HEALTH_LOG(uuid_str, uuid_str, (unsigned long int) (host->health_log_entries_written - rotate_every)));
+ snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_CLEANUP_HEALTH_LOG(uuid_str, uuid_str, (unsigned long int) (host->health.health_log_entries_written - rotate_every)));
rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0);
if (unlikely(rc != SQLITE_OK)) {
@@ -398,7 +402,7 @@ void sql_health_alarm_log_cleanup(RRDHOST *host) {
if (unlikely(rc != SQLITE_OK))
error_report("Failed to finalize the prepared statement to cleanup health log table");
- host->health_log_entries_written = rotate_every;
+ host->health.health_log_entries_written = rotate_every;
sql_aclk_alert_clean_dead_entries(host);
}
@@ -431,13 +435,13 @@ void sql_health_alarm_log_count(RRDHOST *host) {
rc = sqlite3_step_monitored(res);
if (likely(rc == SQLITE_ROW))
- host->health_log_entries_written = (size_t) sqlite3_column_int64(res, 0);
+ host->health.health_log_entries_written = (size_t) sqlite3_column_int64(res, 0);
rc = sqlite3_finalize(res);
if (unlikely(rc != SQLITE_OK))
error_report("Failed to finalize the prepared statement to count health log entries from db");
- info("HEALTH [%s]: Table health_log_%s, contains %lu entries.", rrdhost_hostname(host), uuid_str, (unsigned long int) host->health_log_entries_written);
+ info("HEALTH [%s]: Table health_log_%s, contains %lu entries.", rrdhost_hostname(host), uuid_str, (unsigned long int) host->health.health_log_entries_written);
}
#define SQL_INJECT_REMOVED(guid, guid2) "insert into health_log_%s (hostname, unique_id, alarm_id, alarm_event_id, config_hash_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, " \
@@ -537,8 +541,6 @@ void sql_inject_removed_status(char *uuid_str, uint32_t alarm_id, uint32_t alarm
failed:
if (unlikely(sqlite3_finalize(res) != SQLITE_OK))
error_report("HEALTH [N/A]: Failed to finalize the prepared statement for injecting removed event.");
- return;
-
}
#define SQL_SELECT_MAX_UNIQUE_ID(guid) "SELECT MAX(unique_id) from health_log_%s", guid
@@ -612,7 +614,7 @@ void sql_health_alarm_log_load(RRDHOST *host) {
ssize_t errored = 0, loaded = 0;
char command[MAX_HEALTH_SQL_SIZE + 1];
- host->health_log_entries_written = 0;
+ host->health.health_log_entries_written = 0;
if (unlikely(!db_meta)) {
if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE)
diff --git a/database/sqlite/sqlite_metadata.c b/database/sqlite/sqlite_metadata.c
index 4eb212152..35f928ffa 100644
--- a/database/sqlite/sqlite_metadata.c
+++ b/database/sqlite/sqlite_metadata.c
@@ -4,9 +4,9 @@
// SQL statements
-#define SQL_STORE_CLAIM_ID "insert into node_instance " \
- "(host_id, claim_id, date_created) values (@host_id, @claim_id, unixepoch()) " \
- "on conflict(host_id) do update set claim_id = excluded.claim_id;"
+#define SQL_STORE_CLAIM_ID "INSERT INTO node_instance " \
+ "(host_id, claim_id, date_created) VALUES (@host_id, @claim_id, unixepoch()) " \
+ "ON CONFLICT(host_id) DO UPDATE SET claim_id = excluded.claim_id;"
#define SQL_DELETE_HOST_LABELS "DELETE FROM host_label WHERE host_id = @uuid;"
@@ -56,24 +56,13 @@
#define MAX_METADATA_CLEANUP (500) // Maximum metadata write operations (e.g deletes before retrying)
#define METADATA_MAX_BATCH_SIZE (512) // Maximum commands to execute before running the event loop
-#define METADATA_MAX_TRANSACTION_BATCH (128) // Maximum commands to add in a transaction
enum metadata_opcode {
METADATA_DATABASE_NOOP = 0,
METADATA_DATABASE_TIMER,
- METADATA_ADD_CHART,
- METADATA_ADD_CHART_LABEL,
- METADATA_ADD_DIMENSION,
METADATA_DEL_DIMENSION,
- METADATA_ADD_DIMENSION_OPTION,
- METADATA_ADD_HOST_SYSTEM_INFO,
- METADATA_ADD_HOST_INFO,
METADATA_STORE_CLAIM_ID,
- METADATA_STORE_HOST_LABELS,
- METADATA_STORE_BUFFER,
-
- METADATA_SKIP_TRANSACTION, // Dummy -- OPCODES less than this one can be in a tranasction
-
+ METADATA_ADD_HOST_INFO,
METADATA_SCAN_HOSTS,
METADATA_MAINTENANCE,
METADATA_SYNC_SHUTDOWN,
@@ -105,14 +94,14 @@ typedef enum {
struct metadata_wc {
uv_thread_t thread;
+ uv_loop_t *loop;
+ uv_async_t async;
+ uv_timer_t timer_req;
time_t check_metadata_after;
time_t check_hosts_after;
volatile unsigned queue_size;
- uv_loop_t *loop;
- uv_async_t async;
METADATA_FLAG flags;
uint64_t row_id;
- uv_timer_t timer_req;
struct completion init_complete;
/* FIFO command queue */
uv_mutex_t cmd_mutex;
@@ -339,7 +328,7 @@ static int sql_store_host_info(RRDHOST *host)
if (unlikely(rc != SQLITE_OK))
goto bind_fail;
- rc = sqlite3_bind_int(res, ++param, (int ) host->health_enabled);
+ rc = sqlite3_bind_int(res, ++param, (int ) host->health.health_enabled);
if (unlikely(rc != SQLITE_OK))
goto bind_fail;
@@ -383,7 +372,7 @@ static BUFFER *sql_store_host_system_info(RRDHOST *host)
if (unlikely(!system_info))
return NULL;
- BUFFER *work_buffer = buffer_create(1024);
+ BUFFER *work_buffer = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite);
struct query_build key_data = {.sql = work_buffer, .count = 0};
uuid_unparse_lower(host->host_uuid, key_data.uuid_str);
@@ -417,49 +406,6 @@ static BUFFER *sql_store_host_system_info(RRDHOST *host)
}
-/*
- * Store set option for a dimension
- */
-static int sql_set_dimension_option(uuid_t *dim_uuid, char *option)
-{
- sqlite3_stmt *res = NULL;
- int rc;
-
- if (unlikely(!db_meta)) {
- if (default_rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE)
- return 0;
- error_report("Database has not been initialized");
- return 1;
- }
-
- rc = sqlite3_prepare_v2(db_meta, "UPDATE dimension SET options = @options WHERE dim_id = @dim_id", -1, &res, 0);
- if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to prepare statement to update dimension options");
- return 0;
- };
-
- rc = sqlite3_bind_blob(res, 2, dim_uuid, sizeof(*dim_uuid), SQLITE_STATIC);
- if (unlikely(rc != SQLITE_OK))
- goto bind_fail;
-
- if (!option || !strcmp(option,"unhide"))
- rc = sqlite3_bind_null(res, 1);
- else
- rc = sqlite3_bind_text(res, 1, option, -1, SQLITE_STATIC);
- if (unlikely(rc != SQLITE_OK))
- goto bind_fail;
-
- rc = execute_insert(res);
- if (unlikely(rc != SQLITE_DONE))
- error_report("Failed to update dimension option, rc = %d", rc);
-
-bind_fail:
- rc = sqlite3_finalize(res);
- if (unlikely(rc != SQLITE_OK))
- error_report("Failed to finalize statement in update dimension options, rc = %d", rc);
- return 0;
-}
-
/*
* Store a chart in the database
*/
@@ -665,22 +611,26 @@ bind_fail:
return 1;
}
-static bool dimension_can_be_deleted(uuid_t *dim_uuid)
+static bool dimension_can_be_deleted(uuid_t *dim_uuid __maybe_unused)
{
#ifdef ENABLE_DBENGINE
- bool no_retention = true;
- for (size_t tier = 0; tier < storage_tiers; tier++) {
- if (!multidb_ctx[tier])
- continue;
- time_t first_time_t = 0, last_time_t = 0;
- if (rrdeng_metric_retention_by_uuid((void *) multidb_ctx[tier], dim_uuid, &first_time_t, &last_time_t) == 0) {
- if (first_time_t > 0) {
- no_retention = false;
- break;
+ if(dbengine_enabled) {
+ bool no_retention = true;
+ for (size_t tier = 0; tier < storage_tiers; tier++) {
+ if (!multidb_ctx[tier])
+ continue;
+ time_t first_time_t = 0, last_time_t = 0;
+ if (rrdeng_metric_retention_by_uuid((void *) multidb_ctx[tier], dim_uuid, &first_time_t, &last_time_t)) {
+ if (first_time_t > 0) {
+ no_retention = false;
+ break;
+ }
}
}
+ return no_retention;
}
- return no_retention;
+ else
+ return false;
#else
return false;
#endif
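
Note that the hunk above also flips the retention check: the old code treated a zero return from rrdeng_metric_retention_by_uuid() as "lookup succeeded", whereas the new code takes a plain true return to mean retention information was found. A minimal sketch of the new per-tier test, reusing only the declarations already visible in this hunk (multidb_ctx, storage_tiers, dim_uuid); it is an illustration, not a drop-in replacement:

    // A dimension UUID may be deleted only if no tier still holds samples for it.
    bool has_retention = false;
    for (size_t tier = 0; tier < storage_tiers; tier++) {
        if (!multidb_ctx[tier])
            continue;
        time_t first_time_t = 0, last_time_t = 0;
        if (rrdeng_metric_retention_by_uuid((void *) multidb_ctx[tier], dim_uuid,
                                            &first_time_t, &last_time_t) &&
            first_time_t > 0) {
            has_retention = true;   // samples still exist in this tier -- keep the UUID
            break;
        }
    }
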
@@ -736,6 +686,16 @@ skip_run:
error_report("Failed to finalize the prepared statement when reading dimensions");
}
+static void cleanup_health_log(void)
+{
+ RRDHOST *host;
+ dfe_start_reentrant(rrdhost_root_index, host) {
+ if (rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED))
+ continue;
+ sql_health_alarm_log_cleanup(host);
+ }
+ dfe_done(host);
+}
//
// EVENT LOOP STARTS HERE
@@ -817,7 +777,7 @@ static void metadata_enq_cmd(struct metadata_wc *wc, struct metadata_cmd *cmd)
(void) uv_async_send(&wc->async);
}
-static struct metadata_cmd metadata_deq_cmd(struct metadata_wc *wc, enum metadata_opcode *next_opcode)
+static struct metadata_cmd metadata_deq_cmd(struct metadata_wc *wc)
{
struct metadata_cmd ret;
unsigned queue_size;
@@ -828,7 +788,6 @@ static struct metadata_cmd metadata_deq_cmd(struct metadata_wc *wc, enum metadat
memset(&ret, 0, sizeof(ret));
ret.opcode = METADATA_DATABASE_NOOP;
ret.completion = NULL;
- *next_opcode = METADATA_DATABASE_NOOP;
} else {
/* dequeue command */
ret = wc->cmd_queue.cmd_array[wc->cmd_queue.head];
@@ -840,10 +799,6 @@ static struct metadata_cmd metadata_deq_cmd(struct metadata_wc *wc, enum metadat
wc->cmd_queue.head + 1 : 0;
}
wc->queue_size = queue_size - 1;
- if (wc->queue_size > 0)
- *next_opcode = wc->cmd_queue.cmd_array[wc->cmd_queue.head].opcode;
- else
- *next_opcode = METADATA_DATABASE_NOOP;
/* wake up producers */
uv_cond_signal(&wc->cmd_cond);
}
@@ -892,10 +847,16 @@ static void after_metadata_cleanup(uv_work_t *req, int status)
struct metadata_wc *wc = req->data;
metadata_flag_clear(wc, METADATA_FLAG_CLEANUP);
}
+
static void start_metadata_cleanup(uv_work_t *req)
{
+ register_libuv_worker_jobs();
+
+ worker_is_busy(UV_EVENT_METADATA_CLEANUP);
struct metadata_wc *wc = req->data;
check_dimension_metadata(wc);
+ cleanup_health_log();
+ worker_is_idle();
}
struct scan_metadata_payload {
@@ -920,13 +881,13 @@ static void after_metadata_hosts(uv_work_t *req, int status __maybe_unused)
freez(data);
}
-static bool metadata_scan_host(RRDHOST *host, uint32_t max_count) {
+static bool metadata_scan_host(RRDHOST *host, uint32_t max_count, size_t *query_counter) {
RRDSET *st;
int rc;
bool more_to_do = false;
uint32_t scan_count = 1;
- BUFFER *work_buffer = buffer_create(1024);
+ BUFFER *work_buffer = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite);
rrdset_foreach_reentrant(st, host) {
if (scan_count == max_count) {
@@ -934,6 +895,8 @@ static bool metadata_scan_host(RRDHOST *host, uint32_t max_count) {
break;
}
if(rrdset_flag_check(st, RRDSET_FLAG_METADATA_UPDATE)) {
+ (*query_counter)++;
+
rrdset_flag_clear(st, RRDSET_FLAG_METADATA_UPDATE);
scan_count++;
@@ -963,8 +926,15 @@ static bool metadata_scan_host(RRDHOST *host, uint32_t max_count) {
RRDDIM *rd;
rrddim_foreach_read(rd, st) {
if(rrddim_flag_check(rd, RRDDIM_FLAG_METADATA_UPDATE)) {
+ (*query_counter)++;
+
rrddim_flag_clear(rd, RRDDIM_FLAG_METADATA_UPDATE);
+ if (rrddim_option_check(rd, RRDDIM_OPTION_HIDDEN))
+ rrddim_flag_set(rd, RRDDIM_FLAG_META_HIDDEN);
+ else
+ rrddim_flag_clear(rd, RRDDIM_FLAG_META_HIDDEN);
+
rc = sql_store_dimension(
&rd->metric_uuid,
&rd->rrdset->chart_uuid,
@@ -990,52 +960,119 @@ static bool metadata_scan_host(RRDHOST *host, uint32_t max_count) {
// Worker thread to scan hosts for pending metadata to store
static void start_metadata_hosts(uv_work_t *req __maybe_unused)
{
+ register_libuv_worker_jobs();
+
RRDHOST *host;
struct scan_metadata_payload *data = req->data;
struct metadata_wc *wc = data->wc;
+ usec_t all_started_ut = now_monotonic_usec(); (void)all_started_ut;
+ internal_error(true, "METADATA: checking all hosts...");
+
bool run_again = false;
+ worker_is_busy(UV_EVENT_METADATA_STORE);
+
+ if (!data->max_count)
+ db_execute("BEGIN TRANSACTION;");
dfe_start_reentrant(rrdhost_root_index, host) {
if (rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED) || !rrdhost_flag_check(host, RRDHOST_FLAG_METADATA_UPDATE))
continue;
- internal_error(true, "METADATA: Scanning host %s", rrdhost_hostname(host));
+
+ size_t query_counter = 0; (void)query_counter;
+ usec_t started_ut = now_monotonic_usec(); (void)started_ut;
+
rrdhost_flag_clear(host,RRDHOST_FLAG_METADATA_UPDATE);
- if (unlikely(metadata_scan_host(host, data->max_count))) {
+
+ if (unlikely(rrdhost_flag_check(host, RRDHOST_FLAG_METADATA_LABELS))) {
+ rrdhost_flag_clear(host, RRDHOST_FLAG_METADATA_LABELS);
+ int rc = exec_statement_with_uuid(SQL_DELETE_HOST_LABELS, &host->host_uuid);
+ if (likely(rc == SQLITE_OK)) {
+ BUFFER *work_buffer = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite);
+ struct query_build tmp = {.sql = work_buffer, .count = 0};
+ uuid_unparse_lower(host->host_uuid, tmp.uuid_str);
+ rrdlabels_walkthrough_read(host->rrdlabels, host_label_store_to_sql_callback, &tmp);
+ db_execute(buffer_tostring(work_buffer));
+ buffer_free(work_buffer);
+ query_counter++;
+ }
+ }
+
+ if (unlikely(rrdhost_flag_check(host, RRDHOST_FLAG_METADATA_CLAIMID))) {
+ rrdhost_flag_clear(host, RRDHOST_FLAG_METADATA_CLAIMID);
+ uuid_t uuid;
+
+ if (likely(host->aclk_state.claimed_id && !uuid_parse(host->aclk_state.claimed_id, uuid)))
+ store_claim_id(&host->host_uuid, &uuid);
+ else
+ store_claim_id(&host->host_uuid, NULL);
+
+ query_counter++;
+ }
+
+ if (unlikely(rrdhost_flag_check(host, RRDHOST_FLAG_METADATA_INFO))) {
+ rrdhost_flag_clear(host, RRDHOST_FLAG_METADATA_INFO);
+
+ BUFFER *work_buffer = sql_store_host_system_info(host);
+ if(work_buffer) {
+ db_execute(buffer_tostring(work_buffer));
+ buffer_free(work_buffer);
+ query_counter++;
+ }
+
+ int rc = sql_store_host_info(host);
+ if (unlikely(rc))
+ error_report("METADATA: 'host:%s': failed to store host info", string2str(host->hostname));
+ else
+ query_counter++;
+ }
+
+ if (data->max_count)
+ db_execute("BEGIN TRANSACTION;");
+ if (unlikely(metadata_scan_host(host, data->max_count, &query_counter))) {
run_again = true;
rrdhost_flag_set(host,RRDHOST_FLAG_METADATA_UPDATE);
- internal_error(true,"METADATA: Rescheduling host %s to run; more charts to store", rrdhost_hostname(host));
+ internal_error(true,"METADATA: 'host:%s': scheduling another run, more charts to store", rrdhost_hostname(host));
}
+ if (data->max_count)
+ db_execute("COMMIT TRANSACTION;");
+
+ usec_t ended_ut = now_monotonic_usec(); (void)ended_ut;
+ internal_error(true, "METADATA: 'host:%s': saved metadata with %zu SQL statements, in %0.2f ms",
+ rrdhost_hostname(host), query_counter,
+ (double)(ended_ut - started_ut) / USEC_PER_MS);
}
dfe_done(host);
+ if (!data->max_count)
+ db_execute("COMMIT TRANSACTION;");
+
+ usec_t all_ended_ut = now_monotonic_usec(); (void)all_ended_ut;
+ internal_error(true, "METADATA: checking all hosts completed in %0.2f ms",
+ (double)(all_ended_ut - all_started_ut) / USEC_PER_MS);
+
if (unlikely(run_again))
wc->check_hosts_after = now_realtime_sec() + METADATA_HOST_CHECK_IMMEDIATE;
else
wc->check_hosts_after = now_realtime_sec() + METADATA_HOST_CHECK_INTERVAL;
+ worker_is_idle();
}
static void metadata_event_loop(void *arg)
{
+ service_register(SERVICE_THREAD_TYPE_EVENT_LOOP, NULL, NULL, NULL, true);
worker_register("METASYNC");
worker_register_job_name(METADATA_DATABASE_NOOP, "noop");
worker_register_job_name(METADATA_DATABASE_TIMER, "timer");
- worker_register_job_name(METADATA_ADD_CHART, "add chart");
- worker_register_job_name(METADATA_ADD_CHART_LABEL, "add chart label");
- worker_register_job_name(METADATA_ADD_DIMENSION, "add dimension");
worker_register_job_name(METADATA_DEL_DIMENSION, "delete dimension");
- worker_register_job_name(METADATA_ADD_DIMENSION_OPTION, "dimension option");
- worker_register_job_name(METADATA_ADD_HOST_SYSTEM_INFO, "host system info");
- worker_register_job_name(METADATA_ADD_HOST_INFO, "host info");
worker_register_job_name(METADATA_STORE_CLAIM_ID, "add claim id");
- worker_register_job_name(METADATA_STORE_HOST_LABELS, "host labels");
+ worker_register_job_name(METADATA_ADD_HOST_INFO, "add host info");
worker_register_job_name(METADATA_MAINTENANCE, "maintenance");
-
int ret;
uv_loop_t *loop;
unsigned cmd_batch_size;
struct metadata_wc *wc = arg;
- enum metadata_opcode opcode, next_opcode;
+ enum metadata_opcode opcode;
uv_work_t metadata_cleanup_worker;
uv_thread_set_name_np(wc->thread, "METASYNC");
@@ -1073,20 +1110,12 @@ static void metadata_event_loop(void *arg)
wc->check_hosts_after = now_realtime_sec() + METADATA_HOST_CHECK_FIRST_CHECK;
int shutdown = 0;
- int in_transaction = 0;
- int commands_in_transaction = 0;
- // This can be used in the event loop for all opcodes (not workers)
- BUFFER *work_buffer = buffer_create(1024);
wc->row_id = 0;
completion_mark_complete(&wc->init_complete);
while (shutdown == 0 || (wc->flags & METADATA_WORKER_BUSY)) {
- RRDDIM *rd = NULL;
- RRDSET *st = NULL;
- RRDHOST *host = NULL;
- DICTIONARY_ITEM *dict_item = NULL;
- BUFFER *buffer = NULL;
uuid_t *uuid;
+ RRDHOST *host = NULL;
int rc;
worker_is_idle();
@@ -1098,7 +1127,7 @@ static void metadata_event_loop(void *arg)
if (unlikely(cmd_batch_size >= METADATA_MAX_BATCH_SIZE))
break;
- cmd = metadata_deq_cmd(wc, &next_opcode);
+ cmd = metadata_deq_cmd(wc);
opcode = cmd.opcode;
if (unlikely(opcode == METADATA_DATABASE_NOOP && metadata_flag_check(wc, METADATA_FLAG_SHUTDOWN))) {
@@ -1108,130 +1137,38 @@ static void metadata_event_loop(void *arg)
++cmd_batch_size;
- // If we are not in transaction and this command is the same with the next ; start a transaction
- if (!in_transaction && opcode < METADATA_SKIP_TRANSACTION && opcode == next_opcode) {
- if (opcode != METADATA_DATABASE_NOOP) {
- in_transaction = 1;
- db_execute("BEGIN TRANSACTION;");
- }
- }
-
- if (likely(in_transaction)) {
- commands_in_transaction++;
- }
-
if (likely(opcode != METADATA_DATABASE_NOOP))
- worker_is_busy(opcode);
+ worker_is_busy(opcode);
switch (opcode) {
case METADATA_DATABASE_NOOP:
case METADATA_DATABASE_TIMER:
break;
- case METADATA_ADD_CHART:
- dict_item = (DICTIONARY_ITEM * ) cmd.param[0];
- st = (RRDSET *) dictionary_acquired_item_value(dict_item);
-
- rc = sql_store_chart(
- &st->chart_uuid,
- &st->rrdhost->host_uuid,
- string2str(st->parts.type),
- string2str(st->parts.id),
- string2str(st->parts.name),
- rrdset_family(st),
- rrdset_context(st),
- rrdset_title(st),
- rrdset_units(st),
- rrdset_plugin_name(st),
- rrdset_module_name(st),
- st->priority,
- st->update_every,
- st->chart_type,
- st->rrd_memory_mode,
- st->entries);
-
- if (unlikely(rc))
- error_report("Failed to store chart %s", rrdset_id(st));
-
- dictionary_acquired_item_release(st->rrdhost->rrdset_root_index, dict_item);
- break;
- case METADATA_ADD_CHART_LABEL:
- dict_item = (DICTIONARY_ITEM * ) cmd.param[0];
- st = (RRDSET *) dictionary_acquired_item_value(dict_item);
- check_and_update_chart_labels(st, work_buffer);
- dictionary_acquired_item_release(st->rrdhost->rrdset_root_index, dict_item);
- break;
- case METADATA_ADD_DIMENSION:
- dict_item = (DICTIONARY_ITEM * ) cmd.param[0];
- rd = (RRDDIM *) dictionary_acquired_item_value(dict_item);
-
- rc = sql_store_dimension(
- &rd->metric_uuid,
- &rd->rrdset->chart_uuid,
- string2str(rd->id),
- string2str(rd->name),
- rd->multiplier,
- rd->divisor,
- rd->algorithm,
- rrddim_option_check(rd, RRDDIM_OPTION_HIDDEN));
- if (unlikely(rc))
- error_report("Failed to store dimension %s", rrddim_id(rd));
-
- dictionary_acquired_item_release(rd->rrdset->rrddim_root_index, dict_item);
- break;
case METADATA_DEL_DIMENSION:
uuid = (uuid_t *) cmd.param[0];
if (likely(dimension_can_be_deleted(uuid)))
delete_dimension_uuid(uuid);
freez(uuid);
break;
- case METADATA_ADD_DIMENSION_OPTION:
- dict_item = (DICTIONARY_ITEM * ) cmd.param[0];
- rd = (RRDDIM *) dictionary_acquired_item_value(dict_item);
- rc = sql_set_dimension_option(
- &rd->metric_uuid, rrddim_flag_check(rd, RRDDIM_FLAG_META_HIDDEN) ? "hidden" : NULL);
- if (unlikely(rc))
- error_report("Failed to store dimension option for %s", string2str(rd->id));
- dictionary_acquired_item_release(rd->rrdset->rrddim_root_index, dict_item);
- break;
- case METADATA_ADD_HOST_SYSTEM_INFO:
- buffer = (BUFFER *) cmd.param[0];
- db_execute(buffer_tostring(buffer));
- buffer_free(buffer);
- break;
- case METADATA_ADD_HOST_INFO:
- dict_item = (DICTIONARY_ITEM * ) cmd.param[0];
- host = (RRDHOST *) dictionary_acquired_item_value(dict_item);
- rc = sql_store_host_info(host);
- if (unlikely(rc))
- error_report("Failed to store host info in the database for %s", string2str(host->hostname));
- dictionary_acquired_item_release(rrdhost_root_index, dict_item);
- break;
case METADATA_STORE_CLAIM_ID:
store_claim_id((uuid_t *) cmd.param[0], (uuid_t *) cmd.param[1]);
freez((void *) cmd.param[0]);
freez((void *) cmd.param[1]);
break;
- case METADATA_STORE_HOST_LABELS:
- dict_item = (DICTIONARY_ITEM * ) cmd.param[0];
- host = (RRDHOST *) dictionary_acquired_item_value(dict_item);
- rc = exec_statement_with_uuid(SQL_DELETE_HOST_LABELS, &host->host_uuid);
-
- if (likely(rc == SQLITE_OK)) {
- buffer_flush(work_buffer);
- struct query_build tmp = {.sql = work_buffer, .count = 0};
- uuid_unparse_lower(host->host_uuid, tmp.uuid_str);
- rrdlabels_walkthrough_read(host->rrdlabels, host_label_store_to_sql_callback, &tmp);
- db_execute(buffer_tostring(work_buffer));
- }
-
- dictionary_acquired_item_release(rrdhost_root_index, dict_item);
+ case METADATA_ADD_HOST_INFO:
+ host = (RRDHOST *) cmd.param[0];
+ rc = sql_store_host_info(host);
+ if (unlikely(rc))
+ error_report("Failed to store host info in the database for %s", string2str(host->hostname));
break;
-
case METADATA_SCAN_HOSTS:
if (unlikely(metadata_flag_check(wc, METADATA_FLAG_SCANNING_HOSTS)))
break;
+ if (unittest_running)
+ break;
+
struct scan_metadata_payload *data = mallocz(sizeof(*data));
data->request.data = data;
data->wc = wc;
@@ -1242,7 +1179,7 @@ static void metadata_event_loop(void *arg)
cmd.completion = NULL; // Do not complete after launching worker (worker will do)
}
else
- data->max_count = 1000;
+ data->max_count = 5000;
metadata_flag_set(wc, METADATA_FLAG_SCANNING_HOSTS);
if (unlikely(
@@ -1255,11 +1192,6 @@ static void metadata_event_loop(void *arg)
metadata_flag_clear(wc, METADATA_FLAG_SCANNING_HOSTS);
}
break;
- case METADATA_STORE_BUFFER:
- buffer = (BUFFER *) cmd.param[0];
- db_execute(buffer_tostring(buffer));
- buffer_free(buffer);
- break;
case METADATA_MAINTENANCE:
if (unlikely(metadata_flag_check(wc, METADATA_FLAG_CLEANUP)))
break;
@@ -1279,11 +1211,6 @@ static void metadata_event_loop(void *arg)
default:
break;
}
- if (in_transaction && (commands_in_transaction >= METADATA_MAX_TRANSACTION_BATCH || opcode != next_opcode)) {
- in_transaction = 0;
- db_execute("COMMIT TRANSACTION;");
- commands_in_transaction = 0;
- }
if (cmd.completion)
completion_mark_complete(cmd.completion);
@@ -1302,8 +1229,6 @@ static void metadata_event_loop(void *arg)
uv_run(loop, UV_RUN_DEFAULT);
uv_cond_destroy(&wc->cmd_cond);
- /* uv_mutex_destroy(&wc->cmd_mutex); */
- //fatal_assert(0 == uv_loop_close(loop));
int rc;
do {
@@ -1313,7 +1238,6 @@ static void metadata_event_loop(void *arg)
freez(loop);
worker_unregister();
- buffer_free(work_buffer);
info("METADATA: Shutting down event loop");
completion_mark_complete(&wc->init_complete);
return;
@@ -1408,50 +1332,6 @@ static inline void queue_metadata_cmd(enum metadata_opcode opcode, const void *p
}
// Public
-void metaqueue_chart_update(RRDSET *st)
-{
- const DICTIONARY_ITEM *acquired_st = dictionary_get_and_acquire_item(st->rrdhost->rrdset_root_index, string2str(st->id));
- queue_metadata_cmd(METADATA_ADD_CHART, acquired_st, NULL);
-}
-
-//
-// RD may not be collected, so we may store it needlessly
-void metaqueue_dimension_update(RRDDIM *rd)
-{
- const DICTIONARY_ITEM *acquired_rd =
- dictionary_get_and_acquire_item(rd->rrdset->rrddim_root_index, string2str(rd->id));
-
- if (unlikely(rrdset_flag_check(rd->rrdset, RRDSET_FLAG_METADATA_UPDATE))) {
- metaqueue_chart_update(rd->rrdset);
- rrdset_flag_clear(rd->rrdset, RRDSET_FLAG_METADATA_UPDATE);
- }
-
- queue_metadata_cmd(METADATA_ADD_DIMENSION, acquired_rd, NULL);
-}
-
-void metaqueue_dimension_update_flags(RRDDIM *rd)
-{
- const DICTIONARY_ITEM *acquired_rd =
- dictionary_get_and_acquire_item(rd->rrdset->rrddim_root_index, string2str(rd->id));
- queue_metadata_cmd(METADATA_ADD_DIMENSION_OPTION, acquired_rd, NULL);
-}
-
-void metaqueue_host_update_system_info(RRDHOST *host)
-{
- BUFFER *work_buffer = sql_store_host_system_info(host);
-
- if (unlikely(!work_buffer))
- return;
-
- queue_metadata_cmd(METADATA_ADD_HOST_SYSTEM_INFO, work_buffer, NULL);
-}
-
-void metaqueue_host_update_info(const char *machine_guid)
-{
- const DICTIONARY_ITEM *acquired_host = dictionary_get_and_acquire_item(rrdhost_root_index, machine_guid);
- queue_metadata_cmd(METADATA_ADD_HOST_INFO, acquired_host, NULL);
-}
-
void metaqueue_delete_dimension_uuid(uuid_t *uuid)
{
if (unlikely(!metasync_worker.loop))
@@ -1477,24 +1357,13 @@ void metaqueue_store_claim_id(uuid_t *host_uuid, uuid_t *claim_uuid)
queue_metadata_cmd(METADATA_STORE_CLAIM_ID, local_host_uuid, local_claim_uuid);
}
-void metaqueue_store_host_labels(const char *machine_guid)
+void metaqueue_host_update_info(RRDHOST *host)
{
- const DICTIONARY_ITEM *acquired_host = dictionary_get_and_acquire_item(rrdhost_root_index, machine_guid);
- queue_metadata_cmd(METADATA_STORE_HOST_LABELS, acquired_host, NULL);
-}
-
-void metaqueue_buffer(BUFFER *buffer)
-{
- queue_metadata_cmd(METADATA_STORE_BUFFER, buffer, NULL);
-}
-
-void metaqueue_chart_labels(RRDSET *st)
-{
- const DICTIONARY_ITEM *acquired_st = dictionary_get_and_acquire_item(st->rrdhost->rrdset_root_index, string2str(st->id));
- queue_metadata_cmd(METADATA_ADD_CHART_LABEL, acquired_st, NULL);
+ if (unlikely(!metasync_worker.loop))
+ return;
+ queue_metadata_cmd(METADATA_ADD_HOST_INFO, host, NULL);
}
-
//
// unitests
//
@@ -1542,7 +1411,7 @@ static void *metadata_unittest_threads(void)
tu.join = 0;
for (int i = 0; i < threads_to_create; i++) {
char buf[100 + 1];
- snprintf(buf, 100, "meta%d", i);
+ snprintf(buf, 100, "META[%d]", i);
netdata_thread_create(
&threads[i],
buf,
@@ -1558,7 +1427,6 @@ static void *metadata_unittest_threads(void)
void *retval;
netdata_thread_join(threads[i], &retval);
}
-// uv_async_send(&metasync_worker.async);
sleep_usec(5 * USEC_PER_SEC);
fprintf(stderr, "Added %u elements, processed %u\n", tu.added, tu.processed);
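
The metadata jobs that run on the libuv threadpool (start_metadata_cleanup() and start_metadata_hosts() above) now register themselves and bracket their work with busy/idle markers so they show up in the workers charts. A short sketch of that pattern, assuming the helpers introduced in the hunks above (register_libuv_worker_jobs, worker_is_busy, worker_is_idle, UV_EVENT_METADATA_STORE); the body is illustrative only:

    #include <uv.h>

    static void example_metadata_work(uv_work_t *req) {
        (void) req;
        register_libuv_worker_jobs();             // once per libuv worker thread
        worker_is_busy(UV_EVENT_METADATA_STORE);  // attribute the job while it runs

        /* ... execute the batched SQLite statements here ... */

        worker_is_idle();                         // always pair busy with idle before returning
    }
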
diff --git a/database/sqlite/sqlite_metadata.h b/database/sqlite/sqlite_metadata.h
index 9293facf8..d578b7a8f 100644
--- a/database/sqlite/sqlite_metadata.h
+++ b/database/sqlite/sqlite_metadata.h
@@ -11,17 +11,10 @@ void metadata_sync_init(void);
void metadata_sync_shutdown(void);
void metadata_sync_shutdown_prepare(void);
-void metaqueue_dimension_update(RRDDIM *rd);
-void metaqueue_chart_update(RRDSET *st);
-void metaqueue_dimension_update_flags(RRDDIM *rd);
-void metaqueue_host_update_system_info(RRDHOST *host);
-void metaqueue_host_update_info(const char *machine_guid);
void metaqueue_delete_dimension_uuid(uuid_t *uuid);
void metaqueue_store_claim_id(uuid_t *host_uuid, uuid_t *claim_uuid);
-void metaqueue_store_host_labels(const char *machine_guid);
-void metaqueue_chart_labels(RRDSET *st);
+void metaqueue_host_update_info(RRDHOST *host);
void migrate_localhost(uuid_t *host_uuid);
-void metaqueue_buffer(BUFFER *buffer);
// UNIT TEST
int metadata_unittest(void);
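
With the METADATA_ADD_CHART / METADATA_ADD_DIMENSION / METADATA_STORE_HOST_LABELS opcodes removed, per-object updates are expected to be signalled through the RRDHOST metadata flags that start_metadata_hosts() consumes, and only host info and claim IDs still travel through the command queue. A hypothetical producer-side sketch; the patch shows only the consumer side, so the flag-raising below is an assumption, while the two entry points and the flag names are taken from the hunks above:

    // Hypothetical call sites -- illustration only.
    static void on_host_labels_changed(RRDHOST *host) {
        rrdhost_flag_set(host, RRDHOST_FLAG_METADATA_LABELS);  // labels need re-storing
        rrdhost_flag_set(host, RRDHOST_FLAG_METADATA_UPDATE);  // schedule this host for a scan
    }

    static void on_host_info_changed(RRDHOST *host) {
        metaqueue_host_update_info(host);   // new signature: takes RRDHOST *, not a machine GUID string
    }
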
diff --git a/database/storage_engine.c b/database/storage_engine.c
index edf017db4..c5ba86552 100644
--- a/database/storage_engine.c
+++ b/database/storage_engine.c
@@ -6,23 +6,24 @@
#include "engine/rrdengineapi.h"
#endif
-#define im_collect_ops { \
- .init = rrddim_collect_init,\
- .store_metric = rrddim_collect_store_metric,\
- .flush = rrddim_store_metric_flush,\
- .finalize = rrddim_collect_finalize, \
+#define im_collect_ops { \
+ .init = rrddim_collect_init, \
+ .store_metric = rrddim_collect_store_metric, \
+ .flush = rrddim_store_metric_flush, \
+ .finalize = rrddim_collect_finalize, \
.change_collection_frequency = rrddim_store_metric_change_collection_frequency, \
- .metrics_group_get = rrddim_metrics_group_get, \
- .metrics_group_release = rrddim_metrics_group_release, \
+ .metrics_group_get = rrddim_metrics_group_get, \
+ .metrics_group_release = rrddim_metrics_group_release, \
}
-#define im_query_ops { \
- .init = rrddim_query_init, \
- .next_metric = rrddim_query_next_metric, \
- .is_finished = rrddim_query_is_finished, \
- .finalize = rrddim_query_finalize, \
- .latest_time = rrddim_query_latest_time, \
- .oldest_time = rrddim_query_oldest_time \
+#define im_query_ops { \
+ .init = rrddim_query_init, \
+ .next_metric = rrddim_query_next_metric, \
+ .is_finished = rrddim_query_is_finished, \
+ .finalize = rrddim_query_finalize, \
+ .latest_time_s = rrddim_query_latest_time_s, \
+ .oldest_time_s = rrddim_query_oldest_time_s, \
+ .align_to_optimal_before = rrddim_query_align_to_optimal_before, \
}
static STORAGE_ENGINE engines[] = {
@@ -34,8 +35,9 @@ static STORAGE_ENGINE engines[] = {
.metric_get_or_create = rrddim_metric_get_or_create,
.metric_dup = rrddim_metric_dup,
.metric_release = rrddim_metric_release,
+ .metric_retention_by_uuid = rrddim_metric_retention_by_uuid,
.collect_ops = im_collect_ops,
- .query_ops = im_query_ops
+ .query_ops = im_query_ops,
}
},
{
@@ -46,8 +48,9 @@ static STORAGE_ENGINE engines[] = {
.metric_get_or_create = rrddim_metric_get_or_create,
.metric_dup = rrddim_metric_dup,
.metric_release = rrddim_metric_release,
+ .metric_retention_by_uuid = rrddim_metric_retention_by_uuid,
.collect_ops = im_collect_ops,
- .query_ops = im_query_ops
+ .query_ops = im_query_ops,
}
},
{
@@ -58,8 +61,9 @@ static STORAGE_ENGINE engines[] = {
.metric_get_or_create = rrddim_metric_get_or_create,
.metric_dup = rrddim_metric_dup,
.metric_release = rrddim_metric_release,
+ .metric_retention_by_uuid = rrddim_metric_retention_by_uuid,
.collect_ops = im_collect_ops,
- .query_ops = im_query_ops
+ .query_ops = im_query_ops,
}
},
{
@@ -70,8 +74,9 @@ static STORAGE_ENGINE engines[] = {
.metric_get_or_create = rrddim_metric_get_or_create,
.metric_dup = rrddim_metric_dup,
.metric_release = rrddim_metric_release,
+ .metric_retention_by_uuid = rrddim_metric_retention_by_uuid,
.collect_ops = im_collect_ops,
- .query_ops = im_query_ops
+ .query_ops = im_query_ops,
}
},
{
@@ -82,8 +87,9 @@ static STORAGE_ENGINE engines[] = {
.metric_get_or_create = rrddim_metric_get_or_create,
.metric_dup = rrddim_metric_dup,
.metric_release = rrddim_metric_release,
+ .metric_retention_by_uuid = rrddim_metric_retention_by_uuid,
.collect_ops = im_collect_ops,
- .query_ops = im_query_ops
+ .query_ops = im_query_ops,
}
},
#ifdef ENABLE_DBENGINE
@@ -95,6 +101,7 @@ static STORAGE_ENGINE engines[] = {
.metric_get_or_create = rrdeng_metric_get_or_create,
.metric_dup = rrdeng_metric_dup,
.metric_release = rrdeng_metric_release,
+ .metric_retention_by_uuid = rrdeng_metric_retention_by_uuid,
.collect_ops = {
.init = rrdeng_store_metric_init,
.store_metric = rrdeng_store_metric_next,
@@ -109,8 +116,9 @@ static STORAGE_ENGINE engines[] = {
.next_metric = rrdeng_load_metric_next,
.is_finished = rrdeng_load_metric_is_finished,
.finalize = rrdeng_load_metric_finalize,
- .latest_time = rrdeng_metric_latest_time,
- .oldest_time = rrdeng_metric_oldest_time
+ .latest_time_s = rrdeng_metric_latest_time,
+ .oldest_time_s = rrdeng_metric_oldest_time,
+ .align_to_optimal_before = rrdeng_load_align_to_optimal_before,
}
}
},
diff --git a/diagrams/Makefile.am b/diagrams/Makefile.am
index 475ca89f8..8844034d3 100644
--- a/diagrams/Makefile.am
+++ b/diagrams/Makefile.am
@@ -9,7 +9,6 @@ dist_noinst_DATA = \
netdata-proxies-example.xml \
netdata-overview.xml \
data_structures/netdata_config.svg \
- data_structures/README.md \
data_structures/registry.svg \
data_structures/rrd.svg \
data_structures/web.svg \
diff --git a/diagrams/data_structures/README.md b/diagrams/data_structures/README.md
deleted file mode 100644
index 12ea1afa2..000000000
--- a/diagrams/data_structures/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-
-
-# Data structures
-
-These are the main internal data structures of `netdata`. Created with `draw.io`.
-
-![Config](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/netdata_config.svg?sanitize=true)
-
-![Registry](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/registry.svg?sanitize=true)
-
-![RRD](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/rrd.svg?sanitize=true)
-
-![Web](https://raw.githubusercontent.com/netdata/netdata/master/diagrams/data_structures/web.svg?sanitize=true)
-
-
diff --git a/diagrams/netdata-overview.xml b/diagrams/netdata-overview.xml
index 4d9c3ba35..0db020f65 100644
--- a/diagrams/netdata-overview.xml
+++ b/diagrams/netdata-overview.xml
@@ -1 +1,751 @@
-[compressed draw.io payload of the previous single-line netdata-overview.xml omitted]
\ No newline at end of file
diff --git a/docs/Add-more-charts-to-netdata.md b/docs/Add-more-charts-to-netdata.md
index 6090644e3..35a89fba0 100644
--- a/docs/Add-more-charts-to-netdata.md
+++ b/docs/Add-more-charts-to-netdata.md
@@ -5,9 +5,9 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/Add-more-ch
# Add more charts to Netdata
-This file has been deprecated. Please see our [collectors docs](/collectors/README.md) for more information.
+This file has been deprecated. Please see our [collectors docs](https://github.com/netdata/netdata/blob/master/collectors/README.md) for more information.
## Available data collection modules
-See the [list of supported collectors](/collectors/COLLECTORS.md) to see all the sources Netdata can collect metrics
+See the [list of supported collectors](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md) to see all the sources Netdata can collect metrics
from.
diff --git a/docs/Running-behind-apache.md b/docs/Running-behind-apache.md
index 989c51fc7..d152306ff 100644
--- a/docs/Running-behind-apache.md
+++ b/docs/Running-behind-apache.md
@@ -1,6 +1,10 @@
# Netdata via apache's mod_proxy
@@ -35,7 +39,6 @@ Also, enable the rewrite module:
sudo a2enmod rewrite
```
----
## Netdata on an existing virtual host
@@ -314,7 +317,7 @@ or
bind to = ::1
```
----
+
You can also use a unix domain socket. This will also provide a faster route between apache and Netdata:
@@ -338,7 +341,7 @@ At the apache side, prepend the 2nd argument to `ProxyPass` with `unix:/tmp/netd
ProxyPass "/netdata/" "unix:/tmp/netdata.sock|http://localhost:19999/" connectiontimeout=5 timeout=30 keepalive=on
```
----
+
If your apache server is not on localhost, you can set:
@@ -350,7 +353,7 @@ If your apache server is not on localhost, you can set:
*note: Netdata v1.9+ support `allow connections from`*
-`allow connections from` accepts [Netdata simple patterns](/libnetdata/simple_pattern/README.md) to match against the connection IP address.
+`allow connections from` accepts [Netdata simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) to match against the connection IP address.
## prevent the double access.log
diff --git a/docs/Running-behind-caddy.md b/docs/Running-behind-caddy.md
index 0282d0750..d7d61375b 100644
--- a/docs/Running-behind-caddy.md
+++ b/docs/Running-behind-caddy.md
@@ -1,6 +1,10 @@
# Netdata via Caddy
diff --git a/docs/Running-behind-h2o.md b/docs/Running-behind-h2o.md
index c49e4e16f..8a1e22b2f 100644
--- a/docs/Running-behind-h2o.md
+++ b/docs/Running-behind-h2o.md
@@ -1,6 +1,10 @@
# Running Netdata behind H2O
@@ -101,7 +105,7 @@ Using the above, you access Netdata on the backend servers, like this:
### Encrypt the communication between H2O and Netdata
-In case Netdata's web server has been [configured to use TLS](/web/server/README.md#enabling-tls-support), it is
+In case Netdata's web server has been [configured to use TLS](https://github.com/netdata/netdata/blob/master/web/server/README.md#enabling-tls-support), it is
necessary to specify inside the H2O configuration that the final destination is using TLS. To do this, change the
`http://` on the `proxy.reverse.url` line in your H2O configuration with `https://`
@@ -142,7 +146,7 @@ If your H2O server is on `localhost`, you can use this to ensure external access
bind to = 127.0.0.1 ::1
```
----
+
You can also use a unix domain socket. This will provide faster communication between H2O and Netdata as well:
@@ -157,7 +161,7 @@ In the H2O configuration, use a line like the following to connect to Netdata vi
proxy.reverse.url http://[unix:/run/netdata/netdata.sock]
```
----
+
If your H2O server is not on localhost, you can set:
@@ -169,7 +173,7 @@ If your H2O server is not on localhost, you can set:
*note: Netdata v1.9+ support `allow connections from`*
-`allow connections from` accepts [Netdata simple patterns](/libnetdata/simple_pattern/README.md) to match against
+`allow connections from` accepts [Netdata simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) to match against
the connection IP address.
## Prevent the double access.log
diff --git a/docs/Running-behind-haproxy.md b/docs/Running-behind-haproxy.md
index ee1790cfe..f87eaa1fe 100644
--- a/docs/Running-behind-haproxy.md
+++ b/docs/Running-behind-haproxy.md
@@ -1,6 +1,10 @@
# Netdata via HAProxy
diff --git a/docs/Running-behind-lighttpd.md b/docs/Running-behind-lighttpd.md
index 2623560e1..6350b474b 100644
--- a/docs/Running-behind-lighttpd.md
+++ b/docs/Running-behind-lighttpd.md
@@ -1,6 +1,10 @@
# Netdata via lighttpd v1.4.x
@@ -27,7 +31,7 @@ $SERVER["socket"] == ":19998" {
}
```
----
+
If the only thing the server is exposing via the web is Netdata (and thus no suburl rewriting required),
then you can get away with just
@@ -51,7 +55,7 @@ auth.require = ( "" => ( "method" => "digest",
other auth methods, and more info on htdigest, can be found in lighttpd's [mod_auth docs](http://redmine.lighttpd.net/projects/lighttpd/wiki/Docs_ModAuth).
----
+
It seems that lighttpd (or some versions of it), fail to proxy compressed web responses.
To solve this issue, disable web response compression in Netdata.
diff --git a/docs/Running-behind-nginx.md b/docs/Running-behind-nginx.md
index 0cb16309a..a94f4058d 100644
--- a/docs/Running-behind-nginx.md
+++ b/docs/Running-behind-nginx.md
@@ -1,6 +1,10 @@
# Running Netdata behind Nginx
@@ -169,7 +173,7 @@ Using the above, you access Netdata on the backend servers, like this:
### Encrypt the communication between Nginx and Netdata
-In case Netdata's web server has been [configured to use TLS](/web/server/README.md#enabling-tls-support), it is
+In case Netdata's web server has been [configured to use TLS](https://github.com/netdata/netdata/blob/master/web/server/README.md#enabling-tls-support), it is
necessary to specify inside the Nginx configuration that the final destination is using TLS. To do this, please, append
the following parameters in your `nginx.conf`
@@ -212,7 +216,7 @@ If your Nginx is on `localhost`, you can use this to protect your Netdata:
bind to = 127.0.0.1 ::1
```
----
+
You can also use a unix domain socket. This will also provide a faster route between Nginx and Netdata:
@@ -232,7 +236,6 @@ upstream backend {
}
```
----
If your Nginx server is not on localhost, you can set:
@@ -244,7 +247,7 @@ If your Nginx server is not on localhost, you can set:
*note: Netdata v1.9+ support `allow connections from`*
-`allow connections from` accepts [Netdata simple patterns](/libnetdata/simple_pattern/README.md) to match against the
+`allow connections from` accepts [Netdata simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) to match against the
connection IP address.
## Prevent the double access.log
diff --git a/docs/agent-cloud.md b/docs/agent-cloud.md
index ed54325c3..b5b996617 100644
--- a/docs/agent-cloud.md
+++ b/docs/agent-cloud.md
@@ -13,24 +13,24 @@ hosted web interface that gives you real-time visibility into your entire infras
There are two main ways to use your Agent(s) with Netdata Cloud. You can use both these methods simultaneously, or just
one, based on your needs:
-- Use Netdata Cloud's web interface for monitoring an entire infrastructure, with any number of Agents, in one
- centralized dashboard.
-- Use **Visited nodes** to quickly navigate between the dashboards of nodes you've recently visited.
+- Use Netdata Cloud's web interface for monitoring an entire infrastructure, with any number of Agents, in one
+ centralized dashboard.
+- Use **Visited nodes** to quickly navigate between the dashboards of nodes you've recently visited.
## Monitor an infrastructure with Netdata Cloud
We designed Netdata Cloud to help you see health and performance metrics, plus active alarms, in a single interface.
Here's what a small infrastructure might look like:
-![Animated GIF of Netdata
-Cloud](https://user-images.githubusercontent.com/1153921/80828986-1ebb3b00-8b9b-11ea-957f-2c8d0d009e44.gif)
+![Animated GIF of Netdata Cloud](https://user-images.githubusercontent.com/1153921/80828986-1ebb3b00-8b9b-11ea-957f-2c8d0d009e44.gif)
-[Read more about Netdata Cloud](https://learn.netdata.cloud/docs/cloud/) to better understand how it gives you real-time
+[Read more about Netdata Cloud](https://github.com/netdata/netdata/blob/master/docs/cloud/cloud.mdx) to better
+understand how it gives you real-time
visibility into your entire infrastructure, and why you might consider using it.
-Next, [get started in 5 minutes](https://learn.netdata.cloud/docs/cloud/get-started/), or read our [connection to Cloud
-reference](/claim/README.md) for a complete investigation of Cloud's security and encryption features, plus instructions
-for Docker containers.
+Next, [get started in 5 minutes](https://github.com/netdata/netdata/blob/master/docs/cloud/get-started.mdx), or read our
+[connection to Cloud reference](https://github.com/netdata/netdata/blob/master/claim/README.md) for a complete
+investigation of Cloud's security and encryption features, plus instructions for Docker containers.
## Navigate between dashboards with Visited nodes
@@ -46,15 +46,13 @@ Netdata Cloud account, sign in with your preferred method.
Cloud redirects you back to your node's dashboard, which is now connected to your Netdata Cloud account. You can now see
the Visited nodes menu, which is populated by a single node.
-![An Agent's dashboard with the Visited nodes
-menu](https://user-images.githubusercontent.com/1153921/80830383-b6ba2400-8b9d-11ea-9eb2-379c7eccd22f.png)
+![An Agent's dashboard with the Visited nodes menu](https://user-images.githubusercontent.com/1153921/80830383-b6ba2400-8b9d-11ea-9eb2-379c7eccd22f.png)
If you previously went through the Cloud onboarding process to create a Space and War Room, you will also see these in
the Visited Nodes menu. You can click on your Space or any of your War Rooms to navigate to Netdata Cloud and continue
monitoring your infrastructure from there.
-![A Agent's dashboard with the Visited nodes menu, plus Spaces and War
-Rooms](https://user-images.githubusercontent.com/1153921/80830382-b6218d80-8b9d-11ea-869c-1170b95eeb4a.png)
+![A Agent's dashboard with the Visited nodes menu, plus Spaces and War Rooms](https://user-images.githubusercontent.com/1153921/80830382-b6218d80-8b9d-11ea-869c-1170b95eeb4a.png)
To add more Agents to your Visited nodes menu, visit them and sign in again. This process connects that node to your
Cloud account and further populates the menu.
@@ -62,16 +60,19 @@ Cloud account and further populates the menu.
Once you've added more than one node, you can use the menu to switch between various dashboards without remembering IP
addresses or hostnames or saving bookmarks for every node you want to monitor.
-![Switching between dashboards with Visited
-nodes](https://user-images.githubusercontent.com/1153921/80831018-e158ac80-8b9e-11ea-882e-1d82cdc028cd.gif)
+![Switching between dashboards with Visited nodes](https://user-images.githubusercontent.com/1153921/80831018-e158ac80-8b9e-11ea-882e-1d82cdc028cd.gif)
## What's next?
The Agent-Cloud integration is highly adaptable to the needs of any infrastructure or user. If you want to learn more
about how you might want to use or configure Cloud, we recommend the following:
-- Get an overview of Cloud's features by reading [Cloud documentation](https://learn.netdata.cloud/docs/cloud/).
-- Follow the 5-minute [get started with Cloud](https://learn.netdata.cloud/docs/cloud/get-started/) guide to finish
- onboarding and connect your first nodes.
-- Better understand how agents connect securely to the Cloud with [connect agent to Cloud](/claim/README.md) and [Agent-Cloud
- link](/aclk/README.md) documentation.
+- Get an overview of Cloud's features by
+ reading [Cloud documentation](https://github.com/netdata/netdata/blob/master/docs/cloud/cloud.mdx).
+- Follow the
+ 5-minute [get started with Cloud](https://github.com/netdata/netdata/blob/master/docs/cloud/cloud.mdx)
+ guide to finish
+ onboarding and connect your first nodes.
+- Better understand how agents connect securely to the Cloud
+ with [connect agent to Cloud](https://github.com/netdata/netdata/blob/master/claim/README.md) and
+ [Agent-Cloud link](https://github.com/netdata/netdata/blob/master/aclk/README.md) documentation.
diff --git a/docs/anonymous-statistics.md b/docs/anonymous-statistics.md
index 99bd3dc7f..13eb465c6 100644
--- a/docs/anonymous-statistics.md
+++ b/docs/anonymous-statistics.md
@@ -20,7 +20,7 @@ We use the statistics gathered from this information for two purposes:
Netdata collects usage information via two different channels:
-- **Agent dashboard**: We use the [PostHog JavaScript integration](https://posthog.com/docs/integrations/js-integration) (with sensitive event attributes overwritten to be anonymized) to send product usage events when you access an [Agent's dashboard](/web/gui/README.md).
+- **Agent dashboard**: We use the [PostHog JavaScript integration](https://posthog.com/docs/integrations/js-integration) (with sensitive event attributes overwritten to be anonymized) to send product usage events when you access an [Agent's dashboard](https://github.com/netdata/netdata/blob/master/web/gui/README.md).
- **Agent backend**: The `netdata` daemon executes the [`anonymous-statistics.sh`](https://github.com/netdata/netdata/blob/6469cf92724644f5facf343e4bdd76ac0551a418/daemon/anonymous-statistics.sh.in) script when Netdata starts, stops cleanly, or fails.
You can opt-out from sending anonymous statistics to Netdata through three different [opt-out mechanisms](#opt-out).
@@ -65,7 +65,7 @@ Starting with v1.21, we additionally collect information about:
- Failures to build the dependencies required to use Cloud features.
- Unavailability of Cloud features in an agent.
-- Failures to connect to the Cloud in case the [connection process](/claim/README.md) has been completed. This includes error codes
+- Failures to connect to the Cloud in case the [connection process](https://github.com/netdata/netdata/blob/master/claim/README.md) has been completed. This includes error codes
to inform the Netdata team about the reason why the connection failed.
To see exactly what and how is collected, you can review the script template `daemon/anonymous-statistics.sh.in`. The
@@ -82,13 +82,13 @@ installation, including manual, offline, and macOS installations. Create the fil
.opt-out-from-anonymous-statistics` from your Netdata configuration directory.
**Pass the option `--disable-telemetry` to any of the installer scripts in the [installation
-docs](/packaging/installer/README.md).** You can append this option during the initial installation or a manual
+docs](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md).** You can append this option during the initial installation or a manual
update. You can also export the environment variable `DISABLE_TELEMETRY` with a non-zero or non-empty value
(e.g: `export DISABLE_TELEMETRY=1`).
When using Docker, **set your `DISABLE_TELEMETRY` environment variable to `1`.** You can set this variable with the following
command: `export DISABLE_TELEMETRY=1`. When creating a container using Netdata's [Docker
-image](/packaging/docker/README.md#create-a-new-netdata-agent-container) for the first time, this variable will disable
+image](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md#create-a-new-netdata-agent-container) for the first time, this variable will disable
the anonymous statistics script inside of the container.
Each of these opt-out processes does the following:
diff --git a/docs/cloud/alerts-notifications/add-discord-notification.md b/docs/cloud/alerts-notifications/add-discord-notification.md
new file mode 100644
index 000000000..386e6035e
--- /dev/null
+++ b/docs/cloud/alerts-notifications/add-discord-notification.md
@@ -0,0 +1,59 @@
+
+
+From the Netdata Cloud UI, you can manage your space's notification settings and add a configuration to deliver notifications to Discord.
+
+#### Prerequisites
+
+To enable Discord notifications you need:
+
+- A Netdata Cloud account
+- Access to the space as an **administrator**
+- Have a Discord server able to receive webhook integrations. For more details check [how to configure this on Discord](#settings-on-discord)
+
+#### Steps
+
+1. Click on the **Space settings** cog (located above your profile icon)
+1. Click on the **Notification** tab
+1. Click on the **+ Add configuration** button (near the top-right corner of your screen)
+1. On the **Discord** card click on **+ Add**
+1. A modal will be presented to you to enter the required details to enable the configuration:
+ 1. **Notification settings** are Netdata specific settings
+      - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it
+      - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to receive notifications through this configuration
+      - Notification - you specify which notifications you want to receive through this configuration: All Alerts and unreachable, All Alerts, or Critical only
+   1. **Integration configuration** are the settings required by the specific notification integration, which vary by notification method. For Discord:
+      - Define the type of channel you want to send notifications to: **Text channel** or **Forum channel**
+      - Webhook URL - the URL provided by Discord for the channel where you want to receive your notifications. For more details check [how to configure this on Discord](#settings-on-discord)
+      - Thread name - if the Discord channel is a **Forum channel**, you will need to provide the thread name as well
+
+#### Settings on Discord
+
+#### Enable webhook integrations on Discord server
+
+To enable the webhook integrations on Discord you need:
+1. Go to **Integrations** under your **Server Settings**
+
+ ![image](https://user-images.githubusercontent.com/82235632/214091719-89372894-d67f-4ec5-98d0-57c7d4256ebf.png)
+
+1. **Create Webhook** or **View Webhooks** if you already have some defined
+1. When you create a new webhook you specify: Name and Channel
+1. Once you have this configured, you will need the Webhook URL to add to your notification configuration in the Netdata UI
+
+ ![image](https://user-images.githubusercontent.com/82235632/214092713-d16389e3-080f-4e1c-b150-c0fccbf4570e.png)
+
+For more details please read this article from Discord: [Intro to Webhooks](https://support.discord.com/hc/en-us/articles/228383668).
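+
+If you want to sanity-check the Webhook URL before adding it to Netdata, one hedged way (outside the Netdata setup itself) is to post a test message to it with `curl`; the URL below is a placeholder:
+
+```bash
+# Replace the URL with the Webhook URL copied from your Discord channel settings.
+curl -X POST \
+  -H "Content-Type: application/json" \
+  -d '{"content": "Test message for my Netdata notification channel"}' \
+  "https://discord.com/api/webhooks/YOUR_WEBHOOK_ID/YOUR_WEBHOOK_TOKEN"
+```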
+
+#### Related topics
+
+- [Alerts Configuration](https://github.com/netdata/netdata/blob/master/health/README.md)
+- [Alert Notifications](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.mdx)
+- [Manage notification methods](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/manage-notification-methods.md)
\ No newline at end of file
diff --git a/docs/cloud/alerts-notifications/add-pagerduty-notification-configuration.md b/docs/cloud/alerts-notifications/add-pagerduty-notification-configuration.md
new file mode 100644
index 000000000..6e47cfd9c
--- /dev/null
+++ b/docs/cloud/alerts-notifications/add-pagerduty-notification-configuration.md
@@ -0,0 +1,60 @@
+
+
+From the Cloud interface, you can manage your space's notification settings and add a specific configuration to get notifications delivered to PagerDuty.
+
+#### Prerequisites
+
+To add PagerDuty notification configurations you need:
+
+- A Cloud account
+- Access to the space as an **administrator**
+- The space needs to be on the **Business** plan or higher
+- Have a PagerDuty service to receive events, for more details check [how to configure this on PagerDuty](#settings-on-pagerduty)
+
+#### Steps
+
+1. Click on the **Space settings** cog (located above your profile icon)
+1. Click on the **Notification** tab
+1. Click on the **+ Add configuration** button (near the top-right corner of your screen)
+1. On the **PagerDuty** card click on **+ Add**
+1. A modal will be presented to you to enter the required details to enable the configuration:
+ 1. **Notification settings** are Netdata specific settings
+      - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it
+      - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to receive notifications through this configuration
+      - Notification - you specify which notifications you want to receive through this configuration: All Alerts and unreachable, All Alerts, or Critical only
+   1. **Integration configuration** are the settings required by the specific notification integration, which vary by notification method. For PagerDuty:
+      - Integration Key - a 32-character key provided by PagerDuty to receive events on your service. For more details check [how to configure this on PagerDuty](#settings-on-pagerduty)
+
+#### Settings on PagerDuty
+
+#### Enable webhook integrations on PagerDuty
+
+To enable the webhook integrations on PagerDuty you need:
+1. Create a service to receive events from your services directory page:
+
+ ![image](https://user-images.githubusercontent.com/2930882/214254148-03714f31-7943-4444-9b63-7b83c9daa025.png)
+
+1. At step 3, select the `Events API V2` integration:
+
+ ![image](https://user-images.githubusercontent.com/2930882/214254466-423cf493-037d-47bd-b9e6-fc894897f333.png)
+
+1. Once the service is created you will be redirected to its configuration page, where you can copy the **integration key** that you will need to add to your notification configuration in the Netdata UI:
+
+
+ ![image](https://user-images.githubusercontent.com/2930882/214255916-0d2e53d5-87cc-408a-9f5b-0308a3262d5c.png)
+
+
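+If you want to verify that the integration key accepts events before wiring it into Netdata, a hedged way is to send a test event to PagerDuty's Events API v2 with `curl`; the key below is a placeholder:
+
+```bash
+# Send a test "trigger" event using your 32-character integration (routing) key.
+curl -X POST "https://events.pagerduty.com/v2/enqueue" \
+  -H "Content-Type: application/json" \
+  -d '{
+        "routing_key": "YOUR_32_CHARACTER_INTEGRATION_KEY",
+        "event_action": "trigger",
+        "payload": {
+          "summary": "Test event while setting up Netdata notifications",
+          "source": "netdata-test",
+          "severity": "warning"
+        }
+      }'
+```
+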
+#### Related topics
+
+- [Alerts Configuration](https://github.com/netdata/netdata/blob/master/health/README.md)
+- [Alert Notifications](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.mdx)
+- [Manage notification methods](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/manage-notification-methods.md)
\ No newline at end of file
diff --git a/docs/cloud/alerts-notifications/add-slack-notification-configuration.md b/docs/cloud/alerts-notifications/add-slack-notification-configuration.md
new file mode 100644
index 000000000..d8d6185fe
--- /dev/null
+++ b/docs/cloud/alerts-notifications/add-slack-notification-configuration.md
@@ -0,0 +1,63 @@
+
+
+From the Cloud interface, you can manage your space's notification settings and add a specific configuration to get notifications delivered to Slack.
+
+#### Prerequisites
+
+To add Slack notification configurations you need:
+
+- A Netdata Cloud account
+- Access to the space as an **administrator**
+- The space needs to be on the **Business** plan or higher
+- Have a Slack app on your workspace to receive the webhooks, for more details check [how to configure this on Slack](#settings-on-slack)
+
+#### Steps
+
+1. Click on the **Space settings** cog (located above your profile icon)
+1. Click on the **Notification** tab
+1. Click on the **+ Add configuration** button (near the top-right corner of your screen)
+1. On the **Slack** card click on **+ Add**
+1. A modal will be presented to you to enter the required details to enable the configuration:
+ 1. **Notification settings** are Netdata specific settings
+      - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it
+      - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to receive notifications through this configuration
+      - Notification - you specify which notifications you want to receive through this configuration: All Alerts and unreachable, All Alerts, or Critical only
+   1. **Integration configuration** are the settings required by the specific notification integration, which vary by notification method. For Slack:
+      - Webhook URL - the URL provided by Slack for the channel where you want to receive your notifications. For more details check [how to configure this on Slack](#settings-on-slack)
+
+#### Settings on Slack
+
+To enable the webhook integrations on Slack you need:
+1. Create an app to receive webhook integrations. Check [Create an app](https://api.slack.com/apps?new_app=1) from Slack documentation for further details
+1. Install the app on your workspace
+1. Configure Webhook URLs for your workspace
+ - On your app go to **Incoming Webhooks** and click on **activate incoming webhooks**
+
+ ![image](https://user-images.githubusercontent.com/2930882/214251948-486229bb-195b-499b-92e4-4be59a567a19.png)
+
+   - At the bottom of the **Webhook URLs for Your Workspace** section, click **Add New Webhook to Workspace**
+   - After clicking it, specify the channel where you want your notifications to be delivered
+
+ ![image](https://user-images.githubusercontent.com/82235632/214103532-95f9928d-d4d6-4172-9c24-a4ddd330e96d.png)
+
+   - Once completed, copy the Webhook URL; you will need to add it to your notification configuration in the Netdata UI
+
+ ![image](https://user-images.githubusercontent.com/82235632/214104412-13aaeced-1b40-4894-85f6-9db0eb35c584.png)
+
+For more details please check Slack's article [Incoming webhooks for Slack](https://slack.com/help/articles/115005265063-Incoming-webhooks-for-Slack).
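+
+If you want to sanity-check the Webhook URL before adding it to Netdata, a hedged test (outside the Netdata setup) is to post a message to it with `curl`; the URL below is a placeholder:
+
+```bash
+# Replace the URL with the Webhook URL generated for your Slack app and channel.
+curl -X POST \
+  -H "Content-Type: application/json" \
+  -d '{"text": "Test message for my Netdata notification channel"}' \
+  "https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX"
+```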
+
+
+#### Related topics
+
+- [Alerts Configuration](https://github.com/netdata/netdata/blob/master/health/README.md)
+- [Alert Notifications](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.mdx)
+- [Manage notification methods](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/manage-notification-methods.md)
\ No newline at end of file
diff --git a/docs/cloud/alerts-notifications/add-webhook-notification-configuration.md b/docs/cloud/alerts-notifications/add-webhook-notification-configuration.md
new file mode 100644
index 000000000..e6d042339
--- /dev/null
+++ b/docs/cloud/alerts-notifications/add-webhook-notification-configuration.md
@@ -0,0 +1,105 @@
+
+
+From the Cloud interface, you can manage your space's notification settings and add a specific configuration to get notifications delivered to a webhook using a predefined schema.
+
+#### Prerequisites
+
+To add webhook notification configurations you need:
+
+- A Netdata Cloud account
+- Access to the space as an **administrator**
+- Space needs to be on **Pro** plan or higher
+- Have an app that allows you to receive webhooks following a predefined schema, for more details check [how to create the webhook service](#webhook-service)
+
+#### Steps
+
+1. Click on the **Space settings** cog (located above your profile icon)
+1. Click on the **Notification** tab
+1. Click on the **+ Add configuration** button (near the top-right corner of your screen)
+1. On the **webhook** card click on **+ Add**
+1. A modal will be presented to you to enter the required details to enable the configuration:
+ 1. **Notification settings** are Netdata specific settings
+      - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it
+      - Rooms - by specifying a list of Rooms you select for which nodes or areas of your infrastructure you want to receive notifications through this configuration
+      - Notification - you specify which notifications you want to receive through this configuration: All Alerts and unreachable, All Alerts, or Critical only
+   1. **Integration configuration** are the settings required by the specific notification integration, which vary by notification method. For webhook:
+      - Webhook URL - the URL of the service that Netdata will send notifications to. To keep the communication secure, we only accept HTTPS URLs. Check [how to create the webhook service](#webhook-service).
+      - Extra headers - these are optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL. For more details check [Extra headers](#extra-headers)
+      - Authorization Mechanism - Netdata webhook integration supports 3 different authorization mechanisms. For more details check [Authorization mechanism](#authorization-mechanism):
+        - Mutual TLS (recommended) - default authentication mechanism used if no other method is selected.
+        - Basic - the client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**. These settings will be required inputs.
+        - Bearer - the client sends a request with an Authorization header that includes a **bearer token**. This setting will be a required input.
+
+#### Webhook service
+
+A webhook integration allows your application to receive real-time alerts from Netdata by sending HTTP requests to a specified URL. In this document, we'll go over the steps to set up a generic webhook integration, including adding headers, and implementing different types of authorization mechanisms.
+
+##### Netdata webhook integration
+
+A webhook integration is a way for one service to notify another service about events that occur within it. This is done by sending an HTTP POST request to a specified URL (known as the "webhook URL") when an event occurs.
+
+Netdata webhook integration service will send alert notifications to the destination service as soon as they are detected.
+
+The notification content sent to the destination service will be a JSON object having these properties:
+
+| field | type | description |
+| :-- | :-- | :-- |
+| message | string | A summary message of the alert. |
+| alarm | string | The alarm the notification is about. |
+| info | string | Additional info related with the alert. |
+| chart | string | The chart associated with the alert. |
+| context | string | The chart context. |
+| space | string | The space where the node that raised the alert is assigned. |
+| family | string | Context family. |
+| class | string | Classification of the alert, e.g. "Error". |
+| severity | string | Alert severity, can be one of "warning", "critical" or "clear". |
+| date | string | Date of the alert in ISO8601 format. |
+| duration | string | Duration the alert has been raised. |
+| critical_count | integer | Number of critical alerts currently existing on the same node. |
+| warning_count | integer | Number of warning alerts currently existing on the same node. |
+| alarm_url | string | Netdata Cloud URL for this alarm. |
+
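+For illustration only, the request your service receives could be reproduced with a `curl` call roughly like the one below; the endpoint and all field values are made-up examples, and the exact payload Netdata sends may differ:
+
+```bash
+# Hypothetical example of the kind of POST request a webhook endpoint should expect.
+curl -X POST "https://your-app.example.com/netdata-webhook" \
+  -H "Content-Type: application/json" \
+  -d '{
+        "message": "ram_available is critical",
+        "alarm": "ram_available",
+        "info": "Estimated amount of RAM available for userspace processes",
+        "chart": "system.ram",
+        "context": "system.ram",
+        "space": "my-space",
+        "family": "ram",
+        "class": "Utilization",
+        "severity": "critical",
+        "date": "2023-01-26T13:00:00Z",
+        "duration": "5 minutes",
+        "critical_count": 1,
+        "warning_count": 0,
+        "alarm_url": "https://app.netdata.cloud/..."
+      }'
+```
+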
+##### Extra headers
+
+When setting up a webhook integration, the user can specify a set of headers to be included in the HTTP requests sent to the webhook URL.
+
+By default, the following headers will be sent in the HTTP request
+
+| **Header** | **Value** |
+|:-------------------------------:|-----------------------------|
+| Content-Type | application/json |
+
+##### Authorization mechanism
+
+Netdata webhook integration supports 3 different authorization mechanisms:
+
+1. Mutual TLS (recommended)
+
+In mutual Transport Layer Security (mTLS) authorization, the client and the server authenticate each other using X.509 certificates. This ensures that the client is connecting to the intended server, and that the server is only accepting connections from authorized clients.
+
+To take advantage of mutual TLS, you can configure your server to verify Netdata's client certificate. To do that you need to download our [CA certificate file](http://localhost) and configure your server to use it as the trusted certificate authority when validating client certificates.
+
+This is the default authentication mechanism used if no other method is selected.
+
+2. Basic
+
+In basic authorization, the client sends a request with an Authorization header that includes a base64-encoded string in the format username:password. The server then uses this information to authenticate the client. If this authentication method is selected, the user can set the user and password that will be used when connecting to the destination service.
+
+3. Bearer
+
+In bearer token authorization, the client sends a request with an Authorization header that includes a bearer token. The server then uses this token to authenticate the client. Bearer tokens are typically generated by an authentication service, and are passed to the client after a successful authentication. If this method is selected, the user can set the token to be used for connecting to the destination service.
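+
+To make the two header-based mechanisms concrete, the hedged `curl` calls below show the `Authorization` headers your service should expect; the endpoint, credentials, and token are placeholders:
+
+```bash
+# Basic: the "username:password" pair is sent base64-encoded; curl builds the header from -u.
+curl -X POST -u "myuser:mypassword" \
+  -H "Content-Type: application/json" -d '{"message": "test"}' \
+  "https://your-app.example.com/netdata-webhook"
+
+# Bearer: the token configured in Netdata Cloud is sent verbatim in the Authorization header.
+curl -X POST -H "Authorization: Bearer YOUR_BEARER_TOKEN" \
+  -H "Content-Type: application/json" -d '{"message": "test"}' \
+  "https://your-app.example.com/netdata-webhook"
+```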
+
+#### Related topics
+
+- [Alerts Configuration](https://github.com/netdata/netdata/blob/master/health/README.md)
+- [Alert Notifications](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.mdx)
+- [Manage notification methods](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/manage-notification-methods.md)
diff --git a/docs/cloud/alerts-notifications/manage-notification-methods.md b/docs/cloud/alerts-notifications/manage-notification-methods.md
new file mode 100644
index 000000000..115aaae73
--- /dev/null
+++ b/docs/cloud/alerts-notifications/manage-notification-methods.md
@@ -0,0 +1,88 @@
+
+
+From the Cloud interface, you can manage your space's notification settings, as well as allow users to personalize their notification settings.
+
+### Manage space notification settings
+
+#### Prerequisites
+
+To manage space notification settings, you will need the following:
+
+- A Netdata Cloud account
+- Access to the space as an **administrator**
+
+#### Available actions per notification method based on service level
+
+| **Action** | **Personal service level** | **System service level** |
+| :- | :-: | :-: |
+| Enable / Disable | X | X |
+| Edit | | X |
+| Delete | X | X |
+| Add multiple configurations for same method | | X |
+
+Notes:
+* For Netdata-provided ones, you can't delete the existing notification method configuration.
+* Enable, Edit and Add actions over specific notification methods will only be allowed if your plan has access to those ([service classification](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.mdx#service-classification))
+
+#### Steps
+
+1. Click on the **Space settings** cog (located above your profile icon)
+1. Click on the **Notification** tab
+1. You will be presented with a table of the configured notification methods for the space. You will be able to:
+ 1. **Add a new** notification method configuration.
+      - Choose the service from the list of available ones; you may see a list of unavailable options if your plan doesn't allow some of them (the card shows the plan level that allows a specific service)
+      - You can optionally provide a name for the configuration so you can easily refer to it
+      - Define filtering criteria. To which Rooms will this apply? What notifications do I want to receive? (All Alerts and unreachable, All Alerts, Critical only)
+      - Depending on the service, different inputs will be present; note that some inputs are mandatory and some are optional
+      - If you have doubts about how to configure the service, you can find a link at the top of the modal that takes you to the specific documentation page to help you
+ 1. **Edit an existing** notification method configuration. Personal level ones can't be edited here, see [Manage user notification settings](#manage-user-notification-settings). You will be able to change:
+ - The name provided for it
+ - Filtering criteria
+ - Service specific inputs
+ 1. **Enable/Disable** a given notification method configuration.
+ - Use the toggle to enable or disable the notification method configuration
+   1. **Delete an existing** notification method configuration. Netdata-provided ones can't be deleted, e.g. Email
+ - Use the trash icon to delete your configuration
+
+### Manage user notification settings
+
+#### Prerequisites
+
+To manage user specific notification settings, you will need the following:
+
+- A Cloud account
+- Have access to, at least, a space
+
+Note: If an administrator has disabled a Personal [service level](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.md#service-level) notification method this will override any user specific setting.
+
+#### Steps
+
+1. Click on the **User notification settings** shortcut on top of the help button
+1. You are presented with:
+ - The Personal [service level](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.md#service-level) notification methods you can manage
+   - The list of spaces and rooms you have access to
+   - If you're an Administrator, Manager, or Troubleshooter, you'll also see the Rooms from a space you don't have access to on the **All Rooms** tab, and you can activate notifications for them by joining the room
+1. On this modal you will be able to:
+   1. **Enable/Disable** the notification method for yourself; this applies across all spaces and rooms
+      - Use the toggle to enable or disable the notification method
+   1. **Define which notifications you want** per space/room: All Alerts and unreachable, All Alerts, Critical only, or No notifications
+ 1. **Activate notifications** for a room you aren't a member of
+ - From the **All Rooms** tab click on the Join button for the room(s) you want
+
+#### Related topics
+
+- [Alert Notifications](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.mdx)
+- [Alerts Configuration](https://github.com/netdata/netdata/blob/master/health/README.md)
+- [Add webhook notification configuration](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/add-webhook-notification-configuration.md)
+- [Add Discord notification configuration](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/add-discord-notification.md)
+- [Add Slack notification configuration](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/add-slack-notification-configuration.md)
+- [Add PagerDuty notification configuration](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/add-pagerduty-notification-configuration.md)
diff --git a/docs/cloud/alerts-notifications/notifications.mdx b/docs/cloud/alerts-notifications/notifications.mdx
new file mode 100644
index 000000000..e594606eb
--- /dev/null
+++ b/docs/cloud/alerts-notifications/notifications.mdx
@@ -0,0 +1,155 @@
+---
+title: "Alert notifications"
+description: >-
+ "Configure Netdata Cloud to send notifications to your team whenever any node on your infrastructure
+ triggers a pre-configured or custom alert threshold."
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.mdx"
+sidebar_label: "Alert notifications"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Operations/Alerts"
+---
+
+import Callout from '@site/src/components/Callout'
+
+Netdata Cloud can send centralized alert notifications to your team whenever a node enters a warning, critical, or
+unreachable state. By enabling notifications, you ensure no alert, on any node in your infrastructure, goes unnoticed by
+you or your team.
+
+Having this information centralized helps you:
+* Have a clear view of the health across your infrastructure, [seeing all alerts in one place](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/view-active-alerts.mdx)
+* Easily [set up your alert notification process](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/manage-notification-methods.md):
+methods to use and where to use them, filtering rules, etc.
+* Quickly troubleshoot using [Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metrics-correlations.md)
+or [Anomaly Advisor](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/anomaly-advisor.mdx)
+
+If a node is getting disconnected often or has many alerts, we protect you and your team from alert fatigue by sending
+you a flood protection notification. Getting one of these notifications is a good signal of health or performance issues
+on that node.
+
+Admins must enable alert notifications for their [Space(s)](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/manage-notification-methods.md#manage-space-notification-settings). All users in a
+Space can then personalize their notification settings from within their [account
+menu](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/#manage-user-notification-settings).
+
+
+
+Centralized alert notifications from Netdata Cloud are an independent process from [notifications from
+Netdata](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md). You can enable one or the other, or both, based on your needs. However,
+the alerts you see in Netdata Cloud are based on those streamed from your Netdata-monitoring nodes. If you want to tweak
+or add new alerts that you see in Netdata Cloud, and receive via centralized alert notifications, you must
+[configure](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md) each node's alert watchdog.
+
+
+
+### Alert notifications
+
+Netdata Cloud can send centralized alert notifications to your team whenever a node enters a warning, critical, or unreachable state. By enabling notifications,
+you ensure no alert, on any node in your infrastructure, goes unnoticed by you or your team.
+
+If a node is getting disconnected often or has many alerts, we protect you and your team from alert fatigue by sending you a flood protection notification.
+Getting one of these notifications is a good signal of health or performance issues on that node.
+
+Alert notifications can be delivered through different methods; these range from an email sent by Netdata to the use of a third-party tool like PagerDuty.
+
+Notification methods are classified on two main attributes:
+* Service level: Personal or System
+* Service classification: Community or Business
+
+Only administrators are able to manage the space's alert notification settings.
+All users in a Space can personalize their notification settings, for Personal service level notification methods, from within their profile menu.
+
+> ⚠️ Netdata Cloud supports different notification methods and their availability will depend on the plan you are on.
+> For more details check [Service classification](#service-classification) or [netdata.cloud/pricing](https://www.netdata.cloud/pricing).
+
+#### Service level
+
+##### Personal
+
+The notification methods classified as **Personal** are what we consider generic, meaning that administrators can't set specific rules for them.
+
+These notifications are sent to the channel destination, which is a user-specific attribute, e.g. the user's e-mail address. Users can then
+manage which specific configurations they want for the Space / Room(s) and the desired notification level from their User Profile page, under
+**Notifications**.
+
+One example of such a notification method is the E-mail.
+
+##### System
+
+For **System** notification methods, the destination of the channel will be a target that usually isn't specific to a single user, e.g. a Slack channel.
+
+These notification methods allow administrators to define fine-grained rules, and more than one configuration can exist for them, since you can specify
+different targets depending on Rooms or Notification level settings.
+
+Some examples of such notification methods are: Webhook, PagerDuty, Slack.
+
+#### Service classification
+
+##### Community
+
+Notification methods classified as Community can be used by everyone, regardless of the plan your space is on.
+These are: Email and Discord.
+
+##### Pro
+
+Notification methods classified as Pro are only available for the **Pro** and **Business** plans.
+These are: webhook.
+
+##### Business
+
+Notification methods classified as Business are only available for the **Business** plan.
+These are: PagerDuty and Slack.
+
+## Flood protection
+
+If a node has too many state changes like firing too many alerts or going from reachable to unreachable, Netdata Cloud
+enables flood protection. As long as a node is in flood protection mode, Netdata Cloud does not send notifications about
+this node. Even with flood protection active, it is possible to access the node directly, either via Netdata Cloud or
+the local Agent dashboard at `http://NODE:19999`.
+
+## Anatomy of an alert notification
+
+Email alarm notifications show the following information:
+
+- The Space's name
+- The node's name
+- Alarm status: critical, warning, cleared
+- Previous alarm status
+- Time at which the alarm triggered
+- Chart context that triggered the alarm
+- Name and information about the triggered alarm
+- Alarm value
+- Total number of warning and critical alerts on that node
+- Threshold for triggering the given alarm state
+- Calculation or database lookups that Netdata uses to compute the value
+- Source of the alarm, including which file you can edit to configure this alarm on an individual node
+
+Email notifications also feature a **Go to Node** button, which takes you directly to the offending chart for that node
+within Cloud's embedded dashboards.
+
+Here's an example email notification for the `ram_available` chart, which is in a critical state:
+
+![Screenshot of an alarm notification email from Netdata Cloud](https://user-images.githubusercontent.com/1153921/87461878-e933c480-c5c3-11ea-870b-affdb0801854.png)
+
+## What's next?
+
+Netdata Cloud's alarm notifications feature leverages the alarms configuration on each node in your infrastructure. If
+you'd like to tweak any of these alarms, or even add new ones based on your needs, read our [health
+quickstart](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md).
+
+You can also [view active alarms](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/view-active-alerts.mdx) in Netdata Cloud for an instant
+visualization of the health of your infrastructure.
+
+### Related Topics
+
+#### **Related Concepts**
+- [Rooms](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md)
+- [Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metrics-correlations.md)
+- [Anomaly Advisor](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/anomaly-advisor.mdx)
+
+#### Related Tasks
+- [View Active alarms](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/view-active-alerts.mdx)
+- [Manage notification methods](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/manage-notification-methods.md)
+- [Add webhook notification configuration](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/add-webhook-notification-configuration.md)
+- [Add Discord notification configuration](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/add-discord-notification.md)
+- [Add Slack notification configuration](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/add-slack-notification-configuration.md)
+- [Add PagerDuty notification configuration](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/add-pagerduty-notification-configuration.md)
diff --git a/docs/cloud/alerts-notifications/smartboard.mdx b/docs/cloud/alerts-notifications/smartboard.mdx
new file mode 100644
index 000000000..b9240ce49
--- /dev/null
+++ b/docs/cloud/alerts-notifications/smartboard.mdx
@@ -0,0 +1,46 @@
+---
+title: "Alerts smartboard"
+description: ""
+type: "reference"
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/smartboard.mdx"
+sidebar_label: "Alerts smartboard"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Operations/Alerts"
+---
+
+The Alerts view gives you a high-level overview of availability and performance information for every node you're
+monitoring with Netdata Cloud. We expect it to become the "home base" for many Netdata Cloud users who want to instantly
+understand what's going on with their infrastructure and exactly where issues might be.
+
+The Alerts view is available entirely for free to all users and for any number of nodes.
+
+## Alerts table and filtering
+
+The Alerts view shows all active alerts in your War Room, including the alert's name, the most recent value, a
+timestamp of when it became active, and the relevant node.
+
+You can use the checkboxes in the filter pane on the right side of the screen to filter the alerts displayed in the
+table by Status, Class, Type & Component, Role, Operating System, or Node.
+
+Click on any of the alert names to see the alert.
+
+## View active alerts
+
+In the `Active` subtab, you can see exactly how many **critical** and **warning** alerts are active across your nodes.
+
+## View configured alerts
+
+You can view all the configured alerts on all the agents that belong to a War Room in the `Alert Configurations` subtab.
+From within the Alerts view, you can click the `Alert Configurations` subtab to see a high-level view of the states of
+the alerts on the nodes within this War Room, and drill down to the node level, where you can see each configured alert
+with its latest status.
+
+
+
+
+
+
+
+
diff --git a/docs/cloud/alerts-notifications/view-active-alerts.mdx b/docs/cloud/alerts-notifications/view-active-alerts.mdx
new file mode 100644
index 000000000..1035b682e
--- /dev/null
+++ b/docs/cloud/alerts-notifications/view-active-alerts.mdx
@@ -0,0 +1,76 @@
+---
+title: "View active alerts"
+description: >-
+ "Track the health of your infrastructure in one place by taking advantage of the powerful health monitoring
+ watchdog running on every node."
+type: "how-to"
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/view-active-alerts.mdx"
+sidebar_label: "View active alerts"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Operations/Alerts"
+---
+
+Netdata Cloud receives information about active alerts on individual nodes in your infrastructure and updates the
+interface based on those status changes.
+
+Netdata Cloud doesn't produce alerts itself but rather receives and aggregates alerts from each node in your
+infrastructure based on their configuration. Every node comes with hundreds of pre-configured alerts that have been
+tested by Netdata's community of DevOps engineers and SREs, but you may want to customize existing alerts or create new
+ones entirely.
+
+Read our doc on [health alerts](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md) to
+learn how to tweak existing alerts or create new
+health entities based on the specific needs of your infrastructure. By taking charge of alert configuration, you'll
+ensure Netdata Cloud always delivers the most relevant alerts about the well-being of your nodes.
+
+## View all active alerts
+
+The [Alerts Smartboard](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/smartboard.mdx)
+provides a high-level interface for viewing the number of critical or warning alerts and where they are in your
+infrastructure.
+
+![The Alerts Smartboard](https://user-images.githubusercontent.com/1153921/119025635-2fcb1b80-b959-11eb-9fdb-7f1a082f43c5.png)
+
+Click on the **Alerts** tab in any War Room to open the Smartboard. Alternatively, click on any of the alert badges in
+the [Nodes view](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/nodes.md) to jump to the Alerts
+Smartboard.
+
+From here, filter active alerts using the **critical** or **warning** boxes, or hover over a box in
+the [nodes map](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/smartboard.mdx#nodes-map)
+to see a popup with node-specific alert information.
+
+## View alerts in context with charts
+
+If you click on any of the alerts, either in a nodes map popup or the alerts table, Netdata Cloud navigates you to the
+single-node dashboard and scrolls to the relevant chart. Netdata Cloud also draws a highlight and the value at the
+moment your node triggered this alert.
+
+![An alert in context with charts and dimensions](https://user-images.githubusercontent.com/1153921/119039593-4a0cf580-b969-11eb-840c-4ecb123df9f5.png)
+
+You can
+then [select this area](https://github.com/netdata/netdata/blob/master/docs/dashboard/interact-charts.mdx#select)
+with `Alt/⌘ + mouse selection` to highlight the alerted timeframe while you explore other charts for root cause
+analysis.
+
+Or, select the area and
+run [Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md) to
+filter the single-node
+dashboard to only those charts most likely to be connected to the alert.
+
+## What's next?
+
+Learn more about the features of the Smartboard in
+its [reference](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/smartboard.mdx)
+doc. To stay notified of active alerts,
+enable [centralized alert notifications](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.mdx)
+from Netdata Cloud.
+
+If you're through with setting up alerts, it might be time
+to [invite your team](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/invite-your-team.md).
+
+Check out our recommendations on organizing and
+using [Spaces](https://github.com/netdata/netdata/blob/master/docs/cloud/spaces.md) and
+[War Rooms](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md) to streamline your processes once
+you find an alert in Netdata Cloud.
diff --git a/docs/cloud/beta-architecture/new-architecture.md b/docs/cloud/beta-architecture/new-architecture.md
new file mode 100644
index 000000000..c51f08fb1
--- /dev/null
+++ b/docs/cloud/beta-architecture/new-architecture.md
@@ -0,0 +1,36 @@
+---
+title: "Test the New Cloud Architecture"
+description: "Would you like to be the first to try our new architecture and provide feedback? If so, this guide will help you sign up for our beta testing group."
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/beta-architecture/new-architecture.md"
+---
+
+To enhance the stability and reliability of Netdata Cloud, we did extensive work on our backend, and we would like to give you the opportunity
+to be among the first users to try these changes to our Cloud architecture and provide feedback.
+
+The backend architecture changes should offer notable improvements in reliability and stability in Netdata Cloud,
+but more importantly, they allow us to develop new features and enhanced functionality, including features and enhancements
+that you have specifically requested. Features that will be developed on the new architecture include:
+
+- Parent/Child Cloud relationships
+- Alert logs
+- Alert management
+- Much more
+
+## Enabling the new architecture
+
+To enable the new architecture, first ensure that you have installed the latest Netdata version following
+[our guide](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx). Then, you or your administrator will need to retrieve the Space IDs
+within Netdata Cloud by clicking `Manage Space` in the left pane, selecting the `Space` tab, and copying the value in the `Space Id` field.
+You can then send an email to [beta@Netdata.cloud](mailto:beta@netdata.cloud) requesting to be included in our beta testers, and include
+in the body of the email a list of Space IDs for any space you would like to have whitelisted for the update. If you received an email
+invitation, you can also just reply to the invitation with your Space IDs in the body of the reply.
+
+Feel free to send the Space IDs for multiple spaces to test the new infrastructure on each of them.
+
+## Reporting issues
+
+After you are set up with the new architecture changes, we ask that you report any issues you encounter in our
+[designated Discord channel](https://discord.gg/dGzdemHwHh). This feedback
+will help us ensure the highest performance of the new architecture and expedite the development and release
+of the aforementioned enhancements and features.
+
diff --git a/docs/cloud/cheatsheet.mdx b/docs/cloud/cheatsheet.mdx
new file mode 100644
index 000000000..c1d0a471d
--- /dev/null
+++ b/docs/cloud/cheatsheet.mdx
@@ -0,0 +1,231 @@
+---
+title: "'Netdata management and configuration cheatsheet'"
+description: "'Connecting an Agent to the Cloud allows a Netdata Agent, running on a distributed node, to securely connect to Netdata Cloud via the encrypted Agent-Cloud link (ACLK).'"
+image: "/cheatsheet/cheatsheet-meta.png"
+sidebar_label: "Cheatsheet"
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/cheatsheet.mdx"
+part_of_learn: "True"
+learn_status: "Published"
+learn_topic_type: "Getting started"
+learn_rel_path: "Getting started"
+---
+
+import {
+ OneLineInstallWget,
+ OneLineInstallCurl,
+} from '@site/src/components/OneLineInstall/';
+
+Use our management & configuration cheatsheet to simplify your interactions with Netdata, including configuration,
+using charts, managing the daemon, and more.
+
+## Install Netdata
+
+#### Install Netdata
+
+<OneLineInstallWget />
+
+Or, if you have cURL but not wget (such as on macOS):
+
+<OneLineInstallCurl />
+
+#### Claim a node to Netdata Cloud
+
+To do so, sign in to Netdata Cloud, click the `Claim Nodes` button, choose the `War Rooms` to add nodes to, then click `Copy` to copy the full script to your clipboard. Paste that into your node’s terminal and run it.
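+
+The exact command is generated for you by Netdata Cloud. As a rough sketch (the token and room ID below are placeholders, not real values), the copied command looks something like this:
+
+```
+sudo netdata-claim.sh -token=YOUR_CLAIM_TOKEN -rooms=YOUR_ROOM_ID -url=https://app.netdata.cloud
+```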
+
+## Metrics collection & retention
+
+You can tweak your settings in the netdata.conf file.
+📄 [Find your netdata.conf file](https://learn.netdata.cloud/guides/step-by-step/step-04#find-your-netdataconf-file)
+
+Open a new terminal and navigate to the netdata.conf file. Use the edit-config script to make changes: `sudo ./edit-config netdata.conf`
+
+The most popular settings to change are:
+
+#### Increase metrics retention (4GiB)
+
+```
+sudo ./edit-config netdata.conf
+```
+
+```
+[global]
+ dbengine multihost disk space = 4096
+```
+
+#### Reduce the collection frequency (every 5 seconds)
+
+```
+sudo ./edit-config netdata.conf
+```
+
+```
+[global]
+ update every = 5
+```
+
+#### Enable/disable plugins (groups of collectors)
+
+```
+sudo ./edit-config netdata.conf
+```
+
+```
+[plugins]
+ go.d = yes # enabled
+ node.d = no # disabled
+```
+
+#### Enable/disable specific collectors
+
+```
+sudo ./edit-config go.d.conf
+```
+
+> `Or python.d.conf, node.d.conf, ebpf.conf, and so on`.
+
+```
+modules:
+ activemq: no # disabled
+ bind: no # disabled
+ cockroachdb: yes # enabled
+```
+
+#### Edit a collector's config (example)
+
+```
+$ sudo ./edit-config go.d/mysql.conf
+$ sudo ./edit-config ebpf.conf
+$ sudo ./edit-config python.d/anomalies.conf
+```
+
+## Configuration
+
+#### The Netdata config directory: `/etc/netdata`
+
+> If you don't have such a directory:
+> 📄 [Find your netdata.conf file](https://learn.netdata.cloud/guides/step-by-step/step-04#find-your-netdataconf-file)
+> The cheatsheet assumes you’re running all commands from within the Netdata config directory!
+
+#### Edit Netdata's main config file: `$ sudo ./edit-config netdata.conf`
+
+#### Edit Netdata's other config files (examples):
+
+- `$ sudo ./edit-config apps_groups.conf`
+- `$ sudo ./edit-config ebpf.conf`
+- `$ sudo ./edit-config health.d/load.conf`
+- `$ sudo ./edit-config go.d/prometheus.conf`
+
+#### View the running Netdata configuration: `http://NODE:19999/netdata.conf`
+
+> Replace `NODE` with the IP address or hostname of your node. Often `localhost`.
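+
+For example, from the node itself (assuming the default port):
+
+```
+curl http://localhost:19999/netdata.conf
+```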
+
+## Alarms & notifications
+
+#### Add a new alarm
+
+```
+sudo touch health.d/example-alarm.conf
+sudo ./edit-config health.d/example-alarm.conf
+```
+
+#### Configure a specific alarm
+
+```
+sudo ./edit-config health.d/example-alarm.conf
+```
+
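+A hypothetical health entity inside `health.d/example-alarm.conf` might look like the following sketch (the chart, lookup, and thresholds are illustrative only):
+
+```
+ alarm: example_alarm
+    on: system.cpu
+lookup: average -3m unaligned of user
+ every: 30s
+  warn: $this > 80
+  crit: $this > 90
+  info: example alarm on user CPU utilization
+```
+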
+#### Silence a specific alarm
+
+```
+sudo ./edit-config health.d/example-alarm.conf
+ to: silent
+```
+
+#### Disable alarms and notifications
+
+```
+[health]
+ enabled = no
+```
+
+> After any change, reload the Netdata health configuration
+
+```
+netdatacli reload-health
+```
+
+or if that command doesn't work on your installation, use:
+
+```
+killall -USR2 netdata
+```
+
+## Manage the daemon
+
+| Intent | Action |
+| :-------------------------- | --------------------------------------------------------------------: |
+| Start Netdata | `$ sudo systemctl start netdata` |
+| Stop Netdata | `$ sudo systemctl stop netdata` |
+| Restart Netdata | `$ sudo systemctl restart netdata` |
+| Reload health configuration | `$ sudo netdatacli reload-health` `$ killall -USR2 netdata` |
+| View error logs | `less /var/log/netdata/error.log` |
+
+## See metrics and dashboards
+
+#### Netdata Cloud: `https://app.netdata.cloud`
+
+#### Local dashboard: `https://NODE:19999`
+
+> Replace `NODE` with the IP address or hostname of your node. Often `localhost`.
+
+#### Access the Netdata API: `http://NODE:19999/api/v1/info`
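+
+For example (assuming `curl` is available; pipe through `jq` if you want pretty-printed JSON):
+
+```
+curl http://localhost:19999/api/v1/info
+```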
+
+## Interact with charts
+
+| Intent | Action |
+| -------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
+| Stop a chart from updating | `click` |
+| Zoom | **Cloud** use the `zoom in` and `zoom out` buttons on any chart (upper right corner) **Agent** `SHIFT` or `ALT` + `mouse scrollwheel` `SHIFT` or `ALT` + `two-finger pinch` (touchscreen) `SHIFT` or `ALT` + `two-finger scroll` (touchscreen) |
+| Zoom to a specific timeframe | **Cloud** use the `select and zoom` button on any chart and then do a `mouse selection` **Agent** `SHIFT` + `mouse selection` |
+| Pan forward or back in time | `click` & `drag` `touch` & `drag` (touchpad/touchscreen) |
+| Select a certain timeframe             | `ALT` + `mouse selection` or `⌘` + `mouse selection` (macOS) |
+| Reset to default auto refreshing state | `double click` |
+
+## Dashboards
+
+#### Disable the local dashboard
+
+Use the `edit-config` script to edit the `netdata.conf` file.
+
+```
+[web]
+mode = none
+```
+
+#### Change the port Netdata listens to (port 39999)
+
+```
+[web]
+default port = 39999
+```
+
+#### Opt out from anonymous statistics
+
+```
+sudo touch .opt-out-from-anonymous-statistics
+```
+
+## Understanding the dashboard
+
+**Charts**: A visualization displaying one or more collected/calculated metrics in a time series. Charts are generated
+by collectors.
+
+**Dimensions**: Any value shown on a chart, which can be raw or calculated values, such as percentages, averages,
+minimums, maximums, and more.
+
+**Families**: One instance of a monitored hardware or software resource that needs to be displayed
+separately from similar instances. For example, disks named
+**sda**, **sdb**, **sdc**, and so on.
+
+**Contexts**: A grouping of charts based on the types of metrics collected and visualized.
+**disk.io**, **disk.ops**, and **disk.backlog** are all contexts.
diff --git a/docs/cloud/cloud.mdx b/docs/cloud/cloud.mdx
new file mode 100644
index 000000000..764ba0e89
--- /dev/null
+++ b/docs/cloud/cloud.mdx
@@ -0,0 +1,74 @@
+---
+title: "Netdata Cloud docs"
+description: "Netdata Cloud is real-time visibility for entire infrastructures. View key metrics, insightful charts, and active alarms from all your nodes."
+custom_edit_url: "https://github.com/netdata/learn/blob/master/docs/cloud.mdx"
+---
+
+import { Grid, Box, BoxList, BoxListItem } from '@site/src/components/Grid/'
+import { RiExternalLinkLine } from 'react-icons/ri'
+
+This is the documentation for the Netdata Cloud web application, which works in parallel with the open-source Netdata
+monitoring agent to help you monitor your entire infrastructure [for free](https://netdata.cloud/pricing/) in real time and troubleshoot problems that threaten the health of your
+nodes before they occur.
+
+Netdata Cloud requires the open-source [Netdata](/docs/) monitoring agent, which is the basis for the metrics,
+visualizations, and alarms that you'll find in Netdata Cloud. Every time you view a node in Netdata Cloud, its metrics
+and metadata are streamed to Netdata Cloud, then proxied to your browser, with an infrastructure that ensures
+[data privacy](https://netdata.cloud/privacy/).
+
+
+Read [_What is Netdata?_](https://github.com/netdata/netdata/blob/master/docs/overview/what-is-netdata.md) for details about how Netdata and Netdata Cloud work together
+and how they're different from other monitoring solutions, or the
+[FAQ](https://community.netdata.cloud/tags/c/general/29/faq) for answers to common questions.
+
+
+
+ Ready to get real-time visibility into your entire infrastructure? This guide will help you get started on Netdata Cloud, from signing in for a free account to connecting your nodes.
+
+
+
+## Learn about Netdata Cloud's features
+
diff --git a/docs/cloud/data-privacy.mdx b/docs/cloud/data-privacy.mdx
new file mode 100644
index 000000000..c99cff946
--- /dev/null
+++ b/docs/cloud/data-privacy.mdx
@@ -0,0 +1,39 @@
+---
+title: "Data privacy in the Netdata Cloud"
+description: "Keeping your data safe and secure is our priority.Netdata never stores your personal information in the Netdata Cloud."
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/data-privacy.mdx"
+sidebar_label: "Data privacy in the Netdata Cloud"
+learn_status: "Published"
+learn_topic_type: "Concepts"
+learn_rel_path: "Concepts"
+---
+
+[Data privacy](https://netdata.cloud/privacy/) is very important to us. We firmly believe that your data belongs to
+you. This is why **we don't store any metric data in Netdata Cloud**.
+
+Your local installations of the Netdata Agent form the basis for the Netdata Cloud. All the data that you see in the web browser when using Netdata Cloud, is actually streamed directly from the Netdata Agent to the Netdata Cloud dashboard.
+The data passes through our systems, but it isn't stored. You can learn more about [the Agent's security design](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md) in the Agent documentation.
+
+However, to be able to offer the stunning visualizations and advanced functionality of Netdata Cloud, it does store a limited amount of _metadata_.
+
+## Metadata
+
+Let's look at the metadata Netdata Cloud stores using the publicly available demo server `frankfurt.my-netdata.io`:
+
+- The email address you used to sign up or sign in
+- For each node connected to your Spaces in Netdata Cloud:
+ - Hostname (as it appears in Netdata Cloud)
+ - Information shown in `/api/v1/info`. For example: [https://frankfurt.my-netdata.io/api/v1/info](https://frankfurt.my-netdata.io/api/v1/info).
+  - The chart metadata shown in `/api/v1/charts`. For example: [https://frankfurt.my-netdata.io/api/v1/charts](https://frankfurt.my-netdata.io/api/v1/charts).
+ - Alarm configurations shown in `/api/v1/alarms?all`. For example: [https://frankfurt.my-netdata.io/api/v1/alarms?all](https://frankfurt.my-netdata.io/api/v1/alarms?all).
+ - Active alarms shown in `/api/v1/alarms`. For example: [https://frankfurt.my-netdata.io/api/v1/alarms](https://frankfurt.my-netdata.io/api/v1/alarms).
+
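+For example, you can see exactly what this covers by querying the endpoints above on the demo server yourself (assuming `curl` is available):
+
+```
+curl https://frankfurt.my-netdata.io/api/v1/info
+```
+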
+How we use them:
+
+- The data is stored in our production database on AWS. Some of it is also used in Google BigQuery, our data lake, for analytics purposes. These analytics are crucial for our product development process.
+- Your email address is used to identify you in relation to product use and to enrich our tools, such as our CRM, with product-usage information.
+- This data is only available to Netdata and never to a third party.
+
+## Delete all personal data
+
+To remove all the personal information we hold about you (email and activities), delete your Netdata Cloud account by logging in to https://app.netdata.cloud and accessing your profile, at the bottom left of your screen.
diff --git a/docs/cloud/get-started.mdx b/docs/cloud/get-started.mdx
new file mode 100644
index 000000000..b9f83af8f
--- /dev/null
+++ b/docs/cloud/get-started.mdx
@@ -0,0 +1,133 @@
+---
+title: "Get started with Netdata Cloud"
+description: >-
+ "Ready to get real-time visibility into your entire infrastructure? This guide will help you get started on
+ Netdata Cloud."
+image: "/img/seo/cloud_get-started.png"
+custom_edit_url: "https://github.com/netdata/learn/blob/master/docs/cloud/get-started.mdx"
+---
+
+import Link from '@docusaurus/Link'
+import Callout from '@site/src/components/Callout'
+
+Ready to get real-time visibility into your entire infrastructure with Netdata Cloud? This guide will walk you through
+the onboarding process, such as setting up your Space and War Room and connecting your first nodes.
+
+## Before you start
+
+Before you get started with Netdata Cloud, you should have the open-source Netdata monitoring agent installed. See our
+[installation guide](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx) for details.
+
+If you already have the Netdata agent running on your node(s), make sure to update it to v1.32 or higher. Read the
+[updating documentation](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md) for information
+on how to update based on the method you used to install Netdata on that node.
+
+## Begin the onboarding process
+
+Get started by signing in to Netdata. Read
+the [sign in](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/sign-in.mdx) doc for details on the
+authentication methods we use.
+
+
+Once signed in with your preferred method, a
+General [War Room](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md) and
+a [Space](https://github.com/netdata/netdata/blob/master/docs/cloud/spaces.md)
+named for your login email are automatically created. You can configure more Spaces and War Rooms to help you
+organize your team
+and the many systems that make up your infrastructure. For example, you can put product and infrastructure SRE teams in
+separate
+Spaces, and then use War Rooms to group nodes by their service (`nginx`), purpose (`webservers`), or physical
+location (`IAD`).
+
+Don't worry! You can always add more Spaces and War Rooms later if you decide to reorganize how you use Netdata Cloud.
+
+## Connect your nodes
+
+From within the created War Rooms, Netdata Cloud prompts you
+to [connect](https://github.com/netdata/netdata/blob/master/claim/README.md) your nodes to Netdata Cloud. Non-admin
+users can select from existing nodes already connected to the Space, or select an admin from a provided list to
+connect the node.
+You can connect any node running Netdata, whether it's a physical or virtual machine, a Docker container, IoT device,
+and more.
+
+The connection process securely connects any node to Netdata Cloud using
+the [Agent-Cloud link](https://github.com/netdata/netdata/blob/master/aclk/README.md). By
+connecting a node, you prove you have write and administrative access to that node. Connecting to Cloud also prevents
+any third party
+from connecting a node that you control. Keep in mind:
+
+- _You can only connect any given node to a single Space_. You can, however, add that connected node to multiple War
+ Rooms
+ within that one Space.
+- You must repeat the connection process on every node you want to add to Netdata Cloud.
+
+
+
+**Netdata Cloud ensures your data privacy by not storing metrics data from your nodes**. See our statement on Netdata
+Cloud [data privacy](https://github.com/netdata/netdata/blob/master/aclk/README.md/#data-privacy) for details on the
+data that's streamed from your nodes and the
+[connecting to cloud](https://github.com/netdata/netdata/blob/master/claim/README.md) doc for details about why we
+implemented the connection process and the encryption methods we use to secure your data in transit.
+
+
+
+To connect a node, select which War Rooms you want to add this node to with the dropdown, then copy the script given by
+Netdata Cloud into your node's terminal.
+
+Hit **Enter**. The script should return `Agent was successfully claimed.`. If the claiming script returns errors, or if
+you don't see the node in your Space after 60 seconds, see
+the [troubleshooting information](https://github.com/netdata/netdata/blob/master/claim/README.md#troubleshooting).
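+
+For reference, the script Netdata Cloud generates typically resembles the following sketch (the token and room ID are placeholders, not real values):
+
+```
+wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh \
+  --claim-token YOUR_CLAIM_TOKEN --claim-rooms YOUR_ROOM_ID --claim-url https://app.netdata.cloud
+```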
+
+Repeat this process with every node you want to add to Netdata Cloud during onboarding. You can also add more nodes once
+you've finished onboarding by clicking the **Connect Nodes** button in
+the [Space management area](https://github.com/netdata/netdata/blob/master/docs/cloud/spaces.md/#manage-spaces).
+
+### Alternatives and other operating systems
+
+**Docker**: You can execute the claiming script on Netdata running as a Docker container, or attach the claiming script
+when creating the container for the first time, such as when you're spinning up ephemeral containers. See
+the [connect an agent running in Docker](https://github.com/netdata/netdata/blob/master/claim/README.md#connect-an-agent-running-in-docker)
+documentation for details.
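+
+For instance, a minimal sketch of claiming at container creation time via environment variables (the token and room ID are placeholders; see the linked documentation for the full set of options):
+
+```
+docker run -d --name=netdata \
+  -e NETDATA_CLAIM_TOKEN=YOUR_CLAIM_TOKEN \
+  -e NETDATA_CLAIM_ROOMS=YOUR_ROOM_ID \
+  -e NETDATA_CLAIM_URL=https://app.netdata.cloud \
+  netdata/netdata
+```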
+
+**Without root privileges**: If you want to connect an agent without using root privileges, see our [connect
+documentation](https://github.com/netdata/netdata/blob/master/claim/README.md#connect-an-agent-without-root-privileges).
+
+**With a proxy**: If your node uses a proxy to connect to the internet, you need to configure the node's proxy settings.
+See
+our [connect through a proxy](https://github.com/netdata/netdata/blob/master/claim/README.md#connect-through-a-proxy)
+doc for details.
+
+## Add bookmarks to essential resources
+
+When an anomaly or outage strikes, your team needs to access other essential resources quickly. You can use Netdata
+Cloud's bookmarks to put these tools in one accessible place. Bookmarks are shared between all War Rooms in a Space, so
+any users in your Space will be able to see and use them.
+
+Bookmarks can link to both internal and external resources. You can bookmark your app's status page for quick updates
+during an outage, a messaging system on your organization's intranet, or other tools your team uses to respond to
+changes in your infrastructure.
+
+To add a new bookmark, click on the **Add bookmark** link. In the panel, name the bookmark, include its URL, and write a
+short description for your team's reference.
+
+## What's next?
+
+You finish onboarding
+by [inviting members of your team](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/invite-your-team.md)
+to your Space. You
+can also invite them later. At this point, you're ready to use Cloud.
+
+Next, learn about the organization and interfaces
+behind [Spaces](https://github.com/netdata/netdata/blob/master/docs/cloud/spaces.md)
+and [War
+Rooms](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md).
+
+If you're ready to explore, check out how to use
+the [Overview dashboard](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md), which is the
+default view for each new War Room you create.
diff --git a/docs/cloud/insights/anomaly-advisor.mdx b/docs/cloud/insights/anomaly-advisor.mdx
new file mode 100644
index 000000000..98a28d92c
--- /dev/null
+++ b/docs/cloud/insights/anomaly-advisor.mdx
@@ -0,0 +1,86 @@
+---
+title: "Anomaly Advisor"
+description: "Quickly find anomalous metrics anywhere in your infrastructure."
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/insights/anomaly-advisor.mdx"
+sidebar_label: "Anomaly Advisor"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Operations"
+---
+
+import ReactPlayer from 'react-player'
+
+The Anomaly Advisor feature lets you quickly surface potentially anomalous metrics and charts related to a particular highlight window of
+interest.
+
+
+
+## Getting Started
+
+If you are running a Netdata version higher than `v1.35.0-29-nightly`, you will be able to use the Anomaly Advisor out of the box with zero configuration. If you are on an earlier Netdata version, you will need to first enable ML on your nodes by following the steps below.
+
+To enable the Anomaly Advisor you must first enable ML on your nodes via a small config change in `netdata.conf`. Once the anomaly detection models have trained on the Agent (with default settings this takes a couple of hours until enough data has been seen to train the models) you will then be able to enable the Anomaly Advisor feature in Netdata Cloud.
+
+### Enable ML on Netdata Agent
+
+To enable ML on your Netdata Agent, you need to edit the `[ml]` section in your `netdata.conf` to look something like the following example.
+
+```bash
+[ml]
+ enabled = yes
+```
+
+At a minimum you just need to set `enabled = yes` to enable ML with default params. More details about configuration can be found in the [Netdata Agent ML docs](https://learn.netdata.cloud/docs/agent/ml#configuration).
+
+**Note**: Follow [this guide](https://github.com/netdata/netdata/blob/master/docs/guides/step-by-step/step-04.md) if you are unfamiliar with making configuration changes in Netdata.
+
+When you have finished your configuration, restart Netdata with a command like `sudo systemctl restart netdata` for the config changes to take effect. You can find more info on restarting Netdata [here](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md).
+
+After a brief delay, you should see the number of `trained` dimensions start to increase on the "dimensions" chart of the "Anomaly Detection" menu on the Overview page. By default, the `minimum num samples to train = 3600` parameter means at least 1 hour of data is required to train initial models, but you could set this to `900` if you want to train initial models more quickly on less data. Over time, the models will retrain on up to `maximum num samples to train = 14400` (4 hours by default), but you could increase this if you want to train on more data.
+
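+A minimal sketch of an `[ml]` section that uses the training-window parameters mentioned above (the values shown are illustrative, not recommendations):
+
+```bash
+[ml]
+    enabled = yes
+    minimum num samples to train = 900
+    maximum num samples to train = 14400
+```
+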
+![image](https://user-images.githubusercontent.com/2178292/166474099-ba6f5ebe-12b2-4ef2-af9f-e84a05349791.png)
+
+Once this line flattens out all configured metrics should have models trained and predicting anomaly scores each second, ready to be used by the new "anomalies" tab of the Anomaly Advisor.
+
+## Using Anomaly Advisor
+
+To use the Anomaly Advisor, go to the "anomalies" tab. Once you highlight a particular timeframe of interest, a selection of the most anomalous dimensions will appear below.
+
+The aim here is to surface the most anomalous metrics in the space or room for the highlighted window to try and cut down on the amount of manual searching required to get to the root cause of your issues.
+
+![image](https://user-images.githubusercontent.com/2178292/164427337-a40820d2-8d36-4a94-8dfb-cfd3194941e0.png)
+
+The "Anomaly Rate" chart shows the percentage of anomalous metrics over time per node. For example, in the following image, 3.21% of the metrics on the "ml-demo-ml-disabled" node were considered anomalous. This elevated anomaly rate could be a sign of something worth investigating.
+
+**Note**: in this example the anomaly rates for this node are actually being calculated on the parent it streams to. You can run ML on the Agent itself or on a parent the Agent streams to. Read more about the various configuration options in the [Agent docs](https://github.com/netdata/netdata/blob/master/ml/README.md).
+
+![image](https://user-images.githubusercontent.com/2178292/164428307-6a86989a-611d-47f8-a673-911d509cd954.png)
+
+The "Count of Anomalous Metrics" chart (collapsed by default) shows raw counts of anomalous metrics per node so may often be similar to the anomaly rate chart, apart from where nodes may have different numbers of metrics.
+
+The "Anomaly Events Detected" chart (collapsed by default) shows if the anomaly rate per node was sufficiently elevated to trigger a node level anomaly. Anomaly events will appear slightly after the anomaly rate starts to increase in the timeline, this is because a significant number of metrics in the node need to be anomalous before an anomaly event is triggered.
+
+Once you have highlighted a window of interest, you should see an ordered list of anomaly rate sparklines in the "Anomalous metrics" section like below.
+
+![image](https://user-images.githubusercontent.com/2178292/164427592-ab1d0eb1-57e2-4a05-aaeb-da4437a019b1.png)
+
+You can expand any sparkline chart to see the underlying raw data to see how it relates to the corresponding anomaly rate.
+
+![image](https://user-images.githubusercontent.com/2178292/164430105-f747d1e0-f3cb-4495-a5f7-b7bbb71039ae.png)
+
+On the upper right hand side of the page you can select which nodes to filter on if you wish to do so. The ML training status of each node is also displayed.
+
+On the lower right hand side of the page an index of anomaly rates is displayed for the highlighted timeline of interest. The index is sorted from most anomalous metric (highest anomaly rate) to least (lowest anomaly rate). Clicking on an entry in the index will scroll the rest of the page to the corresponding anomaly rate sparkline for that metric.
+
+### Usage Tips
+
+- If you are interested in a subset of specific nodes, then filtering to just those nodes before highlighting tends to give better results. This is because when you highlight a region, Netdata Cloud will ask the Agents for a ranking over all metrics, so if you can filter this early to just the subset of nodes you are interested in, less 'averaging' will occur and you might get a less noisy ranking.
+- Ideally, try to highlight close to a spike or window of interest so that the resulting ranking can narrow in more easily on the timeline you are interested in.
+
+You can read more detail on how anomaly detection in the Netdata Agent works in our [Agent docs](https://github.com/netdata/netdata/blob/master/ml/README.md).
+
+🚧 **Note**: This functionality is still **under active development** and considered experimental. We dogfood it internally and among early adopters within the Netdata community to build the feature. If you would like to get involved and help us with feedback, you can reach us through any of the following channels:
+- Email us at analytics-ml-team@netdata.cloud
+- Comment on the [beta launch post](https://community.netdata.cloud/t/anomaly-advisor-beta-launch/2717) in the Netdata community
+- Join us in the [🤖-ml-powered-monitoring](https://discord.gg/4eRSEUpJnc) channel of the Netdata discord.
+- Or open a discussion in GitHub if that's more your thing
diff --git a/docs/cloud/insights/metric-correlations.md b/docs/cloud/insights/metric-correlations.md
new file mode 100644
index 000000000..ce8835d34
--- /dev/null
+++ b/docs/cloud/insights/metric-correlations.md
@@ -0,0 +1,87 @@
+---
+title: "Metric Correlations"
+description: "Quickly find metrics and charts closely related to a particular timeframe of interest anywhere in your infrastructure to discover the root cause faster."
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md"
+sidebar_label: "Metric Correlations"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Operations"
+---
+
+The Metric Correlations (MC) feature lets you quickly find metrics and charts related to a particular window of interest that you want to explore further. By displaying the standard Netdata dashboard, filtered to show only charts that are relevant to the window of interest, you can get to the root cause sooner.
+
+Because Metric Correlations uses every available metric from your infrastructure, at up to 1-second granularity, you get the most accurate insights possible.
+
+## Using Metric Correlations
+
+When viewing the overview or a single-node dashboard, the **Metric Correlations** button appears in the top right corner of the page.
+
+![The Metric Correlations button](https://user-images.githubusercontent.com/2178292/201082551-d805b20d-0472-455d-9f11-b2329adf3098.png)
+
+To start correlating metrics, click the **Metric Correlations** button, then hold the `Alt` key (or `⌘` on macOS) and click-and-drag a selection of metrics on a single chart. The selected timeframe needs to be at least 15 seconds for Metric Correlation to work.
+
+The menu then displays information about the selected area and reference baseline. Metric Correlations uses the reference baseline to discover which additional metrics are most closely connected to the selected metrics. The reference baseline is the period immediately preceding the highlighted window and is four times as long as the highlighted window; for example, if you highlight 60 seconds, the baseline is the 240 seconds immediately before it. This ensures that the reference baseline always sits immediately before the highlighted window of interest and is long enough to be a representative short-term baseline.
+
+Press the **Find Correlations** button to start the correlation process; the button is only enabled when a valid timeframe is selected (at least 15 seconds). Once pressed, the process scores all available metrics on your nodes and returns a filtered version of the Netdata dashboard. Now, you'll see only those metrics that have changed the most between the baseline window and the highlighted window you have selected.
+
+![Metric Correlations results](https://user-images.githubusercontent.com/2178292/181751182-25e0890d-a5f4-4799-9936-1523603cf97d.png)
+
+These charts are fully interactive, and whenever possible, will only show the _dimensions_ related to the timeline you selected.
+
+You can interact with all the scored metrics via the slider. Slide toward **show less** for more nuanced and significant results, or toward **show more** to "loosen" the threshold to explore other charts that may have changed too, but in a less significant manner.
+
+If you find something else interesting in the results, you can select another window and press **Find Correlations** again to kick the process off again.
+
+## Metric Correlations options
+
+MC enables a few input parameters that users can define to iteratively explore their data in different ways. As is usually the case in Machine Learning (ML), there is no "one size fits all" algorithm; what approach works best will typically depend on the type of data (which can be very different from one metric to the next) and even the nature of the event or incident you might be exploring in Netdata.
+
+So when you first run MC it will use the most sensible and general defaults. But you can also then vary any of the below options to explore further.
+
+### Method
+
+There are two algorithms available that aim to score metrics based on how much they have changed between the baseline and highlight windows.
+
+- `KS2` - A statistical test ([Two-sample Kolmogorov Smirnov](https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test#Two-sample_Kolmogorov%E2%80%93Smirnov_test)) comparing the distribution of the highlighted window to the baseline to try and quantify which metrics have most evidence of a significant change. You can explore our implementation [here](https://github.com/netdata/netdata/blob/d917f9831c0a1638ef4a56580f321eb6c9a88037/database/metric_correlations.c#L212).
+- `Volume` - A heuristic measure based on the percentage change in averages between highlighted window and baseline, with various edge cases sensibly controlled for. You can explore our implementation [here](https://github.com/netdata/netdata/blob/d917f9831c0a1638ef4a56580f321eb6c9a88037/database/metric_correlations.c#L516).
+
+### Aggregation
+
+Behind the scenes, Netdata will aggregate the raw data as needed such that arbitrary window lengths can be selected for MC. By default, Netdata will just `Average` raw data when needed as part of pre-processing. However, other aggregations like `Median`, `Min`, `Max`, and `Stddev` are also possible.
+
+### Data
+
+Netdata is different from typical observability agents since, in addition to just collecting raw metric values, it will by default also assign an "[Anomaly Bit](/docs/agent/ml#anomaly-bit)" related to each collected metric each second. This bit will be 0 for "normal" and 1 for "anomalous". This means that each metric also natively has an "[Anomaly Rate](/docs/agent/ml#anomaly-rate)" associated with it and, as such, MC can be run against the raw metric values or their corresponding anomaly rates.
+
+**Note**: Read [this guide](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/anomaly-detection.md) to learn more about the native anomaly detection features within Netdata.
+
+- `Metrics` - Run MC on the raw metric values.
+- `Anomaly Rate` - Run MC on the corresponding anomaly rate for each metric.
+
+## Metric Correlations on the agent
+
+As of `v1.35.0`, Netdata is able to run the Metric Correlations algorithm ([Two Sample Kolmogorov-Smirnov test](https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test#Two-sample_Kolmogorov%E2%80%93Smirnov_test)) on the agent itself. This avoids sending the underlying raw data to the original Netdata Cloud based microservice and so is typically much faster, as no data moves around and the computation happens on the agent instead.
+
+When a Metric Correlations request is made to Netdata Cloud, if any node instances have MC enabled then the request will be routed to the node instance with the highest hops (e.g. a parent node if one is found or the node itself if not). If no node instances have MC enabled then the request will be routed to the original Netdata Cloud based service which will request input data from the nodes and run the computation within the Netdata Cloud backend.
+
+#### Enabling/Disabling Metric Correlations on the agent
+
+As of `v1.35.0-22-nightly`, Metric Correlations has been enabled by default on all agents. After further optimizations to the implementation, the impact of running the metric correlations algorithm on the agent turned out to be lower than the impact of preparing all the data to send to the cloud for MC to run there, so running MC on the agent is less impactful on local resources than running it via the cloud.
+
+Should you still want to, disabling Metric Correlations on the agent is a simple one-line config change: set `enable metric correlations = no` in the `[global]` section of `netdata.conf`.
+
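+A minimal sketch of the corresponding `netdata.conf` change:
+
+```
+[global]
+    enable metric correlations = no
+```
+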
+## Usage tips!
+
+- When running Metric Correlations from the [Overview tab](https://learn.netdata.cloud/docs/cloud/visualize/overview#overview) across multiple nodes, you might find better results if you iterate on the initial results by grouping by node, filtering to nodes of interest, and then running Metric Correlations again. A typical workflow in this case would be to:
+ - If unsure which nodes you are interested in then run MC on all nodes.
+ - Within the initial results returned group the most interesting chart by node to see if the changes are across all nodes or a subset of nodes.
+ - If you see a subset of nodes clearly jump out when you group by node, then filter for just those nodes of interest and run the MC again. This will result in less aggregation needing to be done by Netdata and so should help give clearer results as you interact with the slider.
+- Use the `Volume` algorithm for metrics with a lot of gaps (e.g. request latency when there are few requests), otherwise stick with `KS2`
+ - By default, Netdata uses the `KS2` algorithm which is a tried and tested method for change detection in a lot of domains. The [Wikipedia](https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test) article gives a good overview of how this works. Basically, it is comparing, for each metric, its cumulative distribution in the highlight window with its cumulative distribution in the baseline window. The statistical test then seeks to quantify the extent to which we can say these two distributions look similar enough to be considered the same or not. The `Volume` algorithm is a bit more simple than `KS2` in that it basically compares (with some edge cases sensibly handled) the average value of the metric across baseline and highlight and looks at the percentage change. Often both `KS2` and `Volume` will have significant agreement and return similar metrics.
+ - `Volume` might favour picking up more sparse metrics that were relatively flat and then came to life with some spikes (or vice versa). This is because for such metrics that just don't have that many different values in them, it is impossible to construct a cumulative distribution that can then be compared. So `Volume` might be useful in spotting examples of metrics turning on or off. ![example where volume captured network traffic turning on](https://user-images.githubusercontent.com/2178292/182336924-d02fd3d3-7f09-41da-9cfc-809d01396d9d.png)
+ - `KS2` since it relies on the full distribution might be better at highlighting more complex changes that `Volume` is unable to capture. For example a change in the variation of a metric might be picked up easily by `KS2` but missed (or just much lower scored) by `Volume` since the averages might remain not all that different between baseline and highlight even if their variance has changed a lot. ![example where KS2 captured a change in entropy distribution that volume alone might not have picked up](https://user-images.githubusercontent.com/2178292/182338289-59b61e6b-089d-431c-bc8e-bd19ba6ad5a5.png)
+- Use `Volume` and `Anomaly Rate` together to ask what metrics have turned most anomalous from baseline to highlighted window. You can expand the embedded anomaly rate chart once you have results to see this more clearly. ![example where Volume and Anomaly Rate together help show what dimensions where most anomalous](https://user-images.githubusercontent.com/2178292/182338666-6d19fa92-89d3-4d61-804c-8f10982114f5.png)
+
+## What's next?
+
+You can read more about all the ML powered capabilities of Netdata [here](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/anomaly-detection.md). If you aren't yet familiar with the power of Netdata Cloud's visualization features, check out the [Nodes view](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/nodes.md) and learn how to [build new dashboards](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/dashboards.md).
diff --git a/docs/cloud/manage/invite-your-team.md b/docs/cloud/manage/invite-your-team.md
new file mode 100644
index 000000000..f294a627d
--- /dev/null
+++ b/docs/cloud/manage/invite-your-team.md
@@ -0,0 +1,37 @@
+---
+title: "Invite your team"
+description: >-
+ "Invite your entire SRE, DevOPs, or ITOps team to Netdata Cloud to give everyone insights into your
+ infrastructure from a single pane of glass."
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/manage/invite-your-team.md"
+sidebar_label: "Invite your team"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Operations"
+---
+
+Invite new users to your Space by clicking on **Invite Users** in
+the [Space](https://github.com/netdata/netdata/blob/master/docs/cloud/spaces.md) management area.
+
+![Opening the invitation panel in Netdata Cloud](https://user-images.githubusercontent.com/1153921/108529805-1b13b480-7292-11eb-862f-0499e3fdac17.png)
+
+Enter the email addresses for the users you want to invite to your Space. You can enter any number of email addresses,
+separated by a comma, to send multiple invitations at once.
+
+Next, choose the War Rooms you want to invite these users to. Once logged in, these users are not restricted only to
+these War Rooms. They can be invited to others, or join any that are public.
+
+Click the **Send** button to send an email invitation, which will prompt them
+to [sign up](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/sign-in.mdx) and join your Space.
+
+![The invitation panel in Netdata Cloud](https://user-images.githubusercontent.com/1153921/97762959-53b33680-1ac7-11eb-8e9d-f3f4a14c0028.png)
+
+Any unaccepted invitations remain under **Invitations awaiting response**. These invitations can be rescinded at any
+time by clicking the trash can icon.
+
+## What's next?
+
+If your team members have trouble signing in, direct them to
+the [sign in guide](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/sign-in.mdx). Once your
+team is onboarded to Netdata Cloud, they can view shared assets, such
+as [new dashboards](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/dashboards.md).
diff --git a/docs/cloud/manage/sign-in.mdx b/docs/cloud/manage/sign-in.mdx
new file mode 100644
index 000000000..32fcb22e7
--- /dev/null
+++ b/docs/cloud/manage/sign-in.mdx
@@ -0,0 +1,88 @@
+---
+title: "Sign in with email, Google, or GitHub"
+description: "Learn how signing in to Cloud works via one of our three authentication methods, plus some tips if you're having trouble signing in."
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/manage/sign-in.mdx"
+sidebar_label: "Sign in with email, Google, or GitHub"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Operations"
+---
+
+You can [sign in to Netdata](https://app.netdata.cloud/sign-in?cloudRoute=spaces?utm_source=docs&utm_content=sign_in_button_first_section) through one of three methods: email, Google, or GitHub. Email uses a
+time-sensitive link that authenticates your browser, and Google/GitHub both use OAuth to associate your email address
+with a Netdata Cloud account.
+
+No matter the method, your Netdata Cloud account is based around your email address. Netdata Cloud does not store
+passwords.
+
+
+## Email
+
+To sign in with email, visit [Netdata Cloud](https://app.netdata.cloud/sign-in?cloudRoute=spaces?utm_source=docs&utm_content=sign_in_button_email_section), enter your email address, and click
+the **Sign in by email** button.
+
+![Verify your email!](https://user-images.githubusercontent.com/82235632/125475486-c667635a-067f-4866-9411-9f7f795a0d50.png)
+
+Click the **Verify** button in the email to begin using Netdata Cloud.
+
+To use this same Netdata Cloud account on additional devices, request another sign in email, open the email on that
+device, and sign in.
+
+### Don't have a Netdata Cloud account yet?
+
+If you don't have a Netdata Cloud account yet, you don't need to worry about it. We create one for you during the sign-in process and make the whole experience seamless.
+
+After your account is created and you sign in to Netdata, you first are asked to agree to Netdata Cloud's [Privacy
+Policy](https://www.netdata.cloud/privacy/) and [Terms of Use](https://www.netdata.cloud/terms/). Once you agree with these you are directed
+through the Netdata Cloud onboarding process, which is explained in the [Netdata Cloud
+quickstart](https://github.com/netdata/netdata/blob/master/docs/cloud/get-started.mdx).
+
+### Troubleshooting
+
+You should receive your sign in email in less than a minute. The subject is **Verify your email!** and the sender is `no-reply@app.netdata.cloud` via `sendgrid.net`.
+
+If you don't see the email, try the following:
+
+- Check [Netdata Cloud status](https://status.netdata.cloud) for ongoing issues with our infrastructure.
+- Request another sign in email via the [sign in page](https://app.netdata.cloud/sign-in?cloudRoute=spaces?utm_source=docs&utm_content=sign_in_button_troubleshooting_section).
+- Check your spam folder.
+- In Gmail, check the **Updates** category.
+
+You may also want to add `no-reply@app.netdata.cloud` to your address book or contacts list, especially if you're using
+a public email service, such as Gmail. You may also want to whitelist/allowlist either the specific email or the entire
+`app.netdata.cloud` domain.
+
+## Google and GitHub OAuth
+
+When you use Google/GitHub OAuth, your Netdata Cloud account is associated with the email address that Netdata Cloud
+receives via OAuth.
+
+To sign in with Google or GitHub OAuth, visit [Netdata Cloud](https://app.netdata.cloud/sign-in?cloudRoute=spaces?utm_source=docs&utm_content=sign_in_button_google_github_section) and click the
+**Continue with Google** or **Continue with GitHub** button. Enter your Google/GitHub username and your password. Complete two-factor
+authentication if you or your organization has it enabled.
+
+You are then signed in to Netdata Cloud or directed to the new-user onboarding if you have not signed up previously.
+
+## Reset a password
+
+Netdata Cloud does not store passwords and does not support password resets. None of our sign-in methods
+require passwords; they use either links in emails or Google/GitHub OAuth for authentication.
+
+## Switch between sign in methods
+
+You can switch between sign in methods if the email account associated with each method is the same.
+
+For example, you first sign in via your email account, `user@example.com`, and later sign out. You later attempt to sign
+in via a GitHub account associated with `user@example.com`. Netdata Cloud recognizes that the two are the same and signs
+you in to your original account.
+
+However, if you first sign in via your `user@example.com` email account and then sign in via a Google account associated
+with `user2@example.com`, Netdata Cloud creates a new account and begins the onboarding process.
+
+It is not currently possible to link an account created with `user@example.com` to a Google account associated with
+`user2@example.com`.
+
+## What's next?
+
+If you haven't already onboarded to Netdata Cloud and connected your first nodes, visit
+the [get started guide](https://github.com/netdata/netdata/blob/master/docs/cloud/get-started.mdx).
diff --git a/docs/cloud/manage/themes.md b/docs/cloud/manage/themes.md
new file mode 100644
index 000000000..11d5cb32f
--- /dev/null
+++ b/docs/cloud/manage/themes.md
@@ -0,0 +1,22 @@
+---
+title: "Choose your Netdata Cloud theme"
+description: "Switch between Light and Dark themes in Netdata Cloud to match your personal visualization preferences."
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/manage/themes.md"
+sidebar_label: "Choose your Netdata Cloud theme"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Operations"
+---
+
+The Dark theme is the default for all new Netdata Cloud accounts.
+
+To change your theme across Netdata Cloud, click on your profile picture, then **Profile**. Click on the **Settings**
+tab, then choose your preferred theme: Light or Dark.
+
+**Light**:
+
+![Light theme](https://user-images.githubusercontent.com/1153921/108530742-2ca98c00-7293-11eb-8c1e-1e0dd34eb87b.png)
+
+**Dark (default)**:
+
+![Dark theme](https://user-images.githubusercontent.com/1153921/108530848-4519a680-7293-11eb-897d-1c470b67ceb0.png)
diff --git a/docs/cloud/netdata-functions.md b/docs/cloud/netdata-functions.md
new file mode 100644
index 000000000..e1b9dd0b1
--- /dev/null
+++ b/docs/cloud/netdata-functions.md
@@ -0,0 +1,65 @@
+
+
+Netdata Agent collectors are able to expose functions that can be executed at run time, on demand. These are
+executed on the node (host) where the function is made
+available.
+
+#### What is a function?
+
+Besides their metric collection, storage, and/or streaming work, collectors are capable of executing specific routines on
+request. These routines return additional information
+to help you troubleshoot, or even trigger some action to happen on the node itself.
+
+A function is a `key` - `value` pair. The `key` uniquely identifies the function within a node. The `value` is the
+routine (i.e. code) that a data collector runs when
+the function is invoked.
+
+For more details, please check out the documentation of the first collector to expose
+functions - [plugins.d](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#function).
+
+#### What functions are currently available?
+
+| Function | Description | plugin - module |
+| :-- | :-- | :-- |
+| processes | Detailed information on the currently running processes on the node. | [apps.plugin](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md) |
+
+If you have ideas or requests for other functions:
+* open a [Feature request](https://github.com/netdata/netdata-cloud/issues/new?assignees=&labels=feature+request%2Cneeds+triage&template=FEAT_REQUEST.yml&title=%5BFeat%5D%3A+) on the Netdata Cloud repo
+* engage with our community on the [Netdata Discord server](https://discord.com/invite/mPZ6WZKKG2).
+
+#### How do functions work with streaming?
+
+Via streaming, the definitions of functions are transmitted to a parent node so it knows all the functions available on
+any children connected to it.
+
+If the parent node is the one connected to Netdata Cloud it is capable of triggering the call to the respective children
+node to run the function.
+
+#### Why are they available only on Netdata Cloud?
+
+Since these functions are able to execute routines on the node, and due to the potential use cases that they can cover, our
+concern is to ensure no sensitive
+information or disruptive actions are exposed through the Agent's API.
+
+Because the communication between the Netdata Agent and Netdata Cloud goes
+through the [ACLK](https://github.com/netdata/netdata/blob/master/aclk/README.md), this
+concern is addressed.
+
+## Related Topics
+
+### **Related Concepts**
+
+- [ACLK](https://github.com/netdata/netdata/blob/master/aclk/README.md)
+- [plugins.d](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md)
+
+### Related Tasks
+
+- [Run-time troubleshooting with Functions](https://github.com/netdata/netdata/blob/master/docs/cloud/runtime-troubleshooting-with-functions.md)
diff --git a/docs/cloud/runtime-troubleshooting-with-functions.md b/docs/cloud/runtime-troubleshooting-with-functions.md
new file mode 100644
index 000000000..3800ea20d
--- /dev/null
+++ b/docs/cloud/runtime-troubleshooting-with-functions.md
@@ -0,0 +1,43 @@
+
+
+The Netdata Functions feature allows you to execute, on demand, a pre-defined routine on a node where a Netdata Agent is running. These routines are exposed by a given collector.
+They can be used to retrieve additional information to help you troubleshoot, or to trigger some action to happen on the node itself.
+
+
+### Prerequisites
+
+The following is required to be able to run Functions from Netdata Cloud.
+* At least one of the nodes claimed to your Space should be on a Netdata agent version higher than `v1.37.1`
+* Ensure that the node has the collector that exposes the function you want enabled ([see current available functions](https://github.com/netdata/netdata/blob/master/docs/cloud/netdata-functions.md#what-functions-are-currently-available))
+
+### Execute a function (from functions view)
+
+1. From the right-hand bar select the **Function** you want to run
+2. Still on the right-hand bar select the **Node** where you want to run it
+3. Results will be displayed in the central area for you to interact with
+4. Additional filtering capabilities, depending on the function, should be available on the right-hand bar
+
+### Execute a function (from Nodes view)
+
+1. Click on the functions icon for a node that has this capability active
+2. You are directed to the **Functions** tab
+3. Follow the above instructions from step 3.
+
+> ⚠️ If you get an error saying that your node can't execute Functions please check the [prerequisites](#prerequisites).
+
+## Related Topics
+
+### **Related Concepts**
+- [Netdata Functions](https://github.com/netdata/netdata/blob/master/docs/cloud/netdata-functions.md)
+
+#### Related References documentation
+- [External plugins overview](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#function)
diff --git a/docs/cloud/spaces.md b/docs/cloud/spaces.md
new file mode 100644
index 000000000..31d8a47ae
--- /dev/null
+++ b/docs/cloud/spaces.md
@@ -0,0 +1,91 @@
+---
+title: "Spaces"
+description: >-
+ "Organize your infrastructure monitoring on Netdata Cloud by creating Spaces, then groupingyour
+ Agent-monitored nodes."
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/spaces.md"
+sidebar_label: "Spaces"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Operations"
+---
+
+A Space is a high-level container. It's a collaboration space where you can organize team members, access levels and the
+nodes you want to monitor.
+
+Let's talk through some strategies for creating the most intuitive Cloud experience for your team.
+
+## How to organize your Netdata Cloud
+
+You can use any number of Spaces you want, but as you organize your Cloud experience, keep in mind that _you can only
+add any given node to a single Space_. This 1:1 relationship between node and Space may dictate whether you use one
+encompassing Space for your entire team and separate them by War Rooms, or use different Spaces for teams monitoring
+discrete parts of your infrastructure.
+
+If you have been invited to Netdata Cloud by another user, you will see that user's Space by default. If you are a new
+user, your first Space is already created for you.
+
+The other consideration for the number of Spaces you use to organize your Netdata Cloud experience is the size and
+complexity of your organization.
+
+For small teams and infrastructures, we recommend sticking to a single Space so that you can keep all your nodes and their
+respective metrics in one place. You can then use
+multiple [War Rooms](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md)
+to further organize your infrastructure monitoring.
+
+Enterprises may want to create multiple Spaces for each of their larger teams, particularly if those teams have
+different responsibilities or parts of the overall infrastructure to monitor. For example, you might have one SRE team
+for your user-facing SaaS application and a second team for infrastructure tooling. If they don't need to monitor the
+same nodes, you can create separate Spaces for each team.
+
+## Navigate between spaces
+
+Click on any of the boxes to switch between available Spaces.
+
+Netdata Cloud abbreviates each Space to the first letter of the name, or the first two letters if the name is two words
+or more. Hover over each icon to see the full name in a tooltip.
+
+To add a new Space click on the green **+** button. Enter the name of the Space and click **Save**.
+
+![Switch between Spaces](/img/cloud/main-page-add-space.png)
+
+## Manage Spaces
+
+Manage your Spaces by selecting a particular Space and clicking on the small gear icon in the lower left corner. This
+will open a side tab in which you can:
+
+1. _Configure this Space*_: in the first tab (**Space**) you can change the name, description and/or some privilege
+   options of this Space.
+
+2. _Edit the War Rooms*_: click on the **War rooms** tab to add or remove War Rooms.
+
+3. _Connect nodes*_: click on the **Nodes** tab. Copy the claiming script to your node and run it. See the
+   [connect to Cloud doc](https://github.com/netdata/netdata/blob/master/claim/README.md) for details.
+
+4. _Manage the users*_: click on **Users**.
+   The [invitation doc](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/invite-your-team.md)
+   details the invitation process.
+
+5. _Manage notification settings*_: click on the **Notifications** tab to turn notification methods on or off.
+
+6. _Manage your bookmarks*_: click on the **Bookmarks** tab to add or remove bookmarks that you need.
+
+:::note \* This action requires admin rights for this space
+:::
+
+## Obsoleting offline nodes from a Space
+
+Netdata admin users have the ability to remove obsolete nodes from a Space.
+
+- Only admin users have the ability to obsolete nodes
+- Only offline nodes can be marked obsolete (Live nodes and stale nodes cannot be obsoleted)
+- Node obsoletion works across the entire space, so the obsoleted node will be removed from all rooms belonging to the
+ space
+- If the obsoleted nodes eventually become live or online once more, they will be automatically re-added to the space
+
+![Obsoleting an offline node](https://user-images.githubusercontent.com/24860547/173087202-70abfd2d-f0eb-4959-bd0f-74aeee2a2a5a.gif)
+
+## What's next?
+
+Once you have configured your Spaces, it's time to set up
+your [War Rooms](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md).
diff --git a/docs/cloud/visualize/dashboards.md b/docs/cloud/visualize/dashboards.md
new file mode 100644
index 000000000..3c6d7ffd5
--- /dev/null
+++ b/docs/cloud/visualize/dashboards.md
@@ -0,0 +1,122 @@
+---
+title: "Build new dashboards"
+description: >-
+ "Design new dashboards that target your infrastructure's unique needs and share them with your team for
+ targeted visual anomaly detection or incident response."
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/dashboards.md"
+sidebar_label: "Build new dashboards"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Operations/Visualizations"
+---
+
+With Netdata Cloud, you can build new dashboards that target your infrastructure's unique needs. Put key metrics from
+any number of distributed systems in one place for a bird's eye view of your infrastructure.
+
+Click on the **Dashboards** tab in any War Room to get started.
+
+## Create your first dashboard
+
+From the Dashboards tab, click on the **+** button.
+
+![Add or manage
+dashboards](https://user-images.githubusercontent.com/1153921/108529360-a2145d00-7291-11eb-814b-2ea3303beb64.png)
+
+In the modal, give your new dashboard a name, and click **+ Add**.
+
+Click the **Add Chart** button to add your first chart card. From the dropdown, select either **All Nodes** or a specific
+node. If you select **All Nodes**, you will add a [composite chart](/docs/cloud/visualize/overview#composite-charts) to
+your new dashboard. Next, select the context. You'll see a preview of the chart before you finish adding it.
+
+The **Add Text** button creates a new card with user-defined text, which you can use to describe or document a
+particular dashboard's meaning and purpose.
+
+Be sure to click the **Save** button any time you make changes to your dashboard.
+
+![An example multi-node dashboard for system CPU
+metrics](https://user-images.githubusercontent.com/1153921/108526381-4f857180-728e-11eb-9d65-1613e60891a5.png)
+
+## Using your dashboard
+
+Dashboards are designed to be interactive and flexible so you can design them to your exact needs. Dashboards are made
+of any number of **cards**, which can contain charts or text.
+
+### Chart cards
+
+Click the **Add Chart** button to add your first chart card. From the dropdown, select either **All Nodes** or a specific
+node. If you select **All Nodes**, you will add a [composite chart](/docs/cloud/visualize/overview#composite-charts) to
+your new dashboard. Next, select the context. You'll see a preview of the chart before you finish adding it.
+
+The charts you add to any dashboard are fully interactive, just like the charts in an Agent dashboard or a single node's
+dashboard in Cloud. Zoom in and out, highlight timeframes, and more. See our
+[Agent dashboard docs](https://learn.netdata.cloud/docs/agent/web#using-charts) for all the shortcuts.
+
+Charts also synchronize as you interact with them, even across contexts _or_ nodes.
+
+### Text cards
+
+The **Add Text** button creates a new card with user-defined text. When you create a new text card or edit an existing
+one, select/highlight characters or words to open a modal to make them **bold**, _italic_, or underlined. You
+can also create a link.
+
+### Move cards
+
+To move any card, click and hold on the top of the card, then drag it to a new location. A red placeholder indicates the
+new location. Once you release your mouse, other charts re-sort to the grid system automatically.
+
+### Resize cards
+
+To resize any card on a dashboard, click on the bottom-right corner and drag to the card's new size. Other cards re-sort
+to the grid system automatically.
+
+## Jump to single-node dashboards
+
+Quickly jump to any node's dashboard by clicking the 3-dot icon in the corner of any card to open a menu. Hit the **Go
+to Chart** item.
+
+You'll land directly on that chart of interest, but you can now scroll up and down to correlate your findings with other
+charts. Of course, you can continue to zoom, highlight, and pan through time just as you're used to with Agent
+dashboards.
+
+## Pin dashboards
+
+Click on the **Pin** button in any dashboard to put those charts into a separate panel at the bottom of the screen. You
+can now navigate freely through Netdata Cloud (individual Cloud dashboards, the Nodes view, different War Rooms, or even
+different Spaces) and have those valuable metrics follow you.
+
+Pinning dashboards helps you correlate potentially related charts across your infrastructure, no matter how you
+organized your Spaces and War Rooms, and helps you discover root causes faster.
+
+## Manage your dashboards
+
+To see dashboards associated with the current War Room, click the **Dashboards** tab in any War Room. You can select
+dashboards and delete them using the 🗑️ icon.
+
+### Update/save a dashboard
+
+If you've made changes to a dashboard, such as adding or moving cards, the **Save** button is enabled. Click it to save
+your most recent changes. Any other members of the War Room will be able to see these changes the next time they load
+this dashboard.
+
+If multiple users attempt to make concurrent changes to the same dashboard, the second user who hits Save will be
+prompted to either overwrite the dashboard or reload to see the most recent changes.
+
+### Remove an individual card
+
+Click on the 3-dot icon in the corner of any card to open a menu. Click the **Remove Card** item to remove the card.
+
+### Delete a dashboard
+
+Delete any dashboard by navigating to it and clicking the **Delete** button. This will remove this entry from the
+dropdown for every member of this War Room.
+
+### Minimum browser viewport
+
+Because of the visual complexity of individual charts, dashboards require a minimum browser viewport of 800px.
+
+## What's next?
+
+Once you've designed a dashboard or two, make sure
+to [invite your team](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/invite-your-team.md) if
+you haven't already. You can add these new users to the same War Room to let them see the same dashboards without any
+effort.
diff --git a/docs/cloud/visualize/interact-new-charts.md b/docs/cloud/visualize/interact-new-charts.md
new file mode 100644
index 000000000..4b33fe85f
--- /dev/null
+++ b/docs/cloud/visualize/interact-new-charts.md
@@ -0,0 +1,222 @@
+---
+title: "Interact with charts"
+description: >-
+ "Learn how to get the most out of Netdata's charts. These charts will help you make sense of all the
+ metrics at your disposal, helping you troubleshoot with real-time, per-second metric data"
+type: "how-to"
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md"
+sidebar_label: "Interact with charts"
+learn_status: "Published"
+learn_topic_type: "Concepts"
+learn_rel_path: "Operations/Visualizations"
+---
+
+> ⚠️ This new version of charts is currently **only** available on Netdata Cloud. We didn't want to keep this valuable
+> feature from you, so after we get this into your hands on the Cloud, we will collect and implement your feedback.
+> Together, we will be able to provide the best possible version of charts on the Netdata Agent dashboard, as quickly as
+> possible.
+
+Netdata excels in collecting, storing, and organizing metrics in out-of-the-box dashboards.
+To make sense of all the metrics, Netdata offers an enhanced version of charts that update every second.
+
+These charts provide a lot of useful information, so that you can:
+
+- Enjoy the high-resolution, granular metrics collected by Netdata
+- Explore visualization with more options such as _line_, _stacked_ and _area_ types (other types like _bar_, _pie_ and
+ _gauges_ are to be added shortly)
+- Examine all the metrics by hovering over them with your cursor
+- Use intuitive tooling and shortcuts to pan, zoom or highlight your charts
+- On highlight, easily access
+  [Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md) to
+ see other metrics with similar patterns
+- Have the dimensions sorted based on name or value
+- View information about the chart, its plugin, context, and type
+- Get the chart status and possible errors, plus the ability to reload the chart
+
+These charts are available on
+the [Overview tab](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md), the Single Node view and
+on your [Custom Dashboards](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/dashboards.md).
+
+## Overview
+
+Have a look at the overall look and feel of the charts with both a composite chart from
+the [Overview tab](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md) and a simple chart
+from the single node view:
+
+![NRve6zr325.gif](https://images.zenhubusercontent.com/60b4ebb03f4163193ec31819/5ecaf5ec-1229-480e-b122-62f63e9df227)
+
+With a quick glance you have immediate information available at your disposal:
+
+- Chart title and units
+- Action bars
+- Chart area
+- Legend with dimensions
+
+## Play, Pause and Reset
+
+Your charts are controlled using the
+available [Time controls](https://github.com/netdata/netdata/blob/master/docs/dashboard/visualization-date-and-time-controls.mdx#time-controls).
+Besides these, when interacting with the chart you can also activate these controls by:
+
+- hovering over any chart to temporarily pause it - this momentarily switches time control to Pause, so that you can
+  hover over a specific timeframe. When moving out of the chart, the time control will go back to Play (if that was its
+  previous state)
+- clicking on the chart to lock it - this enables the Pause option on the time controls, locking it to the current
+  timeframe. This is useful if you want to jump to a different chart to look for possible correlations.
+- double clicking to release a previously locked chart - this moves the time control back to Play
+
+ ![23CHKCPnnJ.gif](https://images.zenhubusercontent.com/60b4ebb03f4163193ec31819/0b1e111e-df44-4d92-b2e3-be5cfd9db8df)
+
+| Interaction | Keyboard/mouse | Touchpad/touchscreen | Time control |
+|:------------------|:---------------|:---------------------|:----------------------|
+| **Pause** a chart | `hover` | `n/a` | Temporarily **Pause** |
+| **Stop** a chart | `click` | `tap` | **Pause** |
+| **Reset** a chart | `double click` | `n/a` | **Play** |
+
+Note: These interactions are available when the default "Pan" action is used. Other actions are accessible via
+the [Exploration action bar](#exploration-action-bar).
+
+## Title and chart action bar
+
+When you start interacting with a chart, you'll notice valuable information on the top bar. You will see information
+from the chart title to a chart action bar.
+
+The elements that you can find on this top bar are:
+
+- Netdata icon: this indicates that data is continuously being updated; this happens
+  if [Time controls](https://github.com/netdata/netdata/blob/master/docs/dashboard/visualization-date-and-time-controls.mdx#time-controls)
+  are in Play or Force Play mode
+- Chart status icon: indicates the status of the chart. Possible values are: Loading, Timeout, Error or No data
+- Chart title: on the chart title you can see the title together with the metric being displayed, as well as the unit of
+ measurement
+- Chart action bar: here you can access chart info, change chart types, enable fullscreen mode, and add the chart
+  to a custom dashboard
+
+![image.png](https://images.zenhubusercontent.com/60b4ebb03f4163193ec31819/c8f5f0bd-5f84-4812-970b-0e4340f4773b)
+
+### Chart action bar
+
+On this bar you have access to immediate actions over the chart, the available actions are:
+
+- Chart info: you will be able to get more information relevant to the chart you are interacting with
+- Chart type: change the chart type from _line_, _stacked_ or _area_
+- Enter fullscreen mode: allows you to expand the current chart to the full size of your screen
+- Add chart to dashboard: This allows you to add the chart to an existing custom dashboard or directly create a new one
+ that includes the chart.
+
+
+
+## Exploration action bar
+
+When exploring the chart you will see a second action bar. This action bar is there to support you in this task. The
+available actions that you can see are:
+
+- Pan
+- Highlight
+- Horizontal and Vertical zooms
+- In-context zoom in and out
+
+
+
+### Pan
+
+Drag your mouse/finger to the right to pan backward through time, or drag to the left to pan forward in time. Think of
+it like pushing the current timeframe off the screen to see what came before or after.
+
+| Interaction | Keyboard | Mouse | Touchpad/touchscreen |
+|:------------|:---------|:---------------|:---------------------|
+| **Pan** | `n/a` | `click + drag` | `touch drag` |
+
+### Highlight
+
+Selecting timeframes is useful when you see an interesting spike or change in a chart and want to investigate further,
+either by looking at the same period of time on other charts/sections or by triggering actions from an in-context action
+bar to help you troubleshoot (currently only available on Single Node view). The available actions are:
+
+- run [Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md)
+- zoom in on the selected timeframe
+
+[Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md)
+will only be available if you respect the timeframe selection limitations. The selected duration pill together with the
+button state helps visualize this.
+
+
+
+
+
+| Interaction | Keyboard/mouse | Touchpad/touchscreen |
+|:-----------------------------------|:---------------------------------------------------------|:---------------------|
+| **Highlight** a specific timeframe | `Alt + mouse selection` or `⌘ + mouse selection` (macOS) | `n/a` |
+
+### Zoom
+
+Zooming in helps you see metrics with maximum granularity, which is useful when you're trying to diagnose the root cause
+of an anomaly or outage. Zooming out lets you see metrics within the larger context, such as the last hour, day, or
+week, which is useful in understanding what "normal" looks like, or to identify long-term trends, like a slow creep in
+memory usage.
+
+The actions above are _normal_ vertical zoom actions. We also provide a horizontal zoom action that helps you focus on a
+specific Y-axis area to further investigate a spike or dive on your charts.
+
+![Y5IESOjD3s.gif](https://images.zenhubusercontent.com/60b4ebb03f4163193ec31819/f8722ee8-e69b-426c-8bcb-6cb79897c177)
+
+| Interaction | Keyboard/mouse | Touchpad/touchscreen |
+|:-------------------------------------------|:-------------------------------------|:-----------------------------------------------------|
+| **Zoom** in or out | `Shift + mouse scrollwheel` | `two-finger pinch` `Shift + two-finger scroll` |
+| **Zoom** to a specific timeframe | `Shift + mouse vertical selection` | `n/a` |
+| **Horizontal Zoom** a specific Y-axis area | `Shift + mouse horizontal selection` | `n/a` |
+
+You also have two direct action buttons on the exploration action bar for in-context `Zoom in` and `Zoom out`.
+
+## Other interactions
+
+### Order dimensions legend
+
+The bottom legend of the chart, where you can see the dimensions of the chart, can be ordered by:
+
+- Dimension name (Ascending or Descending)
+- Dimension value (Ascending or Descending)
+
+
+
+### Show and hide dimensions
+
+Hiding dimensions simplifies the chart and can help you better discover exactly which aspect of your system might be
+behaving strangely.
+
+| Interaction | Keyboard/mouse | Touchpad/touchscreen |
+|:---------------------------------------|:----------------|:---------------------|
+| **Show one** dimension and hide others | `click` | `tap` |
+| **Toggle (show/hide)** one dimension | `Shift + click` | `n/a` |
+
+### Resize
+
+To resize the chart, click-and-drag the icon on the bottom-right corner of any chart. To restore the chart to its
+original height, double-click the same icon.
+
+![AjqnkIHB9H.gif](https://images.zenhubusercontent.com/60b4ebb03f4163193ec31819/1bcc6a0a-a58e-457b-8a0c-e5d361a3083c)
+
+## What's next?
+
+We recommend you read up on the differences
+between [chart dimensions, contexts, and families](https://github.com/netdata/netdata/blob/master/docs/dashboard/dimensions-contexts-families.mdx)
+to strengthen your understanding of how Netdata organizes its dashboards. Another valuable way to interact with charts
+is to use
+the [date and time controls](https://github.com/netdata/netdata/blob/master/docs/dashboard/visualization-date-and-time-controls.mdx),
+which helps you visualize specific moments of historical metrics.
+
+### Further reading & related information
+
+- Dashboard
+ - [How the dashboard works](https://github.com/netdata/netdata/blob/master/docs/dashboard/how-dashboard-works.mdx)
+ - [Chart dimensions, contexts, and families](https://github.com/netdata/netdata/blob/master/docs/dashboard/dimensions-contexts-families.mdx)
+ - [Date and Time controls](https://github.com/netdata/netdata/blob/master/docs/dashboard/visualization-date-and-time-controls.mdx)
+ - [Customize the standard dashboard](https://github.com/netdata/netdata/blob/master/docs/dashboard/customize.mdx)
+ - [Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md)
+ - [Netdata Agent - Interact with charts](https://github.com/netdata/netdata/blob/master/docs/dashboard/interact-charts.mdx)
diff --git a/docs/cloud/visualize/kubernetes.md b/docs/cloud/visualize/kubernetes.md
new file mode 100644
index 000000000..0ff839703
--- /dev/null
+++ b/docs/cloud/visualize/kubernetes.md
@@ -0,0 +1,154 @@
+---
+title: "Kubernetes visualizations"
+description: "Netdata Cloud features rich, zero-configuration Kubernetes monitoring for the resource utilization and application metrics of Kubernetes (k8s) clusters."
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/kubernetes.md"
+sidebar_label: "Kubernetes visualizations"
+learn_status: "Published"
+learn_topic_type: "Concepts"
+learn_rel_path: "Operations/Visualizations"
+---
+
+Netdata Cloud features enhanced visualizations for the resource utilization of Kubernetes (k8s) clusters, embedded in
+the default [Overview](/docs/cloud/visualize/overview/) dashboard.
+
+These visualizations include a health map for viewing the status of k8s pods/containers, in addition to composite charts
+for viewing per-second CPU, memory, disk, and networking metrics from k8s nodes.
+
+## Before you begin
+
+In order to use the Kubernetes visualizations in Netdata Cloud, you need:
+
+- A Kubernetes cluster running Kubernetes v1.9 or newer.
+- A Netdata deployment using the latest version of the [Helm chart](https://github.com/netdata/helmchart), which
+  installs [v1.29.2](https://github.com/netdata/netdata/releases) or newer of the Netdata Agent.
+- To connect your Kubernetes cluster to Netdata Cloud.
+
+See our [Kubernetes deployment instructions](/docs/agent/packaging/installer/methods/kubernetes/) for details on
+installation and connecting to Netdata Cloud.
+
+## Available Kubernetes metrics
+
+Netdata Cloud organizes and visualizes the following metrics from your Kubernetes cluster from every container:
+
+- `cpu_limit`: CPU utilization as a percentage of the limit defined by the [pod specification
+  `spec.containers[].resources.limits.cpu`](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container)
+  or a [`LimitRange`
+  object](https://kubernetes.io/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace/#create-a-limitrange-and-a-pod). See the example pod spec after this list.
+- `cpu`: CPU utilization of the pod/container. 100% usage equals 1 fully-utilized core, 200% equals 2 fully-utilized
+ cores, and so on.
+- `cpu_per_core`: CPU utilization averaged across available cores.
+- `mem_usage_limit`: Memory utilization, without cache, as a percentage of the limit defined by the [pod specification
+ `spec.containers[].resources.limits.memory`](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-requests-and-limits-of-pod-and-container)
+ or a [`LimitRange`
+ object](https://kubernetes.io/docs/tasks/administer-cluster/manage-resources/cpu-default-namespace/#create-a-limitrange-and-a-pod).
+- `mem_usage`: Used memory, without cache.
+- `mem`: The sum of `cache` and `rss` (resident set size) memory usage.
+- `writeback`: The size of `dirty` and `writeback` cache.
+- `mem_activity`: Sum of `in` and `out` bandwidth.
+- `pgfaults`: Sum of page fault bandwidth, which are raised when the Kubernetes cluster tries accessing a memory page
+ that is mapped into the virtual address space, but not actually loaded into main memory.
+- `throttle_io`: Sum of `read` and `write` per second across all PVs/PVCs attached to the container.
+- `throttle_serviced_ops`: Sum of the `read` and `write` operations per second across all PVs/PVCs attached to the
+ container.
+- `net.net`: Sum of `received` and `sent` bandwidth per second.
+- `net.packets`: Sum of `multicast`, `received`, and `sent` packets.
+
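+For reference, the `cpu_limit` and `mem_usage_limit` metrics above are computed against the limits set in the pod
+specification. A minimal, illustrative pod spec that defines such limits (all names and values below are hypothetical)
+might look like this:
+
+```yml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: example-app            # hypothetical pod name, for illustration only
+spec:
+  containers:
+    - name: app
+      image: example/app:1.0   # hypothetical image
+      resources:
+        limits:
+          cpu: "500m"          # cpu_limit reports utilization as a % of this value
+          memory: "256Mi"      # mem_usage_limit reports usage (without cache) as a % of this value
+```
+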
+When viewing the [health map](#health-map), Netdata Cloud shows the above metrics per container, or aggregated based on
+their associated pods.
+
+When viewing the [composite charts](#composite-charts), Netdata Cloud aggregates metrics from multiple nodes, pods, or
+containers, depending on the grouping chosen. For example, if you group the `cpu_limit` composite chart by
+`k8s_namespace`, the metrics shown will be the average of `cpu_limit` metrics from all nodes/pods/containers that are
+part of that namespace.
+
+## Health map
+
+The health map places each container or pod as a single box, then varies the intensity of its color to visualize the
+resource utilization of specific k8s pods/containers.
+
+![The Kubernetes health map in Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/106964367-39f54100-66ff-11eb-888c-5a04f8abb3d0.png)
+
+Change the health map's coloring, grouping, and displayed nodes to customize your experience and learn more about the
+status of your k8s cluster.
+
+### Color by
+
+Color the health map by choosing an aggregate function to apply to an [available Kubernetes
+metric](#available-kubernetes-metrics), then choose whether to display boxes for individual pods or containers.
+
+The default is the _average_ of _CPU within the configured limit_, organized by _container_.
+
+### Group by
+
+Group the health map by the `k8s_cluster_id`, `k8s_controller_kind`, `k8s_controller_name`, `k8s_kind`, `k8s_namespace`,
+and `k8s_node_name`. The default is `k8s_controller_name`.
+
+### Filtering
+
+Filtering behaves identically to the [node filter in War Rooms](/docs/cloud/war-rooms#node-filter), with the ability to
+filter pods/containers by `container_id` and `namespace`.
+
+### Detailed information
+
+Hover over any of the pods/containers in the map to display a modal window, which contains contextual information
+and real-time metrics from that resource.
+
+![The modal containing additional information about a k8s
+resource](https://user-images.githubusercontent.com/1153921/106964369-3a8dd780-66ff-11eb-8a8a-a5c8f0d5711f.png)
+
+The **Context** tab provides the following details about a container or pod:
+
+- Cluster ID
+- Node
+- Controller Kind
+- Controller Name
+- Pod Name
+- Container
+- Kind
+- Pod UID
+
+This information helps orient you as to where the container/pod operates inside your cluster.
+
+The **Metrics** tab contains charts visualizing the last 15 minutes of the same metrics available in the [color by
+option](#color-by). Use these metrics along with the context, to identify which containers or pods are experiencing
+problematic behavior to investigate further, troubleshoot, and remediate with `kubectl` or another tool.
+
+## Composite charts
+
+The Kubernetes composite charts show real-time and historical resource utilization metrics from nodes, pods, or
+containers within your Kubernetes deployment.
+
+See the [Overview](/docs/cloud/visualize/overview#definition-bar) doc for details on how composite charts work. These
+work similarly, but in addition to visualizing _by dimension_ and _by node_, Kubernetes composite charts can also be
+grouped by the following labels:
+
+- `k8s_cluster_id`
+- `k8s_container_id`
+- `k8s_container_name`
+- `k8s_controller_kind`
+- `k8s_kind`
+- `k8s_namespace`
+- `k8s_node_name`
+- `k8s_pod_name`
+- `k8s_pod_uid`
+
+![Composite charts of Kubernetes metrics in Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/106964370-3a8dd780-66ff-11eb-8858-05b2253b25c6.png)
+
+In addition, when you hover over a composite chart, the colors in the heat map change as well, so you can see how
+certain pod/container-level metrics change over time.
+
+## Caveats
+
+There are some caveats and known issues with Kubernetes monitoring with Netdata Cloud.
+
+- **No way to remove any nodes** you might have
+ [drained](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/) from your Kubernetes cluster. These
+  drained nodes will be marked "unreachable" and will show up in War Room management screens/dropdowns. The same applies
+  to any ephemeral nodes created and destroyed during horizontal scaling.
+
+## What's next?
+
+For more information about monitoring a k8s cluster with Netdata, see our guide: [_Kubernetes monitoring with Netdata: Overview and visualizations_](/guides/monitor/kubernetes-k8s-netdata/).
diff --git a/docs/cloud/visualize/nodes.md b/docs/cloud/visualize/nodes.md
new file mode 100644
index 000000000..9878b6b10
--- /dev/null
+++ b/docs/cloud/visualize/nodes.md
@@ -0,0 +1,53 @@
+---
+title: "Nodes view"
+description: "See charts from all your nodes in one pane of glass, then dive in to embedded dashboards for granular troubleshooting of ongoing issues."
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/nodes.md"
+sidebar_label: "Nodes view"
+learn_status: "Published"
+learn_topic_type: "Concepts"
+learn_rel_path: "Operations/Visualizations"
+---
+
+The Nodes view lets you see and customize key metrics from any number of Agent-monitored nodes and seamlessly navigate
+to any node's dashboard for troubleshooting performance issues or anomalies using Netdata's highly-granular metrics.
+
+![The Nodes view in Netdata
+Cloud](https://user-images.githubusercontent.com/1153921/119035218-2eebb700-b964-11eb-8b74-4ec2df0e457c.png)
+
+Each War Room's Nodes view is populated based on the nodes you added to that specific War Room. Each node occupies a
+single row, first featuring that node's alarm status (yellow for warnings, red for critical alarms) and operating
+system, then some essential information about the node, followed by columns of user-defined key metrics represented in
+real-time charts.
+
+Use the [Overview](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md) for monitoring an infrastructure in real time using
+composite charts and Netdata's familiar dashboard UI.
+
+Check the [War Room docs](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md) for details on the utility bar, which contains the [node
+filter](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md#node-filter) and the [timeframe
+selector](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md#play-pause-force-play-and-timeframe-selector).
+
+## Add and customize metrics columns
+
+Add more metrics columns by clicking the gear icon. Choose the context you'd like to add, give it a relevant name, and
+select whether you want to see all dimensions (the default), or only the specific dimensions your team is interested in.
+
+Click the gear icon and hover over any existing charts, then click the pencil icon. This opens a panel to
+edit that chart. Edit the context, its title, add or remove dimensions, or delete the chart altogether.
+
+These customizations appear for anyone else with access to that War Room.
+
+## See more metrics in Netdata Cloud
+
+If you want to add more metrics to your War Rooms and they don't show up when you add new metrics to Nodes, you likely
+need to configure those nodes to collect from additional data sources. See our [collectors doc](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md)
+to learn how to use dozens of pre-installed collectors that can instantly collect from your favorite services and applications.
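+
+As a rough sketch of what that usually involves: each collector has its own configuration file under the Agent's config
+directory, where you define one or more jobs pointing at the data source. The module, job name, and endpoint below are
+hypothetical; the collectors doc linked above has the exact settings for each collector.
+
+```yml
+# go.d/nginx.conf (edited with ./edit-config) -- illustrative sketch only
+jobs:
+  - name: local
+    url: http://127.0.0.1/stub_status   # hypothetical endpoint exposed by the service you monitor
+```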
+
+If you want to see up to 30 days of historical metrics in Cloud (and more on individual node dashboards), read our guide
+on [long-term storage of historical metrics](https://github.com/netdata/netdata/blob/master/docs/guides/longer-metrics-storage.md). Also, see our
+[calculator](/docs/store/change-metrics-storage#calculate-the-system-resources-RAM-disk-space-needed-to-store-metrics)
+for finding the disk and RAM you need to store metrics for a certain period of time.
+
+## What's next?
+
+Now that you know how to view your nodes at a glance, learn how to [track active
+alarms](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/view-active-alerts.mdx) with the Alerts Smartboard.
diff --git a/docs/cloud/visualize/overview.md b/docs/cloud/visualize/overview.md
new file mode 100644
index 000000000..35c07656a
--- /dev/null
+++ b/docs/cloud/visualize/overview.md
@@ -0,0 +1,250 @@
+---
+title: "Home, Overview and Single Node view"
+description: >-
+ "The Home tab automatically presents relevant information of your War Room, the Overview uses composite
+ charts from all the nodes in a given War Room and Single Node view provides a look at a specific Node"
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md"
+sidebar_label: "Home, Overview and Single Node view"
+learn_status: "Published"
+learn_topic_type: "Concepts"
+learn_rel_path: "Operations/Visualizations"
+---
+
+## Home
+
+The Home tab provides a predefined dashboard of relevant information about entities in the War Room.
+
+This tab will
+automatically present summarized information in an easily digestible display. You can see information about your
+nodes, data collection and retention stats, alerts, users and dashboards.
+
+## Overview
+
+The Overview tab is another great way to monitor infrastructure using Netdata Cloud. While the interface might look
+similar to local dashboards served by an Agent, the Overview uses **composite charts**.
+These charts display real-time aggregated metrics from all the nodes (or a filtered selection) in a given War Room.
+
+With Overview's composite charts, you can see your infrastructure from a single pane of glass, discover trends or
+anomalies, then drill down by grouping metrics by node and jumping to single-node dashboards for root cause analysis.
+
+## Single Node view
+
+The Single Node view dashboard engine is the same as the Overview, meaning that it also uses **composite charts**, and
+displays real-time aggregated metrics from a specific node.
+
+As mentioned above, the interface is similar to local dashboards served by an Agent, but this dashboard also uses
+**composite charts** which, in the case of a single node, will aggregate
+multiple chart _instances_ belonging to a context into a single chart. For example, for the `disk.io` context it combines
+an aggregated view of each disk the node has into a single chart.
+
+Further tools provided in the composite chart [definition bar](/docs/cloud/visualize/overview#definition-bar) will allow you
+to explore in more detail what is happening on each _instance_.
+
+## Before you get started
+
+Only nodes with v1.25.0-127 or later of the [open-source Netdata](https://github.com/netdata/netdata) monitoring
+agent can contribute to composite charts. If your node(s) use an earlier version of Netdata, you will see them marked as
+**needs upgrade** in various dropdowns.
+
+See our [update docs](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md) for the preferred
+update method based on how you installed
+Netdata.
+
+## Composite charts
+
+The Overview uses composite charts, which aggregate metrics from all the nodes (or a filtered selection) in a given War
+Room.
+
+## Definition bar
+
+Each composite chart has a definition bar to provide information about the following:
+
+* Grouping option
+* Aggregate function to be applied in case multiple data sources exist
+* Instances
+* Nodes
+* Dimensions, and
+* Aggregate function over time to be applied if one point in the chart consists of multiple data points aggregated
+
+### Group by dimension, node, or instance
+
+Click on the **dimension** dropdown to change how a composite chart groups metrics.
+
+The default option is by _dimension_, so that each line/area in the visualization is the aggregation of a single
+dimension.
+This provides a per-dimension view of the data from all the nodes in the War Room, taking into account filtering
+criteria if defined.
+
+A composite chart grouped by _node_ visualizes a single metric across contributing nodes. If the composite chart has
+five contributing nodes, there will be five lines/areas. This is typically an absolute value of the sum of the
+dimensions over each node, but there are some opinionated-but-valuable exceptions where a specific dimension is selected.
+Grouping by nodes allows you to quickly understand which nodes in your infrastructure are experiencing anomalous
+behavior.
+
+A composite chart grouped by _instance_ visualizes each instance of a piece of software or hardware on a node and
+displays each of these as a separate dimension. By grouping the `disk.io` chart by _instance_, you can visualize the
+activity of each disk on each node that contributes to the composite chart.
+
+Another very pertinent example is composite charts over contexts related to cgroups (VMs and containers). You have the
+means to change the default group by or apply filtering to get a better view into what data you are trying to analyze.
+For example, if you change the group by to _instance_ you get a view with the data of all the instances (cgroups) that
+contribute to that chart. Then you can use further filtering tools to focus on the data that is important to you and
+even save the result to your own dashboards.
+
+![image](https://user-images.githubusercontent.com/82235632/201902017-04b76701-0ff9-4498-aa9b-6d507b567bea.png)
+
+### Aggregate functions over data sources
+
+Each chart uses an opinionated-but-valuable default aggregate function over the data sources. For example,
+the `system.cpu` chart shows the
+average for each dimension from every contributing chart, while the `net.net` chart shows the sum for each dimension
+from every contributing chart, which can also come from multiple networking interfaces.
+
+The following aggregate functions are available for each selected dimension:
+
+- **Average**: Displays the average value from contributing nodes. If a composite chart has 5 nodes with the following
+ values for the `out` dimension—`-2.1`, `-5.5`, `-10.2`, `-15`, `-0.1`—the composite chart displays a
+  value of `-6.58`.
+- **Sum**: Displays the sum of contributed values. Using the same nodes, dimension, and values as above, the composite
+ chart displays a metric value of `-32.9`.
+- **Min**: Displays a minimum value. For dimensions with positive values, the min is the value closest to zero. For
+ charts with negative values, the min is the value with the largest magnitude.
+- **Max**: Displays a maximum value. For dimensions with positive values, the max is the value with the largest
+  magnitude. For charts with negative values, the max is the value closest to zero.
+
+### Dimensions
+
+Select which dimensions to display on the composite chart. You can choose **All dimensions**, a single dimension, or any
+number of dimensions available on that context.
+
+### Instances
+
+Click on **X Instances** to display a dropdown of instances and nodes contributing to that composite chart. Each line
+in the dropdown displays an instance name and the associated node's hostname.
+
+### Nodes
+
+Click on **X Nodes** to display a dropdown of nodes contributing to that composite chart. Each line displays a hostname
+to help you identify which nodes contribute to a chart. You can also use this component to filter nodes directly on the
+chart.
+
+If one or more nodes can't contribute to a given chart, the definition bar shows a warning symbol plus the number of
+affected nodes, then lists them in the dropdown along with the associated error. Nodes might return errors because of
+networking issues, a stopped `netdata` service, or because that node does not have any metrics for that context.
+
+### Aggregate functions over time
+
+When the granularity of the data collected is higher than the plotted points on the chart, an aggregation function over
+time is applied. By default the aggregation applied is _average_, but you can choose different options from the
+following:
+
+* Min
+* Max
+* Average
+* Sum
+* Incremental sum (Delta)
+* Standard deviation
+* Median
+* Single exponential smoothing
+* Double exponential smoothing
+* Coefficient variation
+* Trimmed Median `*`
+* Trimmed Mean `*`
+* Percentile `**`
+
+:::info
+
+- `*` For **Trimmed Median and Mean** you can choose the percentage of data that you want to focus on: 1%, 2%, 3%, 5%,
+  10%, 15%, 20% and 25%.
+- `**` For **Percentile** you can specify the percentile you want to focus on: 25th, 50th, 75th, 80th, 90th, 95th, 97th,
+ 98th and 99th.
+
+:::
+
+For more details on each, you can refer to our Agent's HTTP API details
+on [Data Queries - Data Grouping](/docs/agent/web/api/queries#data-grouping).
+
+### Reset to defaults
+
+Click on the 3-dot icon (**⋮**) on any chart, then **Reset to Defaults**, to reset the definition bar to its initial
+state.
+
+## Jump to single-node dashboards
+
+Click on **X Charts**/**X Nodes** to display one of the two dropdowns that list the charts and nodes contributing to a
+given composite chart. For example, the nodes dropdown.
+
+![The nodes dropdown in a composite
+chart](https://user-images.githubusercontent.com/1153921/99305049-7c019b80-2810-11eb-942a-8ebfcf236b7f.png)
+
+To jump to a single-node dashboard, click on the link icon next to the
+node you're interested in.
+
+The single-node dashboard opens in a new tab. From there, you can continue to troubleshoot or run [Metric
+Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md) for faster root
+cause analysis.
+
+## Add composite charts to a dashboard
+
+Click on the 3-dot icon (**⋮**) on any chart, then click on **Add to Dashboard**. Click the **+** button for any
+dashboard you'd like to add this composite chart to, or create a new dashboard and initiate it with your chosen chart by
+entering the name and clicking **New Dashboard**.
+
+## Interacting with composite charts: pan, zoom, and resize
+
+You can interact with composite charts as you would with other Netdata charts. You can use the controls beneath each
+chart to pan, zoom, or resize the chart, or use various combinations of the keyboard and mouse. See
+the [chart interaction doc](https://github.com/netdata/netdata/blob/master/docs/dashboard/interact-charts.mdx) for
+details.
+
+## Menu
+
+The Overview uses a similar menu to local Agent dashboards and single-node dashboards in Netdata Cloud, with sections
+and sub-menus aggregated from every contributing node. For example, even if only two nodes actively collect from and
+monitor an Apache web server, the **Apache** section still appears and displays composite charts from those two nodes.
+
+![A menu in the Overview
+screen](https://user-images.githubusercontent.com/1153921/95785094-fa0ad980-0c89-11eb-8328-2ff11ac630b4.png)
+
+One difference between the Overview's menu and those found in single-node dashboards or local Agent dashboards is that
+the Overview condenses multiple services, families, or instances into single sections, sub-menus, and associated charts.
+
+For services, let's say you have two concurrent jobs with the [web_log
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/README.md), one for Apache and another for
+Nginx. A single-node or
+local dashboard shows two sections, **web_log apache** and **web_log nginx**, whereas the Overview condenses these into a
+single **web_log** section containing composite charts from both jobs.
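+
+As a rough illustration, such a setup could come from two jobs defined in the `web_log` collector's configuration
+(typically `go.d/web_log.conf`); the log paths below are hypothetical and depend on your distribution:
+
+```yml
+# go.d/web_log.conf -- illustrative sketch of two concurrent web_log jobs
+jobs:
+  - name: apache
+    path: /var/log/apache2/access.log   # hypothetical Apache access log path
+  - name: nginx
+    path: /var/log/nginx/access.log     # hypothetical NGINX access log path
+```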
+
+The Overview also condenses multiple families or multiple instances into a single **all** sub-menu and associated
+charts. For example, if Node A has 5 disks, and Node B has 3, each disk contributes to a single `disk.io` composite
+chart. The utility bar should show that there are 8 charts from 2 nodes contributing to that chart.
+
+This action applies to disks, network devices, and other metric types that involve multiple instances of a piece of
+hardware or software. The Overview currently does not display metrics from filesystems. Read more about [families and
+instances](https://github.com/netdata/netdata/blob/master/docs/dashboard/dimensions-contexts-families.mdx).
+
+## Persistence of composite chart settings
+
+When you change a composite chart via its definition bar, Netdata Cloud persists these settings in a query string
+attached to the URL in your browser. You can "save" these settings by bookmarking this particular URL, or share it with
+colleagues by having them copy-paste it into their browser.
+
+## What's next?
+
+For another way to view an infrastructure from a high level, see
+the [Nodes view](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/nodes.md).
+
+If you need a refresher on how Netdata's charts work, see our doc
+on [interacting with charts](https://github.com/netdata/netdata/blob/master/docs/dashboard/interact-charts.mdx).
+
+Or, get more granular with configuring how you monitor your infrastructure
+by [building new dashboards](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/dashboards.md).
diff --git a/docs/cloud/war-rooms.md b/docs/cloud/war-rooms.md
new file mode 100644
index 000000000..99f9e3680
--- /dev/null
+++ b/docs/cloud/war-rooms.md
@@ -0,0 +1,162 @@
+---
+title: "War Rooms"
+description: >-
+ "Netdata Cloud uses War Rooms to group related nodes and create insightful compositedashboards based on
+ their aggregate health and performance."
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md"
+sidebar_label: "War Rooms"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Operations"
+---
+
+War Rooms organize your connected nodes and provide infrastructure-wide dashboards using real-time metrics and
+visualizations.
+
+Once you add nodes to a Space, all of your nodes will be visible in the _All nodes_ War Room. This is a special War Room
+which gives you an overview of all of your nodes in this particular space. Then you can create functional separations of
+your nodes into more War Rooms. Every War Room has its own dashboards, navigation, indicators, and management tools.
+
+![An example War Room](/img/cloud/main-page.png)
+
+## Navigation
+
+### Switching between views - static tabs
+
+Every War Room provides multiple views. Each view focuses on a particular area/subject of the nodes which you monitor in
+this War Room. Let's explore the views you have available:
+
+- The default view for any War Room is
+  the [Home tab](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md#home), which gives you
+  an overview of this Space. Here you can see the number of nodes claimed, data retention statistics, users
+  participating, alerts, and more.
+
+- The second and most important view is
+ the [Overview tab](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md#overview) which
+ uses composite
+ charts to display real-time metrics from every available node in a given War Room.
+
+- The [Nodes tab](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/nodes.md) gives you the ability to
+  see the status (offline or online), host details, alarm status, and also a short overview of some key metrics from all
+  your nodes at a glance.
+
+- The [Kubernetes tab](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/kubernetes.md) is a logical
+  grouping of charts regarding your Kubernetes clusters.
+  It contains a subset of the charts available in the _Overview tab_.
+
+- The [Dashboards tab](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/dashboards.md)
+  gives you the ability to have tailor-made views of
+  specific/targeted interfaces for your infrastructure using any number of charts from any number of nodes.
+
+- The **Alerts tab** provides you with an overview of all the active alerts for the nodes in this War Room; you can
+  also see all the alerts that are configured to be triggered at any given moment.
+
+- The **Anomalies tab** is dedicated to
+ the [Anomaly Advisor](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/anomaly-advisor.mdx) tool
+
+### Non-static tabs
+
+If you open
+a [new dashboard](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/dashboards.md),
+jump to a single-node dashboard, or navigate to a dedicated alert page, it will open in a new War Room tab.
+
+Tabs can be rearranged with drag-and-drop or closed with the **X** button. Open tabs persist between sessions, so you
+can always come right back to your preferred setup.
+
+### Play, pause, force play, and timeframe selector
+
+A War Room has three different states: playing, paused, and force playing. The default playing state refreshes charts
+every second as long as the browser tab is in
+focus. [Interacting with a chart](https://github.com/netdata/netdata/blob/master/docs/dashboard/interact-charts.mdx)
+pauses
+the War Room. Once the tab loses focus, charts pause automatically.
+
+The top navigation bar features a play/pause button to quickly change the state, and a dropdown to select **Force
+Play**, which keeps charts refreshing, potentially at the expense of system performance.
+
+Next to the play/pause button is the timeframe selector, which helps you select a precise window of metrics data to
+visualize. By default, all visualizations in Netdata Cloud show the last 15 minutes of metrics data.
+
+Use the **Quick Selector** to visualize metrics from predefined timeframes, or use the input field below to enter a
+number and an appropriate unit of time. The calendar allows you to select multiple days of metrics data.
+
+Click **Apply** to re-render all visualizations with new metrics data streamed to your browser from each distributed
+node. Click **Clear** to remove any changes and apply the default 15-minute timeframe.
+
+The fields beneath the calendar display the beginning and ending timestamps of your selected timeframe.
+
+### Node filter
+
+The node filter allows you to quickly filter the nodes visualized in a War Room's views. It appears on all views, but
+not on single-node dashboards.
+
+![The node filter](https://user-images.githubusercontent.com/12612986/172674440-df224058-2b2c-41da-bb45-f4eb82e342e5.png)
+
+## War Room organization
+
+We recommend a few strategies for organizing your War Rooms.
+
+**Service, purpose, location, etc.**: You can group War Rooms by a service (think Nginx, MySQL, Pulsar, and so on),
+their purpose (webserver, database, application), their physical location, whether they're baremetal or a Docker
+container, the PaaS/cloud provider it runs on, and much more. This allows you to see entire slices of your
+infrastructure by moving from one War Room to another.
+
+**End-to-end apps/services**: If you have a user-facing SaaS product, or an internal service that said product relies
+on, you may want to monitor that entire stack in a single War Room. This might include Kubernetes clusters, Docker
+containers, proxies, databases, web servers, brokers, and more. End-to-end War Rooms are valuable tools for ensuring the
+health and performance of your organization's essential services.
+
+**Incident response**: You can also create new War Rooms as one of the first steps in your incident response process.
+For example, you have a user-facing web app that relies on Apache Pulsar for a message queue, and one of your nodes
+using the [Pulsar collector](https://github.com/netdata/go.d.plugin/blob/master/modules/pulsar/README.md) begins
+reporting a suspiciously low messages rate. You can create a War Room called `$year-$month-$day-pulsar-rate`, add all
+your Pulsar nodes in addition to nodes they connect to, and begin diagnosing the root cause in a War Room optimized for
+getting to resolution as fast as possible.
+
+## Add War Rooms
+
+To add new War Rooms to any Space, click on the green plus icon **+** next to the **War Rooms** heading on the left
+(space's) sidebar.
+
+In the panel, give the War Room a name and description, and choose whether it's public or private. Anyone in your Space
+can join public War Rooms, but can only join private War Rooms with an invitation.
+
+## Manage War Rooms
+
+All the users and nodes involved in a particular Space can potentially be part of a War Room.
+
+Any user can change simple settings of a War Room, like the name or the users participating in it. Click on the gear
+icon next to the War Room's name at the top of the page to do that. A sidebar will open with options for this War Room:
+
+1. To _change a War Room's name, description, or public/private status_, click on **War Room** tab of the sidebar.
+
+2. To _add an existing node_ to a War Room or _connect a new node*_, click on the **Nodes** tab of the sidebar. Choose
+   any connected node you want to add to this War Room by clicking on the checkbox next to its hostname, then click
+   **+ Add** at the top of the panel.
+
+3. To _add existing users to a War Room_, click on **Add Users**. See
+ our [invite doc](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/invite-your-team.md)
+ for details on inviting new users to your Space in Netdata Cloud.
+
+:::note
+\* This action requires admin rights for this space
+:::
+
+### More actions
+
+To _view or remove nodes_ in a War Room, click on **Nodes view**. To remove a node from the current War Room, click on
+the **🗑** icon.
+
+:::info
+Removing a node from a War Room does not remove it from your Space.
+:::
+
+## What's next?
+
+Once you've figured out an organizational structure that works for your team, learn more about how you can use Netdata
+Cloud to monitor distributed nodes
+using [real-time composite charts](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md).
diff --git a/docs/collect/application-metrics.md b/docs/collect/application-metrics.md
index c9bc4e2c8..454ed95ad 100644
--- a/docs/collect/application-metrics.md
+++ b/docs/collect/application-metrics.md
@@ -2,7 +2,10 @@
title: "Collect application metrics with Netdata"
sidebar_label: "Application metrics"
description: "Monitor and troubleshoot every application on your infrastructure with per-second metrics, zero configuration, and meaningful charts."
-custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/collect/application-metrics.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/docs/collect/application-metrics.md"
+learn_status: "Published"
+learn_topic_type: "Concepts"
+learn_rel_path: "Concepts"
-->
# Collect application metrics with Netdata
@@ -12,7 +15,7 @@ web servers, databases, message brokers, email servers, search platforms, and mu
pre-installed with every Netdata Agent and usually require zero configuration. Netdata also collects and visualizes
resource utilization per application on Linux systems using `apps.plugin`.
-[**apps.plugin**](/collectors/apps.plugin/README.md) looks at the Linux process tree every second, much like `top` or
+[**apps.plugin**](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md) looks at the Linux process tree every second, much like `top` or
`ps fax`, and collects resource utilization information on every running process. By reading the process tree, Netdata
shows CPU, disk, networking, processes, and eBPF for every application or Linux user. Unlike `top` or `ps fax`, Netdata
adds a layer of meaningful visualization on top of the process tree metrics, such as grouping applications into useful
@@ -21,43 +24,43 @@ charts under **Users**, and per-user group charts under **User Groups**.
Our most popular application collectors:
-- [Prometheus endpoints](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus): Gathers
+- [Prometheus endpoints](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/README.md): Gathers
metrics from one or more Prometheus endpoints that use the OpenMetrics exposition format. Auto-detects more than 600
endpoints.
-- [Web server logs (Apache, NGINX)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog/):
+- [Web server logs (Apache, NGINX)](https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/README.md):
Tail access logs and provide very detailed web server performance statistics. This module is able to parse 200k+
rows in less than half a second.
-- [MySQL](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/mysql/): Collect database global,
+- [MySQL](https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/README.md): Collect database global,
replication, and per-user statistics.
-- [Redis](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/redis): Monitor database status by
+- [Redis](https://github.com/netdata/go.d.plugin/blob/master/modules/redis/README.md): Monitor database status by
reading the server's response to the `INFO` command.
-- [Apache](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/apache/): Collect Apache web server
+- [Apache](https://github.com/netdata/go.d.plugin/blob/master/modules/apache/README.md): Collect Apache web server
performance metrics via the `server-status?auto` endpoint.
-- [Nginx](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx/): Monitor web server status
+- [Nginx](https://github.com/netdata/go.d.plugin/blob/master/modules/nginx/README.md): Monitor web server status
information by gathering metrics via `ngx_http_stub_status_module`.
-- [Postgres](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/postgres): Collect database health
+- [Postgres](https://github.com/netdata/go.d.plugin/blob/master/modules/postgres/README.md): Collect database health
and performance metrics.
-- [ElasticSearch](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/elasticsearch): Collect search
+- [ElasticSearch](https://github.com/netdata/go.d.plugin/blob/master/modules/elasticsearch/README.md): Collect search
engine performance and health statistics. Optionally collects per-index metrics.
-- [PHP-FPM](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/phpfpm/): Collect application summary
+- [PHP-FPM](https://github.com/netdata/go.d.plugin/blob/master/modules/phpfpm/README.md): Collect application summary
and processes health metrics by scraping the status page (`/status?full`).
-Our [supported collectors list](/collectors/COLLECTORS.md#service-and-application-collectors) shows all Netdata's
+Our [supported collectors list](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md#service-and-application-collectors) shows all Netdata's
application metrics collectors, including those for containers/k8s clusters.
## Collect metrics from applications running on Windows
Netdata is fully capable of collecting and visualizing metrics from applications running on Windows systems. The only
-caveat is that you must [install Netdata](/docs/get-started.mdx) on a separate system or a compatible VM because there
+caveat is that you must [install Netdata](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx) on a separate system or a compatible VM because there
is no native Windows version of the Netdata Agent.
Once you have Netdata running on that separate system, you can follow the [enable and configure
-doc](/docs/collect/enable-configure.md) to tell the collector to look for exposed metrics on the Windows system's IP
+doc](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md) to tell the collector to look for exposed metrics on the Windows system's IP
address or hostname, plus the applicable port.
For example, if you have a MySQL database with a root password of `my-secret-pw` running on a Windows system with the
IP address 203.0.113.0, you can configure the [MySQL
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/mysql) to look at `203.0.113.0:3306`:
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/README.md) to look at `203.0.113.0:3306`:
```yml
jobs:
@@ -66,16 +69,16 @@ jobs:
```
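+
+For reference, a minimal sketch of a complete job definition in `go.d/mysql.conf` might look like the following. This
+assumes the Go MySQL module's DSN syntax; the job name is illustrative, and the address and password come from the
+example above.
+
+```yml
+jobs:
+  - name: windows_mysql                              # hypothetical job name
+    dsn: root:my-secret-pw@tcp(203.0.113.0:3306)/    # credentials and address from the example above
+```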
This same logic applies to any application in our [supported collectors
-list](/collectors/COLLECTORS.md#service-and-application-collectors) that can run on Windows.
+list](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md#service-and-application-collectors) that can run on Windows.
## What's next?
-If you haven't yet seen the [supported collectors list](/collectors/COLLECTORS.md) give it a once-over for any
+If you haven't yet seen the [supported collectors list](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md), give it a once-over for any
additional applications you may want to monitor using Netdata's native collectors, or the [generic Prometheus
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus).
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/README.md).
Collecting all the available metrics on your nodes, and across your entire infrastructure, is just one piece of the
puzzle. Next, learn more about Netdata's famous real-time visualizations by [seeing an overview of your
-infrastructure](/docs/visualize/overview-infrastructure.md) using Netdata Cloud.
+infrastructure](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md) using Netdata Cloud.
diff --git a/docs/collect/container-metrics.md b/docs/collect/container-metrics.md
index 5d145362e..b6b6a432c 100644
--- a/docs/collect/container-metrics.md
+++ b/docs/collect/container-metrics.md
@@ -2,7 +2,10 @@
title: "Collect container metrics with Netdata"
sidebar_label: "Container metrics"
description: "Use Netdata to collect per-second utilization and application-level metrics from Linux/Docker containers and Kubernetes clusters."
-custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/collect/container-metrics.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/docs/collect/container-metrics.md"
+learn_status: "Published"
+learn_topic_type: "Concepts"
+learn_rel_path: "Concepts"
-->
# Collect container metrics with Netdata
@@ -10,35 +13,35 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/collect/con
Thanks to close integration with Linux cgroups and the virtual files it maintains under `/sys/fs/cgroup`, Netdata can
monitor the health, status, and resource utilization of many different types of Linux containers.
-Netdata uses [cgroups.plugin](/collectors/cgroups.plugin/README.md) to poll `/sys/fs/cgroup` and convert the raw data
+Netdata uses [cgroups.plugin](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md) to poll `/sys/fs/cgroup` and convert the raw data
into human-readable metrics and meaningful visualizations. Through cgroups, Netdata is compatible with **all Linux
containers**, such as Docker, LXC, LXD, Libvirt, systemd-nspawn, and more. Read more about [Docker-specific
monitoring](#collect-docker-metrics) below.
Netdata also has robust **Kubernetes monitoring** support thanks to a
-[Helmchart](/packaging/installer/methods/kubernetes.md) to automate deployment, collectors for k8s agent services, and
+[Helmchart](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kubernetes.md) to automate deployment, collectors for k8s agent services, and
robust [service discovery](https://github.com/netdata/agent-service-discovery/#service-discovery) to monitor the
services running inside of pods in your k8s cluster. Read more about [Kubernetes
monitoring](#collect-kubernetes-metrics) below.
A handful of additional collectors gather metrics from container-related services, such as
-[dockerd](/collectors/python.d.plugin/dockerd/README.md) or [Docker
-Engine](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/docker_engine/). You can find all
+[dockerd](https://github.com/netdata/go.d.plugin/blob/master/modules/docker/README.md) or [Docker
+Engine](https://github.com/netdata/go.d.plugin/blob/master/modules/docker_engine/README.md). You can find all
container collectors in our supported collectors list under the
-[containers/VMs](/collectors/COLLECTORS.md#containers-and-vms) and
-[Kubernetes](/collectors/COLLECTORS.md#containers-and-vms) headings.
+[containers/VMs](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md#containers-and-vms) and
+[Kubernetes](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md#containers-and-vms) headings.
## Collect Docker metrics
Netdata has robust Docker monitoring thanks to the aforementioned
-[cgroups.plugin](/collectors/cgroups.plugin/README.md). By polling cgroups every second, Netdata can produce meaningful
+[cgroups.plugin](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md). By polling cgroups every second, Netdata can produce meaningful
visualizations about the CPU, memory, disk, and network utilization of all running containers on the host system with
zero configuration.
Netdata also collects metrics from applications running inside of Docker containers. For example, if you create a MySQL
database container using `docker run --name some-mysql -e MYSQL_ROOT_PASSWORD=my-secret-pw -d mysql:tag`, it exposes
metrics on port 3306. You can configure the [MySQL
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/mysql) to look at `127.0.0.0:3306` for
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/README.md) to look at `127.0.0.0:3306` for
MySQL metrics:
```yml
@@ -48,18 +51,18 @@ jobs:
```
Netdata then collects metrics from the container itself, but also dozens of [MySQL-specific
-metrics](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/mysql#charts) as well.
+metrics](https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/README.md#charts).
### Collect metrics from applications running in Docker containers
You could use this technique to monitor an entire infrastructure of Docker containers. The same [enable and
-configure](/docs/collect/enable-configure.md) procedures apply whether an application runs on the host system or inside
+configure](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md) procedures apply whether an application runs on the host system or inside
a container. You may need to configure the target endpoint if it's not the application's default.
-Netdata can even [run in a Docker container](/packaging/docker/README.md) itself, and then collect metrics about the
+Netdata can even [run in a Docker container](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md) itself, and then collect metrics about the
host system, its own container with cgroups, and any applications you want to monitor.
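+
+As a quick illustration, a minimal way to start such a container might look like this. This is only a sketch; the
+[Docker README](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md) documents the full set of
+recommended volume mounts and capabilities.
+
+```bash
+# minimal sketch: run the official image and expose the local dashboard on port 19999
+docker run -d --name=netdata -p 19999:19999 netdata/netdata
+```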
-See our [application metrics doc](/docs/collect/application-metrics.md) for details about Netdata's application metrics
+See our [application metrics doc](https://github.com/netdata/netdata/blob/master/docs/collect/application-metrics.md) for details about Netdata's application metrics
collection capabilities.
## Collect Kubernetes metrics
@@ -74,26 +77,26 @@ your k8s infrastructure.
configuration files for [compatible
applications](https://github.com/netdata/helmchart#service-discovery-and-supported-services) and any endpoints
covered by our [generic Prometheus
- collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus). With these
+ collector](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/README.md). With these
configuration files, Netdata collects metrics from any compatible applications as they run _inside_ of a pod.
Service discovery happens without manual intervention as pods are created, destroyed, or moved between nodes.
-- A [Kubelet collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubelet), which runs
+- A [Kubelet collector](https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubelet/README.md), which runs
on each node in a k8s cluster to monitor the number of pods/containers, the volume of operations on each container,
and more.
-- A [kube-proxy collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubeproxy), which
+- A [kube-proxy collector](https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubeproxy/README.md), which
also runs on each node and monitors latency and the volume of HTTP requests to the proxy.
-- A [cgroups collector](/collectors/cgroups.plugin/README.md), which collects CPU, memory, and bandwidth metrics for
+- A [cgroups collector](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md), which collects CPU, memory, and bandwidth metrics for
each container running on your k8s cluster.
For a holistic view of Netdata's Kubernetes monitoring capabilities, see our guide: [_Monitor a Kubernetes (k8s) cluster
-with Netdata_](https://learn.netdata.cloud/guides/monitor/kubernetes-k8s-netdata).
+with Netdata_](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/kubernetes-k8s-netdata.md).
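+
+If you want to try it, deployment is typically a couple of Helm commands. The chart name and repository URL below are
+taken from the Helm chart project and may change, so treat this as a sketch rather than a canonical install guide.
+
+```bash
+# add the Netdata Helm repository and install the chart into the current cluster
+helm repo add netdata https://netdata.github.io/helmchart/
+helm install netdata netdata/netdata
+```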
## What's next?
Netdata is capable of collecting metrics from hundreds of applications, such as web servers, databases, messaging
-brokers, and more. See more in the [application metrics doc](/docs/collect/application-metrics.md).
+brokers, and more. See more in the [application metrics doc](https://github.com/netdata/netdata/blob/master/docs/collect/application-metrics.md).
If you already have all the information you need about collecting metrics, move into Netdata's meaningful visualizations
-with [seeing an overview of your infrastructure](/docs/visualize/overview-infrastructure.md) using Netdata Cloud.
+with [seeing an overview of your infrastructure](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md) using Netdata Cloud.
diff --git a/docs/collect/enable-configure.md b/docs/collect/enable-configure.md
index 19e680c21..cd8960ac1 100644
--- a/docs/collect/enable-configure.md
+++ b/docs/collect/enable-configure.md
@@ -1,14 +1,18 @@
# Enable or configure a collector
When Netdata starts up, each collector searches for exposed metrics on the default endpoint established by that service
or application's standard installation procedure. For example, the [Nginx
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx) searches at
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/nginx/README.md) searches at
`http://127.0.0.1/stub_status` for exposed metrics in the correct format. If an Nginx web server is running and exposes
metrics on that endpoint, the collector begins gathering them.
@@ -20,7 +24,7 @@ enable or configure a collector to gather all available metrics from your system
You can enable/disable collectors individually, or enable/disable entire orchestrators, using their configuration files.
For example, you can change the behavior of the Go orchestrator, or any of its collectors, by editing `go.d.conf`.
-Use `edit-config` from your [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory) to open
+Use `edit-config` from your [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory) to open
the orchestrator primary configuration file:
```bash
@@ -33,14 +37,14 @@ enable/disable it with `yes` and `no` settings. Uncomment any line you change to
start.
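+
+For instance, a `go.d.conf` fragment that toggles individual modules might look like this sketch (it assumes the
+file's `modules:` map; the module names are only examples):
+
+```yml
+modules:
+  nginx: yes   # collector enabled
+  mysql: no    # collector disabled
+```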
After you make your changes, restart the Agent with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system.
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
## Configure a collector
-First, [find the collector](/collectors/COLLECTORS.md) you want to edit and open its documentation. Some software has
+First, [find the collector](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md) you want to edit and open its documentation. Some software has
collectors written in multiple languages. In these cases, you should always pick the collector written in Go.
-Use `edit-config` from your [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory) to open a
+Use `edit-config` from your [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory) to open a
collector's configuration file. For example, edit the Nginx collector with the following:
```bash
@@ -53,16 +57,16 @@ configure that collector. Uncomment any line you change to ensure the collector'
read it on start.
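+
+After opening the file, a job usually needs little more than the endpoint to scrape. A minimal sketch for
+`go.d/nginx.conf`, using the default `stub_status` URL mentioned above (the job name is illustrative):
+
+```yml
+jobs:
+  - name: local
+    url: http://127.0.0.1/stub_status
+```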
After you make your changes, restart the Agent with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system.
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
## What's next?
-Read high-level overviews on how Netdata collects [system metrics](/docs/collect/system-metrics.md), [container
-metrics](/docs/collect/container-metrics.md), and [application metrics](/docs/collect/application-metrics.md).
+Read high-level overviews on how Netdata collects [system metrics](https://github.com/netdata/netdata/blob/master/docs/collect/system-metrics.md), [container
+metrics](https://github.com/netdata/netdata/blob/master/docs/collect/container-metrics.md), and [application metrics](https://github.com/netdata/netdata/blob/master/docs/collect/application-metrics.md).
If you're already collecting all metrics from your systems, containers, and applications, it's time to move into
-Netdata's visualization features. [See an overview of your infrastructure](/docs/visualize/overview-infrastructure.md)
+Netdata's visualization features. [See an overview of your infrastructure](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md)
using Netdata Cloud, or learn how to [interact with dashboards and
-charts](/docs/visualize/interact-dashboards-charts.md).
+charts](https://github.com/netdata/netdata/blob/master/docs/visualize/interact-dashboards-charts.md).
diff --git a/docs/collect/how-collectors-work.md b/docs/collect/how-collectors-work.md
index 07e34858f..382d4ccc6 100644
--- a/docs/collect/how-collectors-work.md
+++ b/docs/collect/how-collectors-work.md
@@ -1,7 +1,11 @@
# How Netdata's metrics collectors work
@@ -10,7 +14,7 @@ When Netdata starts, and with zero configuration, it auto-detects thousands of d
per-second metrics.
Netdata can immediately collect metrics from these endpoints thanks to 300+ **collectors**, which all come pre-installed
-when you [install Netdata](/docs/get-started.mdx).
+when you [install Netdata](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx).
Every collector has two primary jobs:
@@ -19,15 +23,15 @@ Every collector has two primary jobs:
If the collector finds compatible metrics exposed on the configured endpoint, it begins a per-second collection job. The
Netdata Agent gathers these metrics, sends them to the [database engine for
-storage](/docs/store/change-metrics-storage.md), and immediately [visualizes them
-meaningfully](/docs/visualize/interact-dashboards-charts.md) on dashboards.
+storage](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md), and immediately [visualizes them
+meaningfully](https://github.com/netdata/netdata/blob/master/docs/visualize/interact-dashboards-charts.md) on dashboards.
Each collector comes with a pre-defined configuration that matches the default setup for that application. This endpoint
can be a URL and port, a socket, a file, a web page, and more.
-For example, the [Nginx collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx) searches
+For example, the [Nginx collector](https://github.com/netdata/go.d.plugin/blob/master/modules/nginx/README.md) searches
at `http://127.0.0.1/stub_status`, which is the default endpoint for exposing Nginx metrics. The [web log collector for
-Nginx or Apache](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog) searches at
+Nginx or Apache](https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/README.md) searches at
`/var/log/nginx/access.log` and `/var/log/apache2/access.log`, respectively, both of which are standard locations for
access log files on Linux systems.
@@ -35,15 +39,15 @@ The endpoint is user-configurable, as are many other specifics of what a given c
## What can Netdata collect?
-To quickly find your answer, see our [list of supported collectors](/collectors/COLLECTORS.md).
+To quickly find your answer, see our [list of supported collectors](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md).
Generally, Netdata's collectors can be grouped into three types:
-- [Systems](/docs/collect/system-metrics.md): Monitor CPU, memory, disk, networking, systemd, eBPF, and much more.
+- [Systems](https://github.com/netdata/netdata/blob/master/docs/collect/system-metrics.md): Monitor CPU, memory, disk, networking, systemd, eBPF, and much more.
Every metric exposed by `/proc`, `/sys`, and other Linux kernel sources.
-- [Containers](/docs/collect/container-metrics.md): Gather metrics from container agents, like `dockerd` or `kubectl`,
+- [Containers](https://github.com/netdata/netdata/blob/master/docs/collect/container-metrics.md): Gather metrics from container agents, like `dockerd` or `kubectl`,
along with the resource usage of containers and the applications they run.
-- [Applications](/docs/collect/application-metrics.md): Collect per-second metrics from web servers, databases, logs,
+- [Applications](https://github.com/netdata/netdata/blob/master/docs/collect/application-metrics.md): Collect per-second metrics from web servers, databases, logs,
message brokers, APM tools, email servers, and much more.
## Collector architecture and terminology
@@ -56,11 +60,11 @@ terms related to collecting metrics.
- **Modules** are a type of collector.
- **Orchestrators** are external plugins that run and manage one or more modules. They run as independent processes.
The Go orchestrator is in active development.
- - [go.d.plugin](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/): An orchestrator for data
+ - [go.d.plugin](https://github.com/netdata/go.d.plugin/blob/master/README.md): An orchestrator for data
collection modules written in `go`.
- - [python.d.plugin](/collectors/python.d.plugin/README.md): An orchestrator for data collection modules written in
+ - [python.d.plugin](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md): An orchestrator for data collection modules written in
`python` v2/v3.
- - [charts.d.plugin](/collectors/charts.d.plugin/README.md): An orchestrator for data collection modules written in
+ - [charts.d.plugin](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/README.md): An orchestrator for data collection modules written in
`bash` v4+.
- **External plugins** gather metrics from external processes, such as a webserver or database, and run as independent
processes that communicate with the Netdata daemon via pipes.
@@ -69,10 +73,10 @@ terms related to collecting metrics.
## What's next?
-[Enable or configure a collector](/docs/collect/enable-configure.md) if the default settings are not compatible with
+[Enable or configure a collector](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md) if the default settings are not compatible with
your infrastructure.
-See our [collectors reference](/collectors/REFERENCE.md) for detailed information on Netdata's collector architecture,
+See our [collectors reference](https://github.com/netdata/netdata/blob/master/collectors/REFERENCE.md) for detailed information on Netdata's collector architecture,
troubleshooting a collector, developing a custom collector, and more.
diff --git a/docs/collect/system-metrics.md b/docs/collect/system-metrics.md
index ecd8dad70..442b13823 100644
--- a/docs/collect/system-metrics.md
+++ b/docs/collect/system-metrics.md
@@ -2,59 +2,62 @@
title: "Collect system metrics with Netdata"
sidebar_label: "System metrics"
description: "Netdata collects thousands of metrics from physical and virtual systems, IoT/edge devices, and containers with zero configuration."
-custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/collect/system-metrics.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/docs/collect/system-metrics.md"
+learn_status: "Published"
+learn_topic_type: "Concepts"
+learn_rel_path: "Concepts"
-->
# Collect system metrics with Netdata
Netdata collects thousands of metrics directly from the operating systems of physical and virtual systems, IoT/edge
-devices, and [containers](/docs/collect/container-metrics.md) with zero configuration.
+devices, and [containers](https://github.com/netdata/netdata/blob/master/docs/collect/container-metrics.md) with zero configuration.
To gather system metrics, Netdata uses roughly a dozen plugins, each of which has one or more collectors for very
specific metrics exposed by the host. The system metrics Netdata users interact with most for health monitoring and
performance troubleshooting are collected and visualized by `proc.plugin`, `cgroups.plugin`, and `ebpf.plugin`.
-[**proc.plugin**](/collectors/proc.plugin/README.md) gathers metrics from the `/proc` and `/sys` folders in Linux
+[**proc.plugin**](https://github.com/netdata/netdata/blob/master/collectors/proc.plugin/README.md) gathers metrics from the `/proc` and `/sys` folders in Linux
systems, along with a few other endpoints, and is responsible for the bulk of the system metrics collected and
visualized by Netdata. It collects CPU, memory, disks, load, networking, mount points, and more with zero configuration.
It even allows Netdata to monitor its own resource utilization!
-[**cgroups.plugin**](/collectors/cgroups.plugin/README.md) collects rich metrics about containers and virtual machines
+[**cgroups.plugin**](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md) collects rich metrics about containers and virtual machines
using the virtual files under `/sys/fs/cgroup`. By reading cgroups, Netdata can instantly collect resource utilization
metrics for systemd services, all containers (Docker, LXC, LXD, Libvirt, systemd-nspawn), and more. Learn more in the
-[collecting container metrics](/docs/collect/container-metrics.md) doc.
+[collecting container metrics](https://github.com/netdata/netdata/blob/master/docs/collect/container-metrics.md) doc.
-[**ebpf.plugin**](/collectors/ebpf.plugin/README.md): Netdata's extended Berkeley Packet Filter (eBPF) collector
+[**ebpf.plugin**](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md): Netdata's extended Berkeley Packet Filter (eBPF) collector
monitors Linux kernel-level metrics for file descriptors, virtual filesystem IO, and process management. You can use our
eBPF collector to analyze how and when a process accesses files, when it makes system calls, whether it leaks memory or
creates zombie processes, and more.
While the above plugins and associated collectors are the most important for system metrics, there are many others. You
-can find all system collectors in our [supported collectors list](/collectors/COLLECTORS.md#system-collectors).
+can find all system collectors in our [supported collectors list](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md#system-collectors).
## Collect Windows system metrics
Netdata is also capable of monitoring Windows systems. The [WMI
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/wmi) integrates with
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/wmi/README.md) integrates with
[windows_exporter](https://github.com/prometheus-community/windows_exporter), a small Go-based binary that you can run
on Windows systems. The WMI collector then gathers metrics from an endpoint created by windows_exporter; for more
-details see [the requirements](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/wmi#requirements).
+details, see [the requirements](https://github.com/netdata/go.d.plugin/blob/master/modules/wmi/README.md#requirements).
Next, [configure the WMI
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/wmi#configuration) to point to the URL
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/wmi/README.md#configuration) to point to the URL
and port of your exposed endpoint. Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system. You'll start seeing Windows system metrics, such as CPU
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. You'll start seeing Windows system metrics, such as CPU
utilization, memory, bandwidth per NIC, number of processes, and much more.
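+
+Concretely, the WMI collector configuration usually just needs the exporter's URL. A sketch for `go.d/wmi.conf`,
+assuming windows_exporter's default port of `9182` (the host address and job name are illustrative):
+
+```yml
+jobs:
+  - name: win_server
+    url: http://203.0.113.0:9182/metrics
+```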
For information about collecting metrics from applications _running on Windows systems_, see the [application metrics
-doc](/docs/collect/application-metrics.md#collect-metrics-from-applications-running-on-windows).
+doc](https://github.com/netdata/netdata/blob/master/docs/collect/application-metrics.md#collect-metrics-from-applications-running-on-windows).
## What's next?
-Because there's some overlap between system metrics and [container metrics](/docs/collect/container-metrics.md), you
+Because there's some overlap between system metrics and [container metrics](https://github.com/netdata/netdata/blob/master/docs/collect/container-metrics.md), you
should investigate Netdata's container compatibility if you use them heavily in your infrastructure.
-If you don't use containers, skip ahead to collecting [application metrics](/docs/collect/application-metrics.md) with
+If you don't use containers, skip ahead to collecting [application metrics](https://github.com/netdata/netdata/blob/master/docs/collect/application-metrics.md) with
Netdata.
diff --git a/docs/configure/common-changes.md b/docs/configure/common-changes.md
index 93b12d226..e1dccfceb 100644
--- a/docs/configure/common-changes.md
+++ b/docs/configure/common-changes.md
@@ -1,7 +1,11 @@
# Common configuration changes
@@ -10,19 +14,24 @@ The Netdata Agent requires no configuration upon installation to collect thousan
systems, containers, and applications, but there are hundreds of settings to tweak if you want to exercise more control
over your monitoring platform.
-This document assumes familiarity with using [`edit-config`](/docs/configure/nodes.md) from the Netdata config
+This document assumes familiarity with
+using [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) from the Netdata config
directory.
## Change dashboards and visualizations
-The Netdata Agent's [local dashboard](/web/gui/README.md), accessible at `http://NODE:19999` is highly configurable. If
-you use Netdata Cloud for [infrastructure monitoring](/docs/quickstart/infrastructure.md), you will see many of these
+The Netdata Agent's [local dashboard](https://github.com/netdata/netdata/blob/master/web/gui/README.md), accessible
+at `http://NODE:19999`, is highly configurable. If
+you use Netdata Cloud
+for [infrastructure monitoring](https://github.com/netdata/netdata/blob/master/docs/quickstart/infrastructure.md), you
+will see many of these
changes reflected in those visualizations due to the way Netdata Cloud proxies metric data and metadata to your browser.
### Increase the long-term metrics retention period
-Increase the values for the `page cache size` and `dbengine multihost disk space` settings in the [`[global]`
-section](/daemon/config/README.md#global-section-options) of `netdata.conf`.
+Increase the values for the `page cache size` and `dbengine multihost disk space` settings in
+the [`[global]` section](https://github.com/netdata/netdata/blob/master/daemon/config/README.md#global-section-options)
+of `netdata.conf`.
```conf
[global]
@@ -30,13 +39,17 @@ section](/daemon/config/README.md#global-section-options) of `netdata.conf`.
dbengine multihost disk space = 4096 # 4GiB of disk space for metrics storage
```
-Read our doc on [increasing long-term metrics storage](/docs/store/change-metrics-storage.md) for details, including a
-[calculator](/docs/store/change-metrics-storage.md#calculate-the-system-resources-ram-disk-space-needed-to-store-metrics)
+Read our doc
+on [increasing long-term metrics storage](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md)
+for details, including a
+[calculator](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md#calculate-the-system-resources-ram-disk-space-needed-to-store-metrics)
to help you determine the exact settings for your desired retention period.
### Reduce the data collection frequency
-Change `update every` in the [`[global]` section](/daemon/config/README.md#global-section-options) of `netdata.conf` so
+Change `update every` in
+the [`[global]` section](https://github.com/netdata/netdata/blob/master/daemon/config/README.md#global-section-options)
+of `netdata.conf` so
that it is greater than `1`. An `update every` of `5` means the Netdata Agent enforces a _minimum_ collection frequency
of 5 seconds.
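+
+For example, the relevant fragment of `netdata.conf` would look like this (`5` is just the value used above):
+
+```conf
+[global]
+    update every = 5
+```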
@@ -47,12 +60,15 @@ of 5 seconds.
Every collector and plugin has its own `update every` setting, which you can also change in the `go.d.conf`,
`python.d.conf` or `charts.d.conf` files, or in individual collector configuration files. If the `update
-every` for an individual collector is less than the global, the Netdata Agent uses the global setting. See the [enable
-or configure a collector](/docs/collect/enable-configure.md) doc for details.
+every` for an individual collector is less than the global, the Netdata Agent uses the global setting. See
+the [enable or configure a collector](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md)
+doc for details.
### Disable a collector or plugin
-Turn off entire plugins in the [`[plugins]` section](/daemon/config/README.md#plugins-section-options) of
+Turn off entire plugins in
+the [`[plugins]` section](https://github.com/netdata/netdata/blob/master/daemon/config/README.md#plugins-section-options)
+of
`netdata.conf`.
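+
+For example, disabling the Go orchestrator entirely might look like the following sketch; the plugin names accepted in
+the `[plugins]` section mirror the orchestrator names:
+
+```conf
+[plugins]
+    go.d = no
+```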
To disable specific collectors, open `go.d.conf`, `python.d.conf` or `charts.d.conf` and find the line
@@ -77,17 +93,20 @@ sudo ./edit-config health.d/example-alarm.conf
Or, append your new alarm to a relevant existing file in the `health.d/` directory.
-Read more about [configuring alarms](/docs/monitor/configure-alarms.md) to get started, and see the [health monitoring
-reference](/health/REFERENCE.md) for a full listing of options available in health entities.
+Read more about [configuring alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md) to
+get started, and see
+the [health monitoring reference](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md) for a full listing
+of options available in health entities.
### Configure a specific alarm
Tweak existing alarms by editing files in the `health.d/` directory. For example, edit `health.d/cpu.conf` to change how
the Agent responds to anomalies related to CPU utilization.
-To see which configuration file you need to edit to configure a specific alarm, [view your active
-alarms](/docs/monitor/view-active-alarms.md) in Netdata Cloud or the local Agent dashboard and look for the **source**
-line. For example, it might read `source 4@/usr/lib/netdata/conf.d/health.d/cpu.conf`.
+To see which configuration file you need to edit to configure a specific
+alarm, [view your active alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/view-active-alarms.md) in
+Netdata Cloud or the local Agent dashboard and look for the **source** line. For example, it might
+read `source 4@/usr/lib/netdata/conf.d/health.d/cpu.conf`.
Because the source path contains `health.d/cpu.conf`, run `sudo edit-config health.d/cpu.conf` to configure that alarm.
@@ -106,13 +125,16 @@ template: disk_fill_rate
### Turn off all alarms and notifications
-Set `enabled` to `no` in the [`[health]` section](/daemon/config/README.md#health-section-options) section of
+Set `enabled` to `no` in
+the [`[health]` section](https://github.com/netdata/netdata/blob/master/daemon/config/README.md#health-section-options)
+of
`netdata.conf`.
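+
+The relevant fragment of `netdata.conf` would then read:
+
+```conf
+[health]
+    enabled = no
+```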
### Enable alarm notifications
Open `health_alarm_notify.conf` for editing. First, read the [enabling
-notifications](/docs/monitor/enable-notifications.md#netdata-agent) doc for an example of the process using Slack, then
+notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md#netdata-agent) doc
+for an example of the process using Slack, then
click on the link to your preferred notification method to find documentation for that specific endpoint.
## Improve node security
@@ -120,14 +142,17 @@ click on the link to your preferred notification method to find documentation fo
While the Netdata Agent is both [open and secure by design](https://www.netdata.cloud/blog/netdata-agent-dashboard/), we
recommend every user take some action to administer and secure their nodes.
-Learn more about a few of the following changes in the [node security doc](/docs/configure/secure-nodes.md).
+Learn more about a few of the following changes in
+the [node security doc](https://github.com/netdata/netdata/blob/master/docs/configure/secure-nodes.md).
### Disable the local Agent dashboard (`http://NODE:19999`)
If you use Netdata Cloud to visualize metrics, stream metrics to a parent node, or otherwise don't need the local Agent
dashboard, disabling it reduces the Agent's resource utilization and improves security.
-Change the `mode` setting to `none` in the [`[web]` section](/web/server/README.md#configuration) of `netdata.conf`.
+Change the `mode` setting to `none` in
+the [`[web]` section](https://github.com/netdata/netdata/blob/master/web/server/README.md#configuration)
+of `netdata.conf`.
```conf
[web]
@@ -136,11 +161,12 @@ Change the `mode` setting to `none` in the [`[web]` section](/web/server/README.
### Use access lists to restrict access to specific assets
-Allow access from only specific IP addresses, ranges of IP addresses, or hostnames using [access
-lists](/web/server/README.md#access-lists) and [simple patterns](/libnetdata/simple_pattern/README.md).
+Allow access from only specific IP addresses, ranges of IP addresses, or hostnames
+using [access lists](https://github.com/netdata/netdata/blob/master/web/server/README.md#access-lists)
+and [simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md).
See a quickstart to access lists in the [node security
-doc](/docs/configure/secure-nodes.md#restrict-access-to-the-local-dashboard).
+doc](https://github.com/netdata/netdata/blob/master/docs/configure/secure-nodes.md#restrict-access-to-the-local-dashboard).
### Stop sending anonymous statistics to Google Analytics
@@ -151,7 +177,8 @@ the statistics script.
sudo touch .opt-out-from-anonymous-statistics
```
-Learn more about [why we collect anonymous statistics](/docs/anonymous-statistics.md).
+Learn more
+about [why we collect anonymous statistics](https://github.com/netdata/netdata/blob/master/docs/anonymous-statistics.md).
### Change the IP address/port Netdata listens to
@@ -162,26 +189,30 @@ Change the `default port` setting in the `[web]` section to a port other than `1
default port = 39999
```
-Use the `bind to` setting to the ports other assets, such as the [running `netdata.conf`
-configuration](/docs/configure/nodes.md#see-an-agents-running-configuration), API, or streaming requests listen to.
+Use the `bind to` setting to change the ports that other assets, such as
+the [running `netdata.conf` configuration](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#see-an-agents-running-configuration),
+API, or streaming requests, listen on.
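+
+For example, a sketch that binds the web server to the loopback interface only (the address is illustrative):
+
+```conf
+[web]
+    bind to = 127.0.0.1
+```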
## Reduce resource usage
-Read our [performance optimization guide](/docs/guides/configure/performance.md) for a long list of specific changes
+Read
+our [performance optimization guide](https://github.com/netdata/netdata/blob/master/docs/guides/configure/performance.md)
+for a long list of specific changes
that can reduce the Netdata Agent's CPU/memory footprint and IO requirements.
## Organize nodes with host labels
Beginning with v1.20, Netdata accepts user-defined **host labels**. These labels are sent during streaming, exporting,
and as metadata to Netdata Cloud, and help you organize the metrics coming from complex infrastructure. Host labels are
-defined in the section `[host labels]`.
+defined in the section `[host labels]`.
-For a quick introduction, read the [host label guide](/docs/guides/using-host-labels.md).
+For a quick introduction, read
+the [host label guide](https://github.com/netdata/netdata/blob/master/docs/guides/using-host-labels.md).
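+
+A sketch of what such a section in `netdata.conf` might look like (the label names and values are illustrative):
+
+```conf
+[host labels]
+    type = webserver
+    location = us-east-1
+```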
-The following restrictions apply to host label names:
-
-- Names cannot start with `_`, but it can be present in other parts of the name.
-- Names only accept alphabet letters, numbers, dots, and dashes.
+The following restrictions apply to host label names:
+
+- Names cannot start with `_`, but `_` can appear in other parts of the name.
+- Names may only contain letters, numbers, dots, and dashes.
The policy for values is more flexible, but you cannot use exclamation marks (`!`), whitespaces (` `), single quotes
(`'`), double quotes (`"`), or asterisks (`*`), because they are used to compare label values in health alarms and
@@ -189,26 +220,33 @@ templates.
## What's next?
-If you haven't already, learn how to [secure your nodes](/docs/configure/secure-nodes.md).
+If you haven't already, learn how
+to [secure your nodes](https://github.com/netdata/netdata/blob/master/docs/configure/secure-nodes.md).
-As mentioned at the top, there are plenty of other
+As mentioned at the top, there are plenty of other settings to tweak if you want to exercise more control over your monitoring platform.
You can also take what you've learned about node configuration to tweak the Agent's behavior or enable new features:
-- [Enable new collectors](/docs/collect/enable-configure.md) or tweak their behavior.
-- [Configure existing health alarms](/docs/monitor/configure-alarms.md) or create new ones.
-- [Enable notifications](/docs/monitor/enable-notifications.md) to receive updates about the health of your
+- [Enable new collectors](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md) or tweak
+ their behavior.
+- [Configure existing health alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md) or
+ create new ones.
+- [Enable notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md) to receive
+ updates about the health of your
infrastructure.
-- Change [the long-term metrics retention period](/docs/store/change-metrics-storage.md) using the database engine.
+- Change [the long-term metrics retention period](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md)
+  using the database engine.
### Related reference documentation
-- [Netdata Agent · Daemon](/health/README.md)
-- [Netdata Agent · Daemon configuration](/daemon/config/README.md)
-- [Netdata Agent · Web server](/web/server/README.md)
-- [Netdata Agent · Local Agent dashboard](/web/gui/README.md)
-- [Netdata Agent · Health monitoring](/health/REFERENCE.md)
-- [Netdata Agent · Notifications](/health/notifications/README.md)
-- [Netdata Agent · Simple patterns](/libnetdata/simple_pattern/README.md)
+- [Netdata Agent · Daemon](https://github.com/netdata/netdata/blob/master/daemon/README.md)
+- [Netdata Agent · Daemon configuration](https://github.com/netdata/netdata/blob/master/daemon/config/README.md)
+- [Netdata Agent · Web server](https://github.com/netdata/netdata/blob/master/web/server/README.md)
+- [Netdata Agent · Local Agent dashboard](https://github.com/netdata/netdata/blob/master/web/gui/README.md)
+- [Netdata Agent · Health monitoring](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md)
+- [Netdata Agent · Notifications](https://github.com/netdata/netdata/blob/master/health/notifications/README.md)
+- [Netdata Agent · Simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md)
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fconfigure%2Fcommon-changes&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/configure/nodes.md b/docs/configure/nodes.md
index 841419a72..8f54b1bfb 100644
--- a/docs/configure/nodes.md
+++ b/docs/configure/nodes.md
@@ -1,7 +1,11 @@
# Configure the Netdata Agent
@@ -19,7 +23,7 @@ anomaly, or change in infrastructure affects how their Agents should perform.
## The Netdata config directory
On most Linux systems, using our [recommended one-line
-installation](/docs/get-started.mdx#install-on-linux-with-one-line-installer), the **Netdata config
+installation](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx#install-on-linux-with-one-line-installer), the **Netdata config
directory** is `/etc/netdata/`. The config directory contains several configuration files with the `.conf` extension, a
few directories, and a shell script named `edit-config`.
@@ -37,23 +41,23 @@ these files in your own Netdata config directory, as the next section describes
exist.
- `netdata.conf` is the main configuration file. This is where you'll find most configuration options. Read descriptions
- for each in the [daemon config](/daemon/config/README.md) doc.
+ for each in the [daemon config](https://github.com/netdata/netdata/blob/master/daemon/config/README.md) doc.
- `edit-config` is a shell script used for [editing configuration files](#use-edit-config-to-edit-configuration-files).
- Various configuration files ending in `.conf` for [configuring plugins or
- collectors](/docs/collect/enable-configure.md#enable-a-collector-or-its-orchestrator) behave. Examples: `go.d.conf`,
+  collectors](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md#enable-a-collector-or-its-orchestrator). Examples: `go.d.conf`,
`python.d.conf`, and `ebpf.d.conf`.
- Various directories ending in `.d`, which contain other configuration files, each ending in `.conf`, for [configuring
- specific collectors](/docs/collect/enable-configure.md#configure-a-collector).
+ specific collectors](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md#configure-a-collector).
- `apps_groups.conf` is a configuration file for changing how applications/processes are grouped when viewing the
- **Application** charts from [`apps.plugin`](/collectors/apps.plugin/README.md) or
- [`ebpf.plugin`](/collectors/ebpf.plugin/README.md).
-- `health.d/` is a directory that contains [health configuration files](/docs/monitor/configure-alarms.md).
-- `health_alarm_notify.conf` enables and configures [alarm notifications](/docs/monitor/enable-notifications.md).
-- `statsd.d/` is a directory for configuring Netdata's [statsd collector](/collectors/statsd.plugin/README.md).
-- `stream.conf` configures [parent-child streaming](/streaming/README.md) between separate nodes running the Agent.
+ **Application** charts from [`apps.plugin`](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md) or
+ [`ebpf.plugin`](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md).
+- `health.d/` is a directory that contains [health configuration files](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md).
+- `health_alarm_notify.conf` enables and configures [alarm notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md).
+- `statsd.d/` is a directory for configuring Netdata's [statsd collector](https://github.com/netdata/netdata/blob/master/collectors/statsd.plugin/README.md).
+- `stream.conf` configures [parent-child streaming](https://github.com/netdata/netdata/blob/master/streaming/README.md) between separate nodes running the Agent.
- `.environment` is a hidden file that describes the environment in which the Netdata Agent is installed, including the
- `PATH` and any installation options. Useful for [reinstalling](/packaging/installer/REINSTALL.md) or
- [uninstalling](/packaging/installer/UNINSTALL.md) the Agent.
+ `PATH` and any installation options. Useful for [reinstalling](https://github.com/netdata/netdata/blob/master/packaging/installer/REINSTALL.md) or
+ [uninstalling](https://github.com/netdata/netdata/blob/master/packaging/installer/UNINSTALL.md) the Agent.
The Netdata config directory also contains one symlink:
@@ -63,7 +67,7 @@ The Netdata config directory also contains one symlink:
## Configure a Netdata docker container
-See [configure agent containers](/packaging/docker/README.md#configure-agent-containers).
+See [configure agent containers](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md#configure-agent-containers).
## Use `edit-config` to edit configuration files
@@ -103,7 +107,7 @@ method for `edit-config` to write into the config directory. Use your `$EDITOR`,
> defaulted to `vim` or `nano`. Use `export EDITOR=` to change this temporarily, or edit your shell configuration file
> to change it permanently.
-After you make your changes, you need to [restart the Agent](/docs/configure/start-stop-restart.md) with `sudo systemctl
+After you make your changes, you need to [restart the Agent](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) with `sudo systemctl
restart netdata` or the appropriate method for your system.
Here's an example of editing the node's hostname, which appears in both the local dashboard and in Netdata Cloud.
@@ -145,26 +149,26 @@ curl -o /etc/netdata/netdata.conf http://NODE:19999/netdata.conf
## What's next?
-Learn more about [starting, stopping, or restarting](/docs/configure/start-stop-restart.md) the Netdata daemon to apply
+Learn more about [starting, stopping, or restarting](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) the Netdata daemon to apply
configuration changes.
-Apply some [common configuration changes](/docs/configure/common-changes.md) to quickly tweak the Agent's behavior.
+Apply some [common configuration changes](https://github.com/netdata/netdata/blob/master/docs/configure/common-changes.md) to quickly tweak the Agent's behavior.
-[Add security to your node](/docs/configure/secure-nodes.md) with what you've learned about the Netdata config directory
+[Add security to your node](https://github.com/netdata/netdata/blob/master/docs/configure/secure-nodes.md) with what you've learned about the Netdata config directory
and `edit-config`. We put together a few security best practices based on how you use Netdata.
You can also take what you've learned about node configuration to enable or enhance features:
-- [Enable new collectors](/docs/collect/enable-configure.md) or tweak their behavior.
-- [Configure existing health alarms](/docs/monitor/configure-alarms.md) or create new ones.
-- [Enable notifications](/docs/monitor/enable-notifications.md) to receive updates about the health of your
+- [Enable new collectors](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md) or tweak their behavior.
+- [Configure existing health alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md) or create new ones.
+- [Enable notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md) to receive updates about the health of your
infrastructure.
-- Change [the long-term metrics retention period](/docs/store/change-metrics-storage.md) using the database engine.
+- Change [the long-term metrics retention period](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md) using the database engine.
### Related reference documentation
-- [Netdata Agent · Daemon](/daemon/README.md)
-- [Netdata Agent · Health monitoring](/health/README.md)
-- [Netdata Agent · Notifications](/health/notifications/README.md)
+- [Netdata Agent · Daemon](https://github.com/netdata/netdata/blob/master/daemon/README.md)
+- [Netdata Agent · Health monitoring](https://github.com/netdata/netdata/blob/master/health/README.md)
+- [Netdata Agent · Notifications](https://github.com/netdata/netdata/blob/master/health/notifications/README.md)
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fconfigure%2Fnodes&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/configure/secure-nodes.md b/docs/configure/secure-nodes.md
index 02057ab9e..75bf6fd36 100644
--- a/docs/configure/secure-nodes.md
+++ b/docs/configure/secure-nodes.md
@@ -1,7 +1,11 @@
# Secure your nodes
@@ -11,13 +15,13 @@ internet at large, anyone can access the dashboard and your node's metrics at `h
so that the local dashboard was immediately accessible to users, and so that we don't dictate how professionals set up
and secure their infrastructures.
-Despite this design decision, your [data](/docs/netdata-security.md#your-data-is-safe-with-netdata) and your
-[systems](/docs/netdata-security.md#your-systems-are-safe-with-netdata) are safe with Netdata. Netdata is read-only,
+Despite this design decision, your [data](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md#your-data-is-safe-with-netdata) and your
+[systems](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md#your-systems-are-safe-with-netdata) are safe with Netdata. Netdata is read-only,
cannot do anything other than present metrics, and runs without special/`sudo` privileges. Also, the local dashboard
only exposes chart metadata and metric values, not raw data.
While Netdata is secure by design, we believe you should [protect your
-nodes](/docs/netdata-security.md#why-netdata-should-be-protected). If left accessible to the internet at large, the
+nodes](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md#why-netdata-should-be-protected). If left accessible to the internet at large, the
local dashboard could reveal sensitive information about your infrastructure. For example, an attacker can view which
applications you run (databases, webservers, and so on), or see every user account on a node.
@@ -37,7 +41,7 @@ that align with your goals and your organization's standards.
This is the _recommended method for those who have connected their nodes to Netdata Cloud_ and prefer viewing real-time
metrics using the War Room Overview, Nodes view, and Cloud dashboards.
-You can disable the local dashboard (and API) but retain the encrypted Agent-Cloud link ([ACLK](/aclk/README.md)) that
+You can disable the local dashboard (and API) but retain the encrypted Agent-Cloud link ([ACLK](https://github.com/netdata/netdata/blob/master/aclk/README.md)) that
allows you to stream metrics on demand from your nodes via the Netdata Cloud interface. This change mitigates all
concerns about revealing metrics and system design to the internet at large, while keeping all the functionality you
need to view metrics and troubleshoot issues with Netdata Cloud.
@@ -50,17 +54,17 @@ static-threaded` setting, and change it to `none`.
mode = none
```
-Save and close the editor, then [restart your Agent](/docs/configure/start-stop-restart.md) using `sudo systemctl
+Save and close the editor, then [restart your Agent](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) using `sudo systemctl
restart netdata`. If you try to visit the local dashboard at `http://NODE:19999` again, the connection will fail because
that node no longer serves its local dashboard.
-> See the [configuration basics doc](/docs/configure/nodes.md) for details on how to find `netdata.conf` and use
+> See the [configuration basics doc](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) for details on how to find `netdata.conf` and use
> `edit-config`.
## Restrict access to the local dashboard
If you want to keep using the local dashboard, but don't want it exposed to the internet, you can restrict access with
-[access lists](/web/server/README.md#access-lists). This method also fully retains the ability to stream metrics
+[access lists](https://github.com/netdata/netdata/blob/master/web/server/README.md#access-lists). This method also fully retains the ability to stream metrics
on-demand through Netdata Cloud.
The `allow connections from` setting helps you allow only certain IP addresses or FQDN/hostnames, such as a trusted
@@ -68,7 +72,7 @@ static IP, only `localhost`, or connections from behind a management LAN.
By default, this setting is `localhost *`. This setting allows connections from `localhost` in addition to _all_
connections, using the `*` wildcard. You can change this setting using Netdata's [simple
-patterns](/libnetdata/simple_pattern/README.md).
+patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md).
```conf
[web]
@@ -95,8 +99,8 @@ The `allow connections from` setting is global and restricts access to the dashb
allow management from = localhost
```
-See the [web server](/web/server/README.md#access-lists) docs for additional details about access lists. You can take
-access lists one step further by [enabling SSL](/web/server/README.md#enabling-tls-support) to encrypt data from local
+See the [web server](https://github.com/netdata/netdata/blob/master/web/server/README.md#access-lists) docs for additional details about access lists. You can take
+access lists one step further by [enabling SSL](https://github.com/netdata/netdata/blob/master/web/server/README.md#enabling-tls-support) to encrypt data from the local
dashboard in transit. The connection to Netdata Cloud is always secured with TLS.
## Use a reverse proxy
@@ -106,18 +110,18 @@ local dashboard and Netdata Cloud dashboards. You can use a reverse proxy to pas
enable HTTPS to encrypt metadata and metric values in transit.
We recommend Nginx, as it's what we use for our [demo server](https://london.my-netdata.io/), and we have a guide
-dedicated to [running Netdata behind Nginx](/docs/Running-behind-nginx.md).
+dedicated to [running Netdata behind Nginx](https://github.com/netdata/netdata/blob/master/docs/Running-behind-nginx.md).
-We also have guides for [Apache](/docs/Running-behind-apache.md), [Lighttpd](/docs/Running-behind-lighttpd.md),
-[HAProxy](/docs/Running-behind-haproxy.md), and [Caddy](/docs/Running-behind-caddy.md).
+We also have guides for [Apache](https://github.com/netdata/netdata/blob/master/docs/Running-behind-apache.md), [Lighttpd](https://github.com/netdata/netdata/blob/master/docs/Running-behind-lighttpd.md),
+[HAProxy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-haproxy.md), and [Caddy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-caddy.md).
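As a rough sketch only (the hostname and certificate paths are hypothetical; see the Nginx guide above for the full, supported configuration), a minimal proxy block might look like:

```conf
server {
    listen 443 ssl;
    server_name netdata.example.com;                 # hypothetical hostname
    ssl_certificate     /etc/ssl/netdata.crt;        # hypothetical certificate paths
    ssl_certificate_key /etc/ssl/netdata.key;

    location / {
        # the Agent listens on 127.0.0.1:19999 by default
        proxy_pass http://127.0.0.1:19999;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}
```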
## What's next?
-Read about [Netdata's security design](/docs/netdata-security.md) and our [blog
+Read about [Netdata's security design](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md) and our [blog
post](https://www.netdata.cloud/blog/netdata-agent-dashboard/) about why the local Agent dashboard is both open and
secure by design.
-Next up, learn about [collectors](/docs/collect/how-collectors-work.md) to ensure you're gathering every essential
+Next up, learn about [collectors](https://github.com/netdata/netdata/blob/master/docs/collect/how-collectors-work.md) to ensure you're gathering every essential
metric about your node, its applications, and your infrastructure at large.
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fconfigure%2Fsecure-nodesa&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/configure/start-stop-restart.md b/docs/configure/start-stop-restart.md
index 4967fff08..3c04777da 100644
--- a/docs/configure/start-stop-restart.md
+++ b/docs/configure/start-stop-restart.md
@@ -1,12 +1,16 @@
# Start, stop, or restart the Netdata Agent
-When you install the Netdata Agent, the [daemon](/daemon/README.md) is configured to start at boot and stop and
+When you install the Netdata Agent, the [daemon](https://github.com/netdata/netdata/blob/master/daemon/README.md) is configured to start at boot and stop and
restart/shutdown.
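On systemd-based distributions, the usual commands apply (other init systems have their own equivalents, covered below):

```bash
sudo systemctl stop netdata      # stop the Agent
sudo systemctl start netdata     # start it again
sudo systemctl restart netdata   # stop and start in one step
```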
You will most often need to _restart_ the Agent to load new or edited configuration files. [Health
@@ -40,7 +44,7 @@ If you start the daemon this way, close it with `sudo killall netdata`.
## Using `netdatacli`
-The Netdata Agent also comes with a [CLI tool](/cli/README.md) capable of performing shutdowns. Start the Agent back up
+The Netdata Agent also comes with a [CLI tool](https://github.com/netdata/netdata/blob/master/cli/README.md) capable of performing shutdowns. Start the Agent back up
using your preferred method listed above.
```bash
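# a minimal sketch; the shutdown subcommand name is taken from the Netdata CLI documentation
sudo netdatacli shutdown-agent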
@@ -80,19 +84,19 @@ again with `service netdata start`, or the appropriate method for your system.
## What's next?
-Learn more about [securing the Netdata Agent](/docs/configure/secure-nodes.md).
+Learn more about [securing the Netdata Agent](https://github.com/netdata/netdata/blob/master/docs/configure/secure-nodes.md).
You can also use the restart/reload methods described above to enable new features:
-- [Enable new collectors](/docs/collect/enable-configure.md) or tweak their behavior.
-- [Configure existing health alarms](/docs/monitor/configure-alarms.md) or create new ones.
-- [Enable notifications](/docs/monitor/enable-notifications.md) to receive updates about the health of your
+- [Enable new collectors](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md) or tweak their behavior.
+- [Configure existing health alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md) or create new ones.
+- [Enable notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md) to receive updates about the health of your
infrastructure.
-- Change [the long-term metrics retention period](/docs/store/change-metrics-storage.md) using the database engine.
+- Change [the long-term metrics retention period](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md) using the database engine.
### Related reference documentation
-- [Netdata Agent · Daemon](/daemon/README.md)
-- [Netdata Agent · Netdata CLI](/cli/README.md)
+- [Netdata Agent · Daemon](https://github.com/netdata/netdata/blob/master/daemon/README.md)
+- [Netdata Agent · Netdata CLI](https://github.com/netdata/netdata/blob/master/cli/README.md)
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fconfigure%2Fstart-stop-restart&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/contributing/contributing-documentation.md b/docs/contributing/contributing-documentation.md
index 68b861d40..da28272b4 100644
--- a/docs/contributing/contributing-documentation.md
+++ b/docs/contributing/contributing-documentation.md
@@ -18,7 +18,7 @@ The Netdata team aggregates and publishes all documentation at [learn.netdata.cl
## Before you get started
Anyone interested in contributing to documentation should first read the [Netdata style
-guide](/docs/contributing/style-guide.md) and the [Netdata Community Code of Conduct](https://learn.netdata.cloud/contribute/code-of-conduct).
+guide](https://github.com/netdata/netdata/blob/master/docs/contributing/style-guide.md) and the [Netdata Community Code of Conduct](https://github.com/netdata/.github/blob/main/CODE_OF_CONDUCT.md).
Netdata's documentation uses Markdown syntax. If you're not familiar with Markdown, read the [Mastering
Markdown](https://guides.github.com/features/mastering-markdown/) guide from GitHub for the basics on creating
@@ -40,7 +40,7 @@ Netdata's documentation is separated into four sections.
- Published under the **Reference** section in the Netdata Learn sidebar.
- **Netdata Cloud reference**: Reference documentation for the closed-source Netdata Cloud web application.
- Stored in a private GitHub repository and not editable by the community.
- - Published at [`https://learn.netdata.cloud/docs/cloud`](https://learn.netdata.cloud/docs/cloud).
+ - Published at [`https://github.com/netdata/netdata/blob/master/docs/cloud/cloud.mdx`](https://github.com/netdata/netdata/blob/master/docs/cloud/cloud.mdx).
- **Guides**: Solutions-based articles for users who want instructions on completing a specific complex task using the
Netdata Agent and/or Netdata Cloud.
- Stored in the [`/docs/guides` folder](https://github.com/netdata/netdata/tree/master/docs/guides) within the
@@ -59,7 +59,7 @@ fixes to a single document, such as fixing a typo or clarifying a confusing sent
Click on the **Edit this page** button on any published document on [Netdata Learn](https://learn.netdata.cloud). Each
page has two of these buttons: one beneath the table of contents, and another at the end of the document, which take you
-to GitHub's code editor. Make your suggested changes, keeping [Netdata style guide](/docs/contributing/style-guide.md)
+to GitHub's code editor. Make your suggested changes, keeping the [Netdata style guide](https://github.com/netdata/netdata/blob/master/docs/contributing/style-guide.md)
in mind, and use the **Preview changes** button to ensure your Markdown syntax works as expected.
Under the **Commit changes** header, write a descriptive title for your requested change. Click the **Commit changes**
@@ -86,7 +86,7 @@ git clone https://github.com/YOUR-GITHUB-USERNAME/netdata.git
```
Create a new branch using `git checkout -b BRANCH-NAME`. Use your favorite text editor to make your changes, keeping the
-[Netdata style guide](/docs/contributing/style-guide.md) in mind. Add, commit, and push changes to your fork. When
+[Netdata style guide](https://github.com/netdata/netdata/blob/master/docs/contributing/style-guide.md) in mind. Add, commit, and push changes to your fork. When
you're finished, visit the [Netdata Agent Pull requests](https://github.com/netdata/netdata/pulls) to create a new pull
request based on the changes you made in the new branch of your fork.
diff --git a/docs/contributing/style-guide.md b/docs/contributing/style-guide.md
index 5ff61164d..7d1b86478 100644
--- a/docs/contributing/style-guide.md
+++ b/docs/contributing/style-guide.md
@@ -67,8 +67,8 @@ Netdata is a global company in every sense, with employees, contributors, and us
communicate in a way that is clear and easily understood by everyone.
Here are some guidelines, pointers, and questions to be aware of as you write to ensure your writing is universal. Some
-of these are expanded into individual sections in the [language, grammar, and
-mechanics](#language-grammar-and-mechanics) section below.
+of these are expanded into individual sections in
+the [language, grammar, and mechanics](#language-grammar-and-mechanics) section below.
- Would this language make sense to someone who doesn't work here?
- Could someone quickly scan this document and understand the material?
@@ -97,8 +97,8 @@ mechanics](#language-grammar-and-mechanics) section below.
To ensure Netdata's writing is clear, concise, and universal, we have established standards for language, grammar, and
certain writing mechanics. However, if you're writing about Netdata for an external publication, such as a guest blog
-post, follow that publication's style guide or standards, while keeping the [preferred spelling of Netdata
-terms](#netdata-specific-terms) in mind.
+post, follow that publication's style guide or standards, while keeping
+the [preferred spelling of Netdata terms](#netdata-specific-terms) in mind.
### Active voice
@@ -106,31 +106,32 @@ Active voice is more concise and easier to understand compared to passive voice.
the sentence is action. In passive voice, the subject is acted upon. A famous example of passive voice is the phrase
"mistakes were made."
-| | |
-|-----------------|---------------------------------------------------------------------------------------------|
-| Not recommended | When an alarm is triggered by a metric, a notification is sent by Netdata. |
-| **Recommended** | When a metric triggers an alarm, Netdata sends a notification to your preferred endpoint. |
+| | |
+|-----------------|-------------------------------------------------------------------------------------------|
+| Not recommended | When an alarm is triggered by a metric, a notification is sent by Netdata. |
+| **Recommended** | When a metric triggers an alarm, Netdata sends a notification to your preferred endpoint. |
### Second person
-Use the second person ("you") to give instructions or "talk" directly to users.
+Use the second person ("you") to give instructions or "talk" directly to users.
In these situations, avoid "we," "I," "let's," and "us," particularly in documentation. The "you" pronoun can also be
-implied, depending on your sentence structure.
+implied, depending on your sentence structure.
One valid exception is when a member of the Netdata team or community wants to write about said team or community.
-| | |
-|--------------------------------|-------------------------------------------------------------------------------------------|
-| Not recommended | To install Netdata, we should try the one-line installer... |
-| **Recommended** | To install Netdata, you should try the one-line installer... |
-| **Recommended**, implied "you" | To install Netdata, try the one-line installer... |
+| | |
+|--------------------------------|--------------------------------------------------------------|
+| Not recommended | To install Netdata, we should try the one-line installer... |
+| **Recommended** | To install Netdata, you should try the one-line installer... |
+| **Recommended**, implied "you" | To install Netdata, try the one-line installer... |
### "Easy" or "simple"
-Using words that imply the complexity of a task or feature goes against our policy of [universal
-communication](#universal-communication). If you claim that a task is easy and the reader struggles to complete it, you
-may inadvertently discourage them.
+Using words that imply the complexity of a task or feature goes against our policy
+of [universal communication](#universal-communication). If you claim that a task is easy and the reader struggles to
+complete it, you may inadvertently discourage them.
However, if you give users two options and want to relay that one option is genuinely less complex than another, be
specific about how and why.
@@ -163,11 +164,11 @@ See the [word list](#word-list) for spellings of specific words.
Follow the general [English standards](https://owl.purdue.edu/owl/general_writing/mechanics/help_with_capitals.html) for
capitalization. In summary:
-- Capitalize the first word of every new sentence.
-- Don't use uppercase for emphasis. (Netdata is the BEST!)
-- Capitalize the names of brands, software, products, and companies according to their official guidelines. (Netdata,
- Docker, Apache, NGINX)
-- Avoid camel case (NetData) or all caps (NETDATA).
+- Capitalize the first word of every new sentence.
+- Don't use uppercase for emphasis. (Netdata is the BEST!)
+- Capitalize the names of brands, software, products, and companies according to their official guidelines. (Netdata,
+ Docker, Apache, NGINX)
+- Avoid camel case (NetData) or all caps (NETDATA).
Whenever you refer to the company Netdata, Inc., or the open-source monitoring agent the company develops, capitalize
**Netdata**.
@@ -244,10 +245,10 @@ must reflect the _current state of [production](https://app.netdata.cloud).
Every link should clearly state its destination. Don't use words like "here" to describe where a link will take your
reader.
-| | |
-|-----------------|-------------------------------------------------------------------------------------------|
-| Not recommended | To install Netdata, click [here](/packaging/installer/README.md). |
-| **Recommended** | To install Netdata, read the [installation instructions](/packaging/installer/README.md). |
+| | |
+|-----------------|-----------------------------------------------------------------------------------------------------------------------------------------|
+| Not recommended | To install Netdata, click [here](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md). |
+| **Recommended** | To install Netdata, read the [installation instructions](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md). |
Use links as often as required to provide necessary context. Blog posts and guides require fewer hyperlinks than
documentation. See the section on [linking between documentation](#linking-between-documentation) for guidance on the
@@ -268,7 +269,7 @@ and desired audience.
## Technical/Linux standards
Configuration or maintenance of the Netdata Agent requires some system administration skills, such as navigating
-directories, editing files, or starting/stopping/restarting services. Certain processes
+directories, editing files, or starting/stopping/restarting services. Certain processes
### Switching Linux users
@@ -302,16 +303,17 @@ Netdata Agent installation will have commands under the same paths. When applica
path, providing a recommendation or instructions on how to view the running configuration, which includes the correct
paths.
-For example, the [configuration](/docs/configure/nodes.md) doc first teaches users how to find the Netdata config
+For example, the [configuration](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) doc first
+teaches users how to find the Netdata config
directory and navigate to it, then runs commands from the `/etc/netdata` path so that the instructions are more
universal.
Don't include full paths, beginning from the system's root (`/`), as these might not work on certain systems.
-| | |
-|-----------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Not recommended | Use `edit-config` to edit Netdata's configuration: `sudo /etc/netdata/edit-config netdata.conf`. |
-| **Recommended** | Use `edit-config` to edit Netdata's configuration by first navigating to your [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory), which is typically at `/etc/netdata`, then running `sudo edit-config netdata.conf`. |
+| | |
+|-----------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Not recommended | Use `edit-config` to edit Netdata's configuration: `sudo /etc/netdata/edit-config netdata.conf`. |
+| **Recommended** | Use `edit-config` to edit Netdata's configuration by first navigating to your [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory), which is typically at `/etc/netdata`, then running `sudo edit-config netdata.conf`. |
### `sudo`
@@ -371,8 +373,8 @@ Some documents, like the Ansible guide and others in the `/docs/guides` folder,
this case, replace `/docs` with `/img/seo`, and then rebuild the remainder of the path to the document in question. End
the path with `.png`. A member of the Netdata team will assist in creating the image when publishing the content.
-For example, here is the frontmatter for the guide about [deploying the Netdata Agent with
-Ansible](https://learn.netdata.cloud/guides/deploy/ansible).
+For example, here is the frontmatter for the guide
+about [deploying the Netdata Agent with Ansible](https://github.com/netdata/netdata/blob/master/docs/guides/deploy/ansible.md).
```markdown
# Visualization date and time controls
@@ -11,7 +15,7 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/dashboard/v
### Pick timeframes to visualize
-While [panning through time and zooming in/out](/docs/dashboard/interact-charts.mdx) from charts it is helpful when
+While [panning through time and zooming in/out](https://github.com/netdata/netdata/blob/master/docs/dashboard/interact-charts.mdx) from charts is helpful when
you're looking at recent history or want to do granular troubleshooting, what if you want to see metrics from 6 hours
ago? Or 6 days?
@@ -80,7 +84,7 @@ distributed in different timezones and they need to collaborate.
Our goal is to make it easier for you and your teams to troubleshoot based on your timezone preference, and to
communicate across varying timezones and timeframes without worrying about their specifics.
-![Timezon selector](https://user-images.githubusercontent.com/82235632/129209528-bc1d572d-4582-4142-aace-918287849499.png)
+
When you change the timezone, all date and time fields are updated and displayed according to the specified timezone. This applies everywhere, from
charts to alert information and across Netdata Cloud.
@@ -99,23 +103,23 @@ beyond stored historical metrics, you'll see this message:
![Screenshot of reaching the end of historical metrics
storage](https://user-images.githubusercontent.com/1153921/114207597-63a23280-9911-11eb-863d-4d2f75b030b4.png)
-At any time, [configure the internal TSDB's storage capacity](/docs/store/change-metrics-storage.md) to expand your
+At any time, [configure the internal TSDB's storage capacity](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md) to expand your
depth of historical metrics.
## What's next?
One useful next step after selecting a timeframe is [exporting the
-metrics](/docs/dashboard/import-export-print-snapshot.mdx) into a snapshot file, which can then be shared and imported
+metrics](https://github.com/netdata/netdata/blob/master/docs/dashboard/import-export-print-snapshot.mdx) into a snapshot file, which can then be shared and imported
into any other Netdata dashboard.
-There are also many ways to [customize](/docs/dashboard/customize.mdx) the standard dashboard experience, from changing
+There are also many ways to [customize](https://github.com/netdata/netdata/blob/master/docs/dashboard/customize.mdx) the standard dashboard experience, from changing
the theme to editing the text that accompanies every section of charts.
## Further reading & related information
- Dashboard
- - [How the dashboard works](/docs/dashboard/how-dashboard-works.mdx)
- - [Interact with charts](/docs/dashboard/interact-charts.mdx)
- - [Chart dimensions, contexts, and families](/docs/dashboard/dimensions-contexts-families.mdx)
- - [Import, export, and print a snapshot](/docs/dashboard/import-export-print-snapshot.mdx)
- - [Customize the standard dashboard](/docs/dashboard/customize.mdx)
+ - [How the dashboard works](https://github.com/netdata/netdata/blob/master/docs/dashboard/how-dashboard-works.mdx)
+ - [Interact with charts](https://github.com/netdata/netdata/blob/master/docs/dashboard/interact-charts.mdx)
+ - [Chart dimensions, contexts, and families](https://github.com/netdata/netdata/blob/master/docs/dashboard/dimensions-contexts-families.mdx)
+ - [Import, export, and print a snapshot](https://github.com/netdata/netdata/blob/master/docs/dashboard/import-export-print-snapshot.mdx)
+ - [Customize the standard dashboard](https://github.com/netdata/netdata/blob/master/docs/dashboard/customize.mdx)
diff --git a/docs/export/enable-connector.md b/docs/export/enable-connector.md
index a914a114a..28208e2f4 100644
--- a/docs/export/enable-connector.md
+++ b/docs/export/enable-connector.md
@@ -1,25 +1,31 @@
# Enable an exporting connector
Now that you found the right connector for your [external time-series
-database](/docs/export/external-databases.md#supported-databases), you can now enable the exporting engine and the
+database](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md#supported-databases), you can enable the exporting engine and the
connector itself. We'll walk through the process of enabling the exporting engine itself, followed by two examples using
the OpenTSDB and Graphite connectors.
> When you enable the exporting engine and a connector, the Netdata Agent exports metrics _beginning from the time you
-> restart its process_, not the entire [database of long-term metrics](/docs/store/change-metrics-storage.md).
+> restart its process_, not the entire
+> [database of long-term metrics](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md).
Once you understand the process of enabling a connector, you can translate that knowledge to any other connector.
## Enable the exporting engine
-Use `edit-config` from your [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory) to open
-`exporting.conf`:
+Use `edit-config` from your
+[Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory)
+to open `exporting.conf`:
```bash
sudo ./edit-config exporting.conf
@@ -47,14 +53,16 @@ Use the following configuration as a starting point. Copy and paste it into `exp
Replace `my_opentsdb_http_instance` with an instance name of your choice, and change the `destination` setting to the IP
address or hostname of your OpenTSDB database.
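A minimal sketch of such a starting point (the section-name format and the default OpenTSDB HTTP port 4242 are assumptions drawn from the connector docs) might be:

```conf
[exporting:global]
    enabled = yes

[opentsdb:http:my_opentsdb_http_instance]
    enabled = yes
    destination = localhost:4242
```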
-Restart your Agent with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system, to begin exporting to your OpenTSDB database. The
+Restart your Agent with `sudo systemctl restart netdata`, or
+the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system, to begin exporting to your OpenTSDB
+database. The
Netdata Agent exports metrics _beginning from the time the process starts_, and because it exports as metrics are
collected, you should start seeing data in your external database after only a few seconds.
Any further configuration is optional, based on your needs and the configuration of your OpenTSDB database. See the
-[OpenTSDB connector doc](/exporting/opentsdb/README.md) and [exporting engine
-reference](/exporting/README.md#configuration) for details.
+[OpenTSDB connector doc](https://github.com/netdata/netdata/blob/master/exporting/opentsdb/README.md)
+and [exporting engine reference](https://github.com/netdata/netdata/blob/master/exporting/README.md#configuration) for
+details.
## Example: Enable the Graphite connector
@@ -69,27 +77,29 @@ Use the following configuration as a starting point. Copy and paste it into `exp
Replace `my_graphite_instance` with an instance name of your choice, and change the `destination` setting to the IP
address or hostname of your Graphite-supported database.
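Again as a sketch (the section-name format and the default Graphite port 2003 are assumptions), the starting point might look like:

```conf
[graphite:my_graphite_instance]
    enabled = yes
    destination = localhost:2003
```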
-Restart your Agent with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system, to begin exporting to your Graphite-supported database.
+Restart your Agent with `sudo systemctl restart netdata`, or
+the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system, to begin exporting to your
+Graphite-supported database.
Because the Agent exports metrics as they're collected, you should start seeing data in your external database after
only a few seconds.
Any further configuration is optional, based on your needs and the configuration of your Graphite-supported database.
-See [exporting engine reference](/exporting/README.md#configuration) for details.
+See [exporting engine reference](https://github.com/netdata/netdata/blob/master/exporting/README.md#configuration) for
+details.
## What's next?
-If you want to further configure your exporting connectors, see the [exporting engine
-reference](/exporting/README.md#configuration).
+If you want to further configure your exporting connectors, see
+the [exporting engine reference](https://github.com/netdata/netdata/blob/master/exporting/README.md#configuration).
-For a comprehensive example of using the Graphite connector, read our guide: [_Export and visualize Netdata metrics in
-Graphite_](/docs/guides/export/export-netdata-metrics-graphite.md). Or, start [using host
-labels](/docs/guides/using-host-labels.md) on exported metrics.
+For a comprehensive example of using the Graphite connector, read our guide:
+[_Export and visualize Netdata metrics in Graphite_](https://github.com/netdata/netdata/blob/master/docs/guides/export/export-netdata-metrics-graphite.md). Or, start
+[using host labels](https://github.com/netdata/netdata/blob/master/docs/guides/using-host-labels.md) on exported metrics.
### Related reference documentation
-- [Exporting engine reference](/exporting/README.md)
-- [OpenTSDB connector](/exporting/opentsdb/README.md)
-- [Graphite connector](/exporting/graphite/README.md)
+- [Exporting engine reference](https://github.com/netdata/netdata/blob/master/exporting/README.md)
+- [OpenTSDB connector](https://github.com/netdata/netdata/blob/master/exporting/opentsdb/README.md)
+- [Graphite connector](https://github.com/netdata/netdata/blob/master/exporting/graphite/README.md)
diff --git a/docs/export/external-databases.md b/docs/export/external-databases.md
index a542e8ee7..00ca7410e 100644
--- a/docs/export/external-databases.md
+++ b/docs/export/external-databases.md
@@ -1,13 +1,17 @@
# Export metrics to external time-series databases
Netdata allows you to export metrics to external time-series databases with the [exporting
-engine](/exporting/README.md). This system uses a number of **connectors** to initiate connections to [more than
+engine](https://github.com/netdata/netdata/blob/master/exporting/README.md). This system uses a number of **connectors** to initiate connections to [more than
thirty](#supported-databases) supported databases, including InfluxDB, Prometheus, Graphite, ElasticSearch, and much
more.
@@ -18,55 +22,55 @@ Based on your needs and resources you allocated to your external time-series dat
that metrics are exported or export only certain charts with filtering. You can also choose whether metrics are exported
as-collected, a normalized average, or the sum/volume of metrics values over the configured interval.
-Exporting is an important part of Netdata's effort to be [interoperable](/docs/overview/netdata-monitoring-stack.md)
+Exporting is an important part of Netdata's effort to be [interoperable](https://github.com/netdata/netdata/blob/master/docs/overview/netdata-monitoring-stack.md)
with other monitoring software. You can use an external time-series database for long-term metrics retention, further
analysis, or correlation with other tools, such as application tracing.
## Supported databases
Netdata supports exporting metrics to the following databases through several
-[connectors](/exporting/README.md#features). Once you find the connector that works for your database, open its
-documentation and the [enabling a connector](/docs/export/enable-connector.md) doc for details on enabling it.
-
-- **AppOptics**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **AWS Kinesis**: [AWS Kinesis Data Streams](/exporting/aws_kinesis/README.md)
-- **Azure Data Explorer**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **Azure Event Hubs**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **Blueflood**: [Graphite](/exporting/graphite/README.md)
-- **Chronix**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **Cortex**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **CrateDB**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **ElasticSearch**: [Graphite](/exporting/graphite/README.md), [Prometheus remote
- write](/exporting/prometheus/remote_write/README.md)
-- **Gnocchi**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **Google BigQuery**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **Google Cloud Pub/Sub**: [Google Cloud Pub/Sub Service](/exporting/pubsub/README.md)
-- **Graphite**: [Graphite](/exporting/graphite/README.md), [Prometheus remote
- write](/exporting/prometheus/remote_write/README.md)
-- **InfluxDB**: [Graphite](/exporting/graphite/README.md), [Prometheus remote
- write](/exporting/prometheus/remote_write/README.md)
-- **IRONdb**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **JSON**: [JSON document databases](/exporting/json/README.md)
-- **Kafka**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **KairosDB**: [Graphite](/exporting/graphite/README.md), [OpenTSDB](/exporting/opentsdb/README.md)
-- **M3DB**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **MetricFire**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **MongoDB**: [MongoDB](/exporting/mongodb/README.md)
-- **New Relic**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **OpenTSDB**: [OpenTSDB](/exporting/opentsdb/README.md), [Prometheus remote
- write](/exporting/prometheus/remote_write/README.md)
-- **PostgreSQL**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+[connectors](https://github.com/netdata/netdata/blob/master/exporting/README.md#features). Once you find the connector that works for your database, open its
+documentation and the [enabling a connector](https://github.com/netdata/netdata/blob/master/docs/export/enable-connector.md) doc for details on enabling it.
+
+- **AppOptics**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **AWS Kinesis**: [AWS Kinesis Data Streams](https://github.com/netdata/netdata/blob/master/exporting/aws_kinesis/README.md)
+- **Azure Data Explorer**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **Azure Event Hubs**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **Blueflood**: [Graphite](https://github.com/netdata/netdata/blob/master/exporting/graphite/README.md)
+- **Chronix**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **Cortex**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **CrateDB**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **ElasticSearch**: [Graphite](https://github.com/netdata/netdata/blob/master/exporting/graphite/README.md), [Prometheus remote
+ write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **Gnocchi**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **Google BigQuery**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **Google Cloud Pub/Sub**: [Google Cloud Pub/Sub Service](https://github.com/netdata/netdata/blob/master/exporting/pubsub/README.md)
+- **Graphite**: [Graphite](https://github.com/netdata/netdata/blob/master/exporting/graphite/README.md), [Prometheus remote
+ write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **InfluxDB**: [Graphite](https://github.com/netdata/netdata/blob/master/exporting/graphite/README.md), [Prometheus remote
+ write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **IRONdb**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **JSON**: [JSON document databases](https://github.com/netdata/netdata/blob/master/exporting/json/README.md)
+- **Kafka**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **KairosDB**: [Graphite](https://github.com/netdata/netdata/blob/master/exporting/graphite/README.md), [OpenTSDB](https://github.com/netdata/netdata/blob/master/exporting/opentsdb/README.md)
+- **M3DB**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **MetricFire**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **MongoDB**: [MongoDB](https://github.com/netdata/netdata/blob/master/exporting/mongodb/README.md)
+- **New Relic**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **OpenTSDB**: [OpenTSDB](https://github.com/netdata/netdata/blob/master/exporting/opentsdb/README.md), [Prometheus remote
+ write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **PostgreSQL**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
via [PostgreSQL Prometheus Adapter](https://github.com/CrunchyData/postgresql-prometheus-adapter)
-- **Prometheus**: [Prometheus scraper](/exporting/prometheus/README.md)
-- **TimescaleDB**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md),
- [netdata-timescale-relay](/exporting/TIMESCALE.md)
-- **QuasarDB**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **SignalFx**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **Splunk**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **TiKV**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **Thanos**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **VictoriaMetrics**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
-- **Wavefront**: [Prometheus remote write](/exporting/prometheus/remote_write/README.md)
+- **Prometheus**: [Prometheus scraper](https://github.com/netdata/netdata/blob/master/exporting/prometheus/README.md)
+- **TimescaleDB**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md),
+ [netdata-timescale-relay](https://github.com/netdata/netdata/blob/master/exporting/TIMESCALE.md)
+- **QuasarDB**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **SignalFx**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **Splunk**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **TiKV**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **Thanos**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **VictoriaMetrics**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
+- **Wavefront**: [Prometheus remote write](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md)
Can't find your preferred external time-series database? Ask our [community](https://community.netdata.cloud/) for
solutions, or file an [issue on
@@ -74,16 +78,16 @@ GitHub](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cne
## What's next?
-We recommend you read our document on [enabling a connector](/docs/export/enable-connector.md) to learn about the
+We recommend you read our document on [enabling a connector](https://github.com/netdata/netdata/blob/master/docs/export/enable-connector.md) to learn about the
process and discover important configuration options. If you would rather skip ahead, click on any of the above links to
connectors for their reference documentation, which outline any prerequisites to install for that connector, along with
connector-specific configuration options.
Read about one possible use case for exporting metrics in our guide: [_Export and visualize Netdata metrics in
-Graphite_](/docs/guides/export/export-netdata-metrics-graphite.md).
+Graphite_](https://github.com/netdata/netdata/blob/master/docs/guides/export/export-netdata-metrics-graphite.md).
### Related reference documentation
-- [Exporting engine reference](/exporting/README.md)
+- [Exporting engine reference](https://github.com/netdata/netdata/blob/master/exporting/README.md)
diff --git a/docs/get-started.mdx b/docs/get-started.mdx
index 892baa0ce..aa82e811b 100644
--- a/docs/get-started.mdx
+++ b/docs/get-started.mdx
@@ -1,67 +1,96 @@
----
-title: "Get started with Netdata"
+
+
+import { OneLineInstallWget, OneLineInstallCurl } from '@site/src/components/OneLineInstall/'
+import { InstallRegexLink, InstallBoxRegexLink } from '@site/src/components/InstallRegexLink/'
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
Netdata is a free and open-source (FOSS) monitoring agent that collects thousands of hardware and software metrics from
any physical or virtual system (we call them _nodes_). These metrics are organized in an easy-to-use and -navigate interface.
-Together with [Netdata Cloud](https://learn.netdata.cloud/docs/cloud), you can monitor your entire infrastructure in
+Together with [Netdata Cloud](https://github.com/netdata/netdata/blob/master/docs/cloud/cloud.mdx), you can monitor your entire infrastructure in
real time and troubleshoot problems that threaten the health of your nodes.
Netdata runs permanently on all your physical/virtual servers, containers, cloud deployments, and edge/IoT devices. It
runs on Linux distributions (Ubuntu, Debian, CentOS, and more), container/microservice platforms (Kubernetes clusters,
Docker), and many other operating systems (FreeBSD, macOS), with no `sudo` required.
+To install Netdata in minutes on your platform:
+
+1. Sign up at https://app.netdata.cloud/
+2. You will be presented with an empty Space and a prompt to "Connect Nodes", showing the install command for each platform
+3. Select the platform you want to install Netdata on, then copy and paste the script into your node's terminal and run it
+
+Once the installation completes successfully, you should see the node live in your Netdata Space!
+
+Continue reading for more advanced instructions and installation options.
+
## Install on Linux with one-line installer
The **recommended** way to install Netdata on a Linux node (physical, virtual, container, IoT) is our one-line
-[kickstart script](/packaging/installer/methods/kickstart.md). This script automatically installs dependencies and
-builds Netdata from its source code.
+[kickstart script](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kickstart.md).
+This script automatically installs dependencies and builds Netdata from its source code.
-Copy the script, paste it into your node's terminal, and hit `Enter` to begin the installation process.
+To install, copy the script, paste it into your node's terminal, and hit `Enter` to begin the installation process.
-
+
+ wget>
+
+
+
+
+ curl>
+
+
+
+
+
+
+:::note
+If you also plan to claim the node to Netdata Cloud,
+make sure to replace `YOUR_CLAIM_TOKEN` with the claim token of your Space,
+and `YOUR_ROOM_ID` with the ID of the room you want to claim it to.
+:::
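For reference, the plain (non-claiming) form of the one-line installer looks roughly like this; verify the exact command against the kickstart documentation linked above:

```bash
wget -O /tmp/netdata-kickstart.sh https://my-netdata.io/kickstart.sh && sh /tmp/netdata-kickstart.sh
```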
Jump down to [what's next](#whats-next) to learn how to view your new dashboard and take your next steps monitoring and
troubleshooting with Netdata.
## Other installation options
-
-
+
-
-
-
-
-
-
+
## What's next?
@@ -73,35 +102,28 @@ Where you go from here is based on your use case, immediate needs, and experienc
### Dashboard
-Learn more about [how the dashboard works](/docs/dashboard/how-dashboard-works.mdx), or dive directly into the many ways
-to [interact with charts](/docs/dashboard/interact-charts.mdx).
+Learn more about [how the dashboard works](https://github.com/netdata/netdata/blob/master/docs/dashboard/how-dashboard-works.mdx), or dive directly into the many ways
+to [interact with charts](https://github.com/netdata/netdata/blob/master/docs/dashboard/interact-charts.mdx).
### Configuration
-Discover the recommended way to [configure Netdata's settings or behavior](/docs/configure/nodes.md) using our built-in
+Discover the recommended way to [configure Netdata's settings or behavior](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) using our built-in
`edit-config` script, then apply that knowledge to mission-critical tweaks, such as [changing how long Netdata stores
-metrics](/docs/store/change-metrics-storage.md).
+metrics](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md).
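For instance, assuming the default config directory, that workflow is roughly:

```bash
cd /etc/netdata                   # your config directory may differ
sudo ./edit-config netdata.conf   # opens the file in your $EDITOR
```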
### Data collection
If Netdata didn't autodetect all the hardware, containers, services, or applications running on your node, you should
-learn more about [how data collectors work](/docs/collect/how-collectors-work.md). If there's a [supported
-collector](/collectors/COLLECTORS.md) for metrics you need, [configure the collector](/docs/collect/enable-configure.md)
+learn more about [how data collectors work](https://github.com/netdata/netdata/blob/master/docs/collect/how-collectors-work.md). If there's a [supported
+collector](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md) for metrics you need, [configure the collector](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md)
or read about its requirements to configure your endpoint to publish metrics in the correct format and endpoint.
### Alarms & notifications
Netdata comes with hundreds of preconfigured alarms, designed by our monitoring gurus in parallel with our open-source
-community, but you may want to [edit alarms](/docs/monitor/configure-alarms.md) or [enable
-notifications](/docs/monitor/enable-notifications.md) to customize your Netdata experience.
-
-### Need to monitor multiple nodes in one place?
+community, but you may want to [edit alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md) or
+[enable notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md) to customize your Netdata experience.
-For robust multi-node monitoring from a single interface, consider [Netdata
-Cloud](https://learn.netdata.cloud/docs/cloud), which streams, aggregates, and visualizes metrics from any number of
-nodes. It's all the same out-of-the-box, zero-configuration functionality of the open-source monitoring agent, but for
-any number of distributed nodes, _entirely for free_.
+### Make your deployment production ready
-There is an alternative for those who aren't interested in using Netdata Cloud, albeit with some required configuration.
-Each node can [stream](/streaming/README.md) its metrics to any other node, and the default
-[registry](/registry/README.md) is configurable to create a private "network" of Netdata dashboards.
+Both [securing Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/secure-nodes.md) and [setting up replication](https://github.com/netdata/netdata/blob/master/streaming/README.md) are strongly recommended.
diff --git a/docs/getting-started/integrations.md b/docs/getting-started/integrations.md
new file mode 100644
index 000000000..9f38a67d0
--- /dev/null
+++ b/docs/getting-started/integrations.md
@@ -0,0 +1,12 @@
+
+
+This page is autogenerated; this is a placeholder document
\ No newline at end of file
diff --git a/docs/getting-started/introduction.md b/docs/getting-started/introduction.md
new file mode 100644
index 000000000..1ace5e3a6
--- /dev/null
+++ b/docs/getting-started/introduction.md
@@ -0,0 +1,158 @@
+
+
+## What is Netdata?
+
+Netdata is designed by system administrators, DevOps engineers, and developers to collect everything, help you visualize
+metrics, troubleshoot complex performance problems, and make data interoperable with the rest of your monitoring stack.
+
+You can install Netdata on most Linux distributions (Ubuntu, Debian, CentOS, and more), container platforms (Kubernetes
+clusters, Docker), and many other operating systems (FreeBSD).
+
+Netdata is:
+
+### Simple to deploy
+
+- **One-line deployment** for Linux distributions, plus support for Kubernetes/Docker infrastructures.
+- **Zero configuration and maintenance** required to collect thousands of metrics, every second, from the underlying
+ OS and running applications.
+- **Prebuilt charts and alarms** alert you to common anomalies and performance issues without manual configuration.
+- **Distributed storage** to simplify the cost and complexity of storing metrics data from any number of nodes.
+
+### Powerful and scalable
+
+- **1% CPU utilization, a few MB of RAM, and minimal disk I/O** to run the monitoring Agent on bare metal, virtual
+ machines, containers, and even IoT devices.
+- **Per-second granularity** for an unlimited number of metrics based on the hardware and applications you're running
+ on your nodes.
+- **Interoperable exporters** let you connect Netdata's per-second metrics with an existing monitoring stack and other
+ time-series databases.
+
+### Optimized for troubleshooting
+
+- **Visual anomaly detection** with a UI/UX that emphasizes the relationships between charts.
+- **Customizable dashboards** to pinpoint correlated metrics, respond to incidents, and help you streamline your
+ workflows.
+- **Distributed metrics in a centralized interface** to help users or teams trace complex issues between distributed
+  nodes.
+
+### Secure by design
+
+- **Distributed data architecture** so fast and efficient, there’s no limit to the number of metrics you can follow.
+- Because your data is **stored at the edge**, security is ensured.
+
+### Comparison with other monitoring solutions
+
+Netdata offers many benefits over the existing monitoring landscape, whether they're expensive SaaS products or other
+open-source tools.
+
+| Netdata | Others (open-source and commercial) |
+| :-------------------------------------------------------------- | :--------------------------------------------------------------- |
+| **High resolution metrics** (1s granularity) | Low resolution metrics (10s granularity at best) |
+| Collects **thousands of metrics per node** | Collects just a few metrics |
+| Fast UI optimized for **anomaly detection** | UI is good for just an abstract view |
+| **Long-term, autonomous storage** at one-second granularity | Centralized metrics in an expensive data lake at 10s granularity |
+| **Meaningful presentation**, to help you understand the metrics | You have to know the metrics before you start |
+| Install and get results **immediately** | Long sales process and complex installation process |
+| Use it for **troubleshooting** performance problems | Only gathers _statistics of past performance_ |
+| **Kills the console** for tracing performance issues | The console is always required for troubleshooting |
+| Requires **zero dedicated resources** | Require large dedicated resources |
+
+
+Netdata works with tons of applications, notification platforms, and other time-series databases:
+
+- **300+ system, container, and application endpoints**: Collectors autodetect metrics from default endpoints and
+ immediately visualize them into meaningful charts designed for troubleshooting. See [everything we
+ support](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md).
+- **20+ notification platforms**: Netdata's health watchdog sends warning and critical alarms to your [favorite
+ platform](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md) to inform you of anomalies just seconds
+ after they affect your node.
+- **30+ external time-series databases**: Export resampled metrics as they're collected to other [local- and
+ Cloud-based databases](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md) for best-in-class
+ interoperability.
+
+
+## How it works
+
+Netdata is a highly efficient, highly modular metrics management engine. Its lockless design makes it ideal for concurrent operations on metrics.
+
+You can see a high level representation in the following diagram.
+
+![Diagram of Netdata's core functionality](https://user-images.githubusercontent.com/2662304/199225735-01a41cc5-c074-4fe2-b780-5f08e92c6769.png)
+
+And a higher level diagram in this one.
+
+![Diagram 2 of Netdata's core
+functionality](https://user-images.githubusercontent.com/1153921/95367248-5f755980-0889-11eb-827f-9b7aa02a556e.png)
+
+You can even visit this slightly dated [interactive infographic](https://my-netdata.io/infographic.html) and get lost in a rabbit hole.
+
+But the best way to get under the hood, or behind the steering wheel, of our highly efficient, low-latency system (supporting multiple readers and one writer on each metric) is to read the rest of our docs, or just to jump in and [get started](https://app.netdata.cloud). Here's a good breakdown:
+
+### Netdata Agent
+
+Netdata's distributed monitoring Agent collects thousands of metrics from systems, hardware, and applications with zero configuration. It runs permanently on all your physical/virtual servers, containers, cloud deployments, and edge/IoT devices.
+
+You can install Netdata on most Linux distributions (Ubuntu, Debian, CentOS, and more), container/microservice platforms (Kubernetes clusters, Docker), and many other operating systems (FreeBSD, macOS), with no sudo required.
+
+### Netdata Cloud
+Netdata Cloud is a web application that gives you real-time visibility for your entire infrastructure. With Netdata Cloud, you can view key metrics, insightful charts, and active alarms from all your nodes in a single web interface. When an anomaly strikes, seamlessly navigate to any node to troubleshoot and discover the root cause with the familiar Netdata dashboard.
+
+Netdata Cloud is free! You can add an entire infrastructure of nodes, invite all your colleagues, and visualize any number of metrics, charts, and alarms entirely for free.
+
+While Netdata Cloud offers a centralized method of monitoring your Agents, your metrics data is not stored or centralized in any way. Metrics data remains with your nodes and is only streamed to your browser, through Cloud, when you're viewing the Netdata Cloud interface.
+
+
+## Community
+
+Netdata is an inclusive open-source project and community. Please read our [Code of Conduct](https://github.com/netdata/.github/blob/main/CODE_OF_CONDUCT.md).
+
+Find most of the Netdata team in our [community forums](https://community.netdata.cloud). It's the best place to
+ask questions, find resources, and engage with passionate professionals. The team is also available and active in our [Discord](https://discord.com/invite/mPZ6WZKKG2).
+
+You can also find Netdata on:
+
+- [Twitter](https://twitter.com/linuxnetdata)
+- [YouTube](https://www.youtube.com/c/Netdata)
+- [Reddit](https://www.reddit.com/r/netdata/)
+- [LinkedIn](https://www.linkedin.com/company/netdata-cloud/)
+- [StackShare](https://stackshare.io/netdata)
+- [Product Hunt](https://www.producthunt.com/posts/netdata-monitoring-agent/)
+- [Repology](https://repology.org/metapackage/netdata/versions)
+- [Facebook](https://www.facebook.com/linuxnetdata/)
+
+## Contribute
+
+Contributions are the lifeblood of open-source projects. While we continue to invest in and improve Netdata, we need help to democratize monitoring!
+
+- Read our [Contributing Guide](https://github.com/netdata/.github/blob/main/CONTRIBUTING.md), which contains all the information you need to contribute to Netdata, such as improving our documentation, engaging in the community, and developing new features. We've made it as frictionless as possible, but if you need help, just ping us on our community forums!
+- We have a whole category dedicated to contributing and extending Netdata on our [community forums](https://community.netdata.cloud/c/agent-development/9)
+- Found a bug? Open a [GitHub issue](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cneeds+triage&template=BUG_REPORT.yml&title=%5BBug%5D%3A+).
+- View our [Security Policy](https://github.com/netdata/netdata/security/policy).
+
+Package maintainers should read the guide on [building Netdata from source](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/source.md) for
+instructions on building each Netdata component from source and preparing a package.
+
+## License
+
+The Netdata Agent is an open source project distributed under [GPLv3+](https://github.com/netdata/netdata/blob/master/LICENSE). Netdata re-distributes other open-source tools and libraries. Please check the
+[third party licenses](https://github.com/netdata/netdata/blob/master/REDISTRIBUTED.md).
+
+## Is it any good?
+
+Yes.
+
+_When people first hear about a new product, they frequently ask if it is any good. A Hacker News user
+[remarked](https://news.ycombinator.com/item?id=3067434):_
+
+> Note to self: Starting immediately, all raganwald projects will have a “Is it any good?” section in the readme, and
+> the answer shall be “yes.”
+*******************************************************************************
diff --git a/docs/guidelines.md b/docs/guidelines.md
new file mode 100644
index 000000000..6c1c3ba7c
--- /dev/null
+++ b/docs/guidelines.md
@@ -0,0 +1,772 @@
+
+
+import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem';
+
+Welcome to our docs developer guidelines!
+
+This document will guide you through the process of contributing to our
+docs (**learn.netdata.cloud**).
+
+## Documentation architecture
+
+Netdata docs follow two principles.
+
+1. Keep the documentation of each component _as close as you can to the codebase_
+2. Every component is analyzed via topic-related docs.
+
+To this end:
+
+1. Documentation lives in every possible repo in the Netdata organization. At the moment we contribute to:
+ - netdata/netdata
+ - netdata/learn (final site)
+ - netdata/go.d.plugin
+ - netdata/agent-service-discovery
+
+ In each of these repos you will find markdown files. These markdown files may or may not be part of the final docs. You
+ can see which documents are part of the final docs in the following section: [_How to update documentation of
+ learn.netdata.cloud_](#how-to-update-documentation-of-learn-netdata-cloud)
+
+2. Netdata docs processes are inspired by
+ the [DITA 1.2 guidelines](http://docs.oasis-open.org/dita/v1.2/os/spec/archSpec/dita-1.2_technicalContent_overview.html)
+ for Technical content.
+
+## Topic types
+
+### Concepts
+
+A concept introduces a single feature or concept. A concept should answer the questions:
+
+- What is this?
+- Why would I use it?
+
+Concept topics:
+
+- Are abstract ideas
+- Explain meaning or benefit
+- Can stay when specifications change
+- Provide background information
+
+### Tasks
+
+Concept and reference topics exist to support tasks. _The goal for users … is not to understand a concept but to
+complete a task_. A task gives instructions for how to complete a procedure.
+
+Much of the uncertainty about whether a topic is a concept or a reference disappears when you have strong, solid task
+topics in place. Furthermore, task topics directly address your users and their daily tasks and help them get their job
+done. A task **must give an answer** to the **following questions**:
+
+- How do I create cool espresso drinks with my new coffee machine?
+- How do I clean the milk steamer?
+
+For the title text, use the structure active verb + noun, for example _Deploy the Agent_.
+
+### References
+
+The reference document and information types provide for the separation of fact-based information from concepts and
+tasks. \
+Factual information may include tables and lists of specifications, parameters, parts, commands, edit files, and other
+information that users are likely to look up. The reference information type allows fact-based content to be
+maintained by those responsible for its accuracy and consistency.
+
+## Contribute to the documentation of learn.netdata.cloud
+
+### Encapsulate topics into markdown files
+
+Netdata uses markdown files to document everything. To implement concrete sections of these [topic types](#topic-types),
+we encapsulate this logic as follows. Every document is characterized by its topic type (the `learn_topic_type` metadata
+field). To avoid breaking every single Netdata concept into numerous small markdown files, each document can be either a
+single `Reference`, `Concept`, or `Task`, or a group of `References`, `Concepts`, or `Tasks`.
+
+To this end, every single topic is encapsulated in a `Heading 3 (###)` section. That means that when a file covers a
+single topic, you only make use of `Headings 4` and lower (`4, 5, 6`, for templated sections or subsections). In case
+you want to include multiple topics (`Concepts`, let's say) in a single document, you use `Headings 3` to separate each
+concept. `Headings 2` are used only in case you want to logically group topics inside a document.
+
+For instance:
+
+```markdown
+
+Small introduction of the document.
+
+### Concept A
+
+Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna
+aliqua.
+
+#### Field from template 1
+
+Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
+
+#### Field from template 2
+
+Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.
+
+##### Subsection 1
+
+. . .
+
+### Concept B
+
+Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
+
+#### Field from template 1
+
+. . .
+
+
+```
+
+This approach gives each document a clean and readable outline in the sidebar.
+
+Here you can find the preferred templates for each topic type:
+
+
+
+
+
+ ```markdown
+ Small intro, give some context to the user of what you will cover on this document
+
+ ### Concept title (omit if the document describes only one concept)
+
+ A concept introduces a single feature or concept. A concept should answer the questions:
+
+ 1. What is this?
+ 2. Why would I use it?
+
+ ```
+
+
+
+
+ ```markdown
+ Small intro, give some context to the user of what you will cover on this document
+
+ ### Task title (omit if the document describes only one task)
+
+ #### Prerequisite
+
+ Unordered list of what you will need.
+
+ #### Steps
+
+ Exact list of steps the user must follow
+
+ #### Expected result
+
+ What you expect to see when you complete the steps above
+
+ #### Example
+
+ Example configuration/actions of the task
+
+ #### Related reference documentation
+
+ List of reference docs user needs to be aware of.
+ ```
+
+
+
+
+ ```markdown
+ Small intro, give some context to the user of what you will cover on this document
+
+ ### Reference name (omit if the document describes only one reference)
+
+ #### Requirements
+
+ Document any dependencies needed to run this module
+
+ #### Requirements on the monitored component
+
+ Document any steps the user must take to successfully monitor the application,
+ for instance, create a user
+
+ #### Configuration files
+
+ Table with the path and purpose of each configuration file
+ Columns: File name | Description (Purpose in a nutshell)
+
+ #### Data collection
+
+ To make changes, see the `./edit-config` task
+
+ #### Auto discovery
+
+ ##### Single node installation
+
+ . . . we autodetect localhost:port and what configurations are defaults
+
+ ##### Kubernetes installations
+
+ . . . Service discovery, click here
+
+ #### Metrics
+
+ Columns: Metric (Context) | Scope | description (of the context) | dimensions | units (of the context) | Alert triggered
+
+
+ #### Alerts
+
+ Collapsible content for every alert, just like the alert guides
+
+ #### Configuration options
+
+ Table with all the configuration options available.
+
+ Columns: name | description | default | file_name
+
+ #### Configuration example
+
+ Default configuration example
+
+ #### Troubleshoot
+
+ Backlink to the task to run this module in debug mode (here you provide the debug flags)
+
+
+```
+
+
+
+
+### Metadata fields
+
+All docs that are supposed to be part of learn.netdata.cloud have **hidden** sections at the beginning of the document.
+These sections are plain lines of text and we call them metadata. They're represented as `key : "Value"` pairs. Some of
+them are needed by our static website builder (Docusaurus), others are needed for our internal pipelines to build docs
+(these have the prefix `learn_`).
+
+So let's go through the different necessary metadata tags to get a document properly published on Learn:
+
+| metadata_key | Value(s) | Frontmatter effect | Mandatory | Limitations |
+|:---------------------:|---------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------:|:---------:|:---------------------------------------:|
+| `title` | `String` | Title in each document | yes | |
+| `custom_edit_url` | `String` | The source GH link of the file | yes | |
+| `description` | `String or multiline String` | - | yes | |
+| `sidebar_label` | `String or multiline String` | Name in the TOC tree | yes | |
+| `sidebar_position` | `String or multiline String` | Global position in the TOC tree (local for per folder) | yes | |
+| `learn_status`        | [`Published`, `Unpublished`, `Hidden`]                                                                    | `Published`: Document visible in Learn, `Unpublished`: Document archived in Learn, `Hidden`: Document placed under `learn_rel_path` but hidden | yes       |                                         |
+| `learn_topic_type` | [`Concepts`, `Tasks`, `References`, `Getting Started`] | | yes | |
+| `learn_rel_path`      | `Path` (the path where you want this file to appear in Learn, without the `/docs` prefix, plus the name of the file) |                                                                                                                                   | yes       |                                         |
+| `learn_autogenerated` | `Dictionary` (for internal use) | | no | Keys in the dictionary must be in `' '` |
+
+:::important
+
+1. In case any mandatory tags are missing or incorrectly entered, the file will remain unpublished. This is by design to
+   prevent improperly tagged files from getting published.
+2. All metadata values must be enclosed in double quotes (`" "`). For string text nested inside a field's value, use single quotes (`' '`).
+
+
+While Docusaurus can make use of more metadata tags than the above, these are the minimum we require to publish the file
+on Learn.
+
+:::
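+
+For instance, the hidden metadata section at the top of a published task document might look like the following
+sketch (the values are only illustrative):
+
+```markdown
+<!--
+title: "Deploy Netdata with Ansible"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/docs/guides/deploy/ansible.md"
+description: "Deploy an infrastructure monitoring solution in minutes with the Netdata Agent and Ansible."
+sidebar_label: "Install Netdata with Ansible"
+sidebar_position: "10"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Installation"
+-->
+```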
+
+### Placing a document in Learn
+
+Here you can see how the metadata are parsed to create a markdown file in Learn.
+
+![How the metadata are parsed to create a markdown file in Learn](https://user-images.githubusercontent.com/12612986/207310336-f7cc150b-543c-4f13-be98-5058a4d29284.png)
+
+### Before you get started
+
+Anyone interested in contributing to documentation should first read the [Netdata style guide](#styling-guide) further
+down in this document and the [Netdata Community Code of Conduct](https://github.com/netdata/.github/blob/main/CODE_OF_CONDUCT.md).
+
+Netdata's documentation uses Markdown syntax. If you're not familiar with Markdown, read
+the [Mastering Markdown](https://guides.github.com/features/mastering-markdown/) guide from GitHub for the basics on
+creating paragraphs, styled text, lists, tables, and more, and read further down about some special
+occasions [while writing in MDX](#mdx-and-markdown).
+
+### Making your first contribution
+
+The easiest way to contribute to Netdata's documentation is to edit a file directly on GitHub. This is perfect for small
+fixes to a single document, such as fixing a typo or clarifying a confusing sentence.
+
+Click on the **Edit this page** button on any published document on [Netdata Learn](https://learn.netdata.cloud). Each
+page has two of these buttons: one beneath the table of contents and another at the end of the document. Both take you
+to GitHub's code editor. Make your suggested changes, keeping the [Netdata style guide](#styling-guide)
+in mind, and use the ***Preview changes*** button to ensure your Markdown syntax works as expected.
+
+Under the **Commit changes** header, write a descriptive title for your requested change. Click the **Commit changes**
+button to initiate your pull request (PR).
+
+Jump down to our instructions on [PRs](#making-a-pull-request) for your next steps.
+
+**Note**: If you wish to contribute documentation that is more tailored to your specific infrastructure
+monitoring/troubleshooting experience, please consider submitting a blog post about your experience. Check
+the [README](https://github.com/netdata/blog/blob/master/README.md) in our blog repo! Any blog submissions that have
+widespread or universal application will be integrated into our permanent documentation.
+
+### Edit locally
+
+Editing documentation locally is the preferred method for complex changes that span multiple documents or change the
+documentation's style or structure.
+
+Create a fork of the Netdata Agent repository by visiting the [Netdata repository](https://github.com/netdata/netdata) and
+clicking on the **Fork** button.
+
+GitHub will ask you where you want to clone the repository. When finished, you end up at the index of your forked
+Netdata Agent repository. Clone your fork to your local machine:
+
+```bash
+git clone https://github.com/YOUR-GITHUB-USERNAME/netdata.git
+```
+
+Create a new branch using `git checkout -b BRANCH-NAME`. Use your favorite text editor to make your changes, keeping
+the [Netdata style guide](https://github.com/netdata/netdata/blob/master/docs/contributing/style-guide.md) in mind. Add, commit, and push changes to your fork. When you're
+finished, visit the [Netdata Agent Pull requests](https://github.com/netdata/netdata/pulls) to create a new pull request
+based on the changes you made in the new branch of your fork.
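+
+For example, a minimal local workflow might look like the following sketch (the branch name and file path are only
+placeholders):
+
+```bash
+# Create a working branch for your documentation change
+git checkout -b docs-fix-example-guide
+
+# Edit the file(s) with your favorite editor, then stage and commit the changes
+git add docs/guides/example-guide.md
+git commit -m "Fix typo in example guide"
+
+# Push the branch to your fork on GitHub
+git push -u origin docs-fix-example-guide
+```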
+
+### Making a pull request
+
+Pull requests (PRs) should be concise and informative. See our [PR guidelines](/contribute/handbook#pr-guidelines) for
+specifics.
+
+- The title must follow the [imperative mood](https://en.wikipedia.org/wiki/Imperative_mood) and be no more than ~50
+ characters.
+- The description should explain what was changed and why. Verify that you tested any code or processes that you are
+ trying to change.
+
+The Netdata team will review your PR and assess it for correctness, conciseness, and overall quality. We may point to
+specific sections and ask for additional information or other fixes.
+
+After merging your PR, the Netdata team rebuilds the [documentation site](https://learn.netdata.cloud) to publish the
+changed documentation.
+
+## Styling guide
+
+The *Netdata style guide* establishes editorial guidelines for any writing produced by the Netdata team or the Netdata
+community, including documentation, articles, in-product UX copy, and more. Both internal Netdata teams and external
+contributors to any of Netdata's open-source projects should reference and adhere to this style guide as much as
+possible.
+
+Netdata's writing should **empower** and **educate**. You want to help people understand Netdata's value, encourage them
+to learn more, and ultimately use Netdata's products to democratize monitoring in their organizations. To achieve these
+goals, your writing should be:
+
+- **Clear**. Use simple words and sentences. Use strong, direct, and active language that encourages readers to action.
+- **Concise**. Provide solutions and answers as quickly as possible. Give users the information they need right now,
+ along with opportunities to learn more.
+- **Universal**. Think of yourself as a guide giving a tour of Netdata's products, features, and capabilities to a
+ diverse group of users. Write to reach the widest possible audience.
+
+You can achieve these goals by reading and adhering to the principles outlined below.
+
+If you're not familiar with Markdown, read
+the [Mastering Markdown](https://guides.github.com/features/mastering-markdown/) guide from GitHub for the basics on
+creating paragraphs, styled text, lists, tables, and more.
+
+The following sections describe situations in which a specific syntax is required.
+
+#### Syntax standards (`remark-lint`)
+
+The Netdata team uses [`remark-lint`](https://github.com/remarkjs/remark-lint) for Markdown code styling.
+
+- Use a maximum of 120 characters per line.
+- Begin headings with hashes, such as `# H1 heading`, `## H2 heading`, and so on.
+- Use `_` for italics/emphasis.
+- Use `**` for bold.
+- Use dashes `-` to begin an unordered list, and put a single space after the dash.
+- Tables should be padded so that pipes line up vertically with added whitespace.
+
+If you want to see all the settings, open the
+[`remarkrc.js`](https://github.com/netdata/netdata/blob/master/.remarkrc.js) file in the `netdata/netdata` repository.
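+
+For instance, a short snippet that follows these conventions might look like this:
+
+```markdown
+## Example heading
+
+Netdata collects metrics in _real time_ and visualizes them on **interactive** dashboards.
+
+- First item of an unordered list
+- Second item of an unordered list
+```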
+
+#### MDX and markdown
+
+While writing in Docusaurus, you might want to take advantage of its features that are supported in MDX-formatted files.
+One of those that we use is [Tabs](https://docusaurus.io/docs/next/markdown-features/tabs). They use an HTML syntax,
+which requires some changes in the way we write markdown inside them.
+
+In detail:
+
+Due to a [bug in Docusaurus](https://github.com/facebook/docusaurus/issues/7008), we prefer to use HTML headings instead
+of markdown headings (`# H1`) inside Tabs, so that Docusaurus doesn't render the contents of all Tabs in the right-hand
+table of contents, where they cannot be navigated.
+
+You can use markdown syntax for every other styling you want to do, except admonitions:
+for admonitions, follow [this guide](https://docusaurus.io/docs/markdown-features/admonitions#usage-in-jsx) to use
+admonitions inside JSX. While writing in JSX, all the markdown stylings have to be in HTML format to be rendered
+properly.
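+
+As a rough sketch (the tab values and labels are only illustrative), a Tabs block with HTML styling inside the JSX
+could look like this:
+
+```html
+<Tabs>
+  <TabItem value="nginx" label="Nginx">
+    <p>Content inside JSX uses HTML styling, for example <strong>bold</strong> text and <code>inline code</code>.</p>
+  </TabItem>
+  <TabItem value="apache" label="Apache">
+    <p>Each tab holds its own content.</p>
+  </TabItem>
+</Tabs>
+```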
+
+#### Admonitions
+
+Use admonitions cautiously. Admonitions draw the user's attention; to that end, we advise you to use them only for side
+content/info, so they don't significantly interrupt the document flow.
+
+You can find the supported admonitions in the Docusaurus [documentation](https://docusaurus.io/docs/markdown-features/admonitions).
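+
+For example, a `note` admonition could look like this:
+
+```markdown
+:::note
+
+This is side information that supports, but doesn't interrupt, the main content.
+
+:::
+```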
+
+#### Images
+
+Don't rely on images to convey features, ideas, or instructions. Accompany every image with descriptive alt text.
+
+In Markdown, use the standard image syntax, `![](/docs/agent/contributing)`, and place the alt text between the
+brackets `[]`. Here's an example using our logo:
+
+```markdown
+![The Netdata logo](/docs/agent/web/gui/static/img/netdata-logomark.svg)
+```
+
+Reference in-product text, code samples, and terminal output with actual text content, not screen captures or other
+images. Place the text in an appropriate element, such as a blockquote or code block, so all users can parse the
+information.
+
+#### Syntax highlighting
+
+Our documentation site at [learn.netdata.cloud](https://learn.netdata.cloud) uses
+[Prism](https://v2.docusaurus.io/docs/markdown-features#syntax-highlighting) for syntax highlighting. Netdata can use
+any of the [languages supported by prism-react-renderer](https://github.com/FormidableLabs/prism-react-renderer/blob/master/src/vendor/prism/includeLangs.js).
+
+If no language is specified, Prism tries to guess the language based on its content.
+
+Include the language directly after the three backticks (```` ``` ````) that start the code block. For highlighting C
+code, for example:
+
+````c
+```c
+inline char *health_stock_config_dir(void) {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_stock_config_dir);
+ return config_get(CONFIG_SECTION_DIRECTORIES, "stock health config", buffer);
+}
+```
+````
+
+And the prettified result:
+
+```c
+inline char *health_stock_config_dir(void) {
+ char buffer[FILENAME_MAX + 1];
+ snprintfz(buffer, FILENAME_MAX, "%s/health.d", netdata_configured_stock_config_dir);
+ return config_get(CONFIG_SECTION_DIRECTORIES, "stock health config", buffer);
+}
+```
+
+Prism also supports titles and line highlighting. See
+the [Docusaurus documentation](https://v2.docusaurus.io/docs/markdown-features#code-blocks) for more information.
+
+## Language, grammar, and mechanics
+
+#### Voice and tone
+
+One way we write empowering, educational content is by using a consistent voice and an appropriate tone.
+
+*Voice* is like your personality, which doesn't really change day to day.
+
+*Tone* is how you express your personality. Your expression changes based on your attitude or mood, or based on who
+you're around. In writing, you reflect tone in your word choice, punctuation, sentence structure, or even the use of
+emoji.
+
+The same idea about voice and tone applies to organizations, too. Our voice shouldn't change much between two pieces of
+content, no matter who wrote each, but the tone might be quite different based on who we think is reading.
+
+For example, a [blog post](https://www.netdata.cloud/blog/) and a [press release](https://www.netdata.cloud/news/)
+should have a similar voice, despite most often being written by different people. However, blog posts are relaxed and
+witty, while press releases are focused and academic. You won't see any emoji in a press release.
+
+##### Voice
+
+Netdata's voice is authentic, passionate, playful, and respectful.
+
+- **Authentic** writing is honest and fact-driven. Focus on Netdata's strength while accurately communicating what
+ Netdata can and cannot do, and emphasize technical accuracy over hard sells and marketing jargon.
+- **Passionate** writing is strong and direct. Be a champion for the product or feature you're writing about, and let
+ your unique personality and writing style shine.
+- **Playful** writing is friendly, thoughtful, and engaging. Don't take yourself too seriously, as long as it's not at
+ the expense of Netdata or any of its users.
+- **Respectful** writing treats people the way you want to be treated. Prioritize giving solutions and answers as
+ quickly as possible.
+
+##### Tone
+
+Netdata's tone is fun and playful, but clarity and conciseness come first. We also tend to be informal, and aren't
+afraid of a playful joke or two.
+
+While we have general standards for voice and tone, we do want every individual's unique writing style to come through
+in published content.
+
+#### Universal communication
+
+Netdata is a global company in every sense, with employees, contributors, and users from around the world. We strive to
+communicate in a way that is clear and easily understood by everyone.
+
+Here are some guidelines, pointers, and questions to be aware of as you write to ensure your writing is universal. Some
+of these are expanded into individual sections in
+the [language, grammar, and mechanics](#language-grammar-and-mechanics) section below.
+
+- Would this language make sense to someone who doesn't work here?
+- Could someone quickly scan this document and understand the material?
+- Create an information hierarchy with key information presented first and clearly called out to improve scannability.
+- Avoid directional language like "sidebar on the right of the page" or "header at the top of the page" since
+ presentation elements may adapt for devices.
+- Use descriptive links rather than "click here" or "learn more".
+- Include alt text for images and image links.
+- Ensure any information contained within a graphic element is also available as plain text.
+- Avoid idioms that may not be familiar to the user or that may not make sense when translated.
+- Avoid local, cultural, or historical references that may be unfamiliar to users.
+- Prioritize active, direct language.
+- Avoid referring to someone's age unless it is directly relevant; likewise, avoid referring to people with age-related
+ descriptors like "young" or "elderly."
+- Avoid disability-related idioms like "lame" or "falling on deaf ears." Don't refer to a person's disability unless
+ it’s directly relevant to what you're writing.
+- Don't call groups of people "guys." Don't call women "girls."
+- Avoid gendered terms in favor of neutral alternatives, like "server" instead of "waitress" and "businessperson"
+ instead of "businessman."
+- When writing about a person, use their communicated pronouns. When in doubt, just ask or use their name. It's OK to
+ use "they" as a singular pronoun.
+
+> Some of these guidelines were adapted from MailChimp under the Creative Commons license.
+
+To ensure Netdata's writing is clear, concise, and universal, we have established standards for language, grammar, and
+certain writing mechanics. However, if you're writing about Netdata for an external publication, such as a guest blog
+post, follow that publication's style guide or standards, while keeping
+the [preferred spelling of Netdata terms](#netdata-specific-terms) in mind.
+
+#### Active voice
+
+Active voice is more concise and easier to understand compared to passive voice. When using active voice, the subject of
+the sentence performs the action. In passive voice, the subject is acted upon. A famous example of passive voice is the phrase
+"mistakes were made."
+
+| | |
+| --------------- | ----------------------------------------------------------------------------------------- |
+| Not recommended | When an alarm is triggered by a metric, a notification is sent by Netdata. |
+| **Recommended** | When a metric triggers an alarm, Netdata sends a notification to your preferred endpoint. |
+
+#### Second person
+
+Use the second person ("you") to give instructions or "talk" directly to users.
+
+In these situations, avoid "we," "I," "let's," and "us," particularly in documentation. The "you" pronoun can also be
+implied, depending on your sentence structure.
+
+One valid exception is when a member of the Netdata team or community wants to write about said team or community.
+
+| | |
+| ------------------------------ | ------------------------------------------------------------ |
+| Not recommended | To install Netdata, we should try the one-line installer... |
+| **Recommended** | To install Netdata, you should try the one-line installer... |
+| **Recommended**, implied "you" | To install Netdata, try the one-line installer... |
+
+#### "Easy" or "simple"
+
+Using words that imply the complexity of a task or feature goes against our policy
+of [universal communication](#universal-communication). If you claim that a task is easy and the reader struggles to
+complete it, you may inadvertently discourage them.
+
+However, if you give users two options and want to relay that one option is genuinely less complex than another, be
+specific about how and why.
+
+For example, don't write, "Netdata's one-line installer is the easiest way to install Netdata." Instead, you might want
+to say, "Netdata's one-line installer requires fewer steps than manually installing from source."
+
+#### Slang, metaphors, and jargon
+
+A particular word, phrase, or metaphor you're familiar with might not translate well to the other cultures featured
+among Netdata's global community. We recommend you avoid slang or colloquialisms in your writing.
+
+In addition, don't use abbreviations that have not yet been defined in the content. See our section on
+[abbreviations](#abbreviations-acronyms-and-initialisms) for additional guidance.
+
+If you must use industry jargon, such as "mean time to resolution," define the term as clearly and concisely as you can.
+
+> Netdata helps you reduce your organization's mean time to resolution (MTTR), which is the average time the responsible
+> team requires to repair a system and resolve an ongoing incident.
+
+#### Spelling
+
+While the Netdata team is mostly *not* American, we still aspire to use American spelling whenever possible, as it is
+the standard for the monitoring industry.
+
+See the [word list](#word-list) for spellings of specific words.
+
+#### Capitalization
+
+Follow the general [English standards](https://owl.purdue.edu/owl/general_writing/mechanics/help_with_capitals.html) for
+capitalization. In summary:
+
+- Capitalize the first word of every new sentence.
+- Don't use uppercase for emphasis. (Netdata is the BEST!)
+- Capitalize the names of brands, software, products, and companies according to their official guidelines. (Netdata,
+ Docker, Apache, NGINX)
+- Avoid camel case (NetData) or all caps (NETDATA).
+
+Whenever you refer to the company Netdata, Inc., or the open-source monitoring agent the company develops, capitalize
+**Netdata**.
+
+However, if you are referring to a process, user, or group on a Linux system, use lowercase and fence the word in an
+inline code block: `` `netdata` ``.
+
+| | |
+| --------------- | ---------------------------------------------------------------------------------------------- |
+| Not recommended | The netdata agent, which spawns the netdata process, is actively maintained by netdata, inc. |
+| **Recommended** | The Netdata Agent, which spawns the `netdata` process, is actively maintained by Netdata, Inc. |
+
+##### Capitalization of document titles and page headings
+
+Document titles and page headings should use sentence case. That means you should only capitalize the first word.
+
+If you need to use the name of a brand, software, product, and company, capitalize it according to their official
+guidelines.
+
+Also, don't put a period (`.`) or colon (`:`) at the end of a title or header.
+
+| | |
+| --------------- | --------------------------------------------------------------------------------------------------- |
+| Not recommended | Getting Started Guide Service Discovery and Auto-Detection: Install netdata with docker |
+| **Recommended** | Getting started guide Service discovery and auto-detection Install Netdata with Docker |
+
+#### Abbreviations (acronyms and initialisms)
+
+Use abbreviations (including [acronyms and initialisms](https://www.dictionary.com/e/acronym-vs-abbreviation/)) in
+documentation when one exists, when it's widely accepted within the monitoring/sysadmin community, and when it improves
+the readability of a document.
+
+When introducing an abbreviation to a document for the first time, give the reader both the spelled-out version and the
+shortened version at the same time. For example:
+
+> Use Netdata to monitor Extended Berkeley Packet Filter (eBPF) metrics in real-time.
+
+After you define an abbreviation, don't switch back and forth. Use only the abbreviation for the rest of the document.
+
+You can also use abbreviations in a document's title to keep the title short and relevant. If you do this, you should
+still introduce the spelled-out name alongside the abbreviation as soon as possible.
+
+#### Clause order
+
+When instructing users to take action, give them the context first. By placing the context in an initial clause at the
+beginning of the sentence, users can immediately know if they want to read more, follow a link, or skip ahead.
+
+| | |
+| --------------- | ------------------------------------------------------------------------------ |
+| Not recommended | Read the reference guide if you'd like to learn more about custom dashboards. |
+| **Recommended** | If you'd like to learn more about custom dashboards, read the reference guide. |
+
+#### Oxford comma
+
+The Oxford comma is the comma used after the second-to-last item in a list of three or more items. It appears just
+before "and" or "or."
+
+| | |
+| --------------- | ---------------------------------------------------------------------------- |
+| Not recommended | Netdata can monitor RAM, disk I/O, MySQL queries per second and lm-sensors. |
+| **Recommended** | Netdata can monitor RAM, disk I/O, MySQL queries per second, and lm-sensors. |
+
+#### Future releases or features
+
+Do not mention future releases or upcoming features in writing unless they have been previously communicated via a
+public roadmap.
+
+In particular, documentation must describe, as accurately as possible, the Netdata Agent _as of
+the [latest commit](https://github.com/netdata/netdata/commits/master) in the GitHub repository_. For Netdata Cloud,
+documentation must reflect the *current state* of [production](https://app.netdata.cloud).
+
+#### Informational links
+
+Every link should clearly state its destination. Don't use words like "here" to describe where a link will take your
+reader.
+
+| | |
+| --------------- | ------------------------------------------------------------------------------------------ |
+| Not recommended | To install Netdata, click [here](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md). |
+| **Recommended** | To install Netdata, read the [installation instructions](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md). |
+
+Use links as often as required to provide necessary context. Blog posts and guides require fewer hyperlinks than
+documentation. See the section on [linking between documentation](#linking-between-documentation) for guidance on the
+Markdown syntax and path structure of inter-documentation links.
+
+#### Contractions
+
+Contractions like "you'll" or "they're" are acceptable in most Netdata writing. They're both authentic and playful, and
+reinforce the idea that you, as a writer, are guiding users through a particular idea, process, or feature.
+
+Contractions are generally not used in press releases or other media engagements.
+
+#### Emoji
+
+Emoji can add fun and character to your writing, but should be used sparingly and only if it matches the content's tone
+and desired audience.
+
+#### Switching Linux users
+
+Netdata documentation often suggests that users switch from their normal user to the `netdata` user to run specific
+commands. Use the following command to instruct users to make the switch:
+
+```bash
+sudo su -s /bin/bash netdata
+```
+
+#### Hostname/IP address of a node
+
+Use `NODE` instead of an actual or example IP address/hostname when referencing the process of navigating to a dashboard
+or API endpoint in a browser.
+
+| | |
+| --------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Not recommended | Navigate to `http://example.com:19999` in your browser to see Netdata's dashboard. Navigate to `http://203.0.113.0:19999` in your browser to see Netdata's dashboard. |
+| **Recommended** | Navigate to `http://NODE:19999` in your browser to see Netdata's dashboard. |
+
+If you worry that `NODE` doesn't provide enough context for the user, particularly in documentation or guides designed
+for beginners, you can provide an explanation:
+
+> With the Netdata Agent running, visit `http://NODE:19999/api/v1/info` in your browser, replacing `NODE` with the IP
+> address or hostname of your Agent.
+
+#### Paths and running commands
+
+When instructing users to run a Netdata-specific command, don't assume the path to said command, because not every
+Netdata Agent installation will have commands under the same paths. When applicable, help them navigate to the correct
+path, providing a recommendation or instructions on how to view the running configuration, which includes the correct
+paths.
+
+For example, the [configuration](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) doc first teaches users how to find the Netdata config directory
+and navigate to it, then runs commands from the `/etc/netdata` path so that the instructions are more universal.
+
+Don't include full paths, beginning from the system's root (`/`), as these might not work on certain systems.
+
+| | |
+| --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Not recommended | Use `edit-config` to edit Netdata's configuration: `sudo /etc/netdata/edit-config netdata.conf`. |
+| **Recommended** | Use `edit-config` to edit Netdata's configuration by first navigating to your [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory), which is typically at `/etc/netdata`, then running `sudo edit-config netdata.conf`. |
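+
+As a quick sketch of that recommended flow (your Netdata config directory may differ):
+
+```bash
+# Navigate to your Netdata config directory (typically /etc/netdata)
+cd /etc/netdata
+
+# Edit netdata.conf with the bundled helper script
+sudo ./edit-config netdata.conf
+```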
+
+#### `sudo`
+
+Include `sudo` before a command if you believe most Netdata users will need to elevate privileges to run it. This makes
+our writing more universal, and users on `sudo`-less systems are generally already aware that they need to run commands
+differently.
+
+For example, most users need to use `sudo` with the `edit-config` script, because the Netdata config directory is owned
+by the `netdata` user. Same goes for restarting the Netdata Agent with `systemctl`.
+
+| | |
+| --------------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
+| Not recommended | Run `edit-config netdata.conf` to configure the Netdata Agent. Run `systemctl restart netdata` to restart the Netdata Agent. |
+| **Recommended** | Run `sudo edit-config netdata.conf` to configure the Netdata Agent. Run `sudo systemctl restart netdata` to restart the Netdata Agent. |
+
+## Deploy and test docs
+
+
+
+The Netdata team aggregates and publishes all documentation at [learn.netdata.cloud](/) using
+[Docusaurus](https://v2.docusaurus.io/) over at the [`netdata/learn` repository](https://github.com/netdata/learn).
+
+## Netdata-specific terms
+
+Consult the [Netdata Glossary](https://github.com/netdata/netdata/blob/master/docs/glossary.md) for Netdata-specific terms.
\ No newline at end of file
diff --git a/docs/guides/collect-apache-nginx-web-logs.md b/docs/guides/collect-apache-nginx-web-logs.md
index a75a4b1cd..b4a525471 100644
--- a/docs/guides/collect-apache-nginx-web-logs.md
+++ b/docs/guides/collect-apache-nginx-web-logs.md
@@ -16,7 +16,7 @@ You can use the [LTSV log format](http://ltsv.org/), track TLS and cipher usage,
ever. In one test on a system with SSD storage, the collector consistently parsed the logs for 200,000 requests in
200ms, using ~30% of a single core.
-The [web_log](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog/) collector is currently compatible
+The [web_log](https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/README.md) collector is currently compatible
with [Nginx](https://nginx.org/en/) and [Apache](https://httpd.apache.org/).
This guide will walk you through using the new Go-based web log collector to turn the logs these web servers
@@ -90,7 +90,7 @@ jobs:
```
Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system. Netdata should pick up your web server's access log and
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. Netdata should pick up your web server's access log and
begin showing real-time charts!
### Custom log formats and fields
@@ -99,7 +99,7 @@ The web log collector is capable of parsing custom Nginx and Apache log formats
leave that topic for a separate guide.
We do have [extensive
-documentation](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog/#custom-log-format) on how
+documentation](https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/README.md#custom-log-format) on how
to build custom parsing for Nginx and Apache logs.
## Tweak web log collector alarms
@@ -117,11 +117,11 @@ You can also edit this file directly with `edit-config`:
```
For more information about editing the defaults or writing new alarm entities, see our [health monitoring
-documentation](/health/README.md).
+documentation](https://github.com/netdata/netdata/blob/master/health/README.md).
## What's next?
-Now that you have web log collection up and running, we recommend you take a look at the collector's [documentation](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog/) for some ideas of how you can turn these rather "boring" logs into powerful real-time tools for keeping your servers happy.
+Now that you have web log collection up and running, we recommend you take a look at the collector's [documentation](https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/README.md) for some ideas of how you can turn these rather "boring" logs into powerful real-time tools for keeping your servers happy.
Don't forget to give GitHub user [Wing924](https://github.com/Wing924) a big 👍 for his hard work in starting up the Go
refactoring effort.
diff --git a/docs/guides/collect-unbound-metrics.md b/docs/guides/collect-unbound-metrics.md
index 8edcab102..5400fd833 100644
--- a/docs/guides/collect-unbound-metrics.md
+++ b/docs/guides/collect-unbound-metrics.md
@@ -55,7 +55,7 @@ You may not need to do any more configuration to have Netdata collect your Unbou
If you followed the steps above to enable `remote-control` and make your Unbound files readable by Netdata, that should
be enough. Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system. You should see Unbound metrics in your Netdata
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. You should see Unbound metrics in your Netdata
dashboard!
![Some charts showing Unbound metrics in real-time](https://user-images.githubusercontent.com/1153921/69659974-93160f00-103c-11ea-88e6-27e9efcf8c0d.png)
@@ -100,7 +100,7 @@ Netdata will attempt to read `unbound.conf` to get the appropriate `address`, `c
`tls_key` parameters.
Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system.
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
### Manual setup for a remote Unbound server
diff --git a/docs/guides/configure/performance.md b/docs/guides/configure/performance.md
index cb52a1141..256d6e854 100644
--- a/docs/guides/configure/performance.md
+++ b/docs/guides/configure/performance.md
@@ -18,7 +18,7 @@ threads. Despite collecting 100,000 metrics every second, the Agent still only u
single core.
But not everyone has such powerful systems at their disposal. For example, you might run the Agent on a cloud VM with
-only 512 MiB of RAM, or an IoT device like a [Raspberry Pi](/docs/guides/monitor/pi-hole-raspberry-pi.md). In these
+only 512 MiB of RAM, or an IoT device like a [Raspberry Pi](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/pi-hole-raspberry-pi.md). In these
cases, reducing Netdata's footprint beyond its already diminutive size can pay big dividends, giving your services more
horsepower while still monitoring the health and the performance of the node, OS, hardware, and applications.
@@ -33,7 +33,7 @@ enabled, since we want you to experience the full thing.
- Familiarity with configuring the Netdata Agent with `edit-config`.
If you're not familiar with how to configure the Netdata Agent, read our [node configuration
-doc](/docs/configure/nodes.md) before continuing with this guide. This guide assumes familiarity with the Netdata config
+doc](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) before continuing with this guide. This guide assumes familiarity with the Netdata config
directory, using `edit-config`, and the process of uncommenting/editing various settings in `netdata.conf` and other
configuration files.
@@ -43,11 +43,11 @@ Netdata's performance is primarily affected by **data collection/retention** and
You can configure almost all aspects of data collection/retention, and certain aspects of clients accessing data. For
example, you can't control how many users might be viewing a local Agent dashboard, [viewing an
-infrastructure](/docs/visualize/overview-infrastructure.md) in real-time with Netdata Cloud, or running [Metric
-Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations).
+infrastructure](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md) in real-time with Netdata Cloud, or running [Metric
+Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md).
The Netdata Agent runs with the lowest possible [process scheduling
-policy](/daemon/README.md#netdata-process-scheduling-policy), which is `nice 19`, and uses the `idle` process scheduler.
+policy](https://github.com/netdata/netdata/blob/master/daemon/README.md#netdata-process-scheduling-policy), which is `nice 19`, and uses the `idle` process scheduler.
Together, these settings ensure that the Agent only gets CPU resources when the node has CPU resources to space. If the
node reaches 100% CPU utilization, the Agent is stopped first to ensure your applications get any available resources.
In addition, under heavy load, collectors that require disk I/O may stop and show gaps in charts.
@@ -80,10 +80,10 @@ seconds, respectively.
Every collector and plugin has its own `update every` setting, which you can also change in the `go.d.conf`,
`python.d.conf`, or `charts.d.conf` files, or in individual collector configuration files. If the `update
every` for an individual collector is less than the global, the Netdata Agent uses the global setting. See the [enable
-or configure a collector](/docs/collect/enable-configure.md) doc for details.
+or configure a collector](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md) doc for details.
To reduce the frequency of an [internal
-plugin/collector](/docs/collect/how-collectors-work.md#collector-architecture-and-terminology), open `netdata.conf` and
+plugin/collector](https://github.com/netdata/netdata/blob/master/docs/collect/how-collectors-work.md#collector-architecture-and-terminology), open `netdata.conf` and
find the appropriate section. For example, to reduce the frequency of the `apps` plugin, which collects and visualizes
metrics on application resource utilization:
@@ -92,7 +92,7 @@ metrics on application resource utilization:
update every = 5
```
-To [configure an individual collector](/docs/collect/enable-configure.md), open its specific configuration file with
+To [configure an individual collector](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md), open its specific configuration file with
`edit-config` and look for the `update_every` setting. For example, to reduce the frequency of the `nginx` collector,
run `sudo ./edit-config go.d/nginx.conf`:
@@ -104,7 +104,7 @@ update_every: 10
## Disable unneeded plugins or collectors
If you know that you don't need an [entire plugin or a specific
-collector](/docs/collect/how-collectors-work.md#collector-architecture-and-terminology), you can disable any of them.
+collector](https://github.com/netdata/netdata/blob/master/docs/collect/how-collectors-work.md#collector-architecture-and-terminology), you can disable any of them.
Keep in mind that if a plugin/collector has nothing to do, it simply shuts down and does not consume system resources.
You will only improve the Agent's performance by disabling plugins/collectors that are actively collecting metrics.
@@ -139,7 +139,7 @@ modules:
## Lower memory usage for metrics retention
-Reduce the disk space that the [database engine](/database/engine/README.md) uses to retain metrics by editing
+Reduce the disk space that the [database engine](https://github.com/netdata/netdata/blob/master/database/engine/README.md) uses to retain metrics by editing
the `dbengine multihost disk space` option in `netdata.conf`. The default value is `256`, but can be set to a minimum of
`64`. By reducing the disk space allocation, Netdata also needs to store less metadata in the node's memory.
@@ -147,7 +147,7 @@ The `page cache size` option also directly impacts Netdata's memory usage, but h
Reducing the value of `dbengine multihost disk space` does slim down Netdata's resource usage, but it also reduces how
long Netdata retains metrics. Find the right balance of performance and metrics retention by using the [dbengine
-calculator](/docs/store/change-metrics-storage.md#calculate-the-system-resources-ram-disk-space-needed-to-store-metrics).
+calculator](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md#calculate-the-system-resources-ram-disk-space-needed-to-store-metrics).
All the settings are found in the `[global]` section of `netdata.conf`:
@@ -187,11 +187,11 @@ with the following:
## Run Netdata behind Nginx
-A dedicated web server like Nginx provides far more robustness than the Agent's internal [web server](/web/README.md).
+A dedicated web server like Nginx provides far more robustness than the Agent's internal [web server](https://github.com/netdata/netdata/blob/master/web/README.md).
Nginx can handle more concurrent connections, reuse idle connections, and use fast gzip compression to reduce payloads.
For details on installing Nginx as a proxy for the local Agent dashboard, see our [Nginx
-doc](/docs/Running-behind-nginx.md).
+doc](https://github.com/netdata/netdata/blob/master/docs/Running-behind-nginx.md).
After you complete Nginx setup according to the doc linked above, we recommend setting `keepalive` to `1024`, and using
gzip compression with the following options in the `location /` block:
@@ -264,14 +264,14 @@ On the child nodes you should add to `netdata.conf` the following:
We hope this guide helped you better understand how to optimize the performance of the Netdata Agent.
-Now that your Agent is running smoothly, we recommend you [secure your nodes](/docs/configure/nodes.md) if you haven't
+Now that your Agent is running smoothly, we recommend you [secure your nodes](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) if you haven't
already.
Next, dive into some of Netdata's more complex features, such as configuring its health watchdog or exporting metrics to
an external time-series database.
-- [Interact with dashboards and charts](/docs/visualize/interact-dashboards-charts.md)
-- [Configure health alarms](/docs/monitor/configure-alarms.md)
-- [Export metrics to external time-series databases](/docs/export/external-databases.md)
+- [Interact with dashboards and charts](https://github.com/netdata/netdata/blob/master/docs/visualize/interact-dashboards-charts.md)
+- [Configure health alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md)
+- [Export metrics to external time-series databases](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md)
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fdocs%2Fguides%2Fconfigure%2Fperformance.md&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/docs/guides/deploy/ansible.md b/docs/guides/deploy/ansible.md
index 35c946021..0472bdc60 100644
--- a/docs/guides/deploy/ansible.md
+++ b/docs/guides/deploy/ansible.md
@@ -3,11 +3,15 @@ title: Deploy Netdata with Ansible
description: "Deploy an infrastructure monitoring solution in minutes with the Netdata Agent and Ansible. Use and customize a simple playbook for monitoring as code."
image: /img/seo/guides/deploy/ansible.png
custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/deploy/ansible.md
+sidebar_label: "Install Netdata with Ansible"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Installation"
-->
# Deploy Netdata with Ansible
-Netdata's [one-line kickstart](/docs/get-started.mdx) is zero-configuration, highly adaptable, and compatible with tons
+Netdata's [one-line kickstart](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx) is zero-configuration, highly adaptable, and compatible with tons
of different operating systems and Linux distributions. You can use it on bare metal, VMs, containers, and everything
in-between.
@@ -101,8 +105,8 @@ two different SSH keys supplied by AWS.
### Edit the `vars/main.yml` file
In order to connect your node(s) to your Space in Netdata Cloud, and see all their metrics in real-time in [composite
-charts](/docs/visualize/overview-infrastructure.md) or perform [Metric
-Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations), you need to set the `claim_token`
+charts](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md) or perform [Metric
+Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md), you need to set the `claim_token`
and `claim_room` variables.
To find your `claim_token` and `claim_room`, go to Netdata Cloud, then click on your Space's name in the top navigation,
@@ -127,7 +131,7 @@ hostname of the node, the playbook disables that local dashboard by setting `web
security boost by not allowing any unwanted access to the local dashboard.
You can read more about this decision, or other ways you might lock down the local dashboard, in our [node security
-doc](https://learn.netdata.cloud/docs/configure/secure-nodes).
+doc](https://github.com/netdata/netdata/blob/master/docs/configure/secure-nodes.md).
> Curious about why Netdata's dashboard is open by default? Read our [blog
> post](https://www.netdata.cloud/blog/netdata-agent-dashboard/) on that zero-configuration design decision.
@@ -162,11 +166,11 @@ want to do with Netdata, so use those categories to dive in.
Some of the best places to start:
-- [Enable or configure a collector](/docs/collect/enable-configure.md)
-- [Supported collectors list](/collectors/COLLECTORS.md)
-- [See an overview of your infrastructure](/docs/visualize/overview-infrastructure.md)
-- [Interact with dashboards and charts](/docs/visualize/interact-dashboards-charts.md)
-- [Change how long Netdata stores metrics](/docs/store/change-metrics-storage.md)
+- [Enable or configure a collector](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md)
+- [Supported collectors list](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md)
+- [See an overview of your infrastructure](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md)
+- [Interact with dashboards and charts](https://github.com/netdata/netdata/blob/master/docs/visualize/interact-dashboards-charts.md)
+- [Change how long Netdata stores metrics](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md)
We're looking for more deployment and configuration management strategies, whether via Ansible or other
provisioning/infrastructure as code software, such as Chef or Puppet, in our [community
diff --git a/docs/guides/export/export-netdata-metrics-graphite.md b/docs/guides/export/export-netdata-metrics-graphite.md
index dd742e454..985ba2241 100644
--- a/docs/guides/export/export-netdata-metrics-graphite.md
+++ b/docs/guides/export/export-netdata-metrics-graphite.md
@@ -13,9 +13,10 @@ action on these metrics, you may need to develop a stack of monitoring tools tha
anomalies and discover root causes faster.
We designed Netdata with interoperability in mind. The Agent collects thousands of metrics every second, and then what
-you do with them is up to you. You can [store metrics in the database engine](/docs/guides/longer-metrics-storage.md),
-or send them to another time series database for long-term storage or further analysis using Netdata's [exporting
-engine](/docs/export/external-databases.md).
+you do with them is up to you. You
+can [store metrics in the database engine](https://github.com/netdata/netdata/blob/master/docs/guides/longer-metrics-storage.md),
+or send them to another time series database for long-term storage or further analysis using
+Netdata's [exporting engine](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md).
In this guide, we'll show you how to export Netdata metrics to [Graphite](https://graphiteapp.org/) for long-term
storage and further analysis. Graphite is a free open-source software (FOSS) tool that collects graphs numeric
@@ -29,7 +30,8 @@ Let's get started.
## Install the Netdata Agent
-If you don't have the Netdata Agent installed already, visit the [installation guide](/packaging/installer/README.md)
+If you don't have the Netdata Agent installed already, visit
+the [installation guide](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md)
for the recommended instructions for your system. In most cases, you can use the one-line installation script:
@@ -63,8 +65,7 @@ docker run -d \
Open your browser and navigate to `http://NODE`, to see the Graphite interface. Nothing yet, but we'll fix that soon
enough.
-![An empty Graphite
-dashboard](https://user-images.githubusercontent.com/1153921/83798958-ea371500-a659-11ea-8403-d46f77a05b78.png)
+![An empty Graphite dashboard](https://user-images.githubusercontent.com/1153921/83798958-ea371500-a659-11ea-8403-d46f77a05b78.png)
## Enable the Graphite exporting connector
@@ -115,7 +116,8 @@ the port accordingly.
```
We'll not worry about the rest of the settings for now. Restart the Agent using `sudo systemctl restart netdata`, or the
-[appropriate method](/docs/configure/start-stop-restart.md) for your system, to spin up the exporting engine.
+[appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your
+system, to spin up the exporting engine.
## See and organize Netdata metrics in Graphite
@@ -125,8 +127,7 @@ metrics. You can also navigate directly to `http://NODE/dashboard`.
Let's switch the interface to help you understand which metrics Netdata is exporting to Graphite. Click on **Dashboard**
and **Configure UI**, then choose the **Tree** option. Refresh your browser to change the UI.
-![Change the Graphite
-UI](https://user-images.githubusercontent.com/1153921/83798697-77c63500-a659-11ea-8ed5-5e274953c871.png)
+![Change the Graphite UI](https://user-images.githubusercontent.com/1153921/83798697-77c63500-a659-11ea-8ed5-5e274953c871.png)
You should now see a tree of available contexts, including one that matches the hostname of the Agent exporting metrics.
In this example, the Agent's hostname is `arcturus`.
@@ -138,46 +139,43 @@ in the dashboard. Add a few other system CPU charts to flesh things out.
Next, let's combine one or two of these charts. Click and drag one chart onto the other, and wait until the green **Drop
to merge** dialog appears. Release to merge the charts.
-![Merging charts in
-Graphite](https://user-images.githubusercontent.com/1153921/83817628-1bbfd880-a67a-11ea-81bc-05efc639b6ce.png)
+![Merging charts in Graphite](https://user-images.githubusercontent.com/1153921/83817628-1bbfd880-a67a-11ea-81bc-05efc639b6ce.png)
Finally, save your dashboard. Click **Dashboard**, then **Save As**, then choose a name. Your dashboard is now saved.
Of course, this is just the beginning of the customization you can do with Graphite. You can change the time range,
share your dashboard with others, or use the composer to customize the size and appearance of specific charts. Learn
-more about adding, modifying, and combining graphs in the [Graphite
-docs](https://graphite.readthedocs.io/en/latest/dashboard.html).
+more about adding, modifying, and combining graphs in
+the [Graphite docs](https://graphite.readthedocs.io/en/latest/dashboard.html).
## Monitor the exporting engine
As soon as the exporting engine begins, Netdata begins reporting metrics about the system's health and performance.
-![Graphs for monitoring the exporting
-engine](https://user-images.githubusercontent.com/1153921/83800787-e5c02b80-a65c-11ea-865a-c447d2ce4cbb.png)
+![Graphs for monitoring the exporting engine](https://user-images.githubusercontent.com/1153921/83800787-e5c02b80-a65c-11ea-865a-c447d2ce4cbb.png)
You can use these charts to verify that Netdata is properly exporting metrics to Graphite. You can even add these
exporting charts to your Graphite dashboard!
### Add exporting charts to Netdata Cloud
-You can also show these exporting engine metrics on Netdata Cloud. If you don't have an account already, go [sign
-in](https://app.netdata.cloud) and get started for free. If you need some help along the way, read the [get started with
-Cloud guide](https://learn.netdata.cloud/docs/cloud/get-started).
+You can also show these exporting engine metrics on Netdata Cloud. If you don't have an account already,
+go [sign in](https://app.netdata.cloud) and get started for free. If you need some help along the way, read
+the [get started with Cloud guide](https://github.com/netdata/netdata/blob/master/docs/cloud/get-started.mdx).
Add more metrics to a War Room's Nodes view by clicking on the **Add metric** button, then typing `exporting` into the
context field. Choose the exporting contexts you want to add, then click **Add**. You'll see these charts alongside any
others you've customized in Netdata Cloud.
-![Exporting engine metrics in Netdata
-Cloud](https://user-images.githubusercontent.com/1153921/83902769-db139e00-a711-11ea-828e-aa7e32b04c75.png)
+![Exporting engine metrics in Netdata Cloud](https://user-images.githubusercontent.com/1153921/83902769-db139e00-a711-11ea-828e-aa7e32b04c75.png)
## What's next?
What you do with your exported metrics is entirely up to you, but as you might have seen in the Graphite connector
configuration block, there are many other ways to tweak and customize which metrics you export to Graphite and how
-often.
+often.
-For full details about each configuration option and what it does, see the [exporting reference
-guide](/exporting/README.md).
+For full details about each configuration option and what it does, see
+the [exporting reference guide](https://github.com/netdata/netdata/blob/master/exporting/README.md).
diff --git a/docs/guides/monitor-cockroachdb.md b/docs/guides/monitor-cockroachdb.md
index 46dd2535e..3c6e1b2cf 100644
--- a/docs/guides/monitor-cockroachdb.md
+++ b/docs/guides/monitor-cockroachdb.md
@@ -6,8 +6,9 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/moni
# Monitor CockroachDB metrics with Netdata
[CockroachDB](https://github.com/cockroachdb/cockroach) is an open-source project that brings SQL databases into
-scalable, disaster-resilient cloud deployments. Thanks to a [new CockroachDB
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/cockroachdb/) released in
+scalable, disaster-resilient cloud deployments. Thanks to
+a [new CockroachDB collector](https://github.com/netdata/go.d.plugin/blob/master/modules/cockroachdb/README.md)
+released in
[v1.20](https://blog.netdata.cloud/posts/release-1.20/), you can now monitor any number of CockroachDB databases with
maximum granularity using Netdata. Collect more than 50 unique metrics and put them on interactive visualizations
designed for better visual anomaly detection.
@@ -19,9 +20,9 @@ Let's dive in and walk through the process of monitoring CockroachDB metrics wit
## What's in this guide
-- [Configure the CockroachDB collector](#configure-the-cockroachdb-collector)
- - [Manual setup for a local CockroachDB database](#manual-setup-for-a-local-cockroachdb-database)
-- [Tweak CockroachDB alarms](#tweak-cockroachdb-alarms)
+- [Configure the CockroachDB collector](#configure-the-cockroachdb-collector)
+ - [Manual setup for a local CockroachDB database](#manual-setup-for-a-local-cockroachdb-database)
+- [Tweak CockroachDB alarms](#tweak-cockroachdb-alarms)
## Configure the CockroachDB collector
@@ -31,7 +32,7 @@ display them on the dashboard.
If your CockroachDB instance is accessible through `http://localhost:8080/` or `http://127.0.0.1:8080`, your setup is
complete. Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system, and refresh your browser. You should see CockroachDB
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system, and refresh your browser. You should see CockroachDB
metrics in your Netdata dashboard!
@@ -59,8 +60,8 @@ edit, or create a new job with any of the parameters listed above in the file. B
required, and everything else is optional.
For a production cluster, you'll use either an IP address or the system's hostname. Be sure that your remote system
-allows TCP communication on port 8080, or whichever port you have configured CockroachDB's [Admin
-UI](https://www.cockroachlabs.com/docs/stable/monitoring-and-alerting.html#prometheus-endpoint) to listen on.
+allows TCP communication on port 8080, or whichever port you have configured CockroachDB's
+[Admin UI](https://www.cockroachlabs.com/docs/stable/monitoring-and-alerting.html#prometheus-endpoint) to listen on.
```yaml
# [ JOBS ]
@@ -80,7 +81,7 @@ jobs:
- name: remote
url: https://203.0.113.0:8080/_status/vars
tls_skip_verify: yes # If your certificate is self-signed
-
+
- name: remote_hostname
url: https://cockroachdb.example.com:8080/_status/vars
tls_skip_verify: yes # If your certificate is self-signed
@@ -109,28 +110,24 @@ cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /et
```
For more information about editing the defaults or writing new alarm entities, see our health monitoring [quickstart
-guide](/health/QUICKSTART.md).
+guide](https://github.com/netdata/netdata/blob/master/health/QUICKSTART.md).
## What's next?
Now that you're collecting metrics from your CockroachDB databases, let us know how it's working for you! There's always
room for improvement or refinement based on real-world use cases. Feel free to [file an
-issue](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cneeds+triage&template=BUG_REPORT.yml) with your
+issue](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cneeds+triage&template=BUG_REPORT.yml) with
+your
thoughts.
Also, be sure to check out these useful resources:
-- [Netdata's CockroachDB
- documentation](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/cockroachdb/)
-- [Netdata's CockroachDB
- configuration](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/cockroachdb.conf)
-- [Netdata's CockroachDB
- alarms](https://github.com/netdata/netdata/blob/29d9b5e51603792ee27ef5a21f1de0ba8e130158/health/health.d/cockroachdb.conf)
-- [CockroachDB homepage](https://www.cockroachlabs.com/product/)
-- [CockroachDB documentation](https://www.cockroachlabs.com/docs/stable/)
-- [`_status/vars` endpoint
- docs](https://www.cockroachlabs.com/docs/stable/monitoring-and-alerting.html#prometheus-endpoint)
-- [Monitor CockroachDB with
- Prometheus](https://www.cockroachlabs.com/docs/stable/monitor-cockroachdb-with-prometheus.html)
+- [Netdata's CockroachDB documentation](https://github.com/netdata/go.d.plugin/blob/master/modules/cockroachdb/README.md)
+- [Netdata's CockroachDB configuration](https://github.com/netdata/go.d.plugin/blob/master/config/go.d/cockroachdb.conf)
+- [Netdata's CockroachDB alarms](https://github.com/netdata/netdata/blob/29d9b5e51603792ee27ef5a21f1de0ba8e130158/health/health.d/cockroachdb.conf)
+- [CockroachDB homepage](https://www.cockroachlabs.com/product/)
+- [CockroachDB documentation](https://www.cockroachlabs.com/docs/stable/)
+- [`_status/vars` endpoint docs](https://www.cockroachlabs.com/docs/stable/monitoring-and-alerting.html#prometheus-endpoint)
+- [Monitor CockroachDB with Prometheus](https://www.cockroachlabs.com/docs/stable/monitor-cockroachdb-with-prometheus.html)
diff --git a/docs/guides/monitor-hadoop-cluster.md b/docs/guides/monitor-hadoop-cluster.md
index 62403f897..cce261fee 100644
--- a/docs/guides/monitor-hadoop-cluster.md
+++ b/docs/guides/monitor-hadoop-cluster.md
@@ -23,8 +23,8 @@ alternative, like the guide available from
For more specifics on the collection modules used in this guide, read the respective pages in our documentation:
-- [HDFS](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/hdfs)
-- [Zookeeper](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/zookeeper)
+- [HDFS](https://github.com/netdata/go.d.plugin/blob/master/modules/hdfs/README.md)
+- [Zookeeper](https://github.com/netdata/go.d.plugin/blob/master/modules/zookeeper/README.md)
## Set up your HDFS and Zookeeper installations
@@ -160,7 +160,7 @@ jobs:
address : 203.0.113.10:2182
```
-Finally, [restart Netdata](/docs/configure/start-stop-restart.md).
+Finally, [restart Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md).
```sh
sudo systemctl restart netdata
@@ -185,7 +185,7 @@ sudo /etc/netdata/edit-config health.d/zookeeper.conf
```
For more information about editing the defaults or writing new alarm entities, see our [health monitoring
-documentation](/health/README.md).
+documentation](https://github.com/netdata/netdata/blob/master/health/README.md).
## What's next?
diff --git a/docs/guides/monitor/anomaly-detection-python.md b/docs/guides/monitor/anomaly-detection-python.md
index ad8398cc6..d6d27f4e5 100644
--- a/docs/guides/monitor/anomaly-detection-python.md
+++ b/docs/guides/monitor/anomaly-detection-python.md
@@ -23,7 +23,7 @@ library](https://github.com/yzhao062/pyod/tree/master), which periodically runs
quantify how anomalous certain charts are.
All these metrics and alarms are available for centralized monitoring in [Netdata Cloud](https://app.netdata.cloud). If
-you choose to sign up for Netdata Cloud and [connect your nodes](/claim/README.md), you will have the ability to run
+you choose to sign up for Netdata Cloud and [connect your nodes](https://github.com/netdata/netdata/blob/master/claim/README.md), you will have the ability to run
tailored anomaly detection on every node in your infrastructure, regardless of its purpose or workload.
In this guide, you'll learn how to set up the anomalies collector to instantly detect anomalies in an Nginx web server
@@ -35,9 +35,9 @@ server](https://user-images.githubusercontent.com/1153921/103586700-da5b0a00-4ea
## Prerequisites
-- A node running the Netdata Agent. If you don't yet have that, [get Netdata](/docs/get-started.mdx).
+- A node running the Netdata Agent. If you don't yet have that, [get Netdata](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx).
- A Netdata Cloud account. [Sign up](https://app.netdata.cloud) if you don't have one already.
-- Familiarity with configuring the Netdata Agent with [`edit-config`](/docs/configure/nodes.md).
+- Familiarity with configuring the Netdata Agent with [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md).
- _Optional_: An Nginx web server running on the same node to follow the example configuration steps.
## Install required Python packages
@@ -65,7 +65,7 @@ Use `exit` to become your normal user again.
## Enable the anomalies collector
-Navigate to your [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory) and use `edit-config`
+Navigate to your [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory) and use `edit-config`
to open the `python.d.conf` file.
```bash
@@ -79,8 +79,8 @@ yourself if it doesn't already exist. Either way, the final result should look l
anomalies: yes
```
-[Restart the Agent](/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system, to start up the anomalies collector. By default, the
+[Restart the Agent](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata`, or the [appropriate
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system, to start up the anomalies collector. By default, the
model training process runs every 30 minutes, and uses the previous 4 hours of metrics to establish a baseline for
health and performance across the default included charts.
@@ -105,7 +105,7 @@ involve tweaking the behavior of the ML training itself.
- `train_every_n`: How often to train the ML models.
- `train_n_secs`: The number of historical observations to train each model on. The default is 4 hours, but if your node
doesn't have historical metrics going back that far, consider [changing the metrics retention
- policy](/docs/store/change-metrics-storage.md) or reducing this window.
+ policy](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md) or reducing this window.
- `custom_models`: A way to define custom models that you want anomaly probabilities for, including multi-node or
streaming setups.
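Putting a few of these options together, a trimmed job in `anomalies.conf` might look something like the sketch below. The values are illustrative, roughly mirroring the behavior described above (training on the previous 4 hours of history); the file shipped with your Agent remains the authoritative reference:

```yaml
# anomalies.conf: illustrative job, not the shipped defaults
local:
    name: 'local'
    host: '127.0.0.1:19999'
    charts_regex: 'system\..*'
    charts_to_exclude: 'system.uptime'
    train_every_n: 1800     # how often to retrain the models
    train_n_secs: 14400     # train on the previous 4 hours of metrics
```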
@@ -119,8 +119,8 @@ involve tweaking the behavior of the ML training itself.
As mentioned above, this guide uses an Nginx web server to demonstrate how the anomalies collector works. You must
configure the collector to monitor charts from the
-[Nginx](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx) and [web
-log](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog) collectors.
+[Nginx](https://github.com/netdata/go.d.plugin/blob/master/modules/nginx/README.md) and [web
+log](https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/README.md) collectors.
`charts_regex` allows for some basic regex, such as wildcards (`*`) to match all contexts with a certain pattern. For
example, `system\..*` matches with any chart with a context that begins with `system.`, and ends in any number of other
@@ -163,27 +163,27 @@ volume of requests/responses, not, for example, which type of 4xx response a use
dimensions](https://user-images.githubusercontent.com/1153921/102820642-d69f9180-4392-11eb-91c5-d3d166d40105.png)
Apply the ideas behind the collector's regex and exclude settings to any other
-[system](/docs/collect/system-metrics.md), [container](/docs/collect/container-metrics.md), or
-[application](/docs/collect/application-metrics.md) metrics you want to detect anomalies for.
+[system](https://github.com/netdata/netdata/blob/master/docs/collect/system-metrics.md), [container](https://github.com/netdata/netdata/blob/master/docs/collect/container-metrics.md), or
+[application](https://github.com/netdata/netdata/blob/master/docs/collect/application-metrics.md) metrics you want to detect anomalies for.
## What's next?
Now that you know how to set up unsupervised anomaly detection in the Netdata Agent, using an Nginx web server as an
example, it's time to apply that knowledge to other mission-critical parts of your infrastructure. If you're not sure
-what to monitor next, check out our list of [collectors](/collectors/COLLECTORS.md) to see what kind of metrics Netdata
+what to monitor next, check out our list of [collectors](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md) to see what kind of metrics Netdata
can collect from your systems, containers, and applications.
-Keep on moving to [part 2](/docs/guides/monitor/visualize-monitor-anomalies.md), which covers the charts and alarms
+Keep on moving to [part 2](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/visualize-monitor-anomalies.md), which covers the charts and alarms
Netdata creates for unsupervised anomaly detection.
For a different troubleshooting experience, try out the [Metric
-Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations) feature in Netdata Cloud. Metric
+Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md) feature in Netdata Cloud. Metric
Correlations helps you perform faster root cause analysis by narrowing a dashboard to only the charts most likely to be
related to an anomaly.
### Related reference documentation
-- [Netdata Agent · Anomalies collector](/collectors/python.d.plugin/anomalies/README.md)
-- [Netdata Agent · Nginx collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/nginx)
-- [Netdata Agent · web log collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog)
-- [Netdata Cloud · Metric Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations)
+- [Netdata Agent · Anomalies collector](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/anomalies/README.md)
+- [Netdata Agent · Nginx collector](https://github.com/netdata/go.d.plugin/blob/master/modules/nginx/README.md)
+- [Netdata Agent · web log collector](https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/README.md)
+- [Netdata Cloud · Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md)
diff --git a/docs/guides/monitor/anomaly-detection.md b/docs/guides/monitor/anomaly-detection.md
index e98c5c02e..ce819d937 100644
--- a/docs/guides/monitor/anomaly-detection.md
+++ b/docs/guides/monitor/anomaly-detection.md
@@ -14,27 +14,27 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/moni
As of [`v1.32.0`](https://github.com/netdata/netdata/releases/tag/v1.32.0), Netdata comes with some ML powered [anomaly detection](https://en.wikipedia.org/wiki/Anomaly_detection) capabilities built into it and available to use out of the box, with zero configuration required (ML was enabled by default in `v1.35.0-29-nightly` in [this PR](https://github.com/netdata/netdata/pull/13158), previously it required a one line config change).
-This means that in addition to collecting raw value metrics, the Netdata agent will also produce an [`anomaly-bit`](https://learn.netdata.cloud/docs/agent/ml#anomaly-bit---100--anomalous-0--normal) every second which will be `100` when recent raw metric values are considered anomalous by Netdata and `0` when they look normal. Once we aggregate beyond one second intervals this aggregated `anomaly-bit` becomes an ["anomaly rate"](https://learn.netdata.cloud/docs/agent/ml#anomaly-rate---averageanomaly-bit).
+This means that in addition to collecting raw value metrics, the Netdata agent will also produce an [`anomaly-bit`](https://github.com/netdata/netdata/blob/master/ml/README.md#anomaly-bit---100--anomalous-0--normal) every second which will be `100` when recent raw metric values are considered anomalous by Netdata and `0` when they look normal. Once we aggregate beyond one second intervals this aggregated `anomaly-bit` becomes an ["anomaly rate"](https://github.com/netdata/netdata/blob/master/ml/README.md#anomaly-rate---averageanomaly-bit).
-To be as concrete as possible, the below api call shows how to access the raw anomaly bit of the `system.cpu` chart from the [london.my-netdata.io](https://london.my-netdata.io) Netdata demo server. Passing `options=anomaly-bit` returns the anomay bit instead of the raw metric value.
+To be as concrete as possible, the below api call shows how to access the raw anomaly bit of the `system.cpu` chart from the [london.my-netdata.io](https://london.my-netdata.io) Netdata demo server. Passing `options=anomaly-bit` returns the anomaly bit instead of the raw metric value.
```
https://london.my-netdata.io/api/v1/data?chart=system.cpu&options=anomaly-bit
```
-If we aggregate the above to just 1 point by adding `points=1` we get an "[Anomaly Rate](https://learn.netdata.cloud/docs/agent/ml#anomaly-rate---averageanomaly-bit)":
+If we aggregate the above to just 1 point by adding `points=1` we get an "[Anomaly Rate](https://github.com/netdata/netdata/blob/master/ml/README.md#anomaly-rate---averageanomaly-bit)":
```
https://london.my-netdata.io/api/v1/data?chart=system.cpu&options=anomaly-bit&points=1
```
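You can combine the same options with other standard `/api/v1/data` parameters. For example, assuming the standard `after` parameter (a negative value means "this many seconds before now"), the following sketch would return a single-point anomaly rate for `system.cpu` over the last 10 minutes:

```
https://london.my-netdata.io/api/v1/data?chart=system.cpu&options=anomaly-bit&after=-600&points=1
```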
-The fundamentals of Netdata's anomaly detection approach and implmentation are covered in lots more detail in the [agent ML documentation](https://learn.netdata.cloud/docs/agent/ml).
+The fundamentals of Netdata's anomaly detection approach and implementation are covered in lots more detail in the [agent ML documentation](https://github.com/netdata/netdata/blob/master/ml/README.md).
This guide will explain how to get started using these ML based anomaly detection capabilities within Netdata.
## Anomaly Advisor
-The [Anomaly Advisor](https://learn.netdata.cloud/docs/cloud/insights/anomaly-advisor) is the flagship anomaly detection feature within Netdata. In the "Anomalies" tab of Netdata you will see an overall "Anomaly Rate" chart that aggregates node level anomaly rate for all nodes in a space. The aim of this chart is to make it easy to quickly spot periods of time where the overall "[node anomaly rate](https://learn.netdata.cloud/docs/agent/ml#node-anomaly-rate)" is evelated in some unusual way and for what node or nodes this relates to.
+The [Anomaly Advisor](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/anomaly-advisor.mdx) is the flagship anomaly detection feature within Netdata. In the "Anomalies" tab of Netdata you will see an overall "Anomaly Rate" chart that aggregates node level anomaly rate for all nodes in a space. The aim of this chart is to make it easy to quickly spot periods of time where the overall "[node anomaly rate](https://github.com/netdata/netdata/blob/master/ml/README.md#node-anomaly-rate)" is elevated in some unusual way and for what node or nodes this relates to.
![image](https://user-images.githubusercontent.com/2178292/175928290-490dd8b9-9c55-4724-927e-e145cb1cc837.png)
@@ -44,7 +44,7 @@ Once an area on the Anomaly Rate chart is highlighted netdata will append a "hea
## Embedded Anomaly Rate Charts
-Charts in both the [Overview](https://learn.netdata.cloud/docs/cloud/visualize/overview) and [single node dashboard](https://learn.netdata.cloud/docs/cloud/visualize/overview#jump-to-single-node-dashboards) tabs also expose the underlying anomaly rates for each dimension so users can easily see if the raw metrics are considered anomalous or not by Netdata.
+Charts in both the [Overview](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md) and [single node dashboard](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md#jump-to-single-node-dashboards) tabs also expose the underlying anomaly rates for each dimension so users can easily see if the raw metrics are considered anomalous or not by Netdata.
Pressing the anomalies icon (next to the information icon in the chart header) will expand the anomaly rate chart to make it easy to see how the anomaly rate for any individual dimension corresponds to the raw underlying data. In the example below, we can see that the spike in `system.pgpgio|in` corresponded to the anomaly rate for that dimension jumping to 100% for a small period of time until the spike passed.
@@ -65,9 +65,9 @@ You can see some example ML based alert configurations below:
Check out the resources below to learn more about how Netdata is approaching ML:
-- [Agent ML documentation](https://learn.netdata.cloud/docs/agent/ml).
-- [Anomaly Advisor documentation](https://learn.netdata.cloud/docs/cloud/insights/anomaly-advisor).
-- [Metric Correlations documentation](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations).
+- [Agent ML documentation](https://github.com/netdata/netdata/blob/master/ml/README.md).
+- [Anomaly Advisor documentation](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/anomaly-advisor.mdx).
+- [Metric Correlations documentation](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md).
- Anomaly Advisor [launch blog post](https://www.netdata.cloud/blog/introducing-anomaly-advisor-unsupervised-anomaly-detection-in-netdata/).
- Netdata Approach to ML [blog post](https://www.netdata.cloud/blog/our-approach-to-machine-learning/).
- `area/ml` related [GitHub Discussions](https://github.com/netdata/netdata/discussions?discussions_q=label%3Aarea%2Fml).
diff --git a/docs/guides/monitor/dimension-templates.md b/docs/guides/monitor/dimension-templates.md
index 539127366..d2795a9c6 100644
--- a/docs/guides/monitor/dimension-templates.md
+++ b/docs/guides/monitor/dimension-templates.md
@@ -8,24 +8,27 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/moni
Your ability to monitor the health of your systems and applications relies on your ability to create and maintain
the best set of alarms for your particular needs.
-In v1.18 of Netdata, we introduced **dimension templates** for alarms, which simplifies the process of writing [alarm
-entities](/health/REFERENCE.md#health-entity-reference) for charts with many dimensions.
+In v1.18 of Netdata, we introduced **dimension templates** for alarms, which simplifies the process of
+writing [alarm entities](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#health-entity-reference) for
+charts with many dimensions.
Dimension templates can condense many individual entities into one—no more copy-pasting one entity and changing the
`alarm`/`template` and `lookup` lines for each dimension you'd like to monitor.
They are, however, an advanced health monitoring feature. For more basic instructions on creating your first alarm,
-check out our [health monitoring documentation](/health/README.md), which also includes
-[examples](/health/REFERENCE.md#example-alarms).
+check out our [health monitoring documentation](https://github.com/netdata/netdata/blob/master/health/README.md), which also includes
+[examples](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#example-alarms).
## The fundamentals of `foreach`
-Our dimension templates update creates a new `foreach` parameter to the existing [`lookup`
-line](/health/REFERENCE.md#alarm-line-lookup). This is where the magic happens.
+Our dimension templates update adds a new `foreach` parameter to the
+existing [`lookup` line](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alarm-line-lookup). This
+is where the magic happens.
You use the `foreach` parameter to specify which dimensions you want to monitor with this single alarm. You can separate
-them with a comma (`,`) or a pipe (`|`). You can also use a [Netdata simple pattern](/libnetdata/simple_pattern/README.md)
-to create many alarms with a regex-like syntax.
+them with a comma (`,`) or a pipe (`|`). You can also use
+a [Netdata simple pattern](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) to create
+many alarms with a regex-like syntax.
The `foreach` parameter _has_ to be the last parameter in your `lookup` line, and if you have both `of` and `foreach` in
the same `lookup` line, Netdata will ignore the `of` parameter and use `foreach` instead.
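Before we get to the full worked examples later in this guide, here is a minimal sketch of what such an entity can look like. The alarm name and thresholds are illustrative, not values shipped with Netdata:

```
 alarm: cpu_dimensions_example
    on: system.cpu
lookup: average -10m percentage foreach system,user,nice
 every: 10s
  warn: $this > 50
  crit: $this > 80
```

Because the entity uses `foreach system,user,nice`, Netdata creates three alarms from this single definition, one per listed dimension.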
@@ -95,7 +98,7 @@ Let's look at some other examples of how `foreach` works so you can best apply i
In the last example, we used `foreach system,user,nice` to create three distinct alarms using dimension templates. But
what if you want to quickly create alarms for _all_ the dimensions of a given chart?
-Use a [simple pattern](/libnetdata/simple_pattern/README.md)! One example of a simple pattern is a single wildcard
+Use a [simple pattern](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md)! One example of a simple pattern is a single wildcard
(`*`).
Instead of monitoring system CPU usage, let's monitor per-application CPU usage using the `apps.cpu` chart. Passing a
@@ -113,14 +116,15 @@ lookup: average -10m percentage foreach *
This entity will now create alarms for every dimension in the `apps.cpu` chart. Given that most `apps.cpu` charts have
10 or more dimensions, using the wildcard ensures you catch every CPU-hogging process.
-To learn more about how to use simple patterns with dimension templates, see our [simple patterns
-documentation](/libnetdata/simple_pattern/README.md).
+To learn more about how to use simple patterns with dimension templates, see
+our [simple patterns documentation](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md).
## Using `foreach` with alarm templates
-Dimension templates also work with [alarm templates](/health/REFERENCE.md#alarm-line-alarm-or-template). Alarm
-templates help you create alarms for all the charts with a given context—for example, all the cores of your system's
-CPU.
+Dimension templates also work
+with [alarm templates](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alarm-line-alarm-or-template).
+Alarm templates help you create alarms for all the charts with a given context—for example, all the cores of your
+system's CPU.
By combining the two, you can create dozens of individual alarms with a single template entity. Here's how you would
create alarms for the `system`, `user`, and `nice` dimensions for every chart in the `cpu.cpu` context—or, in other
@@ -170,7 +174,8 @@ alarms that will help you better monitor the health of your systems.
Or, at the very least, simplify your configuration files.
-For information about other advanced features in Netdata's health monitoring toolkit, check out our [health
-documentation](/health/README.md). And if you have some cool alarms you built using dimension templates,
+For information about other advanced features in Netdata's health monitoring toolkit, check out
+our [health documentation](https://github.com/netdata/netdata/blob/master/health/README.md). And if you have some cool
+alarms you built using dimension templates,
diff --git a/docs/guides/monitor/kubernetes-k8s-netdata.md b/docs/guides/monitor/kubernetes-k8s-netdata.md
index 5cfefe892..5732fc96c 100644
--- a/docs/guides/monitor/kubernetes-k8s-netdata.md
+++ b/docs/guides/monitor/kubernetes-k8s-netdata.md
@@ -46,7 +46,7 @@ To follow this tutorial, you need:
- A free Netdata Cloud account. [Sign up](https://app.netdata.cloud/sign-up?cloudRoute=/spaces) if you don't have one
already.
- A working cluster running Kubernetes v1.9 or newer, with a Netdata deployment and connected parent/child nodes. See
- our [Kubernetes deployment process](/packaging/installer/methods/kubernetes.md) for details on deployment and
+ our [Kubernetes deployment process](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kubernetes.md) for details on deployment and
connecting to Cloud.
- The [`kubectl`](https://kubernetes.io/docs/reference/kubectl/overview/) command line tool, within [one minor version
difference](https://kubernetes.io/docs/tasks/tools/install-kubectl/#before-you-begin) of your cluster, on an
@@ -104,7 +104,7 @@ To get started, [sign in](https://app.netdata.cloud/sign-in?cloudRoute=/spaces)
to the War Room you connected your cluster to, if not **General**.
Netdata Cloud is already visualizing your Kubernetes metrics, streamed in real-time from each node, in the
-[Overview](https://learn.netdata.cloud/docs/cloud/visualize/overview):
+[Overview](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md):
![Netdata's Kubernetes monitoring
dashboard](https://user-images.githubusercontent.com/1153921/109037415-eafc5500-7687-11eb-8773-9b95941e3328.png)
@@ -126,8 +126,8 @@ cluster](https://user-images.githubusercontent.com/1153921/109042169-19c8fa00-76
For example, the chart above shows a spike in the CPU utilization from `rabbitmq` every minute or so, along with a
baseline CPU utilization of 10-15% across the cluster.
-Read about the [Overview](https://learn.netdata.cloud/docs/cloud/visualize/overview) and some best practices on [viewing
-an overview of your infrastructure](/docs/visualize/overview-infrastructure.md) for details on using composite charts to
+Read about the [Overview](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md) and some best practices on [viewing
+an overview of your infrastructure](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md) for details on using composite charts to
drill down into per-node performance metrics.
## Pod and container metrics
@@ -154,7 +154,7 @@ Let's explore the most colorful box by hovering over it.
container](https://user-images.githubusercontent.com/1153921/109049544-a8417980-7695-11eb-80a7-109b4a645a27.png)
The **Context** tab shows `rabbitmq-5bb66bb6c9-6xr5b` as the container's image name, which means this container is
-running a [RabbitMQ](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/rabbitmq) workload.
+running a [RabbitMQ](https://github.com/netdata/go.d.plugin/blob/master/modules/rabbitmq/README.md) workload.
Click the **Metrics** tab to see real-time metrics from that container. Unsurprisingly, it shows a spike in CPU
utilization at regular intervals.
@@ -173,7 +173,7 @@ different namespaces.
![Time-series Kubernetes monitoring in Netdata
Cloud](https://user-images.githubusercontent.com/1153921/109075210-126a1680-76b6-11eb-918d-5acdcdac152d.png)
-Each composite chart has a [definition bar](https://learn.netdata.cloud/docs/cloud/visualize/overview#definition-bar)
+Each composite chart has a [definition bar](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md#definition-bar)
for complete customization. For example, grouping the top chart by `k8s_container_name` reveals new information.
![Changing time-series charts](https://user-images.githubusercontent.com/1153921/109075212-139b4380-76b6-11eb-836f-939482ae55fc.png)
@@ -183,20 +183,20 @@ for complete customization. For example, grouping the top chart by `k8s_containe
Netdata has a [service discovery plugin](https://github.com/netdata/agent-service-discovery), which discovers and
creates configuration files for [compatible
services](https://github.com/netdata/helmchart#service-discovery-and-supported-services) and any endpoints covered by
-our [generic Prometheus collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus).
+our [generic Prometheus collector](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/README.md).
Netdata uses these files to collect metrics from any compatible application as they run _inside_ of a pod. Service
discovery happens without manual intervention as pods are created, destroyed, or moved between nodes.
Service metrics show up on the Overview as well, beneath the **Kubernetes** section, and are labeled according to the
service in question. For example, the **RabbitMQ** section has numerous charts from the [`rabbitmq`
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/rabbitmq):
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/rabbitmq/README.md):
![Finding service discovery
metrics](https://user-images.githubusercontent.com/1153921/109054511-2eac8a00-769b-11eb-97f1-da93acb4b5fe.png)
> The robot-shop cluster has more supported services, such as MySQL, which are not visible with zero configuration. This
> is usually because of services running on non-default ports, using non-default names, or required passwords. Read up
-> on [configuring service discovery](/packaging/installer/methods/kubernetes.md#configure-service-discovery) to collect
+> on [configuring service discovery](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kubernetes.md#configure-service-discovery) to collect
> more service metrics.
Service metrics are essential to infrastructure monitoring, as they're the best indicator of the end-user experience,
@@ -210,7 +210,7 @@ Netdata also automatically collects metrics from two essential Kubernetes proces
The **k8s kubelet** section visualizes metrics from the Kubernetes agent responsible for managing every pod on a given
node. This also happens without any configuration thanks to the [kubelet
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubelet).
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubelet/README.md).
Monitoring each node's kubelet can be invaluable when diagnosing issues with your Kubernetes cluster. For example, you
can see if the number of running containers/pods has dropped, which could signal a fault or crash in a particular
@@ -226,7 +226,7 @@ configuration-related errors, and the actual vs. desired numbers of volumes, plu
The **k8s kube-proxy** section displays metrics about the network proxy that runs on each node in your Kubernetes
cluster. kube-proxy lets pods communicate with each other and accept sessions from outside your cluster. Its metrics are
collected by the [kube-proxy
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubeproxy).
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubeproxy/README.md).
With Netdata, you can monitor how often your k8s proxies are syncing proxy rules between nodes. Dramatic changes in
these figures could indicate an anomaly in your cluster that's worthy of further investigation.
@@ -246,9 +246,9 @@ clusters of all sizes.
- [Netdata Helm chart](https://github.com/netdata/helmchart)
- [Netdata service discovery](https://github.com/netdata/agent-service-discovery)
- [Netdata Agent · `kubelet`
- collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubelet)
+ collector](https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubelet/README.md)
- [Netdata Agent · `kube-proxy`
- collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/k8s_kubeproxy)
-- [Netdata Agent · `cgroups.plugin`](/collectors/cgroups.plugin/README.md)
+ collector](https://github.com/netdata/go.d.plugin/blob/master/modules/k8s_kubeproxy/README.md)
+- [Netdata Agent · `cgroups.plugin`](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md)
diff --git a/docs/guides/monitor/lamp-stack.md b/docs/guides/monitor/lamp-stack.md
index 29b35e142..165888c4b 100644
--- a/docs/guides/monitor/lamp-stack.md
+++ b/docs/guides/monitor/lamp-stack.md
@@ -58,7 +58,7 @@ To follow this tutorial, you need:
## Install the Netdata Agent
If you don't have the free, open-source Netdata monitoring agent installed on your node yet, get started with a [single
-kickstart command](/docs/get-started.mdx):
+kickstart command](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx):
@@ -68,15 +68,15 @@ replacing `NODE` with the hostname or IP address of your system.
## Enable hardware and Linux system monitoring
-There's nothing you need to do to enable [system monitoring](/docs/collect/system-metrics.md) and Linux monitoring with
+There's nothing you need to do to enable [system monitoring](https://github.com/netdata/netdata/blob/master/docs/collect/system-metrics.md) and Linux monitoring with
the Netdata Agent, which autodetects metrics from CPUs, memory, disks, networking devices, and Linux processes like
systemd without any configuration. If you're using containers, Netdata automatically collects resource utilization
-metrics from each using the [cgroups data collector](/collectors/cgroups.plugin/README.md).
+metrics from each using the [cgroups data collector](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md).
## Enable Apache monitoring
Let's begin by configuring Apache to work with Netdata's [Apache data
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/apache).
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/apache/README.md).
Actually, there's nothing for you to do to enable Apache monitoring with Netdata.
@@ -87,7 +87,7 @@ metrics](https://httpd.apache.org/docs/2.4/mod/mod_status.html), which is just _
## Enable web log monitoring
The Netdata Agent also comes with a [web log
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog), which reads Apache's access
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/README.md), which reads Apache's access
log file, processes each line, and converts them into per-second metrics. On Debian systems, it reads the file at
`/var/log/apache2/access.log`.
@@ -100,7 +100,7 @@ monitoring.
Because your MySQL database is password-protected, you do need to tell MySQL to allow the `netdata` user to connect
without a password. Netdata's [MySQL data
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/mysql) collects metrics in _read-only_
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/README.md) collects metrics in _read-only_
mode, without being able to alter or affect operations in any way.
First, log into the MySQL shell. Then, run the following three commands, one at a time:
@@ -112,15 +112,15 @@ FLUSH PRIVILEGES;
```
Run `sudo systemctl restart netdata`, or the [appropriate alternative for your
-system](/docs/configure/start-stop-restart.md), to collect dozens of metrics every second for robust MySQL monitoring.
+system](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md), to collect dozens of metrics every second for robust MySQL monitoring.
## Enable PHP monitoring
Unlike Apache or MySQL, PHP isn't a service that you can monitor directly, unless you instrument a PHP-based application
-with [StatsD](/collectors/statsd.plugin/README.md).
+with [StatsD](https://github.com/netdata/netdata/blob/master/collectors/statsd.plugin/README.md).
However, if you use [PHP-FPM](https://php-fpm.org/) in your LAMP stack, you can monitor that process with our [PHP-FPM
-data collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/phpfpm).
+data collector](https://github.com/netdata/go.d.plugin/blob/master/modules/phpfpm/README.md).
Open your PHP-FPM configuration for editing, replacing `7.4` with your version of PHP:
@@ -166,12 +166,12 @@ If the Netdata Agent isn't already open in your browser, open a new tab and navi
> If you [signed up](https://app.netdata.cloud/sign-up?cloudRoute=/spaces) for Netdata Cloud earlier, you can also view
> the exact same LAMP stack metrics there, plus additional features, like drag-and-drop custom dashboards. Be sure to
-> [connecting your node](/claim/README.md) to start streaming metrics to your browser through Netdata Cloud.
+> [connect your node](https://github.com/netdata/netdata/blob/master/claim/README.md) to start streaming metrics to your browser through Netdata Cloud.
Netdata automatically organizes all metrics and charts onto a single page for easy navigation. Peek at gauges to see
overall system performance, then scroll down to see more. Click-and-drag with your mouse to pan _all_ charts back and
forth through different time intervals, or hold `SHIFT` and use the scrollwheel (or two-finger scroll) to zoom in and
-out. Check out our doc on [interacting with charts](/docs/visualize/interact-dashboards-charts.md) for all the details.
+out. Check out our doc on [interacting with charts](https://github.com/netdata/netdata/blob/master/docs/visualize/interact-dashboards-charts.md) for all the details.
![The Netdata
dashboard](https://user-images.githubusercontent.com/1153921/109520555-98e17800-7a69-11eb-86ec-16f689da4527.png)
@@ -205,15 +205,15 @@ Here's a quick reference for what charts you might want to focus on after settin
The Netdata Agent comes with hundreds of pre-configured alarms to help you keep tabs on your system, including 19 alarms
designed for smarter LAMP stack monitoring.
-Click the 🔔 icon in the top navigation to [see active alarms](/docs/monitor/view-active-alarms.md). The **Active** tabs
+Click the 🔔 icon in the top navigation to [see active alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/view-active-alarms.md). The **Active** tab
shows any alarms currently triggered, while the **All** tab displays a list of _every_ pre-configured alarm. The
![An example of LAMP stack
alarms](https://user-images.githubusercontent.com/1153921/109524120-5883f900-7a6d-11eb-830e-0e7baaa28163.png)
-[Tweak alarms](/docs/monitor/configure-alarms.md) based on your infrastructure monitoring needs, and to see these alarms
+[Tweak alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md) based on your infrastructure monitoring needs, and to see these alarms
in other places, like your inbox or a Slack channel, [enable a notification
-method](/docs/monitor/enable-notifications.md).
+method](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md).
## What's next?
@@ -223,7 +223,7 @@ services. The per-second metrics granularity means you have the most accurate in
any LAMP-related issues.
Another powerful way to monitor the availability of a LAMP stack is the [`httpcheck`
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/httpcheck), which pings a web server at
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/httpcheck/README.md), which pings a web server at
a regular interval and tells you whether, and how quickly, it's responding. The `response_match` option also lets you
monitor when the web server's response isn't what you expect it to be, which might happen if PHP-FPM crashes, for
example.
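As a rough sketch, a job for this collector in `go.d/httpcheck.conf` that also uses `response_match` could look like the following. The job name, URL, and expected string are placeholders for your own LAMP stack, not values taken from this guide:

```yaml
# go.d/httpcheck.conf: illustrative job
jobs:
  - name: lamp_local
    url: http://127.0.0.1
    response_match: '<title>My LAMP app</title>'
```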
@@ -233,14 +233,14 @@ we're not covering it here, but it _does_ work in a single-node setup. Just don'
node crashed.
If you're planning on managing more than one node, or want to take advantage of advanced features, like finding the
-source of issues faster with [Metric Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations),
+source of issues faster with [Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md),
[sign up](https://app.netdata.cloud/sign-up?cloudRoute=/spaces) for a free Netdata Cloud account.
### Related reference documentation
-- [Netdata Agent · Get started](/docs/get-started.mdx)
-- [Netdata Agent · Apache data collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/apache)
-- [Netdata Agent · Web log collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/weblog)
-- [Netdata Agent · MySQL data collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/mysql)
-- [Netdata Agent · PHP-FPM data collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/phpfpm)
+- [Netdata Agent · Get started](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx)
+- [Netdata Agent · Apache data collector](https://github.com/netdata/go.d.plugin/blob/master/modules/apache/README.md)
+- [Netdata Agent · Web log collector](https://github.com/netdata/go.d.plugin/blob/master/modules/weblog/README.md)
+- [Netdata Agent · MySQL data collector](https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/README.md)
+- [Netdata Agent · PHP-FPM data collector](https://github.com/netdata/go.d.plugin/blob/master/modules/phpfpm/README.md)
diff --git a/docs/guides/monitor/pi-hole-raspberry-pi.md b/docs/guides/monitor/pi-hole-raspberry-pi.md
index 1246d8ba1..5099d12b9 100644
--- a/docs/guides/monitor/pi-hole-raspberry-pi.md
+++ b/docs/guides/monitor/pi-hole-raspberry-pi.md
@@ -79,7 +79,7 @@ service](https://discourse.pi-hole.net/t/how-do-i-configure-my-devices-to-use-pi
finished setting up Pi-hole at this point.
As far as configuring Netdata to monitor Pi-hole metrics, there's nothing you actually need to do. Netdata's [Pi-hole
-collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/pihole) will autodetect the new service
+collector](https://github.com/netdata/go.d.plugin/blob/master/modules/pihole/README.md) will autodetect the new service
running on your Raspberry Pi and immediately start collecting metrics every second.
Restart Netdata with `sudo systemctl restart netdata`, which will then recognize that Pi-hole is running and start a
@@ -98,15 +98,15 @@ part of your system might affect another.
![The Netdata dashboard in
action](https://user-images.githubusercontent.com/1153921/80827388-b9fee100-8b98-11ea-8f60-0d7824667cd3.gif)
-If you're completely new to Netdata, look at our [step-by-step guide](/docs/guides/step-by-step/step-00.md) for a
-walkthrough of all its features. For a more expedited tour, see the [get started guide](/docs/get-started.mdx).
+If you're completely new to Netdata, look at our [step-by-step guide](https://github.com/netdata/netdata/blob/master/docs/guides/step-by-step/step-00.md) for a
+walkthrough of all its features. For a more expedited tour, see the [get started guide](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx).
### Enable temperature sensor monitoring
You need to manually enable Netdata's built-in [temperature sensor
-collector](https://learn.netdata.cloud/docs/agent/collectors/charts.d.plugin/sensors) to start collecting metrics.
+collector](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/sensors/README.md) to start collecting metrics.
-> Netdata uses a few plugins to manage its [collectors](/collectors/REFERENCE.md), each using a different language: Go,
+> Netdata uses a few plugins to manage its [collectors](https://github.com/netdata/netdata/blob/master/collectors/REFERENCE.md), each using a different language: Go,
> Python, Node.js, and Bash. While our Go collectors are undergoing the most active development, we still support the
> other languages. In this case, you need to enable a temperature sensor collector that's written in Bash.
@@ -124,7 +124,7 @@ Raspberry Pi temperature sensor monitoring.
### Storing historical metrics on your Raspberry Pi
By default, Netdata allocates 256 MiB in disk space to store historical metrics inside the [database
-engine](/database/engine/README.md). On the Raspberry Pi used for this guide, Netdata collects 1,500 metrics every
+engine](https://github.com/netdata/netdata/blob/master/database/engine/README.md). On the Raspberry Pi used for this guide, Netdata collects 1,500 metrics every
second, which equates to storing 3.5 days' worth of historical metrics.
You can increase this allocation by editing `netdata.conf` and increasing the `dbengine multihost disk space` setting to
@@ -136,8 +136,8 @@ more than 256.
```
Use our [database sizing
-calculator](/docs/store/change-metrics-storage.md#calculate-the-system-resources-ram-disk-space-needed-to-store-metrics)
-and [guide on storing historical metrics](/docs/guides/longer-metrics-storage.md) to help you determine the right
+calculator](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md#calculate-the-system-resources-ram-disk-space-needed-to-store-metrics)
+and [guide on storing historical metrics](https://github.com/netdata/netdata/blob/master/docs/guides/longer-metrics-storage.md) to help you determine the right
setting for your Raspberry Pi.
## What's next?
@@ -146,12 +146,12 @@ Now that you're monitoring Pi-hole and your Raspberry Pi with Netdata, you can e
configure Netdata to more specific goals.
Most importantly, you can always install additional services and instantly collect metrics from many of them with our
-[300+ integrations](/collectors/COLLECTORS.md).
+[300+ integrations](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md).
-- [Optimize performance](/docs/guides/configure/performance.md) using tweaks developed for IoT devices.
-- [Stream Raspberry Pi metrics](/streaming/README.md) to a parent host for easy access or longer-term storage.
-- [Tweak alarms](/health/QUICKSTART.md) for either Pi-hole or the health of your Raspberry Pi.
-- [Export metrics to external databases](/exporting/README.md) with the exporting engine.
+- [Optimize performance](https://github.com/netdata/netdata/blob/master/docs/guides/configure/performance.md) using tweaks developed for IoT devices.
+- [Stream Raspberry Pi metrics](https://github.com/netdata/netdata/blob/master/streaming/README.md) to a parent host for easy access or longer-term storage.
+- [Tweak alarms](https://github.com/netdata/netdata/blob/master/health/QUICKSTART.md) for either Pi-hole or the health of your Raspberry Pi.
+- [Export metrics to external databases](https://github.com/netdata/netdata/blob/master/exporting/README.md) with the exporting engine.
Or, head over to [our guides](https://learn.netdata.cloud/guides/) for even more experiments and insights into
troubleshooting the health of your systems and services.
diff --git a/docs/guides/monitor/process.md b/docs/guides/monitor/process.md
index 2f46d7abc..7cc327a01 100644
--- a/docs/guides/monitor/process.md
+++ b/docs/guides/monitor/process.md
@@ -23,38 +23,46 @@ SQL queries or know a bunch of arbitrary command-line flags.
With Netdata's process monitoring, you can:
-- Benchmark/optimize performance of standard applications, like web servers or databases
-- Benchmark/optimize performance of custom applications
-- Troubleshoot CPU/memory/disk utilization issues (why is my system's CPU spiking right now?)
-- Perform granular capacity planning based on the specific needs of your infrastructure
-- Search for leaking file descriptors
-- Investigate zombie processes
+- Benchmark/optimize performance of standard applications, like web servers or databases
+- Benchmark/optimize performance of custom applications
+- Troubleshoot CPU/memory/disk utilization issues (why is my system's CPU spiking right now?)
+- Perform granular capacity planning based on the specific needs of your infrastructure
+- Search for leaking file descriptors
+- Investigate zombie processes
... and much more. Let's get started.
## Prerequisites
-- One or more Linux nodes running [Netdata](/docs/get-started.mdx). If you need more time to understand Netdata before
- following this guide, see the [infrastructure](/docs/quickstart/infrastructure.md) or
- [single-node](/docs/quickstart/single-node.md) monitoring quickstarts.
-- A general understanding of how to [configure the Netdata Agent](/docs/configure/nodes.md) using `edit-config`.
-- A Netdata Cloud account. [Sign up](https://app.netdata.cloud) if you don't have one already.
+- One or more Linux nodes running [Netdata](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx). If you
+ need more time to understand Netdata before
+ following this guide, see
+ the [infrastructure](https://github.com/netdata/netdata/blob/master/docs/quickstart/infrastructure.md) or
+ [single-node](https://github.com/netdata/netdata/blob/master/docs/quickstart/single-node.md) monitoring quickstarts.
+- A general understanding of how
+ to [configure the Netdata Agent](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md)
+ using `edit-config`.
+- A Netdata Cloud account. [Sign up](https://app.netdata.cloud) if you don't have one already.
## How does Netdata do process monitoring?
-The Netdata Agent already knows to look for hundreds of [standard applications that we support via
-collectors](/collectors/COLLECTORS.md), and groups them based on their purpose. Let's say you want to monitor a MySQL
+The Netdata Agent already knows to look for hundreds
+of [standard applications that we support via collectors](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md),
+and groups them based on their
+purpose. Let's say you want to monitor a MySQL
database using its process. The Netdata Agent already knows to look for processes with the string `mysqld` in their
name, along with a few others, and puts them into the `sql` group. This `sql` group then becomes a dimension in all
process-specific charts.
The process and groups settings are used by two unique and powerful collectors.
-[**`apps.plugin`**](/collectors/apps.plugin/README.md) looks at the Linux process tree every second, much like `top` or
+[**`apps.plugin`**](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md) looks at the Linux
+process tree every second, much like `top` or
`ps fax`, and collects resource utilization information on every running process. It then automatically adds a layer of
meaningful visualization on top of these metrics, and creates per-process/application charts.
-[**`ebpf.plugin`**](/collectors/ebpf.plugin/README.md): Netdata's extended Berkeley Packet Filter (eBPF) collector
+[**`ebpf.plugin`**](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md): Netdata's extended
+Berkeley Packet Filter (eBPF) collector
monitors Linux kernel-level metrics for file descriptors, virtual filesystem IO, and process management, and then hands
process-specific metrics over to `apps.plugin` for visualization. The eBPF collector also collects and visualizes
metrics on an _event frequency_, which means it captures every kernel interaction, and not just the volume of
@@ -65,55 +73,55 @@ interaction at every second in time. That's even more precise than Netdata's sta
With these collectors working in parallel, Netdata visualizes the following per-second metrics for _any_ process on your
Linux systems:
-- CPU utilization (`apps.cpu`)
- - Total CPU usage
- - User/system CPU usage (`apps.cpu_user`/`apps.cpu_system`)
-- Disk I/O
- - Physical reads/writes (`apps.preads`/`apps.pwrites`)
- - Logical reads/writes (`apps.lreads`/`apps.lwrites`)
- - Open unique files (if a file is found open multiple times, it is counted just once, `apps.files`)
-- Memory
- - Real Memory Used (non-shared, `apps.mem`)
- - Virtual Memory Allocated (`apps.vmem`)
- - Minor page faults (i.e. memory activity, `apps.minor_faults`)
-- Processes
- - Threads running (`apps.threads`)
- - Processes running (`apps.processes`)
- - Carried over uptime (since the last Netdata Agent restart, `apps.uptime`)
- - Minimum uptime (`apps.uptime_min`)
- - Average uptime (`apps.uptime_average`)
- - Maximum uptime (`apps.uptime_max`)
- - Pipes open (`apps.pipes`)
-- Swap memory
- - Swap memory used (`apps.swap`)
- - Major page faults (i.e. swap activity, `apps.major_faults`)
-- Network
- - Sockets open (`apps.sockets`)
-- eBPF file
- - Number of calls to open files. (`apps.file_open`)
- - Number of files closed. (`apps.file_closed`)
- - Number of calls to open files that returned errors.
- - Number of calls to close files that returned errors.
-- eBPF syscall
- - Number of calls to delete files. (`apps.file_deleted`)
- - Number of calls to `vfs_write`. (`apps.vfs_write_call`)
- - Number of calls to `vfs_read`. (`apps.vfs_read_call`)
- - Number of bytes written with `vfs_write`. (`apps.vfs_write_bytes`)
- - Number of bytes read with `vfs_read`. (`apps.vfs_read_bytes`)
- - Number of calls to write a file that returned errors.
- - Number of calls to read a file that returned errors.
-- eBPF process
- - Number of process created with `do_fork`. (`apps.process_create`)
- - Number of threads created with `do_fork` or `__x86_64_sys_clone`, depending on your system's kernel version. (`apps.thread_create`)
- - Number of times that a process called `do_exit`. (`apps.task_close`)
-- eBPF net
- - Number of bytes sent. (`apps.bandwidth_sent`)
- - Number of bytes received. (`apps.bandwidth_recv`)
+- CPU utilization (`apps.cpu`)
+ - Total CPU usage
+ - User/system CPU usage (`apps.cpu_user`/`apps.cpu_system`)
+- Disk I/O
+ - Physical reads/writes (`apps.preads`/`apps.pwrites`)
+ - Logical reads/writes (`apps.lreads`/`apps.lwrites`)
+ - Open unique files (if a file is found open multiple times, it is counted just once, `apps.files`)
+- Memory
+ - Real Memory Used (non-shared, `apps.mem`)
+ - Virtual Memory Allocated (`apps.vmem`)
+ - Minor page faults (i.e. memory activity, `apps.minor_faults`)
+- Processes
+ - Threads running (`apps.threads`)
+ - Processes running (`apps.processes`)
+ - Carried over uptime (since the last Netdata Agent restart, `apps.uptime`)
+ - Minimum uptime (`apps.uptime_min`)
+ - Average uptime (`apps.uptime_average`)
+ - Maximum uptime (`apps.uptime_max`)
+ - Pipes open (`apps.pipes`)
+- Swap memory
+ - Swap memory used (`apps.swap`)
+ - Major page faults (i.e. swap activity, `apps.major_faults`)
+- Network
+ - Sockets open (`apps.sockets`)
+- eBPF file
+ - Number of calls to open files. (`apps.file_open`)
+ - Number of files closed. (`apps.file_closed`)
+ - Number of calls to open files that returned errors.
+ - Number of calls to close files that returned errors.
+- eBPF syscall
+ - Number of calls to delete files. (`apps.file_deleted`)
+ - Number of calls to `vfs_write`. (`apps.vfs_write_call`)
+ - Number of calls to `vfs_read`. (`apps.vfs_read_call`)
+ - Number of bytes written with `vfs_write`. (`apps.vfs_write_bytes`)
+ - Number of bytes read with `vfs_read`. (`apps.vfs_read_bytes`)
+ - Number of calls to write a file that returned errors.
+ - Number of calls to read a file that returned errors.
+- eBPF process
+  - Number of processes created with `do_fork`. (`apps.process_create`)
+ - Number of threads created with `do_fork` or `__x86_64_sys_clone`, depending on your system's kernel
+ version. (`apps.thread_create`)
+ - Number of times that a process called `do_exit`. (`apps.task_close`)
+- eBPF net
+ - Number of bytes sent. (`apps.bandwidth_sent`)
+ - Number of bytes received. (`apps.bandwidth_recv`)
As an example, here's the per-process CPU utilization chart, including a `sql` group/dimension.
-![A per-process CPU utilization chart in Netdata
-Cloud](https://user-images.githubusercontent.com/1153921/101217226-3a5d5700-363e-11eb-8610-aa1640aefb5d.png)
+![A per-process CPU utilization chart in Netdata Cloud](https://user-images.githubusercontent.com/1153921/101217226-3a5d5700-363e-11eb-8610-aa1640aefb5d.png)
## Configure the Netdata Agent to recognize a specific process
@@ -123,7 +131,8 @@ aware of hundreds of processes, and collects metrics from them automatically.
But, if you want to change the grouping behavior, add an application that isn't yet supported in the Netdata Agent, or
monitor a custom application, you need to edit the `apps_groups.conf` configuration file.
-Navigate to your [Netdata config directory](/docs/configure/nodes.md) and use `edit-config` to edit the file.
+Navigate to your [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) and
+use `edit-config` to edit the file.
```bash
cd /etc/netdata # Replace this with your Netdata config directory if not at /etc/netdata.
@@ -138,7 +147,8 @@ others, and groups them into `sql`. That makes sense, since all these processes
sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* sqlservr
```
-These groups are then reflected as [dimensions](/web/README.md#dimensions) within Netdata's charts.
+These groups are then reflected as [dimensions](https://github.com/netdata/netdata/blob/master/web/README.md#dimensions)
+within Netdata's charts.
![An example per-process CPU utilization chart in Netdata
Cloud](https://user-images.githubusercontent.com/1153921/101369156-352e2100-3865-11eb-9f0d-b8fac162e034.png)
@@ -153,12 +163,13 @@ shouldn't need to configure it to discover them.
However, if you're using multiple applications that the Netdata Agent groups together you may want to separate them for
more precise monitoring. If you're not running any other types of SQL databases on that node, you don't need to change
-the grouping, since you know that any MySQL is the only process contributing to the `sql` group.
+the grouping, since you know that MySQL is the only process contributing to the `sql` group.
Let's say you're using both MySQL and PostgreSQL databases on a single node, and want to monitor their processes
-independently. Open the `apps_groups.conf` file as explained in the [section
-above](#configure-the-netdata-agent-to-recognize-a-specific-process) and scroll down until you find the `database
-servers` section. Create new groups for MySQL and PostgreSQL, and move their process queries into the unique groups.
+independently. Open the `apps_groups.conf` file as explained in
+the [section above](#configure-the-netdata-agent-to-recognize-a-specific-process) and scroll down until you find
+the `database servers` section. Create new groups for MySQL and PostgreSQL, and move their process queries into the
+unique groups.
```conf
# -----------------------------------------------------------------------------
@@ -169,17 +180,18 @@ postgres: postgres*
sql: mariad* postmaster* oracle_* ora_* sqlservr
```
-Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system, to start collecting utilization metrics from your
-application. Time to [visualize your process metrics](#visualize-process-metrics).
+Restart Netdata with `sudo systemctl restart netdata`, or
+the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system, to start collecting utilization metrics
+from your application. Time to [visualize your process metrics](#visualize-process-metrics).
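If you want to confirm the change took effect, one quick sanity check (a sketch, assuming the Agent API is reachable on the default `localhost:19999` and that you named the groups `mysql` and `postgres`) is to ask the Agent for the per-process CPU chart and look for the new group names among its dimensions:

```bash
# Sketch: list the dimensions of the per-process CPU chart and look for the new groups.
# Assumes the Agent API is at localhost:19999 and the groups are named mysql/postgres.
curl -s "http://localhost:19999/api/v1/chart?chart=apps.cpu" | grep -oE '"(mysql|postgres)"' | sort -u
```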
### Custom applications
Let's assume you have an application that runs on the process `custom-app`. To monitor eBPF metrics for that application
separate from any others, you need to create a new group in `apps_groups.conf` and associate that process name with it.
-Open the `apps_groups.conf` file as explained in the [section
-above](#configure-the-netdata-agent-to-recognize-a-specific-process). Scroll down to `# NETDATA processes accounting`.
+Open the `apps_groups.conf` file as explained in
+the [section above](#configure-the-netdata-agent-to-recognize-a-specific-process). Scroll down
+to `# NETDATA processes accounting`.
Above that, paste in the following text, which creates a new `custom-app` group with the `custom-app` process. Replace
`custom-app` with the name of your application's Linux process. `apps_groups.conf` should now look like this:
@@ -195,26 +207,25 @@ custom-app: custom-app
...
```
-Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system, to start collecting utilization metrics from your
-application.
+Restart Netdata with `sudo systemctl restart netdata`, or
+the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system, to start collecting utilization metrics
+from your application.
## Visualize process metrics
Now that you're collecting metrics for your process, you'll want to visualize them using Netdata's real-time,
-interactive charts. Find these visualizations in the same section regardless of whether you use [Netdata
-Cloud](https://app.netdata.cloud) for infrastructure monitoring, or single-node monitoring with the local Agent's
-dashboard at `http://localhost:19999`.
+interactive charts. Find these visualizations in the same section regardless of whether you
+use [Netdata Cloud](https://app.netdata.cloud) for infrastructure monitoring, or single-node monitoring with the local
+Agent's dashboard at `http://localhost:19999`.
-If you need a refresher on all the available per-process charts, see the [above
-list](#per-process-metrics-and-charts-in-netdata).
+If you need a refresher on all the available per-process charts, see
+the [above list](#per-process-metrics-and-charts-in-netdata).
### Using Netdata's application collector (`apps.plugin`)
`apps.plugin` puts all of its charts under the **Applications** section of any Netdata dashboard.
-![Screenshot of the Applications section on a Netdata
-dashboard](https://user-images.githubusercontent.com/1153921/101401172-2ceadb80-388f-11eb-9e9a-88443894c272.png)
+![Screenshot of the Applications section on a Netdata dashboard](https://user-images.githubusercontent.com/1153921/101401172-2ceadb80-388f-11eb-9e9a-88443894c272.png)
Let's continue with the MySQL example. We can create a [test
database](https://www.digitalocean.com/community/tutorials/how-to-measure-mysql-query-performance-with-mysqlslap) in
@@ -223,11 +234,9 @@ MySQL to generate load on the `mysql` process.
`apps.plugin` immediately collects and visualizes this activity `apps.cpu` chart, which shows an increase in CPU
utilization from the `sql` group. There is a parallel increase in `apps.pwrites`, which visualizes writes to disk.
-![Per-application CPU utilization
-metrics](https://user-images.githubusercontent.com/1153921/101409725-8527da80-389b-11eb-96e9-9f401535aafc.png)
+![Per-application CPU utilization metrics](https://user-images.githubusercontent.com/1153921/101409725-8527da80-389b-11eb-96e9-9f401535aafc.png)
-![Per-application disk writing
-metrics](https://user-images.githubusercontent.com/1153921/101409728-85c07100-389b-11eb-83fd-d79dd1545b5a.png)
+![Per-application disk writing metrics](https://user-images.githubusercontent.com/1153921/101409728-85c07100-389b-11eb-83fd-d79dd1545b5a.png)
Next, the `mysqlslap` utility queries the database to provide some benchmarking load on the MySQL database. It won't
look exactly like a production database executing lots of user queries, but it gives you an idea of the possibility of
@@ -240,8 +249,7 @@ sudo mysqlslap --user=sysadmin --password --host=localhost --concurrency=50 --i
The following per-process disk utilization charts show spikes under the `sql` group at the same time `mysqlslap` was run
numerous times, with slightly different concurrency and query options.
-![Per-application disk
-metrics](https://user-images.githubusercontent.com/1153921/101411810-d08fb800-389e-11eb-85b3-f3fa41f1f887.png)
+![Per-application disk metrics](https://user-images.githubusercontent.com/1153921/101411810-d08fb800-389e-11eb-85b3-f3fa41f1f887.png)
> 💡 Click on any dimension below a chart in Netdata Cloud (or to the right of a chart on a local Agent dashboard), to
> visualize only that dimension. This can be particularly useful in process monitoring to separate one process'
@@ -256,8 +264,7 @@ For example, running the above workload shows the entire "story" how MySQL inter
processes/threads to handle a large number of SQL queries, then subsequently close the tasks as each query returns the
relevant data.
-![Per-process eBPF
-charts](https://user-images.githubusercontent.com/1153921/101412395-c8844800-389f-11eb-86d2-20c8a0f7b3c0.png)
+![Per-process eBPF charts](https://user-images.githubusercontent.com/1153921/101412395-c8844800-389f-11eb-86d2-20c8a0f7b3c0.png)
`ebpf.plugin` visualizes additional eBPF metrics, which are system-wide and not per-process, under the **eBPF** section.
@@ -267,35 +274,39 @@ Now that you have `apps_groups.conf` configured correctly, and know where to fin
Netdata's ecosystem, you can precisely monitor the health and performance of any process on your node using per-second
metrics.
-For even more in-depth troubleshooting, see our guide on [monitoring and debugging applications with
-eBPF](/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md).
+For even more in-depth troubleshooting, see our guide
+on [monitoring and debugging applications with eBPF](https://github.com/netdata/netdata/blob/master/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md).
-If the process you're monitoring also has a [supported collector](/collectors/COLLECTORS.md), now is a great time to set
+If the process you're monitoring also has
+a [supported collector](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md), now is a great time to
+set
that up if it wasn't autodetected. With both process utilization and application-specific metrics, you should have every
-piece of data needed to discover the root cause of an incident. See our [collector
-setup](/docs/collect/enable-configure.md) doc for details.
+piece of data needed to discover the root cause of an incident. See
+our [collector setup](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md) doc for details.
-[Create new dashboards](/docs/visualize/create-dashboards.md) in Netdata Cloud using charts from `apps.plugin`,
+[Create new dashboards](https://github.com/netdata/netdata/blob/master/docs/visualize/create-dashboards.md) in Netdata
+Cloud using charts from `apps.plugin`,
`ebpf.plugin`, and application-specific collectors to build targeted dashboards for monitoring key processes across your
infrastructure.
-Try running [Metric Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations) on a node that's
-running the process(es) you're monitoring. Even if nothing is going wrong at the moment, Netdata Cloud's embedded
-intelligence helps you better understand how a MySQL database, for example, might influence a system's volume of memory
-page faults. And when an incident is afoot, use Metric Correlations to reduce mean time to resolution (MTTR) and
-cognitive load.
-
-If you want more specific metrics from your custom application, check out Netdata's [statsd
-support](/collectors/statsd.plugin/README.md). With statd, you can send detailed metrics from your application to
-Netdata and visualize them with per-second granularity. Netdata's statsd collector works with dozens of [statsd server
-implementations](https://github.com/etsy/statsd/wiki#client-implementations), which work with most application
+Try
+running [Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md)
+on a node that's running the process(es) you're monitoring. Even if nothing is going wrong at the moment, Netdata
+Cloud's embedded intelligence helps you better understand how a MySQL database, for example, might influence a system's
+volume of memory page faults. And when an incident is afoot, use Metric Correlations to reduce mean time to
+resolution (MTTR) and cognitive load.
+
+If you want more specific metrics from your custom application, check out
+Netdata's [statsd support](https://github.com/netdata/netdata/blob/master/collectors/statsd.plugin/README.md). With statsd, you can send detailed metrics from your
+application to Netdata and visualize them with per-second granularity. Netdata's statsd collector works with dozens of
+[statsd server implementations](https://github.com/etsy/statsd/wiki#client-implementations), which work with most application
frameworks.
### Related reference documentation
-- [Netdata Agent · `apps.plugin`](/collectors/apps.plugin/README.md)
-- [Netdata Agent · `ebpf.plugin`](/collectors/ebpf.plugin/README.md)
-- [Netdata Agent · Dashboards](/web/README.md#dimensions)
-- [Netdata Agent · MySQL collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/mysql)
+- [Netdata Agent · `apps.plugin`](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md)
+- [Netdata Agent · `ebpf.plugin`](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md)
+- [Netdata Agent · Dashboards](https://github.com/netdata/netdata/blob/master/web/README.md#dimensions)
+- [Netdata Agent · MySQL collector](https://github.com/netdata/go.d.plugin/blob/master/modules/mysql/README.md)
diff --git a/docs/guides/monitor/raspberry-pi-anomaly-detection.md b/docs/guides/monitor/raspberry-pi-anomaly-detection.md
index 73f57cd04..00b652bf2 100644
--- a/docs/guides/monitor/raspberry-pi-anomaly-detection.md
+++ b/docs/guides/monitor/raspberry-pi-anomaly-detection.md
@@ -12,7 +12,7 @@ We love IoT and edge at Netdata, we also love machine learning. Even better if w
of monitoring increasingly complex systems.
We recently explored what might be involved in enabling our Python-based [anomalies
-collector](/collectors/python.d.plugin/anomalies/README.md) on a Raspberry Pi. To our delight, it's actually quite
+collector](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/anomalies/README.md) on a Raspberry Pi. To our delight, it's actually quite
straightforward!
Read on to learn all the steps and enable unsupervised anomaly detection on your Raspberry Pi(s).
@@ -23,14 +23,14 @@ Read on to learn all the steps and enable unsupervised anomaly detection on your
- A Raspberry Pi running Raspbian, which we'll call a _node_.
- The [open-source Netdata](https://github.com/netdata/netdata) monitoring agent. If you don't have it installed on your
- node yet, [get started now](/docs/get-started.mdx).
+ node yet, [get started now](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx).
## Install dependencies
First make sure Netdata is using Python 3 when it runs Python-based data collectors.
-Next, open `netdata.conf` using [`edit-config`](/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files)
-from within the [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory). Scroll down to the
+Next, open `netdata.conf` using [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files)
+from within the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). Scroll down to the
`[plugin:python.d]` section to pass in the `-ppython3` command option.
```conf
@@ -59,7 +59,7 @@ LLVM_CONFIG=llvm-config-9 pip3 install --user llvmlite numpy==1.20.1 netdata-pan
## Enable the anomalies collector
-Now you're ready to enable the collector and [restart Netdata](/docs/configure/start-stop-restart.md).
+Now you're ready to enable the collector and [restart Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md).
```bash
sudo ./edit-config python.d.conf
@@ -82,7 +82,7 @@ centralized cloud somewhere) is the resource utilization impact of running a mon
With the default configuration, the anomalies collector uses about 6.5% of CPU at each run. During the retraining step,
CPU utilization jumps to between 20-30% for a few seconds, but you can [configure
-retraining](/collectors/python.d.plugin/anomalies/README.md#configuration) to happen less often if you wish.
+retraining](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/anomalies/README.md#configuration) to happen less often if you wish.
![CPU utilization of anomaly detection on the Raspberry
Pi](https://user-images.githubusercontent.com/1153921/110149718-9d749c00-7d9b-11eb-9af8-46e2032cd1d0.png)
@@ -108,18 +108,18 @@ looks like a potentially useful addition to enable unsupervised anomaly detectio
See our two-part guide series for a more complete picture of configuring the anomalies collector, plus some best
practices on using the charts it automatically generates:
-- [_Detect anomalies in systems and applications_](/docs/guides/monitor/anomaly-detection-python.md)
-- [_Monitor and visualize anomalies with Netdata_](/docs/guides/monitor/visualize-monitor-anomalies.md)
+- [_Detect anomalies in systems and applications_](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/anomaly-detection-python.md)
+- [_Monitor and visualize anomalies with Netdata_](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/visualize-monitor-anomalies.md)
If you're using your Raspberry Pi for other purposes, like blocking ads/trackers with Pi-hole, check out our companion
-Pi guide: [_Monitor Pi-hole (and a Raspberry Pi) with Netdata_](/docs/guides/monitor/pi-hole-raspberry-pi.md).
+Pi guide: [_Monitor Pi-hole (and a Raspberry Pi) with Netdata_](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/pi-hole-raspberry-pi.md).
Once you've had a chance to give unsupervised anomaly detection a go, share your use cases and let us know of any
feedback on our [community forum](https://community.netdata.cloud/t/anomalies-collector-feedback-megathread/767).
### Related reference documentation
-- [Netdata Agent · Get Netdata](/docs/get-started.mdx)
-- [Netdata Agent · Anomalies collector](/collectors/python.d.plugin/anomalies/README.md)
+- [Netdata Agent · Get Netdata](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx)
+- [Netdata Agent · Anomalies collector](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/anomalies/README.md)
diff --git a/docs/guides/monitor/statsd.md b/docs/guides/monitor/statsd.md
index 3e2f0f85c..848e2649c 100644
--- a/docs/guides/monitor/statsd.md
+++ b/docs/guides/monitor/statsd.md
@@ -22,7 +22,7 @@ In general, the process for creating a StatsD collector can be summarized in 2 s
- Run an experiment by sending StatsD metrics to Netdata, without any prior configuration (see the sketch after this list). This will create a chart per metric (called private charts) and will help you verify that everything works as expected from the application side of things.
- Make sure to reload the dashboard tab **after** you start sending data to Netdata.
-- Create a configuration file for your app using [edit-config](/docs/configure/nodes.md): `sudo ./edit-config
+- Create a configuration file for your app using [edit-config](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md): `sudo ./edit-config
statsd.d/myapp.conf`
- Each app will have its own section in the right-hand menu.
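For the experiment mentioned in the first bullet above, you don't need anything beyond a shell. A minimal sketch, assuming Netdata's StatsD listener is on the default UDP port 8125 on the same host (the metric name `k6.http_reqs` is only an illustrative example):

```bash
# Send one counter sample to Netdata's StatsD listener (default: UDP port 8125).
echo "k6.http_reqs:1|c" | nc -u -w 1 localhost 8125
```

After a few samples like this, a private chart for the metric should appear on the dashboard once you reload it.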
@@ -30,7 +30,7 @@ Now, let's see the above process in detail.
## Prerequisites
-- A node with the [Netdata](/docs/get-started.mdx) installed.
+- A node with [Netdata](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx) installed.
- An application to instrument. For this guide, that will be [k6](https://k6.io/docs/getting-started/installation).
## Understanding the metrics
@@ -63,7 +63,7 @@ Here are some examples of default private charts. You can see that the histogram
## Create a new StatsD configuration file
-Start by creating a new configuration file under the `statsd.d/` folder in the [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory). Use [`edit-config`](/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) to create a new file called `k6.conf`.
+Start by creating a new configuration file under the `statsd.d/` folder in the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). Use [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) to create a new file called `k6.conf`.
```bash=
sudo ./edit-config statsd.d/k6.conf
@@ -104,7 +104,7 @@ Families and context are additional ways to group metrics. Families control the
Context is a second way to group metrics, when the metrics are of the same nature but different origin. In our case, if we ran several different load testing experiments side-by-side, we could define the same app, but different context (e.g `http_requests.experiment1`, `http_requests.experiment2`).
-Find more details about family and context in our [documentation](/web/README.md#families).
+Find more details about family and context in our [documentation](https://github.com/netdata/netdata/blob/master/web/README.md#families).
### Dimension
@@ -115,7 +115,7 @@ Now, having decided on how we are going to group the charts, we need to define h
The dimension option has this syntax: `dimension = [pattern] METRIC NAME TYPE MULTIPLIER DIVIDER OPTIONS`
-- **pattern**: A keyword that tells the StatsD server the `METRIC` string is actually a [simple pattern].(/libnetdata/simple_pattern/README.md). We don't simple patterns in the example, but if we wanted to visualize all the `http_req` metrics, we could have a single dimension: `dimension = pattern 'k6.http_req*' last 1 1`. Find detailed examples with patterns in our [documentation](/collectors/statsd.plugin/README.md#dimension-patterns).
+- **pattern**: A keyword that tells the StatsD server the `METRIC` string is actually a [simple pattern](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md). We don't use simple patterns in the example, but if we wanted to visualize all the `http_req` metrics, we could have a single dimension: `dimension = pattern 'k6.http_req*' last 1 1`. Find detailed examples with patterns in our [documentation](https://github.com/netdata/netdata/blob/master/collectors/statsd.plugin/README.md#dimension-patterns).
- **METRIC**: The id of the metric as it comes from the client. You can easily find this in the private charts above, for example: `k6.http_req_connecting`.
- **NAME**: The name of the dimension. You can use the dictionary to expand this to something more human-readable.
- **TYPE**:
@@ -212,7 +212,7 @@ Following the above steps, we append to the `k6.conf` that we defined above, the
> Take note that Netdata will report the rate for metrics and counters, even if k6 or another application sends an _absolute_ number. For example, k6 sends absolute HTTP requests with `http_reqs`, but Netdata visualizes that in `requests/second`.
-To enable this StatsD configuration, [restart Netdata](/docs/configure/start-stop-restart.md).
+To enable this StatsD configuration, [restart Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md).
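As an optional check that the new configuration was picked up (a sketch, assuming the Agent API at `localhost:19999` and an app named `k6`; the exact chart IDs depend on your chart definitions), you can list the chart IDs the Agent now exposes and filter for the `k6` ones:

```bash
# List chart IDs containing "k6"; exact IDs depend on your app and chart definitions.
curl -s "http://localhost:19999/api/v1/charts" | grep -oE '"[A-Za-z0-9_.]*k6[A-Za-z0-9_.]*"' | sort -u
```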
## Final touches
@@ -293,6 +293,6 @@ Netdata allows you easily visualize any StatsD metric without any configuration,
### Related reference documentation
-- [Netdata Agent · StatsD](/collectors/statsd.plugin/README.md)
+- [Netdata Agent · StatsD](https://github.com/netdata/netdata/blob/master/collectors/statsd.plugin/README.md)
diff --git a/docs/guides/monitor/stop-notifications-alarms.md b/docs/guides/monitor/stop-notifications-alarms.md
index a8b73a86a..3c026a89b 100644
--- a/docs/guides/monitor/stop-notifications-alarms.md
+++ b/docs/guides/monitor/stop-notifications-alarms.md
@@ -13,7 +13,7 @@ relevant if you run Netdata on your laptop or a small virtual server. If they're
to real issues with health and performance.
Silencing individual alarms is an excellent solution for situations where you're not interested in seeing a specific
-alarm but don't want to disable a [notification system](/health/notifications/README.md) entirely.
+alarm but don't want to disable a [notification system](https://github.com/netdata/netdata/blob/master/health/notifications/README.md) entirely.
## Find the alarm configuration file
@@ -34,7 +34,7 @@ In the `source` row, you see that this chart is getting its configuration from
the file you need to edit if you want to silence this alarm.
For more information about editing or referencing health configuration files on your system, see the [health
-quickstart](/health/QUICKSTART.md#edit-health-configuration-files).
+quickstart](https://github.com/netdata/netdata/blob/master/health/QUICKSTART.md#edit-health-configuration-files).
## Edit the file to enable silencing
@@ -70,7 +70,7 @@ To silence this alarm, change `sysadmin` to `silent`.
to: silent
```
-Use one of the available [methods](/health/QUICKSTART.md#reload-health-configuration) to reload your health configuration
+Use one of the available [methods](https://github.com/netdata/netdata/blob/master/health/QUICKSTART.md#reload-health-configuration) to reload your health configuration
and ensure you get no more notifications about that alarm.
You can add `to: silent` to any alarm you'd rather not receive notifications about.
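For example, on recent Agent versions one of those reload methods is the `netdatacli` helper (a sketch; if your installation doesn't ship `netdatacli`, use whichever reload method the health quickstart lists for your system):

```bash
# Reload only the health engine so the silenced alarm takes effect without a full restart.
sudo netdatacli reload-health
```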
@@ -80,12 +80,12 @@ You can add `to: silent` to any alarm you'd rather not bother you with notificat
You should now know the fundamentals behind silencing any individual alarm in Netdata.
To learn about _all_ of Netdata's health configuration possibilities, visit the [health reference
-guide](/health/REFERENCE.md), or check out other [tutorials on health monitoring](/health/README.md#guides).
+guide](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md), or check out other [tutorials on health monitoring](https://github.com/netdata/netdata/blob/master/health/README.md#guides).
Or, take better control over how you get notified about alarms via the [notification
-system](/health/notifications/README.md).
+system](https://github.com/netdata/netdata/blob/master/health/notifications/README.md).
-You can also use Netdata's [Health Management API](/web/api/health/README.md#health-management-api) to control health
+You can also use Netdata's [Health Management API](https://github.com/netdata/netdata/blob/master/web/api/health/README.md#health-management-api) to control health
checks and notifications while Netdata runs. With this API, you can disable health checks during a maintenance window or
backup process, for example.
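As a rough sketch of what that looks like in practice (assuming the default API token location `/var/lib/netdata/netdata.api.key` and an Agent on `localhost:19999`; adjust both for your install):

```bash
# Read the health management API token (path may differ on your system).
TOKEN=$(sudo cat /var/lib/netdata/netdata.api.key)

# Silence all health notifications before a maintenance window...
curl -s "http://localhost:19999/api/v1/manage/health?cmd=SILENCE ALL" -H "X-Auth-Token: ${TOKEN}"

# ...and return to normal behavior once you're done.
curl -s "http://localhost:19999/api/v1/manage/health?cmd=RESET" -H "X-Auth-Token: ${TOKEN}"
```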
diff --git a/docs/guides/monitor/visualize-monitor-anomalies.md b/docs/guides/monitor/visualize-monitor-anomalies.md
index 1f8c2c8f8..90ce20a4b 100644
--- a/docs/guides/monitor/visualize-monitor-anomalies.md
+++ b/docs/guides/monitor/visualize-monitor-anomalies.md
@@ -10,7 +10,7 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/moni
Welcome to part 2 of our series of guides on using _unsupervised anomaly detection_ to detect issues with your systems,
containers, and applications using the open-source Netdata Agent. For an introduction to detecting anomalies and
-monitoring associated metrics, see [part 1](/docs/guides/monitor/anomaly-detection-python.md), which covers prerequisites and
+monitoring associated metrics, see [part 1](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/anomaly-detection-python.md), which covers prerequisites and
configuration basics.
With anomaly detection in the Netdata Agent set up, you will now want to visualize and monitor which charts have
@@ -48,8 +48,8 @@ analysis (RCA).
The anomalies collector creates two "classes" of alarms for each chart captured by the `charts_regex` setting. All these
alarms are preconfigured based on your [configuration in
-`anomalies.conf`](/docs/guides/monitor/anomaly-detection-python.md#configure-the-anomalies-collector). With the `charts_regex`
-and `charts_to_exclude` settings from [part 1](/docs/guides/monitor/anomaly-detection-python.md) of this guide series, the
+`anomalies.conf`](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/anomaly-detection-python.md#configure-the-anomalies-collector). With the `charts_regex`
+and `charts_to_exclude` settings from [part 1](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/anomaly-detection-python.md) of this guide series, the
Netdata Agent creates 32 alarms driven by unsupervised anomaly detection.
The first class triggers warning alarms when the average anomaly probability for a given chart has stayed above 50% for
@@ -69,17 +69,17 @@ there's a full-blown incident, depending on what application/service you're usin
further investigation.
As you use the anomalies collector, you may find that the default settings provide too many or too few genuine alarms.
-In this case, [configure the alarm](/docs/monitor/configure-alarms.md) with `sudo ./edit-config
+In this case, [configure the alarm](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md) with `sudo ./edit-config
health.d/anomalies.conf`. Take a look at the `lookup` line syntax in the [health
-reference](/health/REFERENCE.md#alarm-line-lookup) to understand how the anomalies collector automatically creates
+reference](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alarm-line-lookup) to understand how the anomalies collector automatically creates
alarms for any dimension on the `anomalies_local.probability` and `anomalies_local.anomaly` charts.
## Visualize anomalies in charts
In either [Netdata Cloud](https://app.netdata.cloud) or the local Agent dashboard at `http://NODE:19999`, click on the
-**Anomalies** [section](/web/gui/README.md#sections) to see the pair of anomaly detection charts, which are
+**Anomalies** [section](https://github.com/netdata/netdata/blob/master/web/gui/README.md#sections) to see the pair of anomaly detection charts, which are
preconfigured to visualize per-second anomaly metrics based on your [configuration in
-`anomalies.conf`](/docs/guides/monitor/anomaly-detection-python.md#configure-the-anomalies-collector).
+`anomalies.conf`](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/anomaly-detection-python.md#configure-the-anomalies-collector).
These charts have the contexts `anomalies.probability` and `anomalies.anomaly`. Together, these charts
create meaningful visualizations for immediately recognizing not only that something is going wrong on your node, but
@@ -88,7 +88,7 @@ give context as to where to look next.
The `anomalies_local.probability` chart shows the probability that the latest observed data is anomalous, based on the
trained model. The `anomalies_local.anomaly` chart visualizes 0→1 predictions based on whether the latest observed
data is anomalous based on the trained model. Both charts share the same dimensions, which you configured via
-`charts_regex` and `charts_to_exclude` in [part 1](/docs/guides/monitor/anomaly-detection-python.md).
+`charts_regex` and `charts_to_exclude` in [part 1](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/anomaly-detection-python.md).
In other words, the `probability` chart shows the amplitude of the anomaly, whereas the `anomaly` chart provides quick
yes/no context.
@@ -108,7 +108,7 @@ dimensions that immediately shot to 100% anomaly probability, and remained there
## Build an anomaly detection dashboard
[Netdata Cloud](https://app.netdata.cloud) features a drag-and-drop [dashboard
-editor](/docs/visualize/create-dashboards.md) that helps you create entirely new dashboards with charts targeted for
+editor](https://github.com/netdata/netdata/blob/master/docs/visualize/create-dashboards.md) that helps you create entirely new dashboards with charts targeted for
your specific applications.
For example, here's a dashboard designed for visualizing anomalies present in an Nginx web server, including
@@ -119,12 +119,12 @@ dashboard](https://user-images.githubusercontent.com/1153921/104226915-c6188f00-
Use the anomaly charts for instant visual identification of potential anomalies, and then Nginx-specific charts, in the
right column, to validate whether the probability and anomaly counters are showing a valid incident worth further
-investigation using [Metric Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations) to narrow
+investigation using [Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md) to narrow
the dashboard into only the charts relevant to what you're seeing from the anomalies collector.
## What's next?
-Between this guide and [part 1](/docs/guides/monitor/anomaly-detection-python.md), which covered setup and configuration, you
+Between this guide and [part 1](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/anomaly-detection-python.md), which covered setup and configuration, you
now have a fundamental understanding of how unsupervised anomaly detection in Netdata works, from root cause to alarms
to preconfigured or custom dashboards.
@@ -132,11 +132,11 @@ We'd love to hear your feedback on the anomalies collector. Hop over to the [com
forum](https://community.netdata.cloud/t/anomalies-collector-feedback-megathread/767), and let us know if you're already getting value from
unsupervised anomaly detection, or would like to see something added to it. You might even post a custom configuration
that works well for monitoring some other popular application, like MySQL, PostgreSQL, Redis, or anything else we
-[support through collectors](/collectors/COLLECTORS.md).
+[support through collectors](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md).
### Related reference documentation
-- [Netdata Agent · Anomalies collector](/collectors/python.d.plugin/anomalies/README.md)
-- [Netdata Cloud · Build new dashboards](https://learn.netdata.cloud/docs/cloud/visualize/dashboards)
+- [Netdata Agent · Anomalies collector](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/anomalies/README.md)
+- [Netdata Cloud · Build new dashboards](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/dashboards.md)
diff --git a/docs/guides/python-collector.md b/docs/guides/python-collector.md
index 920b9b9ef..e0e7a6041 100644
--- a/docs/guides/python-collector.md
+++ b/docs/guides/python-collector.md
@@ -10,9 +10,9 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/pyth
# Develop a custom data collector in Python
-The Netdata Agent uses [data collectors](/docs/collect/how-collectors-work.md) to fetch metrics from hundreds of system,
+The Netdata Agent uses [data collectors](https://github.com/netdata/netdata/blob/master/docs/collect/how-collectors-work.md) to fetch metrics from hundreds of system,
container, and service endpoints. While the Netdata team and community have built [powerful
-collectors](/collectors/COLLECTORS.md) for most system, container, and service/application endpoints, there are plenty
+collectors](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md) for most system, container, and service/application endpoints, there are plenty
of custom applications that can't be monitored by default.
## Problem
@@ -29,7 +29,7 @@ covered here, or use the included examples for collecting and organizing either
## What you need to get started
- A physical or virtual Linux system, which we'll call a _node_.
-- A working installation of the free and open-source [Netdata](/docs/get-started.mdx) monitoring agent.
+- A working installation of the free and open-source [Netdata](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx) monitoring agent.
## Jobs and elements of a Python collector
@@ -90,7 +90,7 @@ context, charttype]`, where:
that is `A.B`, with `A` being the name of the collector, and `B` being the name of the specific metric.
- `charttype`: Either `line`, `area`, or `stacked`. If null, `line` is the default value.
-You can read more about `family` and `context` in the [web dashboard](/web/README.md#families) doc.
+You can read more about `family` and `context` in the [web dashboard](https://github.com/netdata/netdata/blob/master/web/README.md#families) doc.
Once the chart has been defined, you should define the dimensions of the chart. Dimensions are basically the metrics to
be represented in this chart and each chart can have more than one dimension. In order to define the dimensions, the
@@ -166,7 +166,7 @@ class Service(UrlService):
In our use-case, we use the `SimpleService` framework, since there is no framework class that suits our needs.
-You can read more about the [framework classes](/collectors/python.d.plugin/README.md#how-to-write-a-new-module) from
+You can read more about the [framework classes](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#how-to-write-a-new-module) from
the Netdata documentation.
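While developing a module, it can help to run `python.d.plugin` by hand in debug mode instead of restarting the whole Agent after every change. A sketch, assuming a default install layout and a hypothetical module file named `weather.chart.py` (your paths and module name may differ):

```bash
# Become the netdata user and run the plugin for a single module in debug mode.
sudo su -s /bin/bash netdata
/usr/libexec/netdata/plugins.d/python.d.plugin weather debug trace
```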
## An example collector using weather station data
@@ -348,7 +348,7 @@ ORDER = [
]
```
-[Restart Netdata](/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata` to see the new humidity
+[Restart Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata` to see the new humidity
chart:
![A snapshot of the modified chart](https://i.imgur.com/XOeCBmg.png)
@@ -405,7 +405,7 @@ ORDER = [
]
```
-[Restart Netdata](/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata` to see the new
+[Restart Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata` to see the new
min/max/average temperature chart with multiple dimensions:
![A snapshot of the modified chart](https://i.imgur.com/g7E8lnG.png)
@@ -459,7 +459,7 @@ variables and inform the user about the defaults. For example, take a look at th
[GitHub](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/example/example.conf).
You can read more about the configuration file on the [`python.d.plugin`
-documentation](https://learn.netdata.cloud/docs/agent/collectors/python.d.plugin).
+documentation](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md).
## What's next?
@@ -470,7 +470,7 @@ Now you are ready to start developing our Netdata python Collector and share it
- If you need help while developing your collector, join our [Netdata
Community](https://community.netdata.cloud/c/agent-development/9) to chat about it.
- Follow the
- [checklist](https://learn.netdata.cloud/docs/agent/collectors/python.d.plugin#pull-request-checklist-for-python-plugins)
+ [checklist](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#pull-request-checklist-for-python-plugins)
to contribute the collector to the Netdata Agent [repository](https://github.com/netdata/netdata).
- Check out the [example](https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin/example) Python
collector, which is a minimal example collector you could also use as a starting point. Once comfortable with that,
diff --git a/docs/guides/step-by-step/step-00.md b/docs/guides/step-by-step/step-00.md
index 9f0fecac8..2f83ee9b4 100644
--- a/docs/guides/step-by-step/step-00.md
+++ b/docs/guides/step-by-step/step-00.md
@@ -18,7 +18,7 @@ completely new to Netdata, or have never tried health monitoring/performance tro
guide is perfect for you.
If you have monitoring experience, or would rather get straight into configuring Netdata to your needs, you can jump
-straight into code and configurations with our [getting started guide](/docs/get-started.mdx).
+straight into code and configurations with our [getting started guide](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx).
> This guide contains instructions for Netdata installed on a Linux system. Many of the instructions will work on
> other supported operating systems, like FreeBSD and macOS, but we can't make any guarantees.
@@ -44,7 +44,7 @@ The easiest way to install Netdata on a Linux system is our `kickstart.sh` one-l
and let it take care of the rest.
This script will install Netdata from source, keep it up to date with nightly releases, connect to the Netdata
-[registry](/registry/README.md), and sends [_anonymous statistics_](/docs/anonymous-statistics.md) about how you use
+[registry](https://github.com/netdata/netdata/blob/master/registry/README.md), and send [_anonymous statistics_](https://github.com/netdata/netdata/blob/master/docs/anonymous-statistics.md) about how you use
Netdata. We use this information to better understand how we can improve the Netdata experience for all our users.
To install Netdata, run the following as your normal user:
@@ -60,7 +60,7 @@ Once finished, you'll have Netdata installed, and you'll be set up to get _night
improvements, and bugfixes.
If this method doesn't work for you, or you want to use a different process, visit our [installation
-documentation](/packaging/installer/README.md) for details.
+documentation](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md) for details.
## Netdata fundamentals
diff --git a/docs/guides/step-by-step/step-01.md b/docs/guides/step-by-step/step-01.md
index f5430e3a6..e60bb0769 100644
--- a/docs/guides/step-by-step/step-01.md
+++ b/docs/guides/step-by-step/step-01.md
@@ -139,7 +139,7 @@ easy!
We'll cover this quickly, as you're probably eager to get on with using Netdata itself.
We don't want to lock you in to using Netdata by itself, and forever. By supporting [archiving to
-external databases](/exporting/README.md) like Graphite, Prometheus, OpenTSDB, MongoDB, and others, you can use Netdata _in
+external databases](https://github.com/netdata/netdata/blob/master/exporting/README.md) like Graphite, Prometheus, OpenTSDB, MongoDB, and others, you can use Netdata _in
conjunction_ with software that might seem like our competitors.
We don't want to "wage war" with another monitoring solution, whether it's commercial, open-source, or anything in
diff --git a/docs/guides/step-by-step/step-02.md b/docs/guides/step-by-step/step-02.md
index 4b802ffd6..535f3cfa3 100644
--- a/docs/guides/step-by-step/step-02.md
+++ b/docs/guides/step-by-step/step-02.md
@@ -11,7 +11,7 @@ working with the dashboard directly.
This step-by-step guide assumes you've already installed Netdata on a system of yours. If you haven't yet, hop back over
to ["step 0"](step-00.md#before-we-get-started) for information about our one-line installer script. Or, view the
-[installation docs](/packaging/installer/README.md) to learn more. Once you have Netdata installed, you can hop back
+[installation docs](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md) to learn more. Once you have Netdata installed, you can hop back
over here and dig in.
## What you'll learn in this step
@@ -56,7 +56,7 @@ what it's collecting. If you run Netdata on many different systems using differe
menus and submenus may look a little different for each one.
To learn more about menus, see our documentation about [navigating the standard
-dashboard](/web/gui/README.md#metrics-menus).
+dashboard](https://github.com/netdata/netdata/blob/master/web/gui/README.md#metrics-menus).
> ❗ By default, Netdata only creates and displays charts if the metrics are _not zero_. So, you may be missing some
> charts, menus, and submenus if those charts have zero metrics. You can change this by changing the **Which dimensions
@@ -106,7 +106,7 @@ looking at its name or hovering over the chart's date.
It's important to understand these differences, as Netdata uses charts, dimensions, families, and contexts to create
health alarms and configure collectors. To read even more about the differences between all these elements of the
dashboard, and how they affect other parts of Netdata, read our [dashboards
-documentation](/web/README.md#charts-contexts-families).
+documentation](https://github.com/netdata/netdata/blob/master/web/README.md#charts-contexts-families).
## Interact with charts
@@ -148,7 +148,7 @@ chart to its original height, double-click the same icon.
![Animated GIF of resizing a chart and resetting it to the default
height](https://user-images.githubusercontent.com/1153921/80842459-7d41e280-8bb6-11ea-9488-1bc29f94d7f2.gif)
-To learn more about other options and chart interactivity, read our [dashboard documentation](/web/README.md).
+To learn more about other options and chart interactivity, read our [dashboard documentation](https://github.com/netdata/netdata/blob/master/web/README.md).
## See raised alarms and the alarm log
diff --git a/docs/guides/step-by-step/step-03.md b/docs/guides/step-by-step/step-03.md
index c1d283ba0..3204765b4 100644
--- a/docs/guides/step-by-step/step-03.md
+++ b/docs/guides/step-by-step/step-03.md
@@ -14,7 +14,7 @@ You might be thinking, "So, now I have to remember all these IP addresses, and t
manually, to move from one system to another? Maybe I should just make a bunch of bookmarks. What's a few more tabs
on top of the hundred I have already?"
-We get it. That's why we built [Netdata Cloud](https://learn.netdata.cloud/docs/cloud/), which connects many distributed
+We get it. That's why we built [Netdata Cloud](https://github.com/netdata/netdata/blob/master/docs/cloud/cloud.mdx), which connects many distributed
agents for a seamless experience when monitoring an entire infrastructure of Netdata-monitored nodes.
![Animated GIF of Netdata
@@ -24,13 +24,16 @@ Cloud](https://user-images.githubusercontent.com/1153921/80828986-1ebb3b00-8b9b-
In this step of the Netdata guide, we'll talk about the following:
-- [Why you should use Netdata Cloud](#why-use-netdata-cloud)
-- [Get started with Netdata Cloud](#get-started-with-netdata-cloud)
-- [Navigate between dashboards with Visited Nodes](#navigate-between-dashboards-with-visited-nodes)
+- [Step 3. Monitor more than one system with Netdata](#step-3-monitor-more-than-one-system-with-netdata)
+ - [What you'll learn in this step](#what-youll-learn-in-this-step)
+ - [Why use Netdata Cloud?](#why-use-netdata-cloud)
+ - [Get started with Netdata Cloud](#get-started-with-netdata-cloud)
+ - [Navigate between dashboards with Visited Nodes](#navigate-between-dashboards-with-visited-nodes)
+ - [What's next?](#whats-next)
## Why use Netdata Cloud?
-Our [Cloud documentation](https://learn.netdata.cloud/docs/cloud/) does a good job (we think!) of explaining why Cloud
+Our [Cloud documentation](https://github.com/netdata/netdata/blob/master/docs/cloud/cloud.mdx) does a good job (we think!) of explaining why Cloud
gives you a ton of value at no cost:
> Netdata Cloud gives you real-time visibility for your entire infrastructure. With Netdata Cloud, you can run all your
@@ -44,7 +47,7 @@ features, new collectors for more applications, and improved UI, so will Cloud.
## Get started with Netdata Cloud
Signing in, onboarding, and connecting your first nodes only takes a few minutes, and we have a [Get started with
-Cloud](https://learn.netdata.cloud/docs/cloud/get-started) guide to help you walk through every step.
+Cloud](https://github.com/netdata/netdata/blob/master/docs/cloud/cloud.mdx) guide to help you walk through every step.
Or, if you're feeling confident, dive right in.
diff --git a/docs/guides/step-by-step/step-04.md b/docs/guides/step-by-step/step-04.md
index 37b4245be..fcd84ce6a 100644
--- a/docs/guides/step-by-step/step-04.md
+++ b/docs/guides/step-by-step/step-04.md
@@ -43,7 +43,7 @@ In the system represented by the screenshot, the line reads: `config directory =
`netdata.conf`, and all the other configuration files, can be found at `/etc/netdata`.
> For more details on where your Netdata config directory is, take a look at our [installation
-> instructions](/packaging/installer/README.md).
+> instructions](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md).
For the rest of this guide, we'll assume you're editing files or running scripts from _within_ your **Netdata
configuration directory**.
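If you ever lose track of where that directory is, one quick way to check (a sketch, assuming the local dashboard is reachable on the default port) is to ask the running Agent for its effective configuration and filter for the setting:

```bash
# Print the "config directory" line from the Agent's running configuration.
curl -s "http://localhost:19999/netdata.conf" | grep "config directory"
```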
@@ -96,7 +96,7 @@ section and give it the value of `1`.
```
Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system.
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
Now, open up your browser and navigate to `http://HOST:19999/netdata.conf`. You'll see that Netdata has recognized
that our fake option isn't valid and added a notice that Netdata will ignore it.
@@ -124,8 +124,8 @@ Once you're done, restart Netdata and refresh the dashboard. Say hello to your r
netdata.conf](https://user-images.githubusercontent.com/1153921/80994808-1c065300-8df2-11ea-81af-d28dc3ba27c8.gif)
Netdata has dozens upon dozens of options you can change. To see them all, read our [daemon
-configuration](/daemon/config/README.md), or hop into our popular guide on [increasing long-term metrics
-storage](/docs/guides/longer-metrics-storage.md).
+configuration](https://github.com/netdata/netdata/blob/master/daemon/config/README.md), or hop into our popular guide on [increasing long-term metrics
+storage](https://github.com/netdata/netdata/blob/master/docs/guides/longer-metrics-storage.md).
## What's next?
diff --git a/docs/guides/step-by-step/step-05.md b/docs/guides/step-by-step/step-05.md
index 3cd8c5dbc..3ef498d40 100644
--- a/docs/guides/step-by-step/step-05.md
+++ b/docs/guides/step-by-step/step-05.md
@@ -32,8 +32,7 @@ The first chart you see on any Netdata dashboard is the `system.cpu` chart, whic
across all cores. To figure out which file you need to edit to tune this alarm, click the **Alarms** button at the top
of the dashboard, click on the **All** tab, and find the **system - cpu** alarm entity.
-![The system - cpu alarm
-entity](https://user-images.githubusercontent.com/1153921/67034648-ebb4cc80-f0cc-11e9-9d49-1023629924f5.png)
+![The system - cpu alarm entity](https://user-images.githubusercontent.com/1153921/67034648-ebb4cc80-f0cc-11e9-9d49-1023629924f5.png)
Look at the `source` row in the table. This means the `system.cpu` chart sources its health alarms from
`4@/usr/lib/netdata/conf.d/health.d/cpu.conf`. To tune these alarms, you'll need to edit the alarm file at
@@ -70,10 +69,10 @@ the `warn` and `crit` lines to the values of your choosing. For example:
```
You _can_ restart Netdata with `sudo systemctl restart netdata`, to enable your tune, but you can also reload _only_ the
-health monitoring component using one of the available [methods](/health/QUICKSTART.md#reload-health-configuration).
+health monitoring component using one of the available [methods](https://github.com/netdata/netdata/blob/master/health/QUICKSTART.md#reload-health-configuration).
You can also tune any other aspect of the default alarms. To better understand how each line in a health entity works,
-read our [health documentation](/health/README.md).
+read our [health documentation](https://github.com/netdata/netdata/blob/master/health/README.md).
### Silence an individual alarm
@@ -176,7 +175,7 @@ These lines will trigger a warning if that average RAM usage goes above 80%, and
> ❗ Most default Netdata alarms come with more complicated `warn` and `crit` lines. You may have noticed the line `warn:
> $this > (($status >= $WARNING) ? (75) : (85))` in one of the health entity examples above, which is an example of
-> using the [conditional operator for hysteresis](/health/REFERENCE.md#special-use-of-the-conditional-operator).
+> using the [conditional operator for hysteresis](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#special-use-of-the-conditional-operator).
> Hysteresis is used to keep Netdata from triggering a ton of alerts if the metric being tracked quickly goes above and
> then falls below the threshold. For this very simple example, we'll skip hysteresis, but recommend implementing it in
> your future health entities.
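A hedged sketch of a health entity using the conditional operator quoted above for hysteresis; the alarm name, chart, lookup, and thresholds are illustrative:

```conf
 alarm: ram_usage_hysteresis
    on: system.ram
lookup: average -1m percentage of used
 units: %
 every: 1m
  warn: $this > (($status >= $WARNING)  ? (75) : (85))
  crit: $this > (($status == $CRITICAL) ? (85) : (95))
  info: RAM usage, with hysteresis to avoid alert flapping
```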
@@ -215,7 +214,7 @@ stress -m 1 --vm-bytes 8G --vm-keep
```
Netdata is capable of understanding much more complicated entities. To better understand how they work, read the [health
-documentation](/health/README.md), look at some [examples](/health/REFERENCE.md#example-alarms), and open the files
+documentation](https://github.com/netdata/netdata/blob/master/health/README.md), look at some [examples](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#example-alarms), and open the files
containing the default entities on your system.
## Enable Netdata's notification systems
@@ -224,7 +223,7 @@ Health alarms, while great on their own, are pretty useless without some way of
That's why Netdata comes with a notification system that supports more than a dozen services, such as email, Slack,
Discord, PagerDuty, Twilio, Amazon SNS, and much more.
-To see all the supported systems, visit our [notifications documentation](/health/notifications/README.md).
+To see all the supported systems, visit our [notifications documentation](https://github.com/netdata/netdata/blob/master/health/notifications/README.md).
We'll cover email and Slack notifications here, but with this knowledge you should be able to enable any other type of
notifications instead of or in addition to these.
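As a rough illustration, enabling those two endpoints in `health_alarm_notify.conf` looks something like the following; the email address, webhook URL, and channel are placeholders:

```conf
# email notifications
SEND_EMAIL="YES"
DEFAULT_RECIPIENT_EMAIL="you@example.com"

# Slack notifications
SEND_SLACK="YES"
SLACK_WEBHOOK_URL="https://hooks.slack.com/services/XXXX/YYYY/ZZZZ"
DEFAULT_RECIPIENT_SLACK="alarms"
```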
@@ -330,9 +329,9 @@ applications.
To further configure your email or Slack notification setup, or to enable other notification systems, check out the
following documentation:
-- [Email notifications](/health/notifications/email/README.md)
-- [Slack notifications](/health/notifications/slack/README.md)
-- [Netdata's notification system](/health/notifications/README.md)
+- [Email notifications](https://github.com/netdata/netdata/blob/master/health/notifications/email/README.md)
+- [Slack notifications](https://github.com/netdata/netdata/blob/master/health/notifications/slack/README.md)
+- [Netdata's notification system](https://github.com/netdata/netdata/blob/master/health/notifications/README.md)
## What's next?
diff --git a/docs/guides/step-by-step/step-06.md b/docs/guides/step-by-step/step-06.md
index f04098fc1..b951a76bb 100644
--- a/docs/guides/step-by-step/step-06.md
+++ b/docs/guides/step-by-step/step-06.md
@@ -8,13 +8,13 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step
When Netdata _starts_, it auto-detects dozens of **data sources**, such as database servers, web servers, and more.
To auto-detect and collect metrics from a source you just installed, you need to restart Netdata using `sudo systemctl
-restart netdata`, or the [appropriate method](/docs/configure/start-stop-restart.md) for your system.
+restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
However, auto-detection only works if you installed the source using its standard installation
procedure. If Netdata isn't collecting metrics after a restart, your source probably isn't configured
correctly.
-Check out the [collectors that come pre-installed with Netdata](/collectors/COLLECTORS.md) to find the module for the
+Check out the [collectors that come pre-installed with Netdata](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md) to find the module for the
source you want to monitor.
## What you'll learn in this step
@@ -37,8 +37,8 @@ are organized and managed by plugins. **Internal** plugins collect system metrics
non-system metrics, and **orchestrator** plugins group individual collectors together based on the programming language
they were built in.
-These modules are primarily written in [Go](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/) (`go.d`) and
-[Python](/collectors/python.d.plugin/README.md), although some use [Bash](/collectors/charts.d.plugin/README.md)
+These modules are primarily written in [Go](https://github.com/netdata/go.d.plugin/blob/master/README.md) (`go.d`) and
+[Python](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md), although some use [Bash](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/README.md)
(`charts.d`).
## Enable and disable plugins
@@ -100,7 +100,7 @@ Next, edit your `/etc/nginx/sites-enabled/default` file to include a `location`
```
Restart Netdata using `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system, and Netdata will auto-detect metrics from your Nginx web
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system, and Netdata will auto-detect metrics from your Nginx web
server!
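The `location` block referenced above typically exposes nginx's `stub_status` endpoint for the collector to scrape; a sketch, with an illustrative path and access rules:

```conf
location /stub_status {
    stub_status;
    # restrict access to the machine running Netdata
    allow 127.0.0.1;
    deny all;
}
```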
While not necessary for most auto-detection and collection purposes, you can also configure the Nginx collector itself
diff --git a/docs/guides/step-by-step/step-07.md b/docs/guides/step-by-step/step-07.md
index 17a02cd46..8c5c21bee 100644
--- a/docs/guides/step-by-step/step-07.md
+++ b/docs/guides/step-by-step/step-07.md
@@ -9,7 +9,7 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step
Welcome to the seventh step of the Netdata guide!
This step of the guide aims to get you more familiar with the features of the dashboard not previously mentioned in
-[step 2](/docs/guides/step-by-step/step-02.md).
+[step 2](https://github.com/netdata/netdata/blob/master/docs/guides/step-by-step/step-02.md).
## What you'll learn in this step
@@ -53,9 +53,9 @@ You can always check if there is an update available from the **Update** area of
If an update is available, you'll see a modal similar to the one above.
-When you use the [automatic one-line installer script](/packaging/installer/README.md) attempt to update every day. If
-you choose to update it manually, there are [several well-documented methods](/packaging/installer/UPDATE.md) to achieve
-that. However, it is best practice for you to first go over the [changelog](/CHANGELOG.md).
+When you use the [automatic one-line installer script](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md), your installation attempts to update every day. If
+you choose to update it manually, there are [several well-documented methods](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md) to achieve
+that. However, it is best practice for you to first go over the [changelog](https://github.com/netdata/netdata/blob/master/CHANGELOG.md).
## Export and import a snapshot
diff --git a/docs/guides/step-by-step/step-08.md b/docs/guides/step-by-step/step-08.md
index e9c0f902c..7a8d417f1 100644
--- a/docs/guides/step-by-step/step-08.md
+++ b/docs/guides/step-by-step/step-08.md
@@ -145,7 +145,7 @@ charts on a single page.
### The chart unique ID (required)
You need to specify the unique ID of a chart to show it on your custom dashboard. If you forgot how to find the unique
-ID, head back over to [step 2](/docs/guides/step-by-step/step-02.md#understand-charts-dimensions-families-and-contexts)
+ID, head back over to [step 2](https://github.com/netdata/netdata/blob/master/docs/guides/step-by-step/step-02.md#understand-charts-dimensions-families-and-contexts)
for a re-introduction.
You can then put this unique ID into a `<div>` element with the `data-netdata` attribute. Put this in the `<body>` of
@@ -385,11 +385,11 @@ In this guide, you learned the fundamentals of building a custom Netdata dashboa
charts to your `custom-dashboard.html`, change the charts that are already there, and size them according to your needs.
Of course, the custom dashboarding features covered here are just the beginning. Be sure to read up on our [custom
-dashboard documentation](/web/gui/custom/README.md) for details on how you can use other chart libraries, pull metrics
+dashboard documentation](https://github.com/netdata/netdata/blob/master/web/gui/custom/README.md) for details on how you can use other chart libraries, pull metrics
from multiple Netdata agents, and choose which dimensions a given chart shows.
Next, you'll learn how to store long-term historical metrics in Netdata!
-[Next: Long-term metrics storage →](/docs/guides/step-by-step/step-09.md)
+[Next: Long-term metrics storage →](https://github.com/netdata/netdata/blob/master/docs/guides/step-by-step/step-09.md)
diff --git a/docs/guides/step-by-step/step-09.md b/docs/guides/step-by-step/step-09.md
index 8aacd7514..839115a27 100644
--- a/docs/guides/step-by-step/step-09.md
+++ b/docs/guides/step-by-step/step-09.md
@@ -5,7 +5,7 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/step
# Step 9. Long-term metrics storage
-By default, Netdata stores metrics in a custom database we call the [database engine](/database/engine/README.md), which
+By default, Netdata stores metrics in a custom database we call the [database engine](https://github.com/netdata/netdata/blob/master/database/engine/README.md), which
stores recent metrics in your system's RAM and "spills" historical metrics to disk. By using both RAM and disk, the
database engine helps you store a much larger dataset than the amount of RAM your system has.
@@ -51,7 +51,7 @@ the database engine to use. The higher those values, the more metrics Netdata wi
512, respectively, the database engine should store about four days' worth of data on a system collecting 2,000 metrics
every second.
-[**See our database engine calculator**](/docs/store/change-metrics-storage.md) to help you correctly set `dbengine disk
+[**See our database engine calculator**](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md) to help you correctly set `dbengine disk
space` based on your needs. The calculator gives an accurate estimate based on how many child nodes you have, how many
metrics your Agent collects, and more.
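A sketch of the `netdata.conf` settings this section is about, using the example values of 32 and 512 mentioned above (tune them to your own retention needs):

```conf
[global]
    memory mode = dbengine
    # size of the in-memory page cache, in MiB
    page cache size = 32
    # on-disk space the database engine may use, in MiB
    dbengine disk space = 512
```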
@@ -63,7 +63,7 @@ metrics your Agent collects, and more.
```
After you've made your changes, restart Netdata using `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system.
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
To confirm the database engine is working, go to your Netdata dashboard and click on the **Netdata Monitoring** menu on
the right-hand side. You can find `dbengine` metrics after `queries`.
@@ -77,7 +77,7 @@ You can archive all the metrics collected by Netdata to **external databases**.
include Graphite, OpenTSDB, Prometheus, AWS Kinesis Data Streams, Google Cloud Pub/Sub, MongoDB, and the list is always
growing.
-As we said in [step 1](/docs/guides/step-by-step/step-01.md), we have only complimentary systems, not competitors! We're
+As we said in [step 1](https://github.com/netdata/netdata/blob/master/docs/guides/step-by-step/step-01.md), we have only complementary systems, not competitors! We're
happy to support these archiving methods and are always working to improve them.
A lot of Netdata users archive their metrics to one of these databases for long-term storage or further analysis. Since
@@ -117,7 +117,7 @@ use netdata
db.createCollection("netdata_metrics")
```
-Next, Netdata needs to be [reinstalled](/packaging/installer/REINSTALL.md) in order to detect that the required
+Next, Netdata needs to be [reinstalled](https://github.com/netdata/netdata/blob/master/packaging/installer/REINSTALL.md) in order to detect that the required
libraries to make this exporting connection exist. Since you most likely installed Netdata using the one-line installer
script, all you have to do is run that script again. Don't worry—any configuration changes you made along the way will
be retained!
@@ -140,14 +140,14 @@ Add the following section to the file:
```
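For readers without the full file in front of them, a sketch of the kind of `exporting.conf` section this step adds; the section name and destination are placeholders, while the database and collection match the `mongo` commands above:

```conf
[mongodb:my_mongo_instance]
    enabled = yes
    # placeholder: point this at your MongoDB host
    destination = localhost
    database = netdata
    collection = netdata_metrics
```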
Restart Netdata using `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system, to enable the MongoDB exporting connector. Click on the
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system, to enable the MongoDB exporting connector. Click on the
**Netdata Monitoring** menu and check out the **exporting my mongo instance** sub-menu. You should start seeing these
charts fill up with data about the exporting process!
![image](https://user-images.githubusercontent.com/1153921/70443852-25171200-1a56-11ea-8be3-494544b1c295.png)
If you'd like to try connecting Netdata to another database, such as Prometheus or OpenTSDB, read our [exporting
-documentation](/exporting/README.md).
+documentation](https://github.com/netdata/netdata/blob/master/exporting/README.md).
## What's next?
@@ -157,6 +157,6 @@ metrics to MongoDB for long-term storage.
In the last step of this step-by-step guide, we'll put our sysadmin hat on and use Nginx to proxy traffic to and from
our Netdata dashboard.
-[Next: Set up a proxy →](/docs/guides/step-by-step/step-10.md)
+[Next: Set up a proxy →](https://github.com/netdata/netdata/blob/master/docs/guides/step-by-step/step-10.md)
diff --git a/docs/guides/step-by-step/step-10.md b/docs/guides/step-by-step/step-10.md
index c9acf5aaf..a24e803f7 100644
--- a/docs/guides/step-by-step/step-10.md
+++ b/docs/guides/step-by-step/step-10.md
@@ -219,9 +219,9 @@ You're a real sysadmin now!
If you want to configure your Nginx proxy further, check out the following:
-- [Running Netdata behind Nginx](/docs/Running-behind-nginx.md)
-- [How to optimize Netdata's performance](/docs/guides/configure/performance.md)
-- [Enabling TLS on Netdata's dashboard](/web/server/README.md#enabling-tls-support)
+- [Running Netdata behind Nginx](https://github.com/netdata/netdata/blob/master/docs/Running-behind-nginx.md)
+- [How to optimize Netdata's performance](https://github.com/netdata/netdata/blob/master/docs/guides/configure/performance.md)
+- [Enabling TLS on Netdata's dashboard](https://github.com/netdata/netdata/blob/master/web/server/README.md#enabling-tls-support)
And... you're _almost_ done with the Netdata guide.
diff --git a/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md b/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md
index 3ebca5425..c79a038cc 100644
--- a/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md
+++ b/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md
@@ -9,7 +9,7 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/guides/trou
When trying to troubleshoot or debug a finicky application, there's no such thing as too much information. At Netdata,
we developed programs that connect to the [_extended Berkeley Packet Filter_ (eBPF) virtual
-machine](/collectors/ebpf.plugin/README.md) to help you see exactly how specific applications are interacting with the
+machine](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md) to help you see exactly how specific applications are interacting with the
Linux kernel. With these charts, you can root out bugs, discover optimizations, diagnose memory leaks, and much more.
This means you can see exactly how often, and in what volume, the application creates processes, opens files, writes to
@@ -26,7 +26,7 @@ To start troubleshooting an application with eBPF metrics, you need to ensure yo
displays those metrics independent from any other process.
You can use the `apps_groups.conf` file to configure which applications appear in charts generated by
-[`apps.plugin`](/collectors/apps.plugin/README.md). Once you edit this file and create a new group for the application
+[`apps.plugin`](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md). Once you edit this file and create a new group for the application
you want to monitor, you can see how it's interacting with the Linux kernel via real-time eBPF metrics.
Let's assume you have an application that runs on the process `custom-app`. To monitor eBPF metrics for that application
@@ -58,12 +58,12 @@ dev: custom-app
```
Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system, to begin seeing metrics for this particular
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system, to begin seeing metrics for this particular
group+process. You can also add additional processes to the same group.
You can set up `apps_groups.conf` to show more precise eBPF metrics for any application or service running on your
system, even if it's a standard package like Redis, Apache, or any other [application/service Netdata collects
-from](/collectors/COLLECTORS.md).
+from](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md).
```conf
# -----------------------------------------------------------------------------
@@ -107,7 +107,7 @@ Replace `entry` with `return`:
```
Restart Netdata with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system.
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
## Get familiar with per-application eBPF metrics and charts
@@ -119,7 +119,7 @@ Pay particular attention to the charts in the **ebpf file**, **ebpf syscall**, *
sub-sections. These charts are populated by low-level Linux kernel metrics thanks to eBPF, and showcase the volume of
calls to open/close files, call functions like `do_fork`, IO activity on the VFS, and much more.
-See the [eBPF collector documentation](/collectors/ebpf.plugin/README.md#integration-with-appsplugin) for the full list
+See the [eBPF collector documentation](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md#integration-with-appsplugin) for the full list
of per-application charts.
Let's show some examples of how you can first identify normal eBPF patterns, then use that knowledge to identify
@@ -236,17 +236,17 @@ same application on multiple systems and want to correlate how it performs on ea
findings with someone else on your team.
If you don't already have a Netdata Cloud account, go [sign in](https://app.netdata.cloud) and get started for free.
-Read the [get started with Cloud guide](https://learn.netdata.cloud/docs/cloud/get-started) for a walkthrough of
+Read the [get started with Cloud guide](https://github.com/netdata/netdata/blob/master/docs/cloud/get-started.mdx) for a walkthrough of
connecting nodes to Netdata Cloud and other fundamentals.
Once you've added one or more nodes to a Space in Netdata Cloud, you can see aggregated eBPF metrics in the [Overview
-dashboard](/docs/visualize/overview-infrastructure.md) under the same **Applications** or **eBPF** sections that you
-find on the local Agent dashboard. Or, [create new dashboards](/docs/visualize/create-dashboards.md) using eBPF metrics
+dashboard](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md) under the same **Applications** or **eBPF** sections that you
+find on the local Agent dashboard. Or, [create new dashboards](https://github.com/netdata/netdata/blob/master/docs/visualize/create-dashboards.md) using eBPF metrics
from any number of distributed nodes to see how your application interacts with multiple Linux kernels on multiple Linux
systems.
Now that you can see eBPF metrics in Netdata Cloud, you can [invite your
-team](https://learn.netdata.cloud/docs/cloud/manage/invite-your-team) and share your findings with others.
+team](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/invite-your-team.md) and share your findings with others.
## What's next?
@@ -257,8 +257,8 @@ interacts with the Linux kernel.
If you're still trying to wrap your head around what we offer, be sure to read up on our accompanying documentation and
other resources on eBPF monitoring with Netdata:
-- [eBPF collector](/collectors/ebpf.plugin/README.md)
-- [eBPF's integration with `apps.plugin`](/collectors/apps.plugin/README.md#integration-with-ebpf)
+- [eBPF collector](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md)
+- [eBPF's integration with `apps.plugin`](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md#integration-with-ebpf)
- [Linux eBPF monitoring with Netdata](https://www.netdata.cloud/blog/linux-ebpf-monitoring-with-netdata/)
The scenarios described above are just the beginning when it comes to troubleshooting with eBPF metrics. We're excited
diff --git a/docs/guides/troubleshoot/troubleshooting-agent-with-cloud-connection.md b/docs/guides/troubleshoot/troubleshooting-agent-with-cloud-connection.md
index 3bb5ace66..138182e01 100644
--- a/docs/guides/troubleshoot/troubleshooting-agent-with-cloud-connection.md
+++ b/docs/guides/troubleshoot/troubleshooting-agent-with-cloud-connection.md
@@ -51,7 +51,7 @@ and you must do it manually, using the following steps:
:::note
In some cases a simple restart of the Agent can fix the issue.
-Read more about [Starting, Stopping and Restarting the Agent](/docs/configure/start-stop-restart.md).
+Read more about [Starting, Stopping and Restarting the Agent](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md).
:::
@@ -59,7 +59,7 @@ Read more about [Starting, Stopping and Restarting the Agent](/docs/configure/st
Make sure that you are using the latest version of Netdata if you are using the [Claiming script](https://learn.netdata.cloud/docs/agent/claim#claiming-script).
-With the introduction of our new architecture, Agents running versions lower than `v1.32.0` can face claiming problems, so we recommend you [update the Netdata Agent](https://learn.netdata.cloud/docs/agent/packaging/installer/update) to the latest stable version.
+With the introduction of our new architecture, Agents running versions lower than `v1.32.0` can face claiming problems, so we recommend you [update the Netdata Agent](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md) to the latest stable version.
## Network issues while connecting to the Cloud
diff --git a/docs/guides/using-host-labels.md b/docs/guides/using-host-labels.md
index 7a5381e99..7937d589b 100644
--- a/docs/guides/using-host-labels.md
+++ b/docs/guides/using-host-labels.md
@@ -27,7 +27,7 @@ sudo ./edit-config netdata.conf
```
Create a new `[host labels]` section defining a new host label and its value for the system in question. Make sure not
-to violate any of the [host label naming rules](/docs/configure/common-changes.md#organize-nodes-with-host-labels).
+to violate any of the [host label naming rules](https://github.com/netdata/netdata/blob/master/docs/configure/common-changes.md#organize-nodes-with-host-labels).
```conf
[host labels]
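    # illustrative labels; the names and values below are placeholders you
    # would replace with details about this node
    type = webserver
    location = us-seattle
    installed = 20200218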
@@ -101,9 +101,9 @@ child system. It's a vastly simplified way of accessing critical information abo
> ⚠️ Because automatic labels for child nodes are accessible via API calls, and contain sensitive information like
> kernel and operating system versions, you should secure streaming connections with SSL. See the [streaming
-> documentation](/streaming/README.md#securing-streaming-communications) for details. You may also want to use
-> [access lists](/web/server/README.md#access-lists) or [expose the API only to LAN/localhost
-> connections](/docs/netdata-security.md#expose-netdata-only-in-a-private-lan).
+> documentation](https://github.com/netdata/netdata/blob/master/streaming/README.md#securing-streaming-communications) for details. You may also want to use
+> [access lists](https://github.com/netdata/netdata/blob/master/web/server/README.md#access-lists) or [expose the API only to LAN/localhost
+> connections](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md#expose-netdata-only-in-a-private-lan).
You can also use `_is_parent`, `_is_child`, and any other host labels in both health entities and metrics
exporting. Speaking of which...
@@ -154,11 +154,11 @@ Or when ephemeral Docker nodes are involved:
```
Of course, there are many more possibilities for intuitively organizing your systems with host labels. See the [health
-documentation](/health/REFERENCE.md#alarm-line-host-labels) for more details, and then get creative!
+documentation](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alarm-line-host-labels) for more details, and then get creative!
## Host labels in metrics exporting
-If you have enabled any metrics exporting via our experimental [exporters](/exporting/README.md), any new host
+If you have enabled any metrics exporting via our experimental [exporters](https://github.com/netdata/netdata/blob/master/exporting/README.md), any new host
labels you created manually are sent to the destination database alongside metrics. You can change this behavior by
editing `exporting.conf`, and you can even send automatically-generated labels on with exported metrics.
@@ -183,7 +183,7 @@ send automatic labels = yes
```
By applying labels to exported metrics, you can more easily parse historical metrics with the labels applied. To learn
-more about exporting, read the [documentation](/exporting/README.md).
+more about exporting, read the [documentation](https://github.com/netdata/netdata/blob/master/exporting/README.md).
## What's next?
@@ -195,15 +195,15 @@ the Netdata team first kicked off this work.
It should be noted that while the Netdata dashboard does not expose either user-configured or automatic host labels, API
queries _do_ showcase this information. As always, we recommend you secure Netdata:
-- [Expose Netdata only in a private LAN](/docs/netdata-security.md#expose-netdata-only-in-a-private-lan)
-- [Enable TLS/SSL for web/API requests](/web/server/README.md#enabling-tls-support)
+- [Expose Netdata only in a private LAN](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md#expose-netdata-only-in-a-private-lan)
+- [Enable TLS/SSL for web/API requests](https://github.com/netdata/netdata/blob/master/web/server/README.md#enabling-tls-support)
- Put Netdata behind a proxy
- [Use an authenticating web server in proxy
- mode](/docs/netdata-security.md#use-an-authenticating-web-server-in-proxy-mode)
- - [Nginx proxy](/docs/Running-behind-nginx.md)
- - [Apache proxy](/docs/Running-behind-apache.md)
- - [Lighttpd](/docs/Running-behind-lighttpd.md)
- - [Caddy](/docs/Running-behind-caddy.md)
+ mode](https://github.com/netdata/netdata/blob/master/docs/netdata-security.md#use-an-authenticating-web-server-in-proxy-mode)
+ - [Nginx proxy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-nginx.md)
+ - [Apache proxy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-apache.md)
+ - [Lighttpd](https://github.com/netdata/netdata/blob/master/docs/Running-behind-lighttpd.md)
+ - [Caddy](https://github.com/netdata/netdata/blob/master/docs/Running-behind-caddy.md)
If you have issues or questions around using host labels, don't hesitate to [file an
issue](https://github.com/netdata/netdata/issues/new?assignees=&labels=bug%2Cneeds+triage&template=BUG_REPORT.yml) on GitHub. We're
diff --git a/docs/metrics-storage-management/enable-streaming.mdx b/docs/metrics-storage-management/enable-streaming.mdx
index a737b07b6..3bcf19b40 100644
--- a/docs/metrics-storage-management/enable-streaming.mdx
+++ b/docs/metrics-storage-management/enable-streaming.mdx
@@ -1,8 +1,15 @@
---
title: "Enable streaming between nodes"
-description: "With metrics streaming enabled, you can not only replicate metrics data into a second database, but also view dashboards and trigger alarm notifications for multiple nodes in parallel."
-type: how-to
-custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/metrics-storage-management/enable-streaming.mdx
+description: >-
+ "With metrics streaming enabled, you can not only replicate metrics data
+ into a second database, but also view dashboards and trigger alarm notifications
+ for multiple nodes in parallel."
+type: "how-to"
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/enable-streaming.mdx"
+sidebar_label: "Enable streaming between nodes"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Setup"
---
# Enable streaming between nodes
@@ -13,7 +20,7 @@ parent node, and both nodes retain metrics in their own databases.
To configure replication, you need two nodes, each running Netdata. First, you'll enable streaming on your parent
node, then enable streaming on your child node. When you're finished, you'll be able to see the child node's metrics in
the parent node's dashboard, quickly switch between the two dashboards, and be able to serve [alarm
-notifications](/docs/monitor/enable-notifications.md) from either or both nodes.
+notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md) from either or both nodes.
## Enable streaming on the parent node
@@ -24,8 +31,8 @@ itself while initiating a streaming connection. Copy that into a separate text f
> Find out how to [install `uuidgen`](https://command-not-found.com/uuidgen) on your node if you don't already have it.
-Next, open `stream.conf` using [`edit-config`](/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files)
-from within the [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory).
+Next, open `stream.conf` using [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files)
+from within the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
```bash
cd /etc/netdata
@@ -49,7 +56,7 @@ simplified version of the configuration, minus the commented lines, looks like t
```
Save the file and close it, then restart Netdata with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system.
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
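A sketch of the parent-side `stream.conf` section this step creates, keyed by the `uuidgen` value (the UUID shown is a placeholder); the settings mirror those documented in the streaming reference:

```conf
[11111111-2222-3333-4444-555555555555]
    enabled = yes
    default history = 3600
    default memory mode = dbengine
    health enabled by default = auto
    allow from = *
```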
## Enable streaming on the child node
@@ -70,7 +77,7 @@ looks like the following:
```
Save the file and close it, then restart Netdata with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system.
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
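And a matching sketch of the child-side `[stream]` section; the IP is an example, and the `api key` must be the same UUID configured on the parent:

```conf
[stream]
    enabled = yes
    destination = 203.0.113.0:19999
    api key = 11111111-2222-3333-4444-555555555555
```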
## Enable TLS/SSL on streaming (optional)
@@ -90,7 +97,7 @@ sudo chown netdata:netdata /etc/netdata/ssl/cert.pem /etc/netdata/ssl/key.pem
Next, enforce TLS/SSL on the web server. Open `netdata.conf`, scroll down to the `[web]` section, and look for the `bind
to` setting. Add `^SSL=force` to turn on TLS/SSL. See the [web server
-reference](/web/server/README.md#enabling-tls-support) for other TLS/SSL options.
+reference](https://github.com/netdata/netdata/blob/master/web/server/README.md#enabling-tls-support) for other TLS/SSL options.
```conf
[web]
@@ -110,7 +117,7 @@ self-signed certificates.
```
Restart both the parent and child nodes with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system, to stream encrypted metrics using TLS/SSL.
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system, to stream encrypted metrics using TLS/SSL.
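On the child, TLS is requested by appending `:SSL` to the destination; a sketch, with the skip-verification line only appropriate for self-signed certificates:

```conf
[stream]
    destination = 203.0.113.0:20000:SSL
    # only for self-signed certificates on the parent
    ssl skip certificate verification = yes
```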
## View streamed metrics in Netdata's dashboard
@@ -135,17 +142,17 @@ Now that you have a basic streaming setup with replication, you may want to twea
child database, disable the child dashboard, or enable SSL on the streaming connection between the parent and child.
See the [streaming reference
-doc](/docs/metrics-storage-management/reference-streaming.mdx#examples) for details about
+doc](https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/reference-streaming.mdx#examples) for details about
other possible configurations.
When using Netdata's default TSDB (`dbengine`), the parent node maintains separate, parallel databases for itself and
every child node streaming to it. Each instance is sized identically based on the `dbengine multihost disk space`
-setting in `netdata.conf`. See our doc on [changing metrics retention](/docs/store/change-metrics-storage.md) for
+setting in `netdata.conf`. See our doc on [changing metrics retention](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md) for
details.
### Related information & further reading
- Streaming
- - [How Netdata streams metrics](/docs/metrics-storage-management/how-streaming-works.mdx)
- - **[Enable streaming between nodes](/docs/metrics-storage-management/enable-streaming.mdx)**
- - [Streaming reference](/docs/metrics-storage-management/reference-streaming.mdx)
+ - [How Netdata streams metrics](https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/how-streaming-works.mdx)
+ - **[Enable streaming between nodes](https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/enable-streaming.mdx)**
+ - [Streaming reference](https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/reference-streaming.mdx)
diff --git a/docs/metrics-storage-management/how-streaming-works.mdx b/docs/metrics-storage-management/how-streaming-works.mdx
index ecbce39bc..f181d3769 100644
--- a/docs/metrics-storage-management/how-streaming-works.mdx
+++ b/docs/metrics-storage-management/how-streaming-works.mdx
@@ -1,8 +1,15 @@
---
title: "How metrics streaming works"
-description: "Netdata's real-time streaming allows you to replicate metrics data across multiple nodes, or centralize all your metrics data into a single time-series database (TSDB)."
-type: explanation
-custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/metrics-storage-management/how-streaming-works.mdx
+description: >-
+ "Netdata's real-time streaming allows you to replicate metrics data
+ across multiple nodes, or centralize all your metrics data into a single
+ time-series database (TSDB)."
+type: "explanation"
+custom_edit_url: "https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/how-streaming-works.mdx"
+sidebar_label: "How metrics streaming works"
+learn_status: "Published"
+learn_topic_type: "Concepts"
+learn_rel_path: "Concepts"
---
# How metrics streaming works
@@ -12,13 +19,13 @@ replicate metrics data across multiple nodes, or centralize all your metrics dat
(TSDB).
When one node streams metrics to another, the node receiving metrics can visualize them on the
-[dashboard](/docs/visualize/interact-dashboards-charts.md), run health checks to [trigger
-alarms](/docs/monitor/view-active-alarms.md) and [send notifications](/docs/monitor/enable-notifications.md), and
-[export](/docs/export/external-databases.md) all metrics to an external TSDB. When Netdata streams metrics to another
+[dashboard](https://github.com/netdata/netdata/blob/master/docs/visualize/interact-dashboards-charts.md), run health checks to [trigger
+alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/view-active-alarms.md) and [send notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md), and
+[export](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md) all metrics to an external TSDB. When Netdata streams metrics to another
Netdata, the receiving one is able to perform everything a Netdata instance is capable of.
Streaming lets you decide exactly how you want to store and maintain metrics data. While we believe Netdata's
-[distributed architecture](/docs/store/distributed-data-architecture.md) is ideal for speed and scale, streaming
+[distributed architecture](https://github.com/netdata/netdata/blob/master/docs/store/distributed-data-architecture.md) is ideal for speed and scale, streaming
provides centralization options for those who want to maintain only a single TSDB instance.
## Streaming basics
@@ -68,7 +75,7 @@ Here are a few example streaming configurations:
Parent nodes feature a **Replicated Nodes** section in the left-hand panel, which opens with the hamburger icon
![Hamburger icon](https://raw.githubusercontent.com/netdata/netdata-ui/master/src/components/icon/assets/hamburger.svg)
in the top navigation. The parent node, plus any child nodes, appear here. Click on any of the hostnames to switch
-between parent and child dashboards, all served by the parent's [web server](/web/server/README.md).
+between parent and child dashboards, all served by the parent's [web server](https://github.com/netdata/netdata/blob/master/web/server/README.md).
![Switching between
](https://user-images.githubusercontent.com/1153921/110043346-761ec000-7d04-11eb-8e58-77670ba39161.gif)
@@ -79,14 +86,14 @@ Each child dashboard is also available directly at the following URL pattern:
## What's next?
Now that you understand the fundamentals of streaming metrics between nodes, go ahead and [enable
-streaming](/docs/metrics-storage-management/enable-streaming.mdx) using a simple `parent-child` relationship. For all
-the details, see the [streaming reference](/docs/metrics-storage-management/reference-streaming.mdx) doc.
+streaming](https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/enable-streaming.mdx) using a simple `parent-child` relationship. For all
+the details, see the [streaming reference](https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/reference-streaming.mdx) doc.
-Take your streaming setup even further by [exporting metrics](/docs/export/external-databases.md) to an external TSDB.
+Take your streaming setup even further by [exporting metrics](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md) to an external TSDB.
### Related information & further reading
- Streaming
- - **[How Netdata streams metrics](/docs/metrics-storage-management/how-streaming-works.mdx)**
- - [Enable streaming between nodes](/docs/metrics-storage-management/enable-streaming.mdx)
- - [Streaming reference](/docs/metrics-storage-management/reference-streaming.mdx)
\ No newline at end of file
+ - **[How Netdata streams metrics](https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/how-streaming-works.mdx)**
+ - [Enable streaming between nodes](https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/enable-streaming.mdx)
+ - [Streaming reference](https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/reference-streaming.mdx)
\ No newline at end of file
diff --git a/docs/metrics-storage-management/reference-streaming.mdx b/docs/metrics-storage-management/reference-streaming.mdx
index c77ceb37c..58c898639 100644
--- a/docs/metrics-storage-management/reference-streaming.mdx
+++ b/docs/metrics-storage-management/reference-streaming.mdx
@@ -1,24 +1,28 @@
---
title: "Streaming reference"
description: "Each node running Netdata can stream the metrics it collects, in real time, to another node. See all of the available settings in this reference document."
-type: reference
-custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/metrics-storage-management/reference-streaming.mdx
+type: "reference"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/docs/metrics-storage-management/reference-streaming.mdx"
+sidebar_label: "Streaming reference"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Configuration"
---
# Streaming reference
Each node running Netdata can stream the metrics it collects, in real time, to another node. To learn more, read about
-[how streaming works](/docs/metrics-storage-management/how-streaming-works.mdx).
+[how streaming works](https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/how-streaming-works.mdx).
For a quickstart guide for enabling a simple `parent-child` streaming relationship, see our [stream metrics between
-nodes](/docs/metrics-storage-management/enable-streaming.mdx) doc. All other configuration options and scenarios are
+nodes](https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/enable-streaming.mdx) doc. All other configuration options and scenarios are
covered in the sections below.
## Configuration
There are two files responsible for configuring Netdata's streaming capabilities: `stream.conf` and `netdata.conf`.
-From within your Netdata config directory (typically `/etc/netdata`), [use `edit-config`](/docs/configure/nodes.md) to
+From within your Netdata config directory (typically `/etc/netdata`), [use `edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) to
open either `stream.conf` or `netdata.conf`.
```
@@ -53,7 +57,7 @@ node**. This file is automatically generated by Netdata the first time it is sta
| `api key` | ` ` | The `API_KEY` to use as the child node. |
| `timeout seconds` | `60` | The timeout to connect and send metrics to a parent. |
| `default port` | `19999` | The port to use if `destination` does not specify one. |
-| [`send charts matching`](#send-charts-matching) | `*` | A space-separated list of [Netdata simple patterns](/libnetdata/simple_pattern/README.md) to filter which charts are streamed. [Read more →](#send-charts-matching) |
+| [`send charts matching`](#send-charts-matching) | `*` | A space-separated list of [Netdata simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) to filter which charts are streamed. [Read more →](#send-charts-matching) |
| `buffer size bytes` | `10485760` | The size of the buffer to use when sending metrics. The default `10485760` equals a buffer of 10MB, which is good for 60 seconds of data. Increase this if you expect latencies higher than that. The buffer is flushed on reconnect. |
| `reconnect delay seconds` | `5` | How long to wait until retrying to connect to the parent node. |
| `initial clock resync iterations` | `60` | Sync the clock of charts for how many seconds when starting. |
@@ -63,9 +67,9 @@ node**. This file is automatically generated by Netdata the first time it is sta
| Setting | Default | Description |
| :---------------------------------------------- | :------------------------ | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `enabled` | `no` | Whether this API KEY is enabled or disabled. |
-| [`allow from`](#allow-from) | `*` | A space-separated list of [Netdata simple patterns](/libnetdata/simple_pattern/README.md) matching the IPs of nodes that will stream metrics using this API key. [Read more →](#allow-from) |
+| [`allow from`](#allow-from) | `*` | A space-separated list of [Netdata simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) matching the IPs of nodes that will stream metrics using this API key. [Read more →](#allow-from) |
| `default history` | `3600` | The default amount of child metrics history to retain when using the `save`, `map`, or `ram` memory modes. |
-| [`default memory mode`](#default-memory-mode) | `ram` | The [database](/database/README.md) to use for all nodes using this `API_KEY`. Valid settings are `dbengine`, `map`, `save`, `ram`, or `none`. [Read more →](#default-memory-mode) |
+| [`default memory mode`](#default-memory-mode) | `ram` | The [database](https://github.com/netdata/netdata/blob/master/database/README.md) to use for all nodes using this `API_KEY`. Valid settings are `dbengine`, `map`, `save`, `ram`, or `none`. [Read more →](#default-memory-mode) |
| `health enabled by default` | `auto` | Whether alarms and notifications should be enabled for nodes using this `API_KEY`. `auto` enables alarms when the child is connected. `yes` enables alarms always, and `no` disables alarms. |
| `default postpone alarms on connect seconds` | `60` | Postpone alarms and notifications for a period of time after the child connects. |
| `default proxy enabled` | ` ` | Route metrics through a proxy. |
@@ -94,7 +98,7 @@ To enable TCP streaming to a parent node at `203.0.113.0` on port `20000` and wi
#### `send charts matching`
-A space-separated list of [Netdata simple patterns](/libnetdata/simple_pattern/README.md) to filter which charts are streamed.
+A space-separated list of [Netdata simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) to filter which charts are streamed.
The default is a single wildcard `*`, which streams all charts.
@@ -115,7 +119,7 @@ To send all but a few charts, use `!` to create a negative match. To send _all_
#### `allow from`
-A space-separated list of [Netdata simple patterns](/libnetdata/simple_pattern/README.md) matching the IPs of nodes that
+A space-separated list of [Netdata simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) matching the IPs of nodes that
will stream metrics using this API key. The order is important, left to right, as the first positive or negative match is used.
The default is `*`, which accepts all requests including the `API_KEY`.
@@ -139,7 +143,7 @@ To allow all IPs starting with `10.*`, except `10.1.2.3`:
#### `default memory mode`
-The [database](/database/README.md) to use for all nodes using this `API_KEY`. Valid settings are `dbengine`, `ram`,
+The [database](https://github.com/netdata/netdata/blob/master/database/README.md) to use for all nodes using this `API_KEY`. Valid settings are `dbengine`, `ram`,
`save`, `map`, or `none`.
- `dbengine`: The default, recommended time-series database (TSDB) for Netdata. Stores recent metrics in memory, then
@@ -152,7 +156,7 @@ The [database](/database/README.md) to use for all nodes using this `API_KEY`. V
- `none`: No database.
When using `default memory mode = dbengine`, the parent node creates a separate instance of the TSDB to store metrics
-from child nodes. The [size of _each_ instance is configurable](/docs/store/change-metrics-storage.md) with the `page
+from child nodes. The [size of _each_ instance is configurable](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md) with the `page
cache size` and `dbengine multihost disk space` settings in the `[global]` section in `netdata.conf`.
### `netdata.conf`
@@ -160,9 +164,9 @@ cache size` and `dbengine multihost disk space` settings in the `[global]` secti
| Setting | Default | Description |
| :----------------------------------------- | :---------------- | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **`[global]` section** | | |
-| `memory mode` | `dbengine` | Determines the [database type](/database/README.md) to be used on that node. Other options settings include `none`, `ram`, `save`, and `map`. `none` disables the database at this host. This also disables alarms and notifications, as those can't run without a database. |
+| `memory mode` | `dbengine` | Determines the [database type](https://github.com/netdata/netdata/blob/master/database/README.md) to be used on that node. Other options include `none`, `ram`, `save`, and `map`. `none` disables the database at this host. This also disables alarms and notifications, as those can't run without a database. |
| **`[web]` section** | | |
-| `mode` | `static-threaded` | Determines the [web server](/web/server/README.md) type. The other option is `none`, which disables the dashboard, API, and registry. |
+| `mode` | `static-threaded` | Determines the [web server](https://github.com/netdata/netdata/blob/master/web/server/README.md) type. The other option is `none`, which disables the dashboard, API, and registry. |
| `accept a streaming request every seconds` | `0` | Set a limit on how often a parent node accepts streaming requests from child nodes. `0` equals no limit. If this is set, you may see `... too busy to accept new streaming request. Will be allowed in X secs` in Netdata's `error.log`. |
## Examples
@@ -191,7 +195,7 @@ default `dbengine` as specified by the `API_KEY`, and alarms are disabled.
### Securing streaming with TLS/SSL
Netdata does not activate TLS encryption by default. To encrypt streaming connections, you first need to [enable TLS
-support](/web/server/README.md#enabling-tls-support) on the parent. With encryption enabled on the receiving side, you
+support](https://github.com/netdata/netdata/blob/master/web/server/README.md#enabling-tls-support) on the parent. With encryption enabled on the receiving side, you
need to instruct the child to use TLS/SSL as well. On the child's `stream.conf`, configure the destination as follows:
```
@@ -450,7 +454,7 @@ ERROR : STREAM_SENDER[CHILD HOSTNAME] : STREAM child HOSTNAME [send to PARENT HO
Chart data needs to be consistent between child and parent nodes. If there are differences between chart data on
a parent and a child, such as gaps in metrics collection, it most often means your child's `memory mode`
does not match the parent's. To learn more about the different ways Netdata can store metrics, and thus keep chart
-data consistent, read our [memory mode documentation](/database/README.md).
+data consistent, read our [memory mode documentation](https://github.com/netdata/netdata/blob/master/database/README.md).
### Forbidding access
diff --git a/docs/monitor/configure-alarms.md b/docs/monitor/configure-alarms.md
index ac4581152..4b5b8134e 100644
--- a/docs/monitor/configure-alarms.md
+++ b/docs/monitor/configure-alarms.md
@@ -1,7 +1,11 @@
# Configure health alarms
@@ -10,19 +14,19 @@ Netdata's health watchdog is highly configurable, with support for dynamic thres
more. You can tweak any of the existing alarms based on your infrastructure's topology or specific monitoring needs, or
create new entities.
-You can use health alarms in conjunction with any of Netdata's [collectors](/docs/collect/how-collectors-work.md) (see
-the [supported collector list](/collectors/COLLECTORS.md)) to monitor the health of your systems, containers, and
+You can use health alarms in conjunction with any of Netdata's [collectors](https://github.com/netdata/netdata/blob/master/docs/collect/how-collectors-work.md) (see
+the [supported collector list](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md)) to monitor the health of your systems, containers, and
applications in real time.
While you can see active alarms both on the local dashboard and Netdata Cloud, all health alarms are configured _per
node_ via individual Netdata Agents. If you want to deploy a new alarm across your
-[infrastructure](/docs/quickstart/infrastructure.md), you must configure each node with the same health configuration
+[infrastructure](https://github.com/netdata/netdata/blob/master/docs/quickstart/infrastructure.md), you must configure each node with the same health configuration
files.
## Edit health configuration files
-All of Netdata's [health configuration files](/health/REFERENCE.md#health-configuration-files) are in Netdata's config
-directory, inside the `health.d/` directory. Navigate to your [Netdata config directory](/docs/configure/nodes.md) and
+All of Netdata's [health configuration files](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#health-configuration-files) are in Netdata's config
+directory, inside the `health.d/` directory. Navigate to your [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) and
use `edit-config` to make changes to any of these files.
For example, to edit the `cpu.conf` health configuration file, run:
@@ -73,10 +77,10 @@ one line in a given health entity. To silence any single alarm, change the `to:`
While tuning existing alarms may work in some cases, you may need to write entirely new health entities based on how
your systems, containers, and applications work.
-Read Netdata's [health reference](/health/REFERENCE.md#health-entity-reference) for a full listing of the format,
+Read Netdata's [health reference](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#health-entity-reference) for a full listing of the format,
syntax, and functionality of health entities.
-To write a new health entity into a new file, navigate to your [Netdata config directory](/docs/configure/nodes.md),
+To write a new health entity into a new file, navigate to your [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md),
then use `touch` to create a new file in the `health.d/` directory. Use `edit-config` to start editing the file.
As an example, let's create a `ram-usage.conf` file.
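A sketch of what such a file might contain, with illustrative thresholds:

```conf
 alarm: ram_usage
    on: system.ram
lookup: average -1m percentage of used
 units: %
 every: 1m
  warn: $this > 80
  crit: $this > 90
  info: The percentage of RAM used by the system.
```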
@@ -117,7 +121,7 @@ Let's look into each of the lines to see how they create a working health entity
- `every`: How often to perform the `lookup` calculation to decide whether or not to trigger this alarm.
- `warn`/`crit`: The value at which Netdata should trigger a warning or critical alarm. This example uses simple
syntax, but most pre-configured health entities use
- [hysteresis](/health/REFERENCE.md#special-use-of-the-conditional-operator) to avoid superfluous notifications.
+ [hysteresis](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#special-use-of-the-conditional-operator) to avoid superfluous notifications.
- `info`: A description of the alarm, which will appear in the dashboard and notifications.
In human-readable format:
@@ -140,9 +144,9 @@ without restarting all of Netdata, run `netdatacli reload-health` or `killall -U
## What's next?
With your health entities configured properly, it's time to [enable
-notifications](/docs/monitor/enable-notifications.md) to get notified whenever a node reaches a warning or critical
+notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md) to get notified whenever a node reaches a warning or critical
state.
-To build complex, dynamic alarms, read our guide on [dimension templates](/docs/guides/monitor/dimension-templates.md).
+To build complex, dynamic alarms, read our guide on [dimension templates](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/dimension-templates.md).
diff --git a/docs/monitor/enable-notifications.md b/docs/monitor/enable-notifications.md
index 438eef391..99c24b64e 100644
--- a/docs/monitor/enable-notifications.md
+++ b/docs/monitor/enable-notifications.md
@@ -1,7 +1,11 @@
# Enable alarm notifications
@@ -10,7 +14,7 @@ Netdata offers two ways to receive alarm notifications on external platforms. Th
parallel, which means you can enable both at the same time to send alarm notifications to any number of endpoints.
Both methods use a node's health alarms to generate the content of alarm notifications. Read the doc on [configuring
-alarms](/docs/monitor/configure-alarms.md) to change the preconfigured thresholds or to create tailored alarms for your
+alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md) to change the preconfigured thresholds or to create tailored alarms for your
infrastructure.
Netdata Cloud offers [centralized alarm notifications](#netdata-cloud) via email, which leverages the health status
@@ -26,7 +30,7 @@ response process.
## Netdata Cloud
Netdata Cloud's [centralized alarm
-notifications](https://learn.netdata.cloud/docs/cloud/alerts-notifications/notifications) is a zero-configuration way to
+notifications](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.mdx) is a zero-configuration way to
get notified when an anomaly or incident strikes any node or application in your infrastructure. The advantage of using
centralized alarm notifications from Netdata Cloud is that you don't have to worry about configuring each node in your
infrastructure.
@@ -41,13 +45,13 @@ choose what types of notifications to receive from each War Room.
![Enabling and configuring alarm notifications in Netdata
Cloud](https://user-images.githubusercontent.com/1153921/101936280-93c50900-3b9d-11eb-9ba0-d6927fa872b7.gif)
-See the [centralized alarm notifications](https://learn.netdata.cloud/docs/cloud/alerts-notifications/notifications)
+See the [centralized alarm notifications](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.mdx)
reference doc for further details about what information is conveyed in an email notification, flood protection, and
more.
## Netdata Agent
-The Netdata Agent's [notification system](/health/notifications/README.md) runs on every node and dispatches
+The Netdata Agent's [notification system](https://github.com/netdata/netdata/blob/master/health/notifications/README.md) runs on every node and dispatches
notifications based on configured endpoints and roles. You can enable multiple endpoints on any one node _and_ use Agent
notifications in parallel with centralized alarm notifications in Netdata Cloud.
@@ -59,33 +63,33 @@ notification platform.
### Supported notification endpoints
-- [**alerta.io**](/health/notifications/alerta/README.md)
-- [**Amazon SNS**](/health/notifications/awssns/README.md)
-- [**Custom endpoint**](/health/notifications/custom/README.md)
-- [**Discord**](/health/notifications/discord/README.md)
-- [**Dynatrace**](/health/notifications/dynatrace/README.md)
-- [**Email**](/health/notifications/email/README.md)
-- [**Flock**](/health/notifications/flock/README.md)
-- [**Google Hangouts**](/health/notifications/hangouts/README.md)
-- [**Gotify**](/health/notifications/gotify/README.md)
-- [**IRC**](/health/notifications/irc/README.md)
-- [**Kavenegar**](/health/notifications/kavenegar/README.md)
-- [**Matrix**](/health/notifications/matrix/README.md)
-- [**Messagebird**](/health/notifications/messagebird/README.md)
-- [**Microsoft Teams**](/health/notifications/msteams/README.md)
-- [**Netdata Agent dashboard**](/health/notifications/web/README.md)
-- [**Opsgenie**](/health/notifications/opsgenie/README.md)
-- [**PagerDuty**](/health/notifications/pagerduty/README.md)
-- [**Prowl**](/health/notifications/prowl/README.md)
-- [**PushBullet**](/health/notifications/pushbullet/README.md)
-- [**PushOver**](/health/notifications/pushover/README.md)
-- [**Rocket.Chat**](/health/notifications/rocketchat/README.md)
-- [**Slack**](/health/notifications/slack/README.md)
-- [**SMS Server Tools 3**](/health/notifications/smstools3/README.md)
-- [**StackPulse**](/health/notifications/stackpulse/README.md)
-- [**Syslog**](/health/notifications/syslog/README.md)
-- [**Telegram**](/health/notifications/telegram/README.md)
-- [**Twilio**](/health/notifications/twilio/README.md)
+- [**alerta.io**](https://github.com/netdata/netdata/blob/master/health/notifications/alerta/README.md)
+- [**Amazon SNS**](https://github.com/netdata/netdata/blob/master/health/notifications/awssns/README.md)
+- [**Custom endpoint**](https://github.com/netdata/netdata/blob/master/health/notifications/custom/README.md)
+- [**Discord**](https://github.com/netdata/netdata/blob/master/health/notifications/discord/README.md)
+- [**Dynatrace**](https://github.com/netdata/netdata/blob/master/health/notifications/dynatrace/README.md)
+- [**Email**](https://github.com/netdata/netdata/blob/master/health/notifications/email/README.md)
+- [**Flock**](https://github.com/netdata/netdata/blob/master/health/notifications/flock/README.md)
+- [**Google Hangouts**](https://github.com/netdata/netdata/blob/master/health/notifications/hangouts/README.md)
+- [**Gotify**](https://github.com/netdata/netdata/blob/master/health/notifications/gotify/README.md)
+- [**IRC**](https://github.com/netdata/netdata/blob/master/health/notifications/irc/README.md)
+- [**Kavenegar**](https://github.com/netdata/netdata/blob/master/health/notifications/kavenegar/README.md)
+- [**Matrix**](https://github.com/netdata/netdata/blob/master/health/notifications/matrix/README.md)
+- [**Messagebird**](https://github.com/netdata/netdata/blob/master/health/notifications/messagebird/README.md)
+- [**Microsoft Teams**](https://github.com/netdata/netdata/blob/master/health/notifications/msteams/README.md)
+- [**Netdata Agent dashboard**](https://github.com/netdata/netdata/blob/master/health/notifications/web/README.md)
+- [**Opsgenie**](https://github.com/netdata/netdata/blob/master/health/notifications/opsgenie/README.md)
+- [**PagerDuty**](https://github.com/netdata/netdata/blob/master/health/notifications/pagerduty/README.md)
+- [**Prowl**](https://github.com/netdata/netdata/blob/master/health/notifications/prowl/README.md)
+- [**PushBullet**](https://github.com/netdata/netdata/blob/master/health/notifications/pushbullet/README.md)
+- [**PushOver**](https://github.com/netdata/netdata/blob/master/health/notifications/pushover/README.md)
+- [**Rocket.Chat**](https://github.com/netdata/netdata/blob/master/health/notifications/rocketchat/README.md)
+- [**Slack**](https://github.com/netdata/netdata/blob/master/health/notifications/slack/README.md)
+- [**SMS Server Tools 3**](https://github.com/netdata/netdata/blob/master/health/notifications/smstools3/README.md)
+- [**StackPulse**](https://github.com/netdata/netdata/blob/master/health/notifications/stackpulse/README.md)
+- [**Syslog**](https://github.com/netdata/netdata/blob/master/health/notifications/syslog/README.md)
+- [**Telegram**](https://github.com/netdata/netdata/blob/master/health/notifications/telegram/README.md)
+- [**Twilio**](https://github.com/netdata/netdata/blob/master/health/notifications/twilio/README.md)
### Enable Slack notifications
@@ -95,7 +99,7 @@ want to see alarm notifications from Netdata. Click the green **Add to Slack** b
On the following page, you'll receive a **Webhook URL**. That's what you'll need to configure Netdata, so keep it handy.
-Navigate to your [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory) and use `edit-config` to
+Navigate to your [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory) and use `edit-config` to
open the `health_alarm_notify.conf` file:
```bash
@@ -130,7 +134,7 @@ Next, run the `alarm-notify` script using the `test` option.
You should receive three notifications in your Slack channel for each health status change: `WARNING`, `CRITICAL`, and
`CLEAR`.
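A minimal sketch of that test, assuming a typical install location for the `alarm-notify` script (the path varies by installation method):

```bash
# Become the netdata user, then send test notifications for the default role.
sudo su -s /bin/bash netdata
/usr/libexec/netdata/plugins.d/alarm-notify.sh test
```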
-See the [Agent Slack notifications](/health/notifications/slack/README.md) doc for more options and information.
+See the [Agent Slack notifications](https://github.com/netdata/netdata/blob/master/health/notifications/slack/README.md) doc for more options and information.
## What's next?
@@ -138,10 +142,10 @@ Now that you have health entities configured to your infrastructure's needs and
or incidents, your health monitoring setup is complete.
To make your dashboards most useful during root cause analysis, use Netdata's [distributed data
-architecture](/docs/store/distributed-data-architecture.md) for the best-in-class performance and scalability.
+architecture](https://github.com/netdata/netdata/blob/master/docs/store/distributed-data-architecture.md) for best-in-class performance and scalability.
### Related reference documentation
-- [Netdata Cloud · Alarm notifications](https://learn.netdata.cloud/docs/cloud/alerts-notifications/notifications)
-- [Netdata Agent · Notifications](/health/notifications/README.md)
+- [Netdata Cloud · Alarm notifications](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.mdx)
+- [Netdata Agent · Notifications](https://github.com/netdata/netdata/blob/master/health/notifications/README.md)
diff --git a/docs/monitor/view-active-alarms.md b/docs/monitor/view-active-alarms.md
index be2182683..07c22fe12 100644
--- a/docs/monitor/view-active-alarms.md
+++ b/docs/monitor/view-active-alarms.md
@@ -1,7 +1,11 @@
# View active health alarms
@@ -14,7 +18,7 @@ performance issue affects your node or the applications it runs.
A War Room's [alarms indicator](https://learn.netdata.cloud/docs/cloud/war-rooms#indicators) displays the number of
active `critical` (red) and `warning` (yellow) alerts for the nodes in this War Room. Click on either the critical or
warning badges to open a pre-filtered modal displaying only those types of [active
-alarms](https://learn.netdata.cloud/docs/cloud/alerts-notifications/view-active-alerts).
+alarms](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/view-active-alerts.mdx).
![The Alarms panel in Netdata
Cloud](https://user-images.githubusercontent.com/1153921/108564747-d2bfbb00-72c0-11eb-97b9-5863ad3324eb.png)
@@ -61,15 +65,15 @@ With the three icons beneath that and the **role** designation, you can:
3. Copy the code to embed the badge onto another web page using an `<embed>` element.
The table on the right-hand side displays information about the health entity that triggered the alarm, which you can
-use as a reference to [configure alarms](/docs/monitor/configure-alarms.md).
+use as a reference to [configure alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md).
## What's next?
With the information that appears on Netdata Cloud and the local dashboard about active alarms, you can [configure
-alarms](/docs/monitor/configure-alarms.md) to match your infrastructure's needs or your team's goals.
+alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md) to match your infrastructure's needs or your team's goals.
If you're happy with the pre-configured alarms, skip ahead to [enable
-notifications](/docs/monitor/enable-notifications.md) to use Netdata Cloud's centralized alarm notifications and/or
+notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md) to use Netdata Cloud's centralized alarm notifications and/or
per-node notifications to endpoints like Slack, PagerDuty, Twilio, and more.
diff --git a/docs/netdata-for-IoT.md b/docs/netdata-for-IoT.md
index 8d5bb21ba..87b307b97 100644
--- a/docs/netdata-for-IoT.md
+++ b/docs/netdata-for-IoT.md
@@ -10,22 +10,23 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/netdata-for
> New to Netdata? Check its demo: ** **
>
>[![User
->Base](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=null&value_color=blue&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry)
->[![Monitored
->Servers](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=null&value_color=orange&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry)
->[![Sessions
->Served](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=null&value_color=yellowgreen&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry)
+> Base](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&label=user%20base&units=null&value_color=blue&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry)
+> [![Monitored
+> Servers](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&label=servers%20monitored&units=null&value_color=orange&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry)
+> [![Sessions
+> Served](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&label=sessions%20served&units=null&value_color=yellowgreen&precision=0&v41)](https://registry.my-netdata.io/#netdata_registry)
>
>[![New Users
->Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry)
->[![New Machines
->Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry)
->[![Sessions
->Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry)
+> Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=persons&after=-86400&options=unaligned&group=incremental-sum&label=new%20users%20today&units=null&value_color=blue&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry)
+> [![New Machines
+> Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_entries&dimensions=machines&group=incremental-sum&after=-86400&options=unaligned&label=servers%20added%20today&units=null&value_color=orange&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry)
+> [![Sessions
+> Today](https://registry.my-netdata.io/api/v1/badge.svg?chart=netdata.registry_sessions&after=-86400&group=incremental-sum&options=unaligned&label=sessions%20served%20today&units=null&value_color=yellowgreen&precision=0&v40)](https://registry.my-netdata.io/#netdata_registry)
---
-Netdata is a [very efficient](/docs/guides/configure/performance.md) server performance monitoring solution. When running in server hardware, it can collect
+Netdata is a [very efficient](https://github.com/netdata/netdata/blob/master/docs/guides/configure/performance.md)
+server performance monitoring solution. When running in server hardware, it can collect
thousands of system and application metrics **per second** with just 1% CPU utilization of a single core. Its web server
responds to most data requests in about **half a millisecond** making its web dashboards spontaneous, amazingly fast!
@@ -43,8 +44,8 @@ provider so it can directly be used by google sheets, google charts, google widg
![sensors](https://cloud.githubusercontent.com/assets/2662304/15339745/8be84540-1c8e-11e6-9e9a-106dea7539b6.gif)
Although Netdata has been significantly optimized to lower the CPU and RAM resources it consumes, the plethora of data
-collection plugins may be inappropriate for weak IoT devices. Please follow the [Netdata Agent performance
-guide](/docs/guides/configure/performance.md)
+collection plugins may be inappropriate for weak IoT devices. Please follow
+the [Netdata Agent performance guide](https://github.com/netdata/netdata/blob/master/docs/guides/configure/performance.md).
## Monitoring RPi temperature
diff --git a/docs/netdata-security.md b/docs/netdata-security.md
index 9bb26ad23..511bc7721 100644
--- a/docs/netdata-security.md
+++ b/docs/netdata-security.md
@@ -200,12 +200,12 @@ Of course, there are many more methods you could use to protect Netdata:
### Registry or how to not send any information to a third party server
-The default configuration uses a public registry under registry.my-netdata.io (more information about the registry here: [mynetdata-menu-item](/registry/README.md) ). Please be aware that if you use that public registry, you submit the following information to a third party server:
+The default configuration uses a public registry under registry.my-netdata.io (more information about the registry here: [mynetdata-menu-item](https://github.com/netdata/netdata/blob/master/registry/README.md)). Please be aware that if you use that public registry, you submit the following information to a third-party server:
- The url where you open the web-ui in the browser (via http request referrer)
- The hostnames of the Netdata servers
-If sending this information to the central Netdata registry violates your security policies, you can configure Netdata to [run your own registry](/registry/README.md#run-your-own-registry).
+If sending this information to the central Netdata registry violates your security policies, you can configure Netdata to [run your own registry](https://github.com/netdata/netdata/blob/master/registry/README.md#run-your-own-registry).
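As a hedged sketch of pointing nodes at a private registry, assuming the `[registry]` option names shown below (verify them against the registry README before relying on them):

```bash
cd /etc/netdata
sudo ./edit-config netdata.conf
# On the node that should act as the registry, the [registry] section might look like:
#   [registry]
#       enabled = yes
#       registry to announce = http://registry.example.internal:19999
# The hostname above is a placeholder; other nodes then point at that URL.
```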
### Opt-out of anonymous statistics
diff --git a/docs/overview/netdata-monitoring-stack.md b/docs/overview/netdata-monitoring-stack.md
index ae9252272..36f5b5f06 100644
--- a/docs/overview/netdata-monitoring-stack.md
+++ b/docs/overview/netdata-monitoring-stack.md
@@ -22,7 +22,7 @@ Here are a few ways to enrich your existing monitoring and troubleshooting stack
## Collect metrics from Prometheus endpoints
Netdata automatically detects 600 popular endpoints and collects per-second metrics from them via the [generic
-Prometheus collector](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/prometheus). This even
+Prometheus collector](https://github.com/netdata/go.d.plugin/blob/master/modules/prometheus/README.md). This even
includes support for Windows 10 via [`windows_exporter`](https://github.com/prometheus-community/windows_exporter).
This collector is installed and enabled on all Agent installations by default, so you don't need to waste time
@@ -35,8 +35,8 @@ troubleshoot anomalies.
Netdata can send its per-second metrics to external time-series databases, such as InfluxDB, Prometheus, Graphite,
TimescaleDB, ElasticSearch, AWS Kinesis Data Streams, Google Cloud Pub/Sub Service, and many others.
-To [export metrics to external time-series databases](/docs/export/external-databases.md), you configure an [exporting
-_connector_](/docs/export/enable-connector.md). These connectors support filtering and resampling for granular control
+To [export metrics to external time-series databases](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md), you configure an [exporting
+_connector_](https://github.com/netdata/netdata/blob/master/docs/export/enable-connector.md). These connectors support filtering and resampling for granular control
over which metrics you export, and at what volume. You can export resampled metrics as collected, as averages, or the
sum of interpolated values based on your needs and other monitoring tools.
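A hedged sketch of enabling a single connector, assuming the `exporting.conf` section and option names shown below and a made-up Graphite host:

```bash
cd /etc/netdata
sudo ./edit-config exporting.conf
# Example connector definition inside exporting.conf:
#   [graphite:my_graphite]
#       enabled = yes
#       destination = graphite.example.internal:2003
#       update every = 10
sudo systemctl restart netdata   # or the appropriate restart method for your system
```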
@@ -57,6 +57,6 @@ charts, or use Netdata's health watchdog to send notifications whenever an anoma
## What's next?
Whether you're using Netdata standalone or as part of a larger monitoring stack, the next step is the same: [**Get
-Netdata**](/docs/get-started.mdx).
+Netdata**](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx).
diff --git a/docs/overview/what-is-netdata.md b/docs/overview/what-is-netdata.md
index 3df1d949b..f8e67159b 100644
--- a/docs/overview/what-is-netdata.md
+++ b/docs/overview/what-is-netdata.md
@@ -18,7 +18,8 @@ Netdata's distributed monitoring Agent collects thousands of metrics from system
configuration. It runs permanently on all your physical/virtual servers, containers, cloud deployments, and edge/IoT
devices.
-You can [install](/docs/get-started.mdx) Netdata on most Linux distributions (Ubuntu, Debian, CentOS, and more),
+You can [install](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx) Netdata on most Linux
+distributions (Ubuntu, Debian, CentOS, and more),
container/microservice platforms (Kubernetes clusters, Docker), and many other operating systems (FreeBSD, macOS), with
no `sudo` required.
@@ -46,29 +47,30 @@ you're viewing the Netdata Cloud interface.
Netdata is designed to be both simple to use and flexible for every monitoring, visualization, and troubleshooting use
case:
-- **Collect**: Netdata collects all available metrics from your system and applications with 300+ collectors,
- Kubernetes service discovery, and in-depth container monitoring, all while using only 1% CPU and a few MB of RAM. It
- even collects metrics from Windows machines.
-- **Visualize**: The dashboard meaningfully presents charts to help you understand the relationships between your
- hardware, operating system, running apps/services, and the rest of your infrastructure. Add nodes to Netdata Cloud
- for a complete view of your infrastructure from a single pane of glass.
-- **Monitor**: Netdata's health watchdog uses hundreds of preconfigured alarms to notify you via Slack, email,
- PagerDuty and more when an anomaly strikes. Customize with dynamic thresholds, hysteresis, alarm templates, and
- role-based notifications.
-- **Troubleshoot**: 1s granularity helps you detect and analyze anomalies other monitoring platforms might have
- missed. Interactive visualizations reduce your reliance on the console, and historical metrics help you trace issues
- back to their root cause.
-- **Store**: Netdata's efficient database engine efficiently stores per-second metrics for days, weeks, or even
- months. Every distributed node stores metrics locally, simplifying deployment, slashing costs, and enriching
- Netdata's interactive dashboards.
-- **Export**: Integrate per-second metrics with other time-series databases like Graphite, Prometheus, InfluxDB,
- TimescaleDB, and more with Netdata's interoperable and extensible core.
-- **Stream**: Aggregate metrics from any number of distributed nodes in one place for in-depth analysis, including
- ephemeral nodes in a Kubernetes cluster.
+- **Collect**: Netdata collects all available metrics from your system and applications with 300+ collectors,
+ Kubernetes service discovery, and in-depth container monitoring, all while using only 1% CPU and a few MB of RAM. It
+ even collects metrics from Windows machines.
+- **Visualize**: The dashboard meaningfully presents charts to help you understand the relationships between your
+ hardware, operating system, running apps/services, and the rest of your infrastructure. Add nodes to Netdata Cloud
+ for a complete view of your infrastructure from a single pane of glass.
+- **Monitor**: Netdata's health watchdog uses hundreds of preconfigured alarms to notify you via Slack, email,
+ PagerDuty and more when an anomaly strikes. Customize with dynamic thresholds, hysteresis, alarm templates, and
+ role-based notifications.
+- **Troubleshoot**: 1s granularity helps you detect and analyze anomalies other monitoring platforms might have
+ missed. Interactive visualizations reduce your reliance on the console, and historical metrics help you trace issues
+ back to their root cause.
+- **Store**: Netdata's efficient database engine stores per-second metrics for days, weeks, or even
+ months. Every distributed node stores metrics locally, simplifying deployment, slashing costs, and enriching
+ Netdata's interactive dashboards.
+- **Export**: Integrate per-second metrics with other time-series databases like Graphite, Prometheus, InfluxDB,
+ TimescaleDB, and more with Netdata's interoperable and extensible core.
+- **Stream**: Aggregate metrics from any number of distributed nodes in one place for in-depth analysis, including
+ ephemeral nodes in a Kubernetes cluster.
## What's next?
-Learn more about [why you should use Netdata](/docs/overview/why-netdata.md), or [how Netdata works with your existing
-monitoring stack](/docs/overview/netdata-monitoring-stack.md).
+Learn more
+about [why you should use Netdata](https://github.com/netdata/netdata/blob/master/docs/overview/why-netdata.md),
+or [how Netdata works with your existing monitoring stack](https://github.com/netdata/netdata/blob/master/docs/overview/netdata-monitoring-stack.md).
diff --git a/docs/overview/why-netdata.md b/docs/overview/why-netdata.md
index 9a308f25c..158bc50df 100644
--- a/docs/overview/why-netdata.md
+++ b/docs/overview/why-netdata.md
@@ -58,6 +58,6 @@ open-source tools.
Whether you already have a monitoring stack you want to integrate Netdata into, or are building something from the
ground-up, you should read more on how Netdata can work either [standalone or as an interoperable part of a monitoring
-stack](/docs/overview/netdata-monitoring-stack.md).
+stack](https://github.com/netdata/netdata/blob/master/docs/overview/netdata-monitoring-stack.md).
diff --git a/docs/quickstart/infrastructure.md b/docs/quickstart/infrastructure.md
index 9db66c052..23986b002 100644
--- a/docs/quickstart/infrastructure.md
+++ b/docs/quickstart/infrastructure.md
@@ -12,7 +12,7 @@ nodes running the Netdata Agent. A node is any system in your infrastructure tha
physical or virtual machine (VM), container, cloud deployment, or edge/IoT device.
The Netdata Agent uses zero-configuration collectors to gather metrics from every application and container instantly,
-and uses Netdata's [distributed data architecture](/docs/store/distributed-data-architecture.md) to store metrics
+and uses Netdata's [distributed data architecture](https://github.com/netdata/netdata/blob/master/docs/store/distributed-data-architecture.md) to store metrics
locally. Without a slow and troublesome centralized data lake for your infrastructure's metrics, you reduce the
resources you need to invest in, and the complexity of, monitoring your infrastructure.
@@ -27,12 +27,12 @@ your nodes to maximize the value you get from Netdata.
This quickstart assumes you've installed the Netdata Agent on more than one node in your infrastructure, and connected
those nodes to your Space in Netdata Cloud. If you haven't yet, see the [Netdata
-Cloud](https://learn.netdata.cloud/docs/cloud) docs for details on signing up for Netdata Cloud, installation, and
+Cloud](https://github.com/netdata/netdata/blob/master/docs/cloud/cloud.mdx) docs for details on signing up for Netdata Cloud, installation, and
connection process.
> If you want to monitor a Kubernetes cluster with Netdata, see our [k8s installation
-> doc](/packaging/installer/methods/kubernetes.md) for setup details, and then read our guide, [_Monitor a Kubernetes
-> cluster with Netdata_](/docs/guides/monitor/kubernetes-k8s-netdata.md).
+> doc](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kubernetes.md) for setup details, and then read our guide, [_Monitor a Kubernetes
+> cluster with Netdata_](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/kubernetes-k8s-netdata.md).
## Set up your Netdata Cloud experience
@@ -49,11 +49,11 @@ SRE team for the user-facing SaaS application, and a second IT team for managing
don't monitor the same nodes, they can work in separate Spaces and then further organize their nodes into War Rooms.
Next, set up War Rooms. Netdata Cloud creates dashboards and visualizations based on the nodes added to a given War
-Room. You can [organize War Rooms](https://learn.netdata.cloud/docs/cloud/war-rooms#war-room-organization) in any way
+Room. You can [organize War Rooms](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md#war-room-organization) in any way
you want, such as by the application type, for end-to-end application monitoring, or as an incident response tool.
-Learn more about [Spaces](https://learn.netdata.cloud/docs/cloud/spaces) and [War
-Rooms](https://learn.netdata.cloud/docs/cloud/war-rooms), including how to manage each, in their respective reference
+Learn more about [Spaces](https://github.com/netdata/netdata/blob/master/docs/cloud/spaces.md) and [War
+Rooms](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md), including how to manage each, in their respective reference
documentation.
### Invite your team
@@ -63,25 +63,25 @@ inviting others, you can better synchronize with your team or colleagues to unde
When something goes wrong, you'll be ready to collaboratively troubleshoot complex performance problems from a single
pane of glass.
-To [invite new users](https://learn.netdata.cloud/docs/cloud/manage/invite-your-team), click on **Invite Users** in the
+To [invite new users](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/invite-your-team.md), click on **Invite Users** in the
Space management Area. Choose which War Rooms to add this user to, then click **Send**.
If your team members have trouble signing in, direct them to the [Netdata Cloud sign
-in](https://learn.netdata.cloud/docs/cloud/manage/sign-in) doc.
+in](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/sign-in.mdx) doc.
### See an overview of your infrastructure
The default way to visualize the health and performance of an infrastructure with Netdata Cloud is the
-[**Overview**](/docs/visualize/overview-infrastructure.md), which is the default interface of every War Room. The
+[**Overview**](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md), which is the default interface of every War Room. The
Overview features composite charts, which display aggregated metrics from every node in a given War Room. These metrics
are streamed on-demand from individual nodes and composited onto a single, familiar dashboard.
![The War Room
Overview](https://user-images.githubusercontent.com/1153921/108732681-09791980-74eb-11eb-9ba2-98cb1b6608de.png)
-Read more about the Overview in the [infrastructure overview](/docs/visualize/overview-infrastructure.md) doc.
+Read more about the Overview in the [infrastructure overview](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md) doc.
-Netdata Cloud also features the [**Nodes view**](https://learn.netdata.cloud/docs/cloud/visualize/nodes), which you can
+Netdata Cloud also features the [**Nodes view**](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/nodes.md), which you can
use to configure and see a few key metrics from every node in the War Room, view health status, and more.
### Drill down to specific nodes
@@ -91,8 +91,8 @@ single-node dashboards in Netdata Cloud to drill down on specific issues, scrub
historical data, and see like metrics presented meaningfully to help you troubleshoot performance problems.
Read about the process in the [infrastructure
-overview](/docs/visualize/overview-infrastructure.md#drill-down-with-single-node-dashboards) doc, then learn about [interacting with
-dashboards and charts](/docs/visualize/interact-dashboards-charts.md) to get the most from all of Netdata's real-time
+overview](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md#drill-down-with-single-node-dashboards) doc, then learn about [interacting with
+dashboards and charts](https://github.com/netdata/netdata/blob/master/docs/visualize/interact-dashboards-charts.md) to get the most from all of Netdata's real-time
metrics.
### Create new dashboards
@@ -104,7 +104,7 @@ from every node in your infrastructure on a single dashboard.
![An example system CPU
dashboard](https://user-images.githubusercontent.com/1153921/108732974-4b09c480-74eb-11eb-87a2-c67e569c08b6.png)
-Read more about [creating new dashboards](/docs/visualize/create-dashboards.md) for more details about the process and
+Read more about [creating new dashboards](https://github.com/netdata/netdata/blob/master/docs/visualize/create-dashboards.md) for more details about the process and
additional tips on best leveraging the feature to help you troubleshoot complex performance problems.
## Set up your nodes
@@ -131,25 +131,25 @@ cd /etc/netdata
sudo ./edit-config netdata.conf
```
-Our [configuration basics doc](/docs/configure/nodes.md) contains more information about `netdata.conf`, `edit-config`,
+Our [configuration basics doc](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) contains more information about `netdata.conf`, `edit-config`,
along with simple examples to get you familiar with editing your node's configuration.
-After you've learned the basics, you should [secure your infrastructure's nodes](/docs/configure/secure-nodes.md) using
+After you've learned the basics, you should [secure your infrastructure's nodes](https://github.com/netdata/netdata/blob/master/docs/configure/secure-nodes.md) using
one of our recommended methods. These security best practices ensure no untrusted parties gain access to the metrics
collected on any of your nodes.
### Collect metrics from systems and applications
-Netdata has [300+ pre-installed collectors](/collectors/COLLECTORS.md) that gather thousands of metrics with zero
+Netdata has [300+ pre-installed collectors](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md) that gather thousands of metrics with zero
configuration. Collectors search each of your nodes in default locations and ports to find running applications and
gather as many metrics as they can without you having to configure them individually.
Most collectors work without configuration, but you should read up on [how collectors
-work](/docs/collect/how-collectors-work.md) and [how to enable/configure](/docs/collect/enable-configure.md) them so
+work](https://github.com/netdata/netdata/blob/master/docs/collect/how-collectors-work.md) and [how to enable/configure](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md) them so
that you can see metrics from those applications in Netdata Cloud.
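As a rough sketch of that enable/configure loop, using the Go collector orchestrator and a hypothetical `nginx` job as the example:

```bash
cd /etc/netdata
sudo ./edit-config go.d.conf        # toggle go.d collector modules on or off
sudo ./edit-config go.d/nginx.conf  # configure an individual collector's jobs
sudo systemctl restart netdata      # or the appropriate restart method for your system
```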
-In addition, find detailed information about which [system](/docs/collect/system-metrics.md),
-[container](/docs/collect/container-metrics.md), and [application](/docs/collect/application-metrics.md) metrics you can
+In addition, find detailed information about which [system](https://github.com/netdata/netdata/blob/master/docs/collect/system-metrics.md),
+[container](https://github.com/netdata/netdata/blob/master/docs/collect/container-metrics.md), and [application](https://github.com/netdata/netdata/blob/master/docs/collect/application-metrics.md) metrics you can
collect from across your infrastructure with Netdata.
## What's next?
@@ -158,28 +158,28 @@ Netdata has many features that help you monitor the health of your nodes and tro
Once you have a handle on configuration and are collecting all the right metrics, try out some of Netdata's other
infrastructure-focused features:
-- [See an overview of your infrastructure](/docs/visualize/overview-infrastructure.md) using Netdata Cloud's composite
+- [See an overview of your infrastructure](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md) using Netdata Cloud's composite
charts and real-time visualizations.
-- [Create new dashboards](/docs/visualize/create-dashboards.md) from any number of nodes and metrics in Netdata Cloud.
+- [Create new dashboards](https://github.com/netdata/netdata/blob/master/docs/visualize/create-dashboards.md) from any number of nodes and metrics in Netdata Cloud.
To change how the Netdata Agent runs on each node, dig in to configuration files:
-- [Change how long nodes in your infrastructure retain metrics](/docs/store/change-metrics-storage.md) based on how
+- [Change how long nodes in your infrastructure retain metrics](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md) based on how
many metrics each node collects, your preferred retention period, and the resources you want to dedicate toward
long-term metrics retention.
-- [Create new alarms](/docs/monitor/configure-alarms.md), or tweak some of the pre-configured alarms, to stay on top
+- [Create new alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md), or tweak some of the pre-configured alarms, to stay on top
of anomalies.
-- [Enable notifications](/docs/monitor/enable-notifications.md) to Slack, PagerDuty, email, and 30+ other services.
-- [Export metrics](/docs/export/external-databases.md) to an external time-series database to use Netdata alongside
+- [Enable notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md) to Slack, PagerDuty, email, and 30+ other services.
+- [Export metrics](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md) to an external time-series database to use Netdata alongside
other monitoring and troubleshooting tools.
### Related reference documentation
-- [Netdata Cloud · Spaces](https://learn.netdata.cloud/docs/cloud/spaces)
-- [Netdata Cloud · War Rooms](https://learn.netdata.cloud/docs/cloud/war-rooms)
-- [Netdata Cloud · Invite your team](https://learn.netdata.cloud/docs/cloud/manage/invite-your-team)
+- [Netdata Cloud · Spaces](https://github.com/netdata/netdata/blob/master/docs/cloud/spaces.md)
+- [Netdata Cloud · War Rooms](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md)
+- [Netdata Cloud · Invite your team](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/invite-your-team.md)
- [Netdata Cloud · Sign in or sign up with email, Google, or
- GitHub](https://learn.netdata.cloud/docs/cloud/manage/sign-in)
-- [Netdata Cloud · Nodes view](https://learn.netdata.cloud/docs/cloud/visualize/nodes)
+ GitHub](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/sign-in.mdx)
+- [Netdata Cloud · Nodes view](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/nodes.md)
diff --git a/docs/quickstart/single-node.md b/docs/quickstart/single-node.md
index 7855a4876..293731911 100644
--- a/docs/quickstart/single-node.md
+++ b/docs/quickstart/single-node.md
@@ -36,7 +36,7 @@ To see a node's dashboard in Netdata Cloud, [sign in](https://app.netdata.cloud)
dashboard](https://user-images.githubusercontent.com/1153921/87457036-9b678e00-c5bc-11ea-977d-ad561a73beef.png)
Once you've decided which dashboard you prefer, learn about [interacting with dashboards and
-charts](/docs/visualize/interact-dashboards-charts.md) to get the most from Netdata's real-time metrics.
+charts](https://github.com/netdata/netdata/blob/master/docs/visualize/interact-dashboards-charts.md) to get the most from Netdata's real-time metrics.
## Configure your node
@@ -50,26 +50,26 @@ cd /etc/netdata
sudo ./edit-config netdata.conf
```
-Our [configuration basics doc](/docs/configure/nodes.md) contains more information about `netdata.conf`, `edit-config`,
+Our [configuration basics doc](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) contains more information about `netdata.conf`, `edit-config`,
along with simple examples to get you familiar with editing your node's configuration.
-After you've learned the basics, you should [secure your node](/docs/configure/secure-nodes.md) using one of our
+After you've learned the basics, you should [secure your node](https://github.com/netdata/netdata/blob/master/docs/configure/secure-nodes.md) using one of our
recommended methods. These security best practices ensure no untrusted parties gain access to your dashboard or its
metrics.
## Collect metrics from your system and applications
-Netdata has [300+ pre-installed collectors](/collectors/COLLECTORS.md) that gather thousands of metrics with zero
+Netdata has [300+ pre-installed collectors](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md) that gather thousands of metrics with zero
configuration. Collectors search your node in default locations and ports to find running applications and gather as
many metrics as possible without you having to configure them individually.
These metrics enrich both the local and Netdata Cloud dashboards.
Most collectors work without configuration, but you should read up on [how collectors
-work](/docs/collect/how-collectors-work.md) and [how to enable/configure](/docs/collect/enable-configure.md) them.
+work](https://github.com/netdata/netdata/blob/master/docs/collect/how-collectors-work.md) and [how to enable/configure](https://github.com/netdata/netdata/blob/master/docs/collect/enable-configure.md) them.
-In addition, find detailed information about which [system](/docs/collect/system-metrics.md),
-[container](/docs/collect/container-metrics.md), and [application](/docs/collect/application-metrics.md) metrics you can
+In addition, find detailed information about which [system](https://github.com/netdata/netdata/blob/master/docs/collect/system-metrics.md),
+[container](https://github.com/netdata/netdata/blob/master/docs/collect/container-metrics.md), and [application](https://github.com/netdata/netdata/blob/master/docs/collect/application-metrics.md) metrics you can
collect from across your infrastructure with Netdata.
## What's next?
@@ -78,15 +78,15 @@ Netdata has many features that help you monitor the health of your node and trou
Once you understand configuration, and are certain Netdata is collecting all the important metrics from your node, try
out some of Netdata's other visualization and health monitoring features:
-- [Build new dashboards](/docs/visualize/create-dashboards.md) to put disparate but relevant metrics onto a single
+- [Build new dashboards](https://github.com/netdata/netdata/blob/master/docs/visualize/create-dashboards.md) to put disparate but relevant metrics onto a single
interface.
-- [Create new alarms](/docs/monitor/configure-alarms.md), or tweak some of the pre-configured alarms, to stay on top
+- [Create new alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md), or tweak some of the pre-configured alarms, to stay on top
of anomalies.
-- [Enable notifications](/docs/monitor/enable-notifications.md) to Slack, PagerDuty, email, and 30+ other services.
-- [Change how long your node stores metrics](/docs/store/change-metrics-storage.md) based on how many metrics it
+- [Enable notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md) to Slack, PagerDuty, email, and 30+ other services.
+- [Change how long your node stores metrics](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md) based on how many metrics it
collects, your preferred retention period, and the resources you want to dedicate toward long-term metrics
retention.
-- [Export metrics](/docs/export/external-databases.md) to an external time-series database to use Netdata alongside
+- [Export metrics](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md) to an external time-series database to use Netdata alongside
other monitoring and troubleshooting tools.
diff --git a/docs/store/change-metrics-storage.md b/docs/store/change-metrics-storage.md
index c4b77d9af..e82393a65 100644
--- a/docs/store/change-metrics-storage.md
+++ b/docs/store/change-metrics-storage.md
@@ -1,12 +1,16 @@
# Change how long Netdata stores metrics
-The Netdata Agent uses a custom made time-series database (TSDB), named the [`dbengine`](/database/engine/README.md), to store metrics.
+The Netdata Agent uses a custom made time-series database (TSDB), named the [`dbengine`](https://github.com/netdata/netdata/blob/master/database/engine/README.md), to store metrics.
The default settings retain approximately two days' worth of metrics on a system collecting 2,000 metrics every second,
but the Netdata Agent is highly configurable if you want your nodes to store days, weeks, or months' worth of per-second
@@ -39,7 +43,7 @@ if you want to store more metrics _specifically in memory_, you can increase the
:::tip
-We advise you to visit the [tiering mechanism](/database/engine/README.md#tiering) reference. This will help you
+We advise you to visit the [tiering mechanism](https://github.com/netdata/netdata/blob/master/database/engine/README.md#tiering) reference. This will help you
configure the Agent to retain metrics for longer periods.
:::
@@ -57,7 +61,7 @@ data retention according to your preferences.
## Edit `netdata.conf` with recommended database engine settings
Now that you have a recommended setting for your Agent's `dbengine`, open `netdata.conf` with
-[`edit-config`](/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) and look for the `[db]`
+[`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) and look for the `[db]`
subsection. Change it to the recommended values you calculated from the calculator. For example:
```conf
@@ -76,23 +80,23 @@ subsection. Change it to the recommended values you calculated from the calculat
```
Save the file and restart the Agent with `sudo systemctl restart netdata`, or
-the [appropriate method](/docs/configure/start-stop-restart.md) for your system, to change the database engine's size.
+the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system, to change the database engine's size.
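A hedged sketch of that edit-and-restart cycle; the `[db]` values below are placeholders, not recommendations, and the option names should be checked against the daemon configuration reference:

```bash
cd /etc/netdata
sudo ./edit-config netdata.conf
# Example [db] subsection after applying the calculator's output (placeholder values):
#   [db]
#       mode = dbengine
#       dbengine multihost disk space MB = 1024
#       dbengine page cache size MB = 32
sudo systemctl restart netdata   # or the appropriate method for your system
```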
## What's next?
If you have multiple nodes with the Netdata Agent installed, you
-can [stream metrics](/docs/metrics-storage-management/how-streaming-works.mdx) from any number of _child_ nodes to a _
+can [stream metrics](https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/how-streaming-works.mdx) from any number of _child_ nodes to a _
parent_ node and store metrics using a centralized time-series database. Streaming allows you to centralize your data,
run Agents as headless collectors, replicate data, and more.
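A minimal sketch of a child-to-parent streaming setup; the parent address and API key are placeholders, and the option names assume the usual `stream.conf` layout:

```bash
cd /etc/netdata
sudo ./edit-config stream.conf
# On the child node:
#   [stream]
#       enabled = yes
#       destination = parent.example.internal:19999
#       api key = 11111111-2222-3333-4444-555555555555
# On the parent node, accept that key:
#   [11111111-2222-3333-4444-555555555555]
#       enabled = yes
```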
Storing metrics with the database engine is completely interoperable
-with [exporting to other time-series databases](/docs/export/external-databases.md). With exporting, you can use the
-node's resources to surface metrics when [viewing dashboards](/docs/visualize/interact-dashboards-charts.md), while also
+with [exporting to other time-series databases](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md). With exporting, you can use the
+node's resources to surface metrics when [viewing dashboards](https://github.com/netdata/netdata/blob/master/docs/visualize/interact-dashboards-charts.md), while also
archiving metrics elsewhere for further analysis, visualization, or correlation with other tools.
### Related reference documentation
-- [Netdata Agent · Database engine](/database/engine/README.md)
-- [Netdata Agent · Database engine configuration option](/daemon/config/README.md#[db]-section-options)
+- [Netdata Agent · Database engine](https://github.com/netdata/netdata/blob/master/database/engine/README.md)
+- [Netdata Agent · Database engine configuration option](https://github.com/netdata/netdata/blob/master/daemon/config/README.md#[db]-section-options)
diff --git a/docs/store/distributed-data-architecture.md b/docs/store/distributed-data-architecture.md
index 62933cfe5..96ae4d999 100644
--- a/docs/store/distributed-data-architecture.md
+++ b/docs/store/distributed-data-architecture.md
@@ -1,7 +1,11 @@
# Distributed data architecture
@@ -10,7 +14,7 @@ Netdata uses a distributed data architecture to help you collect and store per-s
Every node in your infrastructure, whether it's one or a thousand, stores the metrics it collects.
Netdata Cloud bridges the gap between many distributed databases by _centralizing the interface_ you use to query and
-visualize your nodes' metrics. When you [look at charts in Netdata Cloud](/docs/visualize/interact-dashboards-charts.md)
+visualize your nodes' metrics. When you [look at charts in Netdata Cloud](https://github.com/netdata/netdata/blob/master/docs/visualize/interact-dashboards-charts.md)
, the metrics values are queried directly from that node's database and securely streamed to Netdata Cloud, which
proxies them to your browser.
@@ -18,7 +22,7 @@ Netdata's distributed data architecture has a number of benefits:
- **Performance**: Every query to a node's database takes only a few milliseconds to complete for responsiveness when
viewing dashboards or using features
- like [Metric Correlations](https://learn.netdata.cloud/docs/cloud/insights/metric-correlations).
+ like [Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md).
- **Scalability**: As your infrastructure scales, install the Netdata Agent on every new node to immediately add it to
your monitoring solution without adding cost or complexity.
- **1-second granularity**: Without an expensive centralized data lake, you can store all of your nodes' per-second
@@ -53,17 +57,17 @@ of the Netdata Agent, without affecting disk space or memory requirements.
Any node running the Netdata Agent can store long-term metrics for any retention period, given you allocate the
appropriate amount of RAM and disk space.
-Read our document on changing [how long Netdata stores metrics](/docs/store/change-metrics-storage.md) on your nodes for
+Read our document on changing [how long Netdata stores metrics](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md) on your nodes for
details.
-You can also stream between nodes using [streaming](/streaming/README.md), allowing to replicate databases and create
+You can also stream between nodes using [streaming](https://github.com/netdata/netdata/blob/master/streaming/README.md), allowing you to replicate databases and create
your own centralized data lake of metrics, if you choose to do so.
While a distributed data architecture is the default when monitoring infrastructure with Netdata, you can also configure
its behavior based on your needs or the type of infrastructure you manage.
To archive metrics to an external time-series database, such as InfluxDB, Graphite, OpenTSDB, Elasticsearch,
-TimescaleDB, and many others, see details on [integrating Netdata via exporting](/docs/export/external-databases.md).
+TimescaleDB, and many others, see details on [integrating Netdata via exporting](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md).
When you use the database engine to store your metrics, you can always perform a quick backup of a node's
`/var/cache/netdata/dbengine/` folder using the tool of your choice.
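For example, a minimal backup sketch (stopping the Agent first is an assumption made here to keep the on-disk files consistent during the copy):

```bash
sudo systemctl stop netdata
sudo tar -czf /tmp/netdata-dbengine-backup.tar.gz /var/cache/netdata/dbengine/
sudo systemctl start netdata
```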
@@ -72,7 +76,7 @@ When you use the database engine to store your metrics, you can always perform a
Netdata Cloud does not store metric values.
-To enable certain features, such as [viewing active alarms](/docs/monitor/view-active-alarms.md)
+To enable certain features, such as [viewing active alarms](https://github.com/netdata/netdata/blob/master/docs/monitor/view-active-alarms.md)
or [filtering by hostname/service](https://learn.netdata.cloud/docs/cloud/war-rooms#node-filter), Netdata Cloud does
store configured alarms, their status, and a list of active collectors.
@@ -81,7 +85,7 @@ Netdata does not and never will sell your personal data or data about your deplo
## What's next?
You can configure the Netdata Agent to store days, weeks, or months worth of distributed, per-second data by
-[configuring the database engine](/docs/store/change-metrics-storage.md). Use our calculator to determine the system
+[configuring the database engine](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md). Use our calculator to determine the system
resources required to retain your desired amount of metrics, and expand or contract the database by editing a single
setting.
diff --git a/docs/visualize/create-dashboards.md b/docs/visualize/create-dashboards.md
index 696cd1a74..f4306f335 100644
--- a/docs/visualize/create-dashboards.md
+++ b/docs/visualize/create-dashboards.md
@@ -14,16 +14,18 @@ In the War Room you want to monitor with this dashboard, click on your War Room'
Add** button next to **Dashboards**. In the panel, give your new dashboard a name, and click **+ Add**.
Click the **Add Chart** button to add your first chart card. From the dropdown, select the node you want to add the
-chart from, then the context. Netdata Cloud shows you a preview of the chart before you finish adding it.
+chart from, then the context. Netdata Cloud shows you a preview of the chart before you finish adding it.
The **Add Text** button creates a new card with user-defined text, which you can use to describe or document a
particular dashboard's meaning and purpose. Enrich the dashboards you create with documentation or procedures on how to
-respond
+respond
![A bird's eye dashboard for a single
node](https://user-images.githubusercontent.com/1153921/102650776-a654ba80-4128-11eb-9a65-4f9801b03d4b.png)
-Charts in dashboards are [fully interactive](/docs/visualize/interact-dashboards-charts.md) and synchronized. You can
+Charts in dashboards
+are [fully interactive](https://github.com/netdata/netdata/blob/master/docs/visualize/interact-dashboards-charts.md) and
+synchronized. You can
pan through time, zoom, highlight specific timeframes, and more.
Move any card by clicking on their top panel and dragging them to a new location. Other cards re-sort to the grid system
@@ -38,7 +40,8 @@ more detail when troubleshooting an issue. Quickly jump to any node's dashboard
of any card to open a menu. Hit the **Go to Chart** item.
Netdata Cloud takes you to the same chart on that node's dashboard. You can now navigate all that node's metrics and
-[interact with charts](/docs/visualize/interact-dashboards-charts.md) to further investigate anomalies or troubleshoot
+[interact with charts](https://github.com/netdata/netdata/blob/master/docs/visualize/interact-dashboards-charts.md) to
+further investigate anomalies or troubleshoot
complex performance problems.
When viewing a single-node Cloud dashboard, you can also click on the add to dashboard icon
diff --git a/docs/visualize/interact-dashboards-charts.md b/docs/visualize/interact-dashboards-charts.md
--- a/docs/visualize/interact-dashboards-charts.md
+++ b/docs/visualize/interact-dashboards-charts.md
-> ⚠️ There is a new version of charts that is currently **only** available on [Netdata Cloud](https://learn.netdata.cloud/docs/cloud/visualize/interact-new-charts). We didn't
+> ⚠️ There is a new version of charts that is currently **only** available on [Netdata Cloud](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md). We didn't
> want to keep this valuable feature from you, so after we get this into your hands on the Cloud, we will collect and implement your feedback to make sure we are providing the best possible version of the feature on the Netdata Agent dashboard as quickly as possible.
You can find Netdata's dashboards in two places: locally served at `http://NODE:19999` by the Netdata Agent, and in
Netdata Cloud. While you access these dashboards differently, they have similar interfaces, identical charts and
metrics, and you interact with both of them the same way.
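A quick way to confirm the local Agent is serving its dashboard, assuming the default port and the `/api/v1/info` endpoint:

```bash
# Replace NODE with the node's hostname or IP; then open http://NODE:19999 in a browser.
curl -s http://NODE:19999/api/v1/info | head
```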
-> If you're not sure which option is best for you, see our [single-node](/docs/quickstart/single-node.md) and
-> [infrastructure](/docs/quickstart/infrastructure.md) quickstart guides.
+> If you're not sure which option is best for you, see our [single-node](https://github.com/netdata/netdata/blob/master/docs/quickstart/single-node.md) and
+> [infrastructure](https://github.com/netdata/netdata/blob/master/docs/quickstart/infrastructure.md) quickstart guides.
Netdata dashboards are single, scrollable pages with many charts stacked on top of one another. As you scroll up or
down, charts appearing in your browser's viewport automatically load and update every second.
The dashboard is broken up into multiple **sections**, such as **System Overview**, **CPU**, **Disk**, which are
-automatically generated based on which [collectors](/docs/collect/how-collectors-work.md) begin collecting metrics when
+automatically generated based on which [collectors](https://github.com/netdata/netdata/blob/master/docs/collect/how-collectors-work.md) begin collecting metrics when
Netdata starts up. Sections also appear in the right-hand **menu**, along with submenus based on the contexts and
families Netdata creates for your node.
## Choose timeframes to visualize
Both the local Agent dashboard and Netdata Cloud feature time & date pickers to help you visualize specific points in
-time. In Netdata Cloud, the picker appears in the [Overview](/docs/visualize/overview-infrastructure.md), [Nodes
-view](https://learn.netdata.cloud/docs/cloud/visualize/nodes), [new
-dashboards](https://learn.netdata.cloud/docs/cloud/visualize/dashboards), and any single-node dashboards you visit.
+time. In Netdata Cloud, the picker appears in the [Overview](https://github.com/netdata/netdata/blob/master/docs/visualize/overview-infrastructure.md), [Nodes
+view](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/nodes.md), [new
+dashboards](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/dashboards.md), and any single-node dashboards you visit.
Local Agent dashboard:
@@ -45,8 +45,8 @@ Their behavior is identical. Use the Quick Selector to visualize generic timefra
select days, hours, minutes or seconds. Click **Apply** to re-render all visualizations with new metrics data, or
**Clear** to restore the default timeframe.
-See reference documentation for the [local Agent dashboard](/web/gui/README.md#time--date-picker) and [Netdata
-Cloud](https://learn.netdata.cloud/docs/cloud/war-rooms#time--date-picker) for additional context about how the time &
+See reference documentation for the [local Agent dashboard](https://github.com/netdata/netdata/blob/master/web/gui/README.md#time--date-picker) and [Netdata
+Cloud](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md#time--date-picker) for additional context about how the time &
date picker behaves in each environment.
## Charts, dimensions, families, and contexts
@@ -68,7 +68,7 @@ A **context** groups several charts based on the types of metrics being collecte
this context to create individual charts and then groups them by family. You can always see the context of any chart by
looking at its name or hovering over the chart's date.
-See our [dashboard docs](/web/README.md#charts-contexts-families) for more information about the above distinctions
+See our [dashboard docs](https://github.com/netdata/netdata/blob/master/web/README.md#charts-contexts-families) for more information about the above distinctions
and how they're used across Netdata to meaningfully organize and present metrics.
## Interact with charts
@@ -107,25 +107,25 @@ height](https://user-images.githubusercontent.com/1153921/102652691-24b25c00-412
Netdata Cloud now supports composite charts in the Overview interface. Composite charts come with a few additional UI
elements and varied interactions, such as the location of dimensions and a utility bar for configuring the state of
individual composite charts. All of these details are covered in the [Overview
-reference](https://learn.netdata.cloud/docs/cloud/visualize/overview) doc.
+reference](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md) doc.
## What's next?
-Netdata Cloud users can [build new dashboards](/docs/visualize/create-dashboards.md) in just a few clicks. By
+Netdata Cloud users can [build new dashboards](https://github.com/netdata/netdata/blob/master/docs/visualize/create-dashboards.md) in just a few clicks. By
aggregating relevant metrics from any number of nodes onto a single interface, you can respond faster to anomalies,
perform more targeted troubleshooting, or keep tabs on a bird's eye view of your infrastructure.
If you're finished with dashboards for now, skip to Netdata's health watchdog for information on [creating or
-configuring](/docs/monitor/configure-alarms.md) alarms, and [send notifications](/docs/monitor/enable-notifications.md)
+configuring](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md) alarms, and [send notifications](https://github.com/netdata/netdata/blob/master/docs/monitor/enable-notifications.md)
to get informed when something goes wrong in your infrastructure.
### Related reference documentation
-- [Netdata Agent · Web dashboards overview](/web/README.md)
-- [Netdata Cloud · Interact with new charts](https://learn.netdata.cloud/docs/cloud/visualize/interact-new-charts)
-- [Netdata Cloud · War Rooms](https://learn.netdata.cloud/docs/cloud/war-rooms)
-- [Netdata Cloud · Overview](https://learn.netdata.cloud/docs/cloud/visualize/overview)
-- [Netdata Cloud · Nodes](https://learn.netdata.cloud/docs/cloud/visualize/nodes)
-- [Netdata Cloud · Build new dashboards](https://learn.netdata.cloud/docs/cloud/visualize/dashboards)
+- [Netdata Agent · Web dashboards overview](https://github.com/netdata/netdata/blob/master/web/README.md)
+- [Netdata Cloud · Interact with new charts](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md)
+- [Netdata Cloud · War Rooms](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md)
+- [Netdata Cloud · Overview](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md)
+- [Netdata Cloud · Nodes](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/nodes.md)
+- [Netdata Cloud · Build new dashboards](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/dashboards.md)
diff --git a/docs/visualize/overview-infrastructure.md b/docs/visualize/overview-infrastructure.md
index 4edbb0f3a..0daddd97a 100644
--- a/docs/visualize/overview-infrastructure.md
+++ b/docs/visualize/overview-infrastructure.md
@@ -7,7 +7,7 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/visualize/o
# See an overview of your infrastructure
In Netdata Cloud, your nodes are organized into War Rooms. One of the two available views for a War Room is the
-[**Overview**](https://learn.netdata.cloud/docs/cloud/visualize/overview), which uses composite charts to display
+[**Overview**](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md), which uses composite charts to display
real-time, aggregated metrics from all the nodes (or a filtered selection) in a given War Room.
With Overview's composite charts, you can see your infrastructure from a single pane of glass, discover trends or
@@ -15,7 +15,7 @@ anomalies, then drill down with filtering or single-node dashboards to see more.
each chart visualizes average or sum metrics values from across 5 distributed nodes.
Netdata also supports robust Kubernetes monitoring using the Overview. Read our [deployment
-doc](/packaging/installer/methods/kubernetes.md) for details on visualizing Kubernetes metrics in Netdata Cloud.
+doc](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/kubernetes.md) for details on visualizing Kubernetes metrics in Netdata Cloud.
![The War Room
Overview](https://user-images.githubusercontent.com/1153921/108732681-09791980-74eb-11eb-9ba2-98cb1b6608de.png)
@@ -32,8 +32,8 @@ Let's walk through some examples of using the Overview to monitor and troublesho
### Filter nodes and pick relevant times
While not exclusive to Overview, you can use two important features, [node
-filtering](https://learn.netdata.cloud/docs/cloud/war-rooms#node-filter) and the [time & date
-picker](https://learn.netdata.cloud/docs/cloud/war-rooms#time--date-picker), to widen or narrow your infrastructure
+filtering](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md#node-filter) and the [time & date
+picker](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md#time--date-picker), to widen or narrow your infrastructure
monitoring focus.
By default, the Overview shows composite charts aggregated from every node in the War Room, but you can change that
@@ -48,7 +48,7 @@ establishing a baseline of infrastructure performance or targeted root cause ana
For example, use the **Quick Selector** options to pick the 12-hour option first thing in the morning to check your
infrastructure for any odd behavior overnight. Use the 7-day option to observe trends between various days of the week.
-See the [War Rooms](https://learn.netdata.cloud/docs/cloud/war-rooms) docs for more details on both features.
+See the [War Rooms](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md) docs for more details on both features.
### Configure composite charts to identify problems
@@ -60,7 +60,7 @@ affects a single node, a subset of nodes, or an entire infrastructure.
![Composite charts showing available and committed RAM across an
infrastructure](https://user-images.githubusercontent.com/1153921/99314892-0bae4680-281f-11eb-823e-071a1da25dc7.png)
-Use [_group by node_](https://learn.netdata.cloud/docs/cloud/visualize/overview#group-by-dimension-or-node) to visualize
+Use [_group by node_](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md#group-by-dimension-or-node) to visualize
a single metric across all contributing nodes. If the composite chart has 5 contributing nodes, there will be 5
lines/areas, one for the most relevant dimension from each node.
@@ -80,32 +80,32 @@ given node to quickly _jump to the same chart in that node's single-node dashboa
You can use single-node dashboards in Netdata Cloud to drill down on specific issues, scrub backward in time to
investigate historical data, and see like metrics presented meaningfully to help you troubleshoot performance problems.
-All of the familiar [interactions](/docs/visualize/interact-dashboards-charts.md) are available, as is adding any chart
-to a [new dashboard](/docs/visualize/create-dashboards.md).
+All of the familiar [interactions](https://github.com/netdata/netdata/blob/master/docs/visualize/interact-dashboards-charts.md) are available, as is adding any chart
+to a [new dashboard](https://github.com/netdata/netdata/blob/master/docs/visualize/create-dashboards.md).
## Nodes view
You can also use the **Nodes view** to monitor the health status and user-configurable key metrics from multiple nodes
-in a War Room. Read the [Nodes view doc](https://learn.netdata.cloud/docs/cloud/visualize/nodes) for details.
+in a War Room. Read the [Nodes view doc](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/nodes.md) for details.
![The Nodes view](https://user-images.githubusercontent.com/1153921/108733066-5fe65800-74eb-11eb-98e0-abaccd36deaf.png)
## What's next?
To troubleshoot complex performance issues using Netdata, you need to understand how to interact with its meaningful
-visualizations. Learn more about [interaction](/docs/visualize/interact-dashboards-charts.md) to see historical metrics,
+visualizations. Learn more about [interaction](https://github.com/netdata/netdata/blob/master/docs/visualize/interact-dashboards-charts.md) to see historical metrics,
highlight timeframes for targeted analysis, and more.
If you're a Kubernetes user, read about Netdata's [Kubernetes
-visualizations](https://learn.netdata.cloud/docs/cloud/visualize/kubernetes) for details about the health map and
+visualizations](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/kubernetes.md) for details about the health map and
time-series k8s charts, and our tutorial, [_Kubernetes monitoring with Netdata: Overview and
-visualizations_](/docs/guides/monitor/kubernetes-k8s-netdata.md), for a full walkthrough.
+visualizations_](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/kubernetes-k8s-netdata.md), for a full walkthrough.
### Related reference documentation
-- [Netdata Cloud · War Rooms](https://learn.netdata.cloud/docs/cloud/war-rooms)
-- [Netdata Cloud · Overview](https://learn.netdata.cloud/docs/cloud/visualize/overview)
-- [Netdata Cloud · Nodes view](https://learn.netdata.cloud/docs/cloud/visualize/nodes)
-- [Netdata Cloud · Kubernetes visualizations](https://learn.netdata.cloud/docs/cloud/visualize/kubernetes)
+- [Netdata Cloud · War Rooms](https://github.com/netdata/netdata/blob/master/docs/cloud/war-rooms.md)
+- [Netdata Cloud · Overview](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md)
+- [Netdata Cloud · Nodes view](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/nodes.md)
+- [Netdata Cloud · Kubernetes visualizations](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/kubernetes.md)
diff --git a/docs/why-netdata/README.md b/docs/why-netdata/README.md
index c482ee944..9c3af5e7d 100644
--- a/docs/why-netdata/README.md
+++ b/docs/why-netdata/README.md
@@ -11,19 +11,19 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/docs/why-netdata
Netdata is built around 4 principles:
-1. **[Per second data collection for all metrics.](/docs/why-netdata/1s-granularity.md)**
+1. **[Per second data collection for all metrics.](https://github.com/netdata/netdata/blob/master/docs/why-netdata/1s-granularity.md)**
_It is impossible to monitor a 2 second SLA, with 10 second metrics._
-2. **[Collect and visualize all the metrics from all possible sources.](/docs/why-netdata/unlimited-metrics.md)**
+2. **[Collect and visualize all the metrics from all possible sources.](https://github.com/netdata/netdata/blob/master/docs/why-netdata/unlimited-metrics.md)**
_To troubleshoot slowdowns, we need all the available metrics. The console should not provide more metrics._
-3. **[Meaningful presentation, optimized for visual anomaly detection.](/docs/why-netdata/meaningful-presentation.md)**
+3. **[Meaningful presentation, optimized for visual anomaly detection.](https://github.com/netdata/netdata/blob/master/docs/why-netdata/meaningful-presentation.md)**
_Metrics are a lot more than name-value pairs over time. The monitoring tool should know all the metrics. Users should not!_
-4. **[Immediate results, just install and use.](/docs/why-netdata/immediate-results.md)**
+4. **[Immediate results, just install and use.](https://github.com/netdata/netdata/blob/master/docs/why-netdata/immediate-results.md)**
_Most of our infrastructure is standardized. There is no point to configure everything metric by metric._
diff --git a/exporting/README.md b/exporting/README.md
index 60028a38a..bc3ca1c7d 100644
--- a/exporting/README.md
+++ b/exporting/README.md
@@ -1,8 +1,12 @@
# Exporting reference
@@ -12,13 +16,13 @@ configuring, and monitoring Netdata's exporting engine, which allows you to send
databases.
For a quick introduction to the exporting engine's features, read our doc on [exporting metrics to time-series
-databases](/docs/export/external-databases.md), or jump in to [enabling a connector](/docs/export/enable-connector.md).
+databases](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md), or jump in to [enabling a connector](https://github.com/netdata/netdata/blob/master/docs/export/enable-connector.md).
The exporting engine has a modular structure and supports metric exporting via multiple exporting connector instances at
the same time. You can have different update intervals and filters configured for every exporting connector instance.
When you enable the exporting engine and a connector, the Netdata Agent exports metrics _beginning from the time you
-restart its process_, not the entire [database of long-term metrics](/docs/store/change-metrics-storage.md).
+restart its process_, not the entire [database of long-term metrics](https://github.com/netdata/netdata/blob/master/docs/store/change-metrics-storage.md).
Since Netdata collects thousands of metrics per server per second, which would easily congest any database server when
several Netdata servers are sending data to it, Netdata allows sending metrics at a lower frequency, by resampling them.
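
As a rough sketch of the resampling idea described above (illustrative only, not Netdata's actual exporting code; every name here is made up), a connector could average the per-second samples it buffers and emit one value per update interval:

```c
#include <stddef.h>

/* Illustrative accumulator: buffer per-second samples and emit their
 * average once every `update_every` seconds. Not Netdata's real code. */
struct resampler {
    double sum;         /* sum of samples since the last flush */
    size_t count;       /* number of samples since the last flush */
    int update_every;   /* exporting interval, in seconds */
    int elapsed;        /* seconds since the last flush */
};

/* Returns 1 and writes the averaged value to *out when a flush is due. */
static int resample_add(struct resampler *r, double sample, double *out) {
    r->sum += sample;
    r->count++;
    if (++r->elapsed < r->update_every)
        return 0;
    *out = r->sum / (double)r->count;
    r->sum = 0.0;
    r->count = 0;
    r->elapsed = 0;
    return 1;
}
```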
@@ -31,27 +35,27 @@ X seconds (though, it can send them per second if you need it to).
### Integration
The exporting engine uses a number of connectors to send Netdata metrics to external time-series databases. See our
-[list of supported databases](/docs/export/external-databases.md#supported-databases) for information on which
+[list of supported databases](https://github.com/netdata/netdata/blob/master/docs/export/external-databases.md#supported-databases) for information on which
connector to enable and configure for your database of choice.
-- [**AWS Kinesis Data Streams**](/exporting/aws_kinesis/README.md): Metrics are sent to the service in `JSON`
+- [**AWS Kinesis Data Streams**](https://github.com/netdata/netdata/blob/master/exporting/aws_kinesis/README.md): Metrics are sent to the service in `JSON`
format.
-- [**Google Cloud Pub/Sub Service**](/exporting/pubsub/README.md): Metrics are sent to the service in `JSON`
+- [**Google Cloud Pub/Sub Service**](https://github.com/netdata/netdata/blob/master/exporting/pubsub/README.md): Metrics are sent to the service in `JSON`
format.
-- [**Graphite**](/exporting/graphite/README.md): A plaintext interface. Metrics are sent to the database server as
+- [**Graphite**](https://github.com/netdata/netdata/blob/master/exporting/graphite/README.md): A plaintext interface. Metrics are sent to the database server as
`prefix.hostname.chart.dimension`. `prefix` is configured below, `hostname` is the hostname of the machine (can
also be configured). Learn more in our guide to [export and visualize Netdata metrics in
- Graphite](/docs/guides/export/export-netdata-metrics-graphite.md).
-- [**JSON** document databases](/exporting/json/README.md)
-- [**OpenTSDB**](/exporting/opentsdb/README.md): Use a plaintext or HTTP interfaces. Metrics are sent to
+ Graphite](https://github.com/netdata/netdata/blob/master/docs/guides/export/export-netdata-metrics-graphite.md).
+- [**JSON** document databases](https://github.com/netdata/netdata/blob/master/exporting/json/README.md)
+- [**OpenTSDB**](https://github.com/netdata/netdata/blob/master/exporting/opentsdb/README.md): Use a plaintext or HTTP interface. Metrics are sent to
OpenTSDB as `prefix.chart.dimension` with tag `host=hostname`.
-- [**MongoDB**](/exporting/mongodb/README.md): Metrics are sent to the database in `JSON` format.
-- [**Prometheus**](/exporting/prometheus/README.md): Use an existing Prometheus installation to scrape metrics
+- [**MongoDB**](https://github.com/netdata/netdata/blob/master/exporting/mongodb/README.md): Metrics are sent to the database in `JSON` format.
+- [**Prometheus**](https://github.com/netdata/netdata/blob/master/exporting/prometheus/README.md): Use an existing Prometheus installation to scrape metrics
from node using the Netdata API.
-- [**Prometheus remote write**](/exporting/prometheus/remote_write/README.md). A binary snappy-compressed protocol
+- [**Prometheus remote write**](https://github.com/netdata/netdata/blob/master/exporting/prometheus/remote_write/README.md). A binary snappy-compressed protocol
buffer encoding over HTTP. Supports many [storage
providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
-- [**TimescaleDB**](/exporting/TIMESCALE.md): Use a community-built connector that takes JSON streams from a
+- [**TimescaleDB**](https://github.com/netdata/netdata/blob/master/exporting/TIMESCALE.md): Use a community-built connector that takes JSON streams from a
Netdata client and writes them to a TimescaleDB table.
### Chart filtering
@@ -292,7 +296,7 @@ Configure individual connectors and override any global settings with the follow
Netdata can send metrics to external databases using the TLS/SSL protocol. Unfortunately, some of
them do not support encrypted connections, so you will have to configure a reverse proxy to enable
HTTPS communication between Netdata and an external database. You can set up a reverse proxy with
-[Nginx](/docs/Running-behind-nginx.md).
+[Nginx](https://github.com/netdata/netdata/blob/master/docs/Running-behind-nginx.md).
## Exporting engine monitoring
diff --git a/exporting/TIMESCALE.md b/exporting/TIMESCALE.md
index 07aa1b7a2..2bd6db8c5 100644
--- a/exporting/TIMESCALE.md
+++ b/exporting/TIMESCALE.md
@@ -1,8 +1,12 @@
# Writing metrics to TimescaleDB
diff --git a/exporting/WALKTHROUGH.md b/exporting/WALKTHROUGH.md
index 0612b298a..5afd26045 100644
--- a/exporting/WALKTHROUGH.md
+++ b/exporting/WALKTHROUGH.md
@@ -1,8 +1,11 @@
# Netdata, Prometheus, Grafana stack
@@ -64,7 +67,7 @@ command to run (`/bin/bash`) and then chooses the base container images (`centos
be sitting inside the shell of the container.
After we have entered the shell we can install Netdata. This process could not be easier. If you take a look at [this
-link](/packaging/installer/README.md), the Netdata devs give us several one-liners to install Netdata. I have not had
+link](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md), the Netdata devs give us several one-liners to install Netdata. I have not had
any issues with these one liners and their bootstrapping scripts so far (If you guys run into anything do share). Run
the following command in your container.
@@ -223,7 +226,7 @@ the `chart` dimension. If you'd like you can combine the `chart` and `instance`
Let's give this a try: `netdata_system_cpu_percentage_average{chart="system.cpu", instance="netdata:19999"}`
This is the basics of using Prometheus to query Netdata. I'd advise everyone at this point to read [this
-page](/exporting/prometheus/README.md#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from
+page](https://github.com/netdata/netdata/blob/master/exporting/prometheus/README.md#using-netdata-with-prometheus). The key point here is that Netdata can export metrics from
its internal DB or can send metrics _as-collected_ by specifying the `source=as-collected` URL parameter like so.
If you choose to use
this method you will need to use Prometheus's set of functions here: to
diff --git a/exporting/aws_kinesis/README.md b/exporting/aws_kinesis/README.md
index 29dd3438e..7921a2654 100644
--- a/exporting/aws_kinesis/README.md
+++ b/exporting/aws_kinesis/README.md
@@ -1,8 +1,12 @@
# Export metrics to AWS Kinesis Data Streams
@@ -50,7 +54,8 @@ Set AWS credentials and stream name:
stream name = your_stream_name
```
-Alternatively, you can set AWS credentials for the `netdata` user using AWS SDK for C++ [standard methods](https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/credentials.html).
+Alternatively, you can set AWS credentials for the `netdata` user using AWS SDK for
+C++ [standard methods](https://docs.aws.amazon.com/sdk-for-cpp/v1/developer-guide/credentials.html).
Netdata automatically computes a partition key for every record in order to distribute records evenly across the
available shards.
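
The diff does not show how that partition key is derived; as a purely hypothetical sketch of the general idea (hash a per-record identifier so records spread evenly over the shards), not Netdata's actual scheme:

```c
#include <stdio.h>

/* djb2 string hash; any stable hash would do for spreading keys. */
static unsigned long hash_str(const char *s) {
    unsigned long h = 5381;
    while (*s)
        h = ((h << 5) + h) + (unsigned char)*s++;   /* h * 33 + c */
    return h;
}

/* Hypothetical: map a chart/dimension name to one of `shards` partition keys. */
static void partition_key_for(char *dst, size_t len, const char *metric, unsigned shards) {
    snprintf(dst, len, "%lu", hash_str(metric) % shards);
}
```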
diff --git a/exporting/aws_kinesis/aws_kinesis.c b/exporting/aws_kinesis/aws_kinesis.c
index 1d89cc79a..c7d7a9d34 100644
--- a/exporting/aws_kinesis/aws_kinesis.c
+++ b/exporting/aws_kinesis/aws_kinesis.c
@@ -52,7 +52,7 @@ int init_aws_kinesis_instance(struct instance *instance)
instance->prepare_header = NULL;
instance->check_response = NULL;
- instance->buffer = (void *)buffer_create(0);
+ instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
if (!instance->buffer) {
error("EXPORTING: cannot create buffer for AWS Kinesis exporting connector instance %s", instance->config.name);
return 1;
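
This hunk (and the many like it below) tracks a change to the `buffer_create()` signature: it now takes a second argument pointing at a statistics counter that accounts for the buffer's memory, and the unit-test hunks further down show that `NULL` is accepted when no accounting is wanted. A toy model of that pattern, with made-up names, just to illustrate what the extra argument is for:

```c
#include <stdlib.h>

/* Toy growable buffer that reports its allocation into an external counter.
 * Names are illustrative; the real BUFFER/buffer_create() differ in detail. */
typedef struct {
    char   *data;
    size_t  size;
    size_t *statistics;   /* may be NULL, as in the tests below */
} toy_buffer;

static toy_buffer *toy_buffer_create(size_t initial, size_t *statistics) {
    toy_buffer *b = calloc(1, sizeof(*b));
    if (!b) return NULL;
    b->size = initial ? initial : 64;
    b->data = malloc(b->size);
    if (!b->data) { free(b); return NULL; }
    b->statistics = statistics;
    if (statistics)
        *statistics += b->size;   /* memory accounting hook */
    return b;
}
```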
diff --git a/exporting/exporting_engine.c b/exporting/exporting_engine.c
index fd16d982b..2ad8cdd96 100644
--- a/exporting/exporting_engine.c
+++ b/exporting/exporting_engine.c
@@ -197,7 +197,7 @@ void *exporting_main(void *ptr)
heartbeat_t hb;
heartbeat_init(&hb);
- while (!netdata_exit) {
+ while (service_running(SERVICE_EXPORTERS)) {
heartbeat_next(&hb, step_ut);
engine->now = now_realtime_sec();
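
Here the exporting thread's main loop stops polling the global `netdata_exit` flag and instead asks `service_running(SERVICE_EXPORTERS)`. A minimal sketch of the pattern, assuming for illustration that each service has its own shutdown flag (the real mechanism behind `service_running()` is not shown in this diff):

```c
#include <stdbool.h>

/* Assumption for illustration only: one run/stop flag per service. */
static volatile bool exporters_running = true;

static void exporting_loop_sketch(void) {
    while (exporters_running) {            /* was: while (!netdata_exit) */
        /* heartbeat_next(...), then format and send the pending metrics */
    }
    /* A per-service flag lets the exporters thread be drained and stopped
     * on its own schedule during shutdown, rather than tying every thread
     * to a single global exit flag. */
}
```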
diff --git a/exporting/graphite/README.md b/exporting/graphite/README.md
index 6c96c78c9..afcdf7984 100644
--- a/exporting/graphite/README.md
+++ b/exporting/graphite/README.md
@@ -1,14 +1,19 @@
# Export metrics to Graphite providers
-You can use the Graphite connector for the [exporting engine](/exporting/README.md) to archive your agent's metrics to
-Graphite providers for long-term storage, further analysis, or correlation with data from other sources.
+You can use the Graphite connector for
+the [exporting engine](https://github.com/netdata/netdata/blob/master/exporting/README.md) to archive your agent's
+metrics to Graphite providers for long-term storage, further analysis, or correlation with data from other sources.
## Configuration
@@ -21,7 +26,8 @@ directory and set the following options:
destination = localhost:2003
```
-Add `:http` or `:https` modifiers to the connector type if you need to use other than a plaintext protocol. For example: `graphite:http:my_graphite_instance`,
+Add `:http` or `:https` modifiers to the connector type if you need to use a protocol other than plaintext. For
+example: `graphite:http:my_graphite_instance`,
`graphite:https:my_graphite_instance`. You can set basic HTTP authentication credentials using
```conf
@@ -29,7 +35,7 @@ Add `:http` or `:https` modifiers to the connector type if you need to use other
password = my_password
```
-The Graphite connector is further configurable using additional settings. See the [exporting reference
-doc](/exporting/README.md#options) for details.
+The Graphite connector is further configurable using additional settings. See
+the [exporting reference doc](https://github.com/netdata/netdata/blob/master/exporting/README.md#options) for details.
diff --git a/exporting/graphite/graphite.c b/exporting/graphite/graphite.c
index 0b33f6428..f1964f3e5 100644
--- a/exporting/graphite/graphite.c
+++ b/exporting/graphite/graphite.c
@@ -48,7 +48,7 @@ int init_graphite_instance(struct instance *instance)
instance->check_response = exporting_discard_response;
- instance->buffer = (void *)buffer_create(0);
+ instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
if (!instance->buffer) {
error("EXPORTING: cannot create buffer for graphite exporting connector instance %s", instance->config.name);
return 1;
@@ -96,7 +96,7 @@ void sanitize_graphite_label_value(char *dst, const char *src, size_t len)
int format_host_labels_graphite_plaintext(struct instance *instance, RRDHOST *host)
{
if (!instance->labels_buffer)
- instance->labels_buffer = buffer_create(1024);
+ instance->labels_buffer = buffer_create(1024, &netdata_buffers_statistics.buffers_exporters);
if (unlikely(!sending_labels_configured(instance)))
return 0;
diff --git a/exporting/init_connectors.c b/exporting/init_connectors.c
index bfb6525ea..15e1951f8 100644
--- a/exporting/init_connectors.c
+++ b/exporting/init_connectors.c
@@ -171,8 +171,8 @@ void simple_connector_init(struct instance *instance)
if (connector_specific_data->first_buffer)
return;
- connector_specific_data->header = buffer_create(0);
- connector_specific_data->buffer = buffer_create(0);
+ connector_specific_data->header = buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
+ connector_specific_data->buffer = buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
// create a ring buffer
struct simple_connector_buffer *first_buffer = NULL;
@@ -195,7 +195,7 @@ void simple_connector_init(struct instance *instance)
connector_specific_data->last_buffer = connector_specific_data->first_buffer;
if (*instance->config.username || *instance->config.password) {
- BUFFER *auth_string = buffer_create(0);
+ BUFFER *auth_string = buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
buffer_sprintf(auth_string, "%s:%s", instance->config.username, instance->config.password);
diff --git a/exporting/json/README.md b/exporting/json/README.md
index d129ffbd7..23ff555cb 100644
--- a/exporting/json/README.md
+++ b/exporting/json/README.md
@@ -1,13 +1,17 @@
# Export metrics to JSON document databases
-You can use the JSON connector for the [exporting engine](/exporting/README.md) to archive your agent's metrics to JSON
+You can use the JSON connector for the [exporting engine](https://github.com/netdata/netdata/blob/master/exporting/README.md) to archive your agent's metrics to JSON
document databases for long-term storage, further analysis, or correlation with data from other sources.
## Configuration
@@ -29,7 +33,7 @@ Add `:http` or `:https` modifiers to the connector type if you need to use other
password = my_password
```
-The JSON connector is further configurable using additional settings. See the [exporting reference
-doc](/exporting/README.md#options) for details.
+The JSON connector is further configurable using additional settings. See
+the [exporting reference doc](https://github.com/netdata/netdata/blob/master/exporting/README.md#options) for details.
diff --git a/exporting/json/json.c b/exporting/json/json.c
index dd53f6f0a..4cafd4c04 100644
--- a/exporting/json/json.c
+++ b/exporting/json/json.c
@@ -37,7 +37,7 @@ int init_json_instance(struct instance *instance)
instance->check_response = exporting_discard_response;
- instance->buffer = (void *)buffer_create(0);
+ instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
if (!instance->buffer) {
error("EXPORTING: cannot create buffer for json exporting connector instance %s", instance->config.name);
return 1;
@@ -96,7 +96,7 @@ int init_json_http_instance(struct instance *instance)
instance->check_response = exporting_discard_response;
- instance->buffer = (void *)buffer_create(0);
+ instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
simple_connector_init(instance);
@@ -119,7 +119,7 @@ int init_json_http_instance(struct instance *instance)
int format_host_labels_json_plaintext(struct instance *instance, RRDHOST *host)
{
if (!instance->labels_buffer)
- instance->labels_buffer = buffer_create(1024);
+ instance->labels_buffer = buffer_create(1024, &netdata_buffers_statistics.buffers_exporters);
if (unlikely(!sending_labels_configured(instance)))
return 0;
diff --git a/exporting/mongodb/README.md b/exporting/mongodb/README.md
index b10d54716..0cbe8f059 100644
--- a/exporting/mongodb/README.md
+++ b/exporting/mongodb/README.md
@@ -1,14 +1,19 @@
# Export metrics to MongoDB
-You can use the MongoDB connector for the [exporting engine](/exporting/README.md) to archive your agent's metrics to a
-MongoDB database for long-term storage, further analysis, or correlation with data from other sources.
+You can use the MongoDB connector for
+the [exporting engine](https://github.com/netdata/netdata/blob/master/exporting/README.md) to archive your agent's
+metrics to a MongoDB database for long-term storage, further analysis, or correlation with data from other sources.
## Prerequisites
diff --git a/exporting/mongodb/mongodb.c b/exporting/mongodb/mongodb.c
index 850d07fb3..186a7dcfd 100644
--- a/exporting/mongodb/mongodb.c
+++ b/exporting/mongodb/mongodb.c
@@ -106,7 +106,7 @@ int init_mongodb_instance(struct instance *instance)
instance->prepare_header = NULL;
instance->check_response = NULL;
- instance->buffer = (void *)buffer_create(0);
+ instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
if (!instance->buffer) {
error("EXPORTING: cannot create buffer for MongoDB exporting connector instance %s", instance->config.name);
return 1;
diff --git a/exporting/opentsdb/README.md b/exporting/opentsdb/README.md
index c9b1ab95a..c6069f372 100644
--- a/exporting/opentsdb/README.md
+++ b/exporting/opentsdb/README.md
@@ -1,14 +1,19 @@
# Export metrics to OpenTSDB
-You can use the OpenTSDB connector for the [exporting engine](/exporting/README.md) to archive your agent's metrics to OpenTSDB
-databases for long-term storage, further analysis, or correlation with data from other sources.
+You can use the OpenTSDB connector for
+the [exporting engine](https://github.com/netdata/netdata/blob/master/exporting/README.md) to archive your agent's
+metrics to OpenTSDB databases for long-term storage, further analysis, or correlation with data from other sources.
## Configuration
@@ -21,7 +26,8 @@ directory and set the following options:
destination = localhost:4242
```
-Add `:http` or `:https` modifiers to the connector type if you need to use other than a plaintext protocol. For example: `opentsdb:http:my_opentsdb_instance`,
+Add `:http` or `:https` modifiers to the connector type if you need to use a protocol other than plaintext. For
+example: `opentsdb:http:my_opentsdb_instance`,
`opentsdb:https:my_opentsdb_instance`. You can set basic HTTP authentication credentials using
```conf
@@ -29,7 +35,7 @@ Add `:http` or `:https` modifiers to the connector type if you need to use other
password = my_password
```
-The OpenTSDB connector is further configurable using additional settings. See the [exporting reference
-doc](/exporting/README.md#options) for details.
+The OpenTSDB connector is further configurable using additional settings. See
+the [exporting reference doc](https://github.com/netdata/netdata/blob/master/exporting/README.md#options) for details.
diff --git a/exporting/opentsdb/opentsdb.c b/exporting/opentsdb/opentsdb.c
index a974c1264..fc01ae461 100644
--- a/exporting/opentsdb/opentsdb.c
+++ b/exporting/opentsdb/opentsdb.c
@@ -45,7 +45,7 @@ int init_opentsdb_telnet_instance(struct instance *instance)
instance->prepare_header = NULL;
instance->check_response = exporting_discard_response;
- instance->buffer = (void *)buffer_create(0);
+ instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
if (!instance->buffer) {
error("EXPORTING: cannot create buffer for opentsdb telnet exporting connector instance %s", instance->config.name);
return 1;
@@ -102,7 +102,7 @@ int init_opentsdb_http_instance(struct instance *instance)
instance->prepare_header = opentsdb_http_prepare_header;
instance->check_response = exporting_discard_response;
- instance->buffer = (void *)buffer_create(0);
+ instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
if (!instance->buffer) {
error("EXPORTING: cannot create buffer for opentsdb HTTP exporting connector instance %s", instance->config.name);
return 1;
@@ -150,7 +150,7 @@ void sanitize_opentsdb_label_value(char *dst, const char *src, size_t len)
int format_host_labels_opentsdb_telnet(struct instance *instance, RRDHOST *host) {
if(!instance->labels_buffer)
- instance->labels_buffer = buffer_create(1024);
+ instance->labels_buffer = buffer_create(1024, &netdata_buffers_statistics.buffers_exporters);
if (unlikely(!sending_labels_configured(instance)))
return 0;
@@ -283,7 +283,7 @@ void opentsdb_http_prepare_header(struct instance *instance)
int format_host_labels_opentsdb_http(struct instance *instance, RRDHOST *host) {
if (!instance->labels_buffer)
- instance->labels_buffer = buffer_create(1024);
+ instance->labels_buffer = buffer_create(1024, &netdata_buffers_statistics.buffers_exporters);
if (unlikely(!sending_labels_configured(instance)))
return 0;
diff --git a/exporting/process_data.c b/exporting/process_data.c
index fbcda0d9b..eb492535d 100644
--- a/exporting/process_data.c
+++ b/exporting/process_data.c
@@ -77,8 +77,8 @@ NETDATA_DOUBLE exporting_calculate_value_from_stored_data(
time_t before = instance->before;
// find the edges of the rrd database for this chart
- time_t first_t = rd->tiers[0]->query_ops->oldest_time(rd->tiers[0]->db_metric_handle);
- time_t last_t = rd->tiers[0]->query_ops->latest_time(rd->tiers[0]->db_metric_handle);
+ time_t first_t = rd->tiers[0].query_ops->oldest_time_s(rd->tiers[0].db_metric_handle);
+ time_t last_t = rd->tiers[0].query_ops->latest_time_s(rd->tiers[0].db_metric_handle);
time_t update_every = st->update_every;
struct storage_engine_query_handle handle;
@@ -126,11 +126,11 @@ NETDATA_DOUBLE exporting_calculate_value_from_stored_data(
size_t counter = 0;
NETDATA_DOUBLE sum = 0;
- for (rd->tiers[0]->query_ops->init(rd->tiers[0]->db_metric_handle, &handle, after, before); !rd->tiers[0]->query_ops->is_finished(&handle);) {
- STORAGE_POINT sp = rd->tiers[0]->query_ops->next_metric(&handle);
+ for (rd->tiers[0].query_ops->init(rd->tiers[0].db_metric_handle, &handle, after, before, STORAGE_PRIORITY_LOW); !rd->tiers[0].query_ops->is_finished(&handle);) {
+ STORAGE_POINT sp = rd->tiers[0].query_ops->next_metric(&handle);
points_read++;
- if (unlikely(storage_point_is_empty(sp))) {
+ if (unlikely(storage_point_is_gap(sp))) {
// not collected
continue;
}
@@ -138,7 +138,7 @@ NETDATA_DOUBLE exporting_calculate_value_from_stored_data(
sum += sp.sum;
counter += sp.count;
}
- rd->tiers[0]->query_ops->finalize(&handle);
+ rd->tiers[0].query_ops->finalize(&handle);
global_statistics_exporters_query_completed(points_read);
if (unlikely(!counter)) {
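
The query hunks above make a few related changes: `rd->tiers[]` entries are now embedded structs rather than pointers (`.` instead of `->`), the time lookups gained an `_s` suffix (`oldest_time_s`/`latest_time_s`), `init()` takes a query priority (`STORAGE_PRIORITY_LOW`), and empty slots are tested with `storage_point_is_gap()`. With the diff markers stripped, the post-patch read loop is roughly the following (an excerpt condensed from the code above, not a standalone program):

```c
struct storage_engine_query_handle handle;
NETDATA_DOUBLE sum = 0;
size_t points_read = 0, counter = 0;

rd->tiers[0].query_ops->init(rd->tiers[0].db_metric_handle, &handle,
                             after, before, STORAGE_PRIORITY_LOW);
while (!rd->tiers[0].query_ops->is_finished(&handle)) {
    STORAGE_POINT sp = rd->tiers[0].query_ops->next_metric(&handle);
    points_read++;
    if (unlikely(storage_point_is_gap(sp)))
        continue;                       /* nothing collected in this slot */
    sum += sp.sum;
    counter += sp.count;
}
rd->tiers[0].query_ops->finalize(&handle);
/* the exported value is then sum / counter, guarded by the !counter check */
```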
@@ -397,7 +397,7 @@ int simple_connector_end_batch(struct instance *instance)
struct simple_connector_buffer *last_buffer = simple_connector_data->last_buffer;
if (!last_buffer->buffer) {
- last_buffer->buffer = buffer_create(0);
+ last_buffer->buffer = buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
}
if (last_buffer->used) {
@@ -419,7 +419,7 @@ int simple_connector_end_batch(struct instance *instance)
if (last_buffer->header)
buffer_flush(last_buffer->header);
else
- last_buffer->header = buffer_create(0);
+ last_buffer->header = buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
if (instance->prepare_header)
instance->prepare_header(instance);
diff --git a/exporting/prometheus/README.md b/exporting/prometheus/README.md
index ae94867fa..97e9c632f 100644
--- a/exporting/prometheus/README.md
+++ b/exporting/prometheus/README.md
@@ -1,9 +1,14 @@
+
import { OneLineInstallWget, OneLineInstallCurl } from '@site/src/components/OneLineInstall/'
# Using Netdata with Prometheus
@@ -17,7 +22,8 @@ are starting at a fresh ubuntu shell (whether you'd like to follow along in a VM
### Installing Netdata
-There are number of ways to install Netdata according to [Installation](/packaging/installer/README.md). The suggested way
+There are a number of ways to install Netdata, as described in
+[Installation](https://github.com/netdata/netdata/blob/master/packaging/installer/README.md). The suggested method
installs the latest Netdata and keeps it updated automatically.
@@ -77,24 +83,24 @@ sudo tar -xvf /tmp/prometheus-*linux-amd64.tar.gz -C /opt/prometheus --strip=1
We will use the following `prometheus.yml` file. Save it at `/opt/prometheus/prometheus.yml`.
-Make sure to replace `your.netdata.ip` with the IP or hostname of the host running Netdata.
+Make sure to replace `your.netdata.ip` with the IP or hostname of the host running Netdata.
```yaml
# my global config
global:
- scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
+ scrape_interval: 5s # Set the scrape interval to every 5 seconds. Default is every 1 minute.
evaluation_interval: 5s # Evaluate rules every 5 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Attach these labels to any time series or alerts when communicating with
# external systems (federation, remote storage, Alertmanager).
external_labels:
- monitor: 'codelab-monitor'
+ monitor: 'codelab-monitor'
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
- # - "first.rules"
- # - "second.rules"
+# - "first.rules"
+# - "second.rules"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
@@ -106,7 +112,7 @@ scrape_configs:
# scheme defaults to 'http'.
static_configs:
- - targets: ['0.0.0.0:9090']
+ - targets: [ '0.0.0.0:9090' ]
- job_name: 'netdata-scrape'
@@ -114,7 +120,7 @@ scrape_configs:
params:
# format: prometheus | prometheus_all_hosts
# You can use `prometheus_all_hosts` if you want Prometheus to set the `instance` to your hostname instead of IP
- format: [prometheus]
+ format: [ prometheus ]
#
# sources: as-collected | raw | average | sum | volume
# default is: average
@@ -126,7 +132,7 @@ scrape_configs:
honor_labels: true
static_configs:
- - targets: ['{your.netdata.ip}:19999']
+ - targets: [ '{your.netdata.ip}:19999' ]
```
#### Install nodes.yml
@@ -202,7 +208,7 @@ sudo systemctl start prometheus
sudo systemctl enable prometheus
```
-Prometheus should now start and listen on port 9090. Attempt to head there with your browser.
+Prometheus should now start and listen on port 9090. Attempt to head there with your browser.
If everything is working correctly when you fetch `http://your.prometheus.ip:9090` you will see a 'Status' tab. Click
this and click on 'targets'. We should see the Netdata host as a scraped target.
@@ -219,16 +225,16 @@ Before explaining the changes, we have to understand the key differences between
Each chart in Netdata has several properties (common to all its metrics):
-- `chart_id` - uniquely identifies a chart.
+- `chart_id` - uniquely identifies a chart.
-- `chart_name` - a more human friendly name for `chart_id`, also unique.
+- `chart_name` - a more human friendly name for `chart_id`, also unique.
-- `context` - this is the template of the chart. All disk I/O charts have the same context, all mysql requests charts
- have the same context, etc. This is used for alarm templates to match all the charts they should be attached to.
+- `context` - this is the template of the chart. All disk I/O charts have the same context, all mysql requests charts
+ have the same context, etc. This is used for alarm templates to match all the charts they should be attached to.
-- `family` groups a set of charts together. It is used as the submenu of the dashboard.
+- `family` groups a set of charts together. It is used as the submenu of the dashboard.
-- `units` is the units for all the metrics attached to the chart.
+- `units` is the units for all the metrics attached to the chart.
#### dimensions
@@ -240,44 +246,44 @@ they are both in the same chart).
Netdata can send metrics to Prometheus from 3 data sources:
-- `as collected` or `raw` - this data source sends the metrics to Prometheus as they are collected. No conversion is
- done by Netdata. The latest value for each metric is just given to Prometheus. This is the most preferred method by
- Prometheus, but it is also the harder to work with. To work with this data source, you will need to understand how
- to get meaningful values out of them.
+- `as collected` or `raw` - this data source sends the metrics to Prometheus as they are collected. No conversion is
+ done by Netdata. The latest value for each metric is just given to Prometheus. This is the most preferred method by
+  Prometheus, but it is also the hardest to work with. To work with this data source, you will need to understand how
+ to get meaningful values out of them.
+
+ The format of the metrics is: `CONTEXT{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
- The format of the metrics is: `CONTEXT{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+  If the metric is a counter (`incremental` in Netdata lingo), `_total` is appended to the context.
- If the metric is a counter (`incremental` in Netdata lingo), `_total` is appended the context.
+ Unlike Prometheus, Netdata allows each dimension of a chart to have a different algorithm and conversion constants
+  (`multiplier` and `divisor`). In the case that the dimensions of a chart are heterogeneous, Netdata will use this
+ format: `CONTEXT_DIMENSION{chart="CHART",family="FAMILY"}`
- Unlike Prometheus, Netdata allows each dimension of a chart to have a different algorithm and conversion constants
- (`multiplier` and `divisor`). In this case, that the dimensions of a charts are heterogeneous, Netdata will use this
- format: `CONTEXT_DIMENSION{chart="CHART",family="FAMILY"}`
+- `average` - this data source uses the Netdata database to send the metrics to Prometheus as they are presented on
+ the Netdata dashboard. So, all the metrics are sent as gauges, at the units they are presented in the Netdata
+ dashboard charts. This is the easiest to work with.
-- `average` - this data source uses the Netdata database to send the metrics to Prometheus as they are presented on
- the Netdata dashboard. So, all the metrics are sent as gauges, at the units they are presented in the Netdata
- dashboard charts. This is the easiest to work with.
+ The format of the metrics is: `CONTEXT_UNITS_average{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
- The format of the metrics is: `CONTEXT_UNITS_average{chart="CHART",family="FAMILY",dimension="DIMENSION"}`.
+ When this source is used, Netdata keeps track of the last access time for each Prometheus server fetching the
+  metrics. This last access time is used on subsequent queries from the same Prometheus server to identify the
+  time-frame over which the `average` will be calculated.
- When this source is used, Netdata keeps track of the last access time for each Prometheus server fetching the
- metrics. This last access time is used at the subsequent queries of the same Prometheus server to identify the
- time-frame the `average` will be calculated.
+ So, no matter how frequently Prometheus scrapes Netdata, it will get all the database data.
+ To identify each Prometheus server, Netdata uses by default the IP of the client fetching the metrics.
- So, no matter how frequently Prometheus scrapes Netdata, it will get all the database data.
- To identify each Prometheus server, Netdata uses by default the IP of the client fetching the metrics.
-
- If there are multiple Prometheus servers fetching data from the same Netdata, using the same IP, each Prometheus
- server can append `server=NAME` to the URL. Netdata will use this `NAME` to uniquely identify the Prometheus server.
+ If there are multiple Prometheus servers fetching data from the same Netdata, using the same IP, each Prometheus
+ server can append `server=NAME` to the URL. Netdata will use this `NAME` to uniquely identify the Prometheus server.
+- `sum` or `volume` is like `average`, but instead of averaging the values, it sums them.
+- `sum` or `volume`, is like `average` but instead of averaging the values, it sums them.
- The format of the metrics is: `CONTEXT_UNITS_sum{chart="CHART",family="FAMILY",dimension="DIMENSION"}`. All the
- other operations are the same with `average`.
+ The format of the metrics is: `CONTEXT_UNITS_sum{chart="CHART",family="FAMILY",dimension="DIMENSION"}`. All the
+  other operations are the same as with `average`.
- To change the data source to `sum` or `as-collected` you need to provide the `source` parameter in the request URL.
- e.g.: `http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected`
+ To change the data source to `sum` or `as-collected` you need to provide the `source` parameter in the request URL.
+ e.g.: `http://your.netdata.ip:19999/api/v1/allmetrics?format=prometheus&help=yes&source=as-collected`
- Keep in mind that early versions of Netdata were sending the metrics as: `CHART_DIMENSION{}`.
+ Keep in mind that early versions of Netdata were sending the metrics as: `CHART_DIMENSION{}`.
### Querying Metrics
@@ -364,7 +370,7 @@ functionality of Netdata this ignores any upstream hosts - so you should conside
```yaml
metrics_path: '/api/v1/allmetrics'
params:
- format: [prometheus_all_hosts]
+ format: [ prometheus_all_hosts ]
honor_labels: true
```
@@ -389,7 +395,9 @@ To save bandwidth, and because Prometheus does not use them anyway, `# TYPE` and
wanted they can be re-enabled via `types=yes` and `help=yes`, e.g.
`/api/v1/allmetrics?format=prometheus&types=yes&help=yes`
-Note that if enabled, the `# TYPE` and `# HELP` lines are repeated for every occurrence of a metric, which goes against the Prometheus documentation's [specification for these lines](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#comments-help-text-and-type-information).
+Note that if enabled, the `# TYPE` and `# HELP` lines are repeated for every occurrence of a metric, which goes against
+the Prometheus
+documentation's [specification for these lines](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#comments-help-text-and-type-information).
### Names and IDs
@@ -408,8 +416,8 @@ The default is controlled in `exporting.conf`:
You can overwrite it from Prometheus, by appending to the URL:
-- `&names=no` to get IDs (the old behaviour)
-- `&names=yes` to get names
+- `&names=no` to get IDs (the old behaviour)
+- `&names=yes` to get names
### Filtering metrics sent to Prometheus
@@ -420,7 +428,8 @@ Netdata can filter the metrics it sends to Prometheus with this setting:
send charts matching = *
```
-This settings accepts a space separated list of [simple patterns](/libnetdata/simple_pattern/README.md) to match the
+This setting accepts a space-separated list
+of [simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) to match the
**charts** to be sent to Prometheus. Each pattern can use `*` as wildcard, any number of times (e.g `*a*b*c*` is valid).
Patterns starting with `!` give a negative match (e.g `!*.bad users.* groups.*` will send all the users and groups
except `bad` user and `bad` group). The order is important: the first match (positive or negative) left to right, is
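
The paragraph above (cut short by the hunk boundary) describes the matching rules: a space-separated list of patterns, `*` as a wildcard, a leading `!` for a negative match, and the first match wins. A small self-contained illustration of those rules, independent of libnetdata's actual `simple_pattern` code:

```c
#include <stdbool.h>
#include <stddef.h>

/* Match s against a glob where '*' matches any run of characters. */
static bool glob_match(const char *p, const char *s) {
    if (*p == '\0') return *s == '\0';
    if (*p == '*') {
        for (;; s++) {
            if (glob_match(p + 1, s)) return true;
            if (*s == '\0') return false;
        }
    }
    return *s == *p && glob_match(p + 1, s + 1);
}

/* First pattern (left to right) that matches decides; '!' means reject. */
static bool chart_allowed(char **patterns, size_t n, const char *chart) {
    for (size_t i = 0; i < n; i++) {
        const char *p = patterns[i];
        bool negative = (*p == '!');
        if (negative) p++;
        if (glob_match(p, chart))
            return !negative;
    }
    return false;   /* no pattern matched */
}
```

Under these toy semantics, with the list `!*.bad users.* groups.*`, a chart named `users.admin` matches the second pattern and is sent, while `users.bad` hits the leading negative pattern first and is dropped.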
diff --git a/exporting/prometheus/prometheus.c b/exporting/prometheus/prometheus.c
index 294d8ec2c..dc675dd32 100644
--- a/exporting/prometheus/prometheus.c
+++ b/exporting/prometheus/prometheus.c
@@ -317,7 +317,7 @@ void format_host_labels_prometheus(struct instance *instance, RRDHOST *host)
return;
if (!instance->labels_buffer)
- instance->labels_buffer = buffer_create(1024);
+ instance->labels_buffer = buffer_create(1024, &netdata_buffers_statistics.buffers_exporters);
struct format_prometheus_label_callback tmp = {
.instance = instance,
diff --git a/exporting/prometheus/remote_write/README.md b/exporting/prometheus/remote_write/README.md
index 54c5d6588..9bda02d49 100644
--- a/exporting/prometheus/remote_write/README.md
+++ b/exporting/prometheus/remote_write/README.md
@@ -1,8 +1,11 @@
# Prometheus remote write exporting connector
@@ -15,7 +18,7 @@ than 20 external storage providers for long-term archiving and further analysis.
To use the Prometheus remote write API with [storage
providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage), install
[protobuf](https://developers.google.com/protocol-buffers/) and [snappy](https://github.com/google/snappy) libraries.
-Next, [reinstall Netdata](/packaging/installer/REINSTALL.md), which detects that the required libraries and utilities
+Next, [reinstall Netdata](https://github.com/netdata/netdata/blob/master/packaging/installer/REINSTALL.md), which detects that the required libraries and utilities
are now available.
## Configuration
diff --git a/exporting/prometheus/remote_write/remote_write.c b/exporting/prometheus/remote_write/remote_write.c
index 2e2fa3c12..1857ca333 100644
--- a/exporting/prometheus/remote_write/remote_write.c
+++ b/exporting/prometheus/remote_write/remote_write.c
@@ -104,7 +104,7 @@ int init_prometheus_remote_write_instance(struct instance *instance)
instance->prepare_header = prometheus_remote_write_prepare_header;
instance->check_response = process_prometheus_remote_write_response;
- instance->buffer = (void *)buffer_create(0);
+ instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
if (uv_mutex_init(&instance->mutex))
return 1;
diff --git a/exporting/pubsub/README.md b/exporting/pubsub/README.md
index 2f9ac83d4..10252f167 100644
--- a/exporting/pubsub/README.md
+++ b/exporting/pubsub/README.md
@@ -1,8 +1,12 @@
# Export metrics to Google Cloud Pub/Sub Service
diff --git a/exporting/pubsub/pubsub.c b/exporting/pubsub/pubsub.c
index b218338f1..d65fc2c40 100644
--- a/exporting/pubsub/pubsub.c
+++ b/exporting/pubsub/pubsub.c
@@ -30,7 +30,7 @@ int init_pubsub_instance(struct instance *instance)
instance->prepare_header = NULL;
instance->check_response = NULL;
- instance->buffer = (void *)buffer_create(0);
+ instance->buffer = (void *)buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
if (!instance->buffer) {
error("EXPORTING: cannot create buffer for Pub/Sub exporting connector instance %s", instance->config.name);
return 1;
diff --git a/exporting/send_data.c b/exporting/send_data.c
index 1d20f3b74..045aab6ed 100644
--- a/exporting/send_data.c
+++ b/exporting/send_data.c
@@ -64,7 +64,7 @@ void simple_connector_receive_response(int *sock, struct instance *instance)
{
static BUFFER *response = NULL;
if (!response)
- response = buffer_create(4096);
+ response = buffer_create(4096, &netdata_buffers_statistics.buffers_exporters);
struct stats *stats = &instance->stats;
#ifdef ENABLE_HTTPS
diff --git a/exporting/send_internal_metrics.c b/exporting/send_internal_metrics.c
index 515cda3b2..e4347964f 100644
--- a/exporting/send_internal_metrics.c
+++ b/exporting/send_internal_metrics.c
@@ -65,7 +65,7 @@ void send_internal_metrics(struct instance *instance)
if (!stats->initialized) {
char id[RRD_ID_LENGTH_MAX + 1];
- BUFFER *family = buffer_create(0);
+ BUFFER *family = buffer_create(0, &netdata_buffers_statistics.buffers_exporters);
buffer_sprintf(family, "exporting_%s", instance->config.name);
diff --git a/exporting/tests/test_exporting_engine.c b/exporting/tests/test_exporting_engine.c
index 6ea6b1e5c..418be0b01 100644
--- a/exporting/tests/test_exporting_engine.c
+++ b/exporting/tests/test_exporting_engine.c
@@ -612,7 +612,7 @@ static void test_exporting_discard_response(void **state)
{
struct engine *engine = *state;
- BUFFER *response = buffer_create(0);
+ BUFFER *response = buffer_create(0, NULL);
buffer_sprintf(response, "Test response");
assert_int_equal(exporting_discard_response(response, engine->instance_root), 0);
@@ -651,8 +651,8 @@ static void test_simple_connector_send_buffer(void **state)
int sock = 1;
int failures = 3;
size_t buffered_metrics = 1;
- BUFFER *header = buffer_create(0);
- BUFFER *buffer = buffer_create(0);
+ BUFFER *header = buffer_create(0, NULL);
+ BUFFER *buffer = buffer_create(0, NULL);
buffer_strcat(header, "test header\n");
buffer_strcat(buffer, "test buffer\n");
@@ -695,10 +695,10 @@ static void test_simple_connector_worker(void **state)
instance->connector_specific_data = simple_connector_data;
simple_connector_data->last_buffer = callocz(1, sizeof(struct simple_connector_buffer));
simple_connector_data->first_buffer = simple_connector_data->last_buffer;
- simple_connector_data->header = buffer_create(0);
- simple_connector_data->buffer = buffer_create(0);
- simple_connector_data->last_buffer->header = buffer_create(0);
- simple_connector_data->last_buffer->buffer = buffer_create(0);
+ simple_connector_data->header = buffer_create(0, NULL);
+ simple_connector_data->buffer = buffer_create(0, NULL);
+ simple_connector_data->last_buffer->header = buffer_create(0, NULL);
+ simple_connector_data->last_buffer->buffer = buffer_create(0, NULL);
strcpy(simple_connector_data->connected_to, "localhost");
buffer_sprintf(simple_connector_data->last_buffer->header, "test header");
@@ -822,7 +822,7 @@ static void test_flush_host_labels(void **state)
struct engine *engine = *state;
struct instance *instance = engine->instance_root;
- instance->labels_buffer = buffer_create(12);
+ instance->labels_buffer = buffer_create(12, NULL);
buffer_strcat(instance->labels_buffer, "check string");
assert_int_equal(buffer_strlen(instance->labels_buffer), 12);
@@ -1133,7 +1133,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(void **state)
{
(void)state;
- BUFFER *buffer = buffer_create(0);
+ BUFFER *buffer = buffer_create(0, NULL);
RRDSET *st;
rrdset_foreach_read(st, localhost);
@@ -1241,8 +1241,8 @@ static void test_prometheus_remote_write_prepare_header(void **state)
struct simple_connector_data *simple_connector_data = callocz(1, sizeof(struct simple_connector_data));
instance->connector_specific_data = simple_connector_data;
simple_connector_data->last_buffer = callocz(1, sizeof(struct simple_connector_buffer));
- simple_connector_data->last_buffer->header = buffer_create(0);
- simple_connector_data->last_buffer->buffer = buffer_create(0);
+ simple_connector_data->last_buffer->header = buffer_create(0, NULL);
+ simple_connector_data->last_buffer->buffer = buffer_create(0, NULL);
strcpy(simple_connector_data->connected_to, "localhost");
buffer_sprintf(simple_connector_data->last_buffer->buffer, "test buffer");
@@ -1269,7 +1269,7 @@ static void test_prometheus_remote_write_prepare_header(void **state)
static void test_process_prometheus_remote_write_response(void **state)
{
(void)state;
- BUFFER *buffer = buffer_create(0);
+ BUFFER *buffer = buffer_create(0, NULL);
buffer_sprintf(buffer, "HTTP/1.1 200 OK\r\n");
assert_int_equal(process_prometheus_remote_write_response(buffer, NULL), 0);
@@ -1834,7 +1834,7 @@ static void test_format_batch_mongodb(void **state)
connector_specific_data->first_buffer->next = current_buffer;
connector_specific_data->last_buffer = current_buffer;
- BUFFER *buffer = buffer_create(0);
+ BUFFER *buffer = buffer_create(0, NULL);
buffer_sprintf(buffer, "{ \"metric\": \"test_metric\" }\n");
instance->buffer = buffer;
stats->buffered_metrics = 1;
diff --git a/exporting/tests/test_exporting_engine.h b/exporting/tests/test_exporting_engine.h
index a9180a518..24dac8630 100644
--- a/exporting/tests/test_exporting_engine.h
+++ b/exporting/tests/test_exporting_engine.h
@@ -55,9 +55,6 @@ int __wrap_connect_to_one_of(
size_t *reconnects_counter,
char *connected_to,
size_t connected_to_size);
-void __rrdhost_check_rdlock(RRDHOST *host, const char *file, const char *function, const unsigned long line);
-void __rrdset_check_rdlock(RRDSET *st, const char *file, const char *function, const unsigned long line);
-void __rrd_check_rdlock(const char *file, const char *function, const unsigned long line);
time_t __mock_rrddim_query_oldest_time(STORAGE_METRIC_HANDLE *db_metric_handle);
time_t __mock_rrddim_query_latest_time(STORAGE_METRIC_HANDLE *db_metric_handle);
void __mock_rrddim_query_init(STORAGE_METRIC_HANDLE *db_metric_handle, struct rrddim_query_handle *handle, time_t start_time, time_t end_time);
diff --git a/health/Makefile.am b/health/Makefile.am
index 7c8d7f9d2..f0cbb7715 100644
--- a/health/Makefile.am
+++ b/health/Makefile.am
@@ -36,13 +36,14 @@ dist_healthconfig_DATA = \
health.d/cgroups.conf \
health.d/cpu.conf \
health.d/cockroachdb.conf \
+ health.d/consul.conf \
health.d/disks.conf \
health.d/dnsmasq_dhcp.conf \
health.d/dns_query.conf \
health.d/dockerd.conf \
+ health.d/elasticsearch.conf \
health.d/entropy.conf \
health.d/exporting.conf \
- health.d/fping.conf \
health.d/geth.conf \
health.d/ioping.conf \
health.d/gearman.conf \
diff --git a/health/QUICKSTART.md b/health/QUICKSTART.md
deleted file mode 100644
index bc2da2df1..000000000
--- a/health/QUICKSTART.md
+++ /dev/null
@@ -1,143 +0,0 @@
-
-
-# Health quickstart
-
-In this quickstart guide, you'll learn the basics of editing health configuration files. With this knowledge, you
-will be able to customize how and when Netdata triggers alarms based on the health and performance of your system or
-infrastructure.
-
-To learn about more advanced health configurations, visit the [health reference guide](/health/REFERENCE.md).
-
-## Edit health configuration files
-
-You should [use `edit-config`](/docs/configure/nodes.md) to edit Netdata's health configuration files. `edit-config`
-will open your system's default terminal editor for you to make your changes. Once you've saved and closed the editor,
-`edit-config` will copy your edited file into `/etc/netdata/health.d/`, which will override the stock file in
-`/usr/lib/netdata/conf.d/health.d/` and ensure your customizations are persistent between updates.
-
-For example, to edit the `cpu.conf` health configuration file, you would run:
-
-```bash
-cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /etc/netdata/
-./edit-config health.d/cpu.conf
-```
-
-Each health configuration file contains one or more health entities, which always begin with an `alarm:` or `template:`
-line. You can edit these entities based on your needs. To make any changes live, be sure to [reload your health
-configuration](#reload-health-configuration).
-
-## Reference Netdata's stock health configuration files
-
-While you should always [use `edit-config`](#edit-health-configuration-files), you might also want to view the stock
-health configuration files Netdata ships with. Stock files can be useful as reference material, or to determine which
-file you should edit with `edit-config`.
-
-By default, Netdata will put health configuration files in `/usr/lib/netdata/conf.d/health.d`. However, you can
-double-check the location of these files by navigating to `http://NODE:19999/netdata.conf`, replacing `NODE` with the IP
-address or hostname for your Agent dashboard, looking for the `stock health configuration directory` option. The value
-here will show the correct path for your installation.
-
-```conf
-[directories]
- ...
- # stock health config = /usr/lib/netdata/conf.d/health.d
-```
-
-Navigate to the health configuration directory to see all the available files and open them for reading.
-
-```bash
-cd /usr/lib/netdata/conf.d/health.d/
-ls
-adaptec_raid.conf entropy.conf memory.conf squid.conf
-am2320.conf fping.conf mongodb.conf
-apache.conf mysql.conf swap.conf
-...
-```
-
-> ⚠️ If you edit configuration files in your stock health configuration directory, Netdata will overwrite them during
-> any updates. Please use `edit-config` as described in the [section above](#edit-health-configuration-files).
-
-## Write a new health entity
-
-While tuning existing alarms may work in some cases, you may need to write entirely new health entities based on how
-your systems and applications work.
-
-To write a new health entity, let's create a new file inside of the `health.d/` directory. We'll name our file
-`example.conf` for now.
-
-```bash
-./edit-config health.d/example.conf
-```
-
-As an example, let's build a health entity that triggers an alarm when your system's RAM usage goes above 80%. Copy and paste
-the following into the editor:
-
-```yaml
- alarm: ram_usage
- on: system.ram
-lookup: average -1m percentage of used
- units: %
- every: 1m
- warn: $this > 80
- crit: $this > 90
- info: The percentage of RAM used by the system.
-```
-
-Let's look into each of the lines to see how they create a working health entity.
-
-- `alarm`: The name for your new entity. The name needs to follow these requirements:
- - Any alphabet letter or number.
- - The symbols `.` and `_`.
- - Cannot be `chart name`, `dimension name`, `family name`, or `chart variable names`.
-- `on`: Which chart the entity listens to.
-- `lookup`: Which metrics the alarm monitors, the duration of time to monitor, and how to process the metrics into a
- usable format.
- - `average`: Calculate the average of all the metrics collected.
- - `-1m`: Use metrics from 1 minute ago until now to calculate that average.
- - `percentage`: Clarify that we're calculating a percentage of RAM usage.
- - `of used`: Specify which dimension (`used`) on the `system.ram` chart you want to monitor with this entity.
-- `units`: Use percentages rather than absolute units.
-- `every`: How often to perform the `lookup` calculation to decide whether or not to trigger this alarm.
-- `warn`/`crit`: The value at which Netdata should trigger a warning or critical alarm.
-- `info`: A description of the alarm, which will appear in the dashboard and notifications.
-
-Let's put all these lines into a human-readable format.
-
-This health entity, named **ram_usage**, watches the **system.ram** chart. It looks up the last **1 minute** of
-metrics from the **used** dimension and calculates the **average** of all those metrics in a **percentage** format,
-using a **% unit**. The entity performs this lookup **every minute**. If the average RAM usage percentage over the last
-1 minute is **more than 80%**, the entity triggers a warning alarm. If the usage is **more than 90%**, the entity
-triggers a critical alarm.
-
-Now that you've written a new health entity, you need to reload it to see it live on the dashboard.
-
-## Reload health configuration
-
-To make any changes to your health configuration live, you must reload Netdata's health monitoring system. To do that
-without restarting all of Netdata, run the following:
-
-```bash
-netdatacli reload-health
-```
-
-If you receive an error like `command not found`, this means that `netdatacli` is not installed in your `$PATH`. In that
- case, you can reload only the health component by sending a `SIGUSR2` to Netdata:
-
-```bash
-killall -USR2 netdata
-```
-## What's next?
-
-To learn about all of Netdata's health configuration options, view the [reference guide](/health/REFERENCE.md) and
-[daemon configuration](/daemon/config/README.md#health-section-options) for additional options available in the
-`[health]` section of `netdata.conf`.
-
-Or, get guided insights into specific health configurations with our [health guides](/health/README.md#guides).
-
-Finally, move on to Netdata's [notification system](/health/notifications/README.md) to learn more about how Netdata can
-let you know when the health of your systems or apps goes awry.
-
-
diff --git a/health/README.md b/health/README.md
index 2b1caf548..460f65680 100644
--- a/health/README.md
+++ b/health/README.md
@@ -1,6 +1,10 @@
# Health monitoring
@@ -10,15 +14,13 @@ worked closely with our community of DevOps engineers, SREs, and developers to d
alarms that work without any configuration.
The Agent's health monitoring system is also dynamic and fully customizable. You can write entirely new alarms, tune the
-community-configured alarms for every app/service [the Agent collects metrics from](/collectors/COLLECTORS.md), or
+community-configured alarms for every app/service [the Agent collects metrics from](https://github.com/netdata/netdata/blob/master/collectors/COLLECTORS.md), or
silence anything you're not interested in. You can even power complex lookups by running statistical algorithms against
your metrics.
Ready to take the next steps with health monitoring?
-[Quickstart](/health/QUICKSTART.md)
-
-[Configuration reference](/health/REFERENCE.md)
+[Configuration reference](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md)
## Guides
@@ -26,13 +28,13 @@ Every infrastructure is different, so we're not interested in mandating how you
monitoring features. Instead, these guides should give you the details you need to tweak alarms to your heart's
content.
-[Stopping notifications for individual alarms](/docs/guides/monitor/stop-notifications-alarms.md)
+[Stopping notifications for individual alarms](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/stop-notifications-alarms.md)
-[Use dimension templates to create dynamic alarms](/docs/guides/monitor/dimension-templates.md)
+[Use dimension templates to create dynamic alarms](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/dimension-templates.md)
## Related features
-**[Notifications](/health/notifications/README.md)**: Get notified about ongoing alarms from your Agents via your
+**[Notifications](https://github.com/netdata/netdata/blob/master/health/notifications/README.md)**: Get notified about ongoing alarms from your Agents via your
favorite platform(s), such as Slack, Discord, PagerDuty, email, and much more.
diff --git a/health/REFERENCE.md b/health/REFERENCE.md
index 90da4102a..27031cd19 100644
--- a/health/REFERENCE.md
+++ b/health/REFERENCE.md
@@ -1,6 +1,10 @@
# Health configuration reference
@@ -11,7 +15,7 @@ This guide contains information about editing health configuration files to twea
entities that are customized to the needs of your infrastructure.
To learn the basics of locating and editing health configuration files, see the [health
-quickstart](/health/QUICKSTART.md).
+quickstart](https://github.com/netdata/netdata/blob/master/health/QUICKSTART.md).
## Health configuration files
@@ -19,7 +23,7 @@ You can configure the Agent's health watchdog service by editing files in two lo
- The `[health]` section in `netdata.conf`. By editing the daemon's behavior, you can disable health monitoring
altogether, run health checks more or less often, and more. See [daemon
- configuration](/daemon/config/README.md#health-section-options) for a table of all the available settings, their
+ configuration](https://github.com/netdata/netdata/blob/master/daemon/config/README.md#health-section-options) for a table of all the available settings, their
default values, and what they control.
- The individual `.conf` files in `health.d/`. These health entity files are organized by the type of metric they are
performing calculations on or their associated collector. You should edit these files using the `edit-config`
@@ -52,7 +56,7 @@ Netdata parses the following lines. Beneath the table is an in-depth explanation
- The `every` line is **required** if not using `lookup`.
- Each entity **must** have at least one of the following lines: `lookup`, `calc`, `warn`, or `crit`.
- A few lines use space-separated lists to define how the entity behaves. You can use `*` as a wildcard or prefix with
- `!` for a negative match. Order is important, too! See our [simple patterns docs](/libnetdata/simple_pattern/README.md) for
+ `!` for a negative match. Order is important, too! See our [simple patterns docs](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) for
more examples.
- Lines terminated by a `\` are spliced together with the next line. The backslash is removed and the following line is
joined with the current one. No space is inserted, so you may split a line anywhere, even in the middle of a word.
@@ -236,7 +240,7 @@ hosts: server1 server2 database* !redis3 redis*
#### Alarm line `plugin`
The `plugin` line filters which plugin within the context this alarm should apply to. The value is a space-separated
-list of [simple patterns](/libnetdata/simple_pattern/README.md). For example,
+list of [simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md). For example,
you can create a filter for an alarm that applies specifically to `python.d.plugin`:
```yaml
@@ -250,7 +254,7 @@ comprehensive example using both.
#### Alarm line `module`
The `module` line filters which module within the context this alarm should apply to. The value is a space-separated
-list of [simple patterns](/libnetdata/simple_pattern/README.md). For
+list of [simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md). For
example, you can create an alarm that applies only on the `isc_dhcpd` module started by `python.d.plugin`:
```yaml
@@ -262,7 +266,7 @@ module: isc_dhcpd
The `charts` line filters which chart this alarm should apply to. It is only available on entities using the
[`template`](#alarm-line-alarm-or-template) line.
-The value is a space-separated list of [simple patterns](/libnetdata/simple_pattern/README.md). For
+The value is a space-separated list of [simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md). For
example, a template that applies to `disk.svctm` (Average Service Time) context, but excludes the disk `sdb` from alarms:
```yaml
@@ -276,7 +280,7 @@ template: disk_svctm_alarm
The `families` line, used only alongside templates, filters which families within the context this alarm should apply
to. The value is a space-separated list.
-The value is a space-separate list of simple patterns. See our [simple patterns docs](/libnetdata/simple_pattern/README.md) for
+The value is a space-separated list of simple patterns. See our [simple patterns docs](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) for
some examples.
For example, you can create a template on the `disk.io` context, but filter it to only the `sda` and `sdb` families:
@@ -295,7 +299,7 @@ The format is:
lookup: METHOD AFTER [at BEFORE] [every DURATION] [OPTIONS] [of DIMENSIONS] [foreach DIMENSIONS]
```
-Everything is the same with [badges](/web/api/badges/README.md). In short:
+Everything is the same with [badges](https://github.com/netdata/netdata/blob/master/web/api/badges/README.md). In short:
- `METHOD` is one of `average`, `min`, `max`, `sum`, `incremental-sum`.
This is required.
@@ -312,7 +316,7 @@ Everything is the same with [badges](/web/api/badges/README.md). In short:
above too).
- `OPTIONS` is a space separated list of `percentage`, `absolute`, `min2max`, `unaligned`,
- `match-ids`, `match-names`. Check the [badges](/web/api/badges/README.md) documentation for more info.
+ `match-ids`, `match-names`. Check the [badges](https://github.com/netdata/netdata/blob/master/web/api/badges/README.md) documentation for more info.
- `of DIMENSIONS` is optional and has to be the last parameter. Dimensions have to be separated
by `,` or `|`. The space characters found in dimensions will be kept as-is (a few dimensions
@@ -499,7 +503,7 @@ good idea to tell Netdata to not clear the notification, by using the `no-clear-
#### Alarm line `host labels`
-Defines the list of labels present on a host. See our [host labels guide](/docs/guides/using-host-labels.md) for
+Defines the list of labels present on a host. See our [host labels guide](https://github.com/netdata/netdata/blob/master/docs/guides/using-host-labels.md) for
an explanation of host labels and how to implement them.
For example, let's suppose that `netdata.conf` is configured with the following labels:
@@ -532,7 +536,7 @@ that will be applied to all hosts installed in the last decade with the followin
host labels: installed = 201*
```
-See our [simple patterns docs](/libnetdata/simple_pattern/README.md) for more examples.
+See our [simple patterns docs](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) for more examples.
#### Alarm line `info`
@@ -548,13 +552,13 @@ alert information. Current variables supported are:
| variable | description |
| ---------| ----------- |
-| $family | Will be replaced by the family instance for the alert (e.g. eth0) |
-| $label: | Followed by a chart label name, this will replace the variable with the chart label's value |
+| ${family} | Will be replaced by the family instance for the alert (e.g. eth0) |
+| ${label:LABEL_NAME} | The variable will be replaced with the value of the label |
For example, an info field like the following:
```yaml
-info: average inbound utilization for the network interface $family over the last minute
+info: average inbound utilization for the network interface ${family} over the last minute
```
Will be rendered on the alert acting on interface `eth0` as:
@@ -567,7 +571,7 @@ An alert acting on a chart that has a chart label named e.g. `target`, with a va
can be enriched as follows:
```yaml
-info: average ratio of HTTP responses with unexpected status over the last 5 minutes for the site $label:target
+info: average ratio of HTTP responses with unexpected status over the last 5 minutes for the site ${label:target}
```
Will become:
@@ -647,15 +651,15 @@ You can find all the variables that can be used for a given chart, using
Agent dashboard. For example, [variables for the `system.cpu` chart of the
registry](https://registry.my-netdata.io/api/v1/alarm_variables?chart=system.cpu).
-> If you don't know how to find the CHART_NAME, you can read about it [here](/web/README.md#charts).
+> If you don't know how to find the CHART_NAME, you can read about it [here](https://github.com/netdata/netdata/blob/master/web/README.md#charts).
Netdata supports 3 internal indexes for variables that will be used in health monitoring.
The variables below can be used in both chart alarms and context templates.
Although the `alarm_variables` link shows you variables for a particular chart, the same variables can also be used in
-templates for charts belonging to a given [context](/web/README.md#contexts). The reason is that all charts of a given
-context are essentially identical, with the only difference being the [family](/web/README.md#families) that
+templates for charts belonging to a given [context](https://github.com/netdata/netdata/blob/master/web/README.md#contexts). The reason is that all charts of a given
+context are essentially identical, with the only difference being the [family](https://github.com/netdata/netdata/blob/master/web/README.md#families) that
identifies a particular hardware or software instance. Charts and templates do not apply to specific families anyway,
unless you explicitly limit an alarm with the [alarm line `families`](#alarm-line-families).
@@ -995,7 +999,7 @@ The `lookup` line will use the `anomaly_rate` dimension of the `anomaly_detectio
## Troubleshooting
-You can compile Netdata with [debugging](/daemon/README.md#debugging) and then set in `netdata.conf`:
+You can compile Netdata with [debugging](https://github.com/netdata/netdata/blob/master/daemon/README.md#debugging) and then set in `netdata.conf`:
```yaml
[global]
@@ -1018,6 +1022,6 @@ expression.
It's currently not possible to schedule notifications from within the alarm template. For those scenarios where you need
to temporarily disable notifications (for instance when running backups triggers a disk alert) you can disable or silence
notifications at runtime. The health checks can be controlled at runtime via the [health management
-api](/web/api/health/README.md).
+api](https://github.com/netdata/netdata/blob/master/web/api/health/README.md).
diff --git a/health/health.c b/health/health.c
index 3784e0f31..b34f54ab5 100644
--- a/health/health.c
+++ b/health/health.c
@@ -159,9 +159,10 @@ static bool prepare_command(BUFFER *wb,
unsigned int default_health_enabled = 1;
char *silencers_filename;
+SIMPLE_PATTERN *conf_enabled_alarms = NULL;
// the queue of executed alarm notifications that haven't been waited for yet
-static __thread struct {
+static struct {
ALARM_ENTRY *head; // oldest
ALARM_ENTRY *tail; // latest
} alarm_notifications_in_progress = {NULL, NULL};
@@ -301,7 +302,7 @@ void health_init(void) {
* @param host the structure of the host that the function will reload the configuration.
*/
static void health_reload_host(RRDHOST *host) {
- if(unlikely(!host->health_enabled) && !rrdhost_flag_check(host, RRDHOST_FLAG_INITIALIZED_HEALTH))
+ if(unlikely(!host->health.health_enabled) && !rrdhost_flag_check(host, RRDHOST_FLAG_INITIALIZED_HEALTH))
return;
log_health("[%s]: Reloading health.", rrdhost_hostname(host));
@@ -345,7 +346,6 @@ static void health_reload_host(RRDHOST *host) {
rrdcalctemplate_link_matching_templates_to_rrdset(st);
}
rrdset_foreach_done(st);
- host->aclk_alert_reloaded = 1;
}
/**
@@ -363,6 +363,12 @@ void health_reload(void) {
health_reload_host(host);
rrd_unlock();
+
+#ifdef ENABLE_ACLK
+ if (netdata_cloud_setting) {
+ aclk_alert_reloaded = 1;
+ }
+#endif
}
// ----------------------------------------------------------------------------
@@ -444,8 +450,8 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) {
log_health("[%s]: Sending notification for alarm '%s.%s' status %s.", rrdhost_hostname(host), ae_chart_name(ae), ae_name(ae), rrdcalc_status2string(ae->new_status));
- const char *exec = (ae->exec) ? ae_exec(ae) : string2str(host->health_default_exec);
- const char *recipient = (ae->recipient) ? ae_recipient(ae) : string2str(host->health_default_recipient);
+ const char *exec = (ae->exec) ? ae_exec(ae) : string2str(host->health.health_default_exec);
+ const char *recipient = (ae->recipient) ? ae_recipient(ae) : string2str(host->health.health_default_recipient);
int n_warn=0, n_crit=0;
RRDCALC *rc;
@@ -453,8 +459,8 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) {
BUFFER *warn_alarms, *crit_alarms;
active_alerts_t *active_alerts = callocz(ACTIVE_ALARMS_LIST_EXAMINE, sizeof(active_alerts_t));
- warn_alarms = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
- crit_alarms = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE);
+ warn_alarms = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE, &netdata_buffers_statistics.buffers_health);
+ crit_alarms = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE, &netdata_buffers_statistics.buffers_health);
foreach_rrdcalc_in_rrdhost_read(host, rc) {
if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
@@ -511,7 +517,7 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) {
char *edit_command = ae->source ? health_edit_command_from_source(ae_source(ae)) : strdupz("UNKNOWN=0=UNKNOWN");
- BUFFER *wb = buffer_create(8192);
+ BUFFER *wb = buffer_create(8192, &netdata_buffers_statistics.buffers_health);
bool ok = prepare_command(wb,
exec,
recipient,
@@ -692,8 +698,8 @@ static inline int rrdcalc_isrunnable(RRDCALC *rc, time_t now, time_t *next_run)
}
int update_every = rc->rrdset->update_every;
- time_t first = rrdset_first_entry_t(rc->rrdset);
- time_t last = rrdset_last_entry_t(rc->rrdset);
+ time_t first = rrdset_first_entry_s(rc->rrdset);
+ time_t last = rrdset_last_entry_s(rc->rrdset);
if(unlikely(now + update_every < first /* || now - update_every > last */)) {
debug(D_HEALTH
@@ -719,7 +725,7 @@ static inline int rrdcalc_isrunnable(RRDCALC *rc, time_t now, time_t *next_run)
}
static inline int check_if_resumed_from_suspension(void) {
- static __thread usec_t last_realtime = 0, last_monotonic = 0;
+ static usec_t last_realtime = 0, last_monotonic = 0;
usec_t realtime = now_realtime_usec(), monotonic = now_monotonic_usec();
int ret = 0;
@@ -735,25 +741,29 @@ static inline int check_if_resumed_from_suspension(void) {
return ret;
}
-static void health_thread_cleanup(void *ptr) {
+static void health_main_cleanup(void *ptr) {
worker_unregister();
- struct health_state *h = ptr;
- h->host->health_spawn = 0;
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
+ info("cleaning up...");
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
- netdata_thread_cancel(netdata_thread_self());
- log_health("[%s]: Health thread ended.", rrdhost_hostname(h->host));
- debug(D_HEALTH, "HEALTH %s: Health thread ended.", rrdhost_hostname(h->host));
+ log_health("Health thread ended.");
}
static void initialize_health(RRDHOST *host, int is_localhost) {
- if(!host->health_enabled || rrdhost_flag_check(host, RRDHOST_FLAG_INITIALIZED_HEALTH)) return;
+ if(!host->health.health_enabled ||
+ rrdhost_flag_check(host, RRDHOST_FLAG_INITIALIZED_HEALTH) ||
+ !service_running(SERVICE_HEALTH))
+ return;
+
rrdhost_flag_set(host, RRDHOST_FLAG_INITIALIZED_HEALTH);
log_health("[%s]: Initializing health.", rrdhost_hostname(host));
- host->health_default_warn_repeat_every = config_get_duration(CONFIG_SECTION_HEALTH, "default repeat warning", "never");
- host->health_default_crit_repeat_every = config_get_duration(CONFIG_SECTION_HEALTH, "default repeat critical", "never");
+ host->health.health_default_warn_repeat_every = config_get_duration(CONFIG_SECTION_HEALTH, "default repeat warning", "never");
+ host->health.health_default_crit_repeat_every = config_get_duration(CONFIG_SECTION_HEALTH, "default repeat critical", "never");
host->health_log.next_log_id = 1;
host->health_log.next_alarm_id = 1;
@@ -769,6 +779,8 @@ static void initialize_health(RRDHOST *host, int is_localhost) {
else
host->health_log.max = (unsigned int)n;
+ conf_enabled_alarms = simple_pattern_create(config_get(CONFIG_SECTION_HEALTH, "enabled alarms", "*"), NULL, SIMPLE_PATTERN_EXACT);
+
netdata_rwlock_init(&host->health_log.alarm_log_rwlock);
char filename[FILENAME_MAX + 1];
@@ -785,30 +797,15 @@ static void initialize_health(RRDHOST *host, int is_localhost) {
if(r != 0 && errno != EEXIST)
error("Host '%s': cannot create directory '%s'", rrdhost_hostname(host), filename);
}
- snprintfz(filename, FILENAME_MAX, "%s/health/health-log.db", host->varlib_dir);
- host->health_log_filename = strdupz(filename);
snprintfz(filename, FILENAME_MAX, "%s/alarm-notify.sh", netdata_configured_primary_plugins_dir);
- host->health_default_exec = string_strdupz(config_get(CONFIG_SECTION_HEALTH, "script to execute on alarm", filename));
- host->health_default_recipient = string_strdupz("root");
-
- if (!file_is_migrated(host->health_log_filename)) {
- int rc = sql_create_health_log_table(host);
- if (unlikely(rc)) {
- log_health("[%s]: Failed to create health log table in the database", rrdhost_hostname(host));
- health_alarm_log_load(host);
- health_alarm_log_open(host);
- }
- else {
- health_alarm_log_load(host);
- add_migrated_file(host->health_log_filename, 0);
- }
- } else {
- // TODO: This needs to go to the metadata thread
- // Health should wait before accessing the table (needs to be created by the metadata thread)
- sql_create_health_log_table(host);
- sql_health_alarm_log_load(host);
- }
+ host->health.health_default_exec = string_strdupz(config_get(CONFIG_SECTION_HEALTH, "script to execute on alarm", filename));
+ host->health.health_default_recipient = string_strdupz("root");
+
+ // TODO: This needs to go to the metadata thread
+ // Health should wait before accessing the table (needs to be created by the metadata thread)
+ sql_create_health_log_table(host);
+ sql_health_alarm_log_load(host);
// ------------------------------------------------------------------------
// load health configuration
@@ -828,16 +825,14 @@ static void initialize_health(RRDHOST *host, int is_localhost) {
//Discard alarms with labels that do not apply to host
rrdcalc_delete_alerts_not_matching_host_labels_from_this_host(host);
-
- health_silencers_init();
}
-static void health_sleep(time_t next_run, unsigned int loop __maybe_unused, RRDHOST *host) {
+static void health_sleep(time_t next_run, unsigned int loop __maybe_unused) {
time_t now = now_realtime_sec();
if(now < next_run) {
worker_is_idle();
debug(D_HEALTH, "Health monitoring iteration no %u done. Next iteration in %d secs", loop, (int) (next_run - now));
- while (now < next_run && host->health_enabled && !netdata_exit) {
+ while (now < next_run && service_running(SERVICE_HEALTH)) {
sleep_usec(USEC_PER_SEC);
now = now_realtime_sec();
}
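
The reworked `health_sleep()` above drops the per-host argument and instead polls a global service flag once per second while waiting for the next run. The same wait pattern, reduced to a self-contained sketch with stand-in names (`keep_running`, plain `sleep()`) rather than Netdata's internals:

```c
/* Self-contained sketch of the "sleep until next_run, but wake early on shutdown"
 * pattern used by the reworked health_sleep(). Names are stand-ins, not Netdata APIs. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static volatile bool keep_running = true;   /* stand-in for service_running(SERVICE_HEALTH) */

static void sleep_until(time_t next_run) {
    time_t now = time(NULL);
    while (now < next_run && keep_running) {
        sleep(1);              /* re-check the flag once per second, like sleep_usec(USEC_PER_SEC) */
        now = time(NULL);
    }
}

int main(void) {
    sleep_until(time(NULL) + 3);   /* waits ~3 seconds unless keep_running is cleared elsewhere */
    printf("woke up\n");
    return 0;
}
```
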
@@ -995,555 +990,567 @@ void *health_main(void *ptr) {
worker_register_job_name(WORKER_HEALTH_JOB_DELAYED_INIT_RRDSET, "rrdset init");
worker_register_job_name(WORKER_HEALTH_JOB_DELAYED_INIT_RRDDIM, "rrddim init");
- struct health_state *h = ptr;
- netdata_thread_cleanup_push(health_thread_cleanup, ptr);
-
- RRDHOST *host = h->host;
- initialize_health(host, host == localhost);
+ netdata_thread_cleanup_push(health_main_cleanup, ptr);
int min_run_every = (int)config_get_number(CONFIG_SECTION_HEALTH, "run at least every seconds", 10);
if(min_run_every < 1) min_run_every = 1;
- int cleanup_sql_every_loop = 7200 / min_run_every;
-
- time_t now = now_realtime_sec();
time_t hibernation_delay = config_get_number(CONFIG_SECTION_HEALTH, "postpone alarms during hibernation for seconds", 60);
bool health_running_logged = false;
- rrdhost_rdlock(host); //CHECK
- rrdcalc_delete_alerts_not_matching_host_labels_from_this_host(host);
- rrdhost_unlock(host);
+ rrdcalc_delete_alerts_not_matching_host_labels_from_all_hosts();
unsigned int loop = 0;
#ifdef ENABLE_ACLK
unsigned int marked_aclk_reload_loop = 0;
#endif
- while(!netdata_exit && host->health_enabled) {
+ while(service_running(SERVICE_HEALTH)) {
loop++;
debug(D_HEALTH, "Health monitoring iteration no %u started", loop);
- now = now_realtime_sec();
+ time_t now = now_realtime_sec();
int runnable = 0, apply_hibernation_delay = 0;
time_t next_run = now + min_run_every;
RRDCALC *rc;
+ RRDHOST *host;
if (unlikely(check_if_resumed_from_suspension())) {
apply_hibernation_delay = 1;
log_health(
- "[%s]: Postponing alarm checks for %"PRId64" seconds, "
+ "Postponing alarm checks for %"PRId64" seconds, "
"because it seems that the system was just resumed from suspension.",
- rrdhost_hostname(host),
(int64_t)hibernation_delay);
}
if (unlikely(silencers->all_alarms && silencers->stype == STYPE_DISABLE_ALARMS)) {
- static __thread int logged=0;
+ static int logged=0;
if (!logged) {
- log_health("[%s]: Skipping health checks, because all alarms are disabled via a %s command.",
- rrdhost_hostname(host),
+ log_health("Skipping health checks, because all alarms are disabled via a %s command.",
HEALTH_CMDAPI_CMD_DISABLEALL);
logged = 1;
}
}
#ifdef ENABLE_ACLK
- if (host->aclk_alert_reloaded && !marked_aclk_reload_loop)
+ if (aclk_alert_reloaded && !marked_aclk_reload_loop)
marked_aclk_reload_loop = loop;
#endif
- if (unlikely(apply_hibernation_delay)) {
- log_health(
- "[%s]: Postponing health checks for %"PRId64" seconds.",
- rrdhost_hostname(host),
- (int64_t)hibernation_delay);
-
- host->health_delay_up_to = now + hibernation_delay;
- next_run = now + hibernation_delay;
- health_sleep(next_run, loop, host);
- }
+ worker_is_busy(WORKER_HEALTH_JOB_RRD_LOCK);
+ rrd_rdlock();
- if (unlikely(host->health_delay_up_to)) {
- if (unlikely(now < host->health_delay_up_to)) {
- next_run = host->health_delay_up_to;
- health_sleep(next_run, loop, host);
- continue;
- }
+ rrdhost_foreach_read(host) {
- log_health("[%s]: Resuming health checks after delay.", rrdhost_hostname(host));
- host->health_delay_up_to = 0;
- }
+ if(unlikely(!service_running(SERVICE_HEALTH)))
+ break;
- // wait until cleanup of obsolete charts on children is complete
- if (host != localhost) {
- if (unlikely(host->trigger_chart_obsoletion_check == 1)) {
- log_health("[%s]: Waiting for chart obsoletion check.", rrdhost_hostname(host));
- health_sleep(next_run, loop, host);
+ if (unlikely(!host->health.health_enabled))
continue;
- }
- }
- if (!health_running_logged) {
- log_health("[%s]: Health is running.", rrdhost_hostname(host));
- health_running_logged = true;
- }
-
- if(likely(!host->health_log_fp) && (loop == 1 || loop % cleanup_sql_every_loop == 0))
- sql_health_alarm_log_cleanup(host);
+ if (unlikely(!rrdhost_flag_check(host, RRDHOST_FLAG_INITIALIZED_HEALTH))) {
+ rrd_unlock();
+ initialize_health(host, host == localhost);
+ rrd_rdlock();
+ }
- health_execute_delayed_initializations(host);
+ health_execute_delayed_initializations(host);
- worker_is_busy(WORKER_HEALTH_JOB_HOST_LOCK);
+ rrdcalc_delete_alerts_not_matching_host_labels_from_this_host(host);
- // the first loop is to lookup values from the db
- foreach_rrdcalc_in_rrdhost_read(host, rc) {
+ if (unlikely(apply_hibernation_delay)) {
+ log_health(
+ "[%s]: Postponing health checks for %"PRId64" seconds.",
+ rrdhost_hostname(host),
+ (int64_t)hibernation_delay);
- rrdcalc_update_info_using_rrdset_labels(rc);
+ host->health.health_delay_up_to = now + hibernation_delay;
+ }
- if (update_disabled_silenced(host, rc))
- continue;
+ if (unlikely(host->health.health_delay_up_to)) {
+ if (unlikely(now < host->health.health_delay_up_to)) {
+ continue;
+ }
- // create an alert removed event if the chart is obsolete and
- // has stopped being collected for 60 seconds
- if (unlikely(rc->rrdset && rc->status != RRDCALC_STATUS_REMOVED &&
- rrdset_flag_check(rc->rrdset, RRDSET_FLAG_OBSOLETE) &&
- now > (rc->rrdset->last_collected_time.tv_sec + 60))) {
- if (!rrdcalc_isrepeating(rc)) {
- worker_is_busy(WORKER_HEALTH_JOB_ALARM_LOG_ENTRY);
- time_t now = now_realtime_sec();
-
- ALARM_ENTRY *ae = health_create_alarm_entry(
- host,
- rc->id,
- rc->next_event_id++,
- rc->config_hash_id,
- now,
- rc->name,
- rc->rrdset->id,
- rc->rrdset->context,
- rc->rrdset->family,
- rc->classification,
- rc->component,
- rc->type,
- rc->exec,
- rc->recipient,
- now - rc->last_status_change,
- rc->value,
- NAN,
- rc->status,
- RRDCALC_STATUS_REMOVED,
- rc->source,
- rc->units,
- rc->info,
- 0,
- rrdcalc_isrepeating(rc)?HEALTH_ENTRY_FLAG_IS_REPEATING:0);
-
- if (ae) {
- health_alarm_log_add_entry(host, ae);
- rc->old_status = rc->status;
- rc->status = RRDCALC_STATUS_REMOVED;
- rc->last_status_change = now;
- rc->last_updated = now;
- rc->value = NAN;
+ log_health("[%s]: Resuming health checks after delay.", rrdhost_hostname(host));
+ host->health.health_delay_up_to = 0;
+ }
-#ifdef ENABLE_ACLK
- if (netdata_cloud_setting && likely(!host->aclk_alert_reloaded))
- sql_queue_alarm_to_aclk(host, ae, 1);
-#endif
- }
+ // wait until cleanup of obsolete charts on children is complete
+ if (host != localhost) {
+ if (unlikely(host->trigger_chart_obsoletion_check == 1)) {
+ log_health("[%s]: Waiting for chart obsoletion check.", rrdhost_hostname(host));
+ continue;
}
}
- if (unlikely(!rrdcalc_isrunnable(rc, now, &next_run))) {
- if (unlikely(rc->run_flags & RRDCALC_FLAG_RUNNABLE))
- rc->run_flags &= ~RRDCALC_FLAG_RUNNABLE;
- continue;
+ if (!health_running_logged) {
+ log_health("[%s]: Health is running.", rrdhost_hostname(host));
+ health_running_logged = true;
}
- runnable++;
- rc->old_value = rc->value;
- rc->run_flags |= RRDCALC_FLAG_RUNNABLE;
+ worker_is_busy(WORKER_HEALTH_JOB_HOST_LOCK);
- // ------------------------------------------------------------
- // if there is database lookup, do it
+ // the first loop is to lookup values from the db
+ foreach_rrdcalc_in_rrdhost_read(host, rc) {
- if (unlikely(RRDCALC_HAS_DB_LOOKUP(rc))) {
- worker_is_busy(WORKER_HEALTH_JOB_DB_QUERY);
+ if(unlikely(!service_running(SERVICE_HEALTH)))
+ break;
- /* time_t old_db_timestamp = rc->db_before; */
- int value_is_null = 0;
+ rrdcalc_update_info_using_rrdset_labels(rc);
- int ret = rrdset2value_api_v1(rc->rrdset, NULL, &rc->value, rrdcalc_dimensions(rc), 1,
- rc->after, rc->before, rc->group, NULL,
- 0, rc->options,
- &rc->db_after,&rc->db_before,
- NULL, NULL, NULL,
- &value_is_null, NULL, 0, 0,
- QUERY_SOURCE_HEALTH);
+ if (update_disabled_silenced(host, rc))
+ continue;
- if (unlikely(ret != 200)) {
- // database lookup failed
- rc->value = NAN;
- rc->run_flags |= RRDCALC_FLAG_DB_ERROR;
+ // create an alert removed event if the chart is obsolete and
+ // has stopped being collected for 60 seconds
+ if (unlikely(rc->rrdset && rc->status != RRDCALC_STATUS_REMOVED &&
+ rrdset_flag_check(rc->rrdset, RRDSET_FLAG_OBSOLETE) &&
+ now > (rc->rrdset->last_collected_time.tv_sec + 60))) {
+ if (!rrdcalc_isrepeating(rc)) {
+ worker_is_busy(WORKER_HEALTH_JOB_ALARM_LOG_ENTRY);
+ time_t now = now_realtime_sec();
+
+ ALARM_ENTRY *ae = health_create_alarm_entry(
+ host,
+ rc->id,
+ rc->next_event_id++,
+ rc->config_hash_id,
+ now,
+ rc->name,
+ rc->rrdset->id,
+ rc->rrdset->context,
+ rc->rrdset->family,
+ rc->classification,
+ rc->component,
+ rc->type,
+ rc->exec,
+ rc->recipient,
+ now - rc->last_status_change,
+ rc->value,
+ NAN,
+ rc->status,
+ RRDCALC_STATUS_REMOVED,
+ rc->source,
+ rc->units,
+ rc->info,
+ 0,
+ rrdcalc_isrepeating(rc)?HEALTH_ENTRY_FLAG_IS_REPEATING:0);
+
+ if (ae) {
+ health_alarm_log_add_entry(host, ae);
+ rc->old_status = rc->status;
+ rc->status = RRDCALC_STATUS_REMOVED;
+ rc->last_status_change = now;
+ rc->last_updated = now;
+ rc->value = NAN;
- debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database lookup returned error %d",
- rrdhost_hostname(host), rrdcalc_chart_name(rc), rrdcalc_name(rc), ret
- );
- } else
- rc->run_flags &= ~RRDCALC_FLAG_DB_ERROR;
-
- /* - RRDCALC_FLAG_DB_STALE not currently used
- if (unlikely(old_db_timestamp == rc->db_before)) {
- // database is stale
-
- debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name);
-
- if (unlikely(!(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE))) {
- rc->rrdcalc_flags |= RRDCALC_FLAG_DB_STALE;
- error("Health on host '%s', alarm '%s.%s': database is stale", host->hostname, rc->chart?rc->chart:"NOCHART", rc->name);
- }
- }
- else if (unlikely(rc->rrdcalc_flags & RRDCALC_FLAG_DB_STALE))
- rc->rrdcalc_flags &= ~RRDCALC_FLAG_DB_STALE;
- */
-
- if (unlikely(value_is_null)) {
- // collected value is null
- rc->value = NAN;
- rc->run_flags |= RRDCALC_FLAG_DB_NAN;
-
- debug(D_HEALTH,
- "Health on host '%s', alarm '%s.%s': database lookup returned empty value (possibly value is not collected yet)",
- rrdhost_hostname(host), rrdcalc_chart_name(rc), rrdcalc_name(rc)
- );
- } else
- rc->run_flags &= ~RRDCALC_FLAG_DB_NAN;
+#ifdef ENABLE_ACLK
+ if (netdata_cloud_setting && likely(!aclk_alert_reloaded))
+ sql_queue_alarm_to_aclk(host, ae, 1);
+#endif
+ }
+ }
+ }
- debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database lookup gave value " NETDATA_DOUBLE_FORMAT,
- rrdhost_hostname(host), rrdcalc_chart_name(rc), rrdcalc_name(rc), rc->value
- );
- }
+ if (unlikely(!rrdcalc_isrunnable(rc, now, &next_run))) {
+ if (unlikely(rc->run_flags & RRDCALC_FLAG_RUNNABLE))
+ rc->run_flags &= ~RRDCALC_FLAG_RUNNABLE;
+ continue;
+ }
- // ------------------------------------------------------------
- // if there is calculation expression, run it
+ runnable++;
+ rc->old_value = rc->value;
+ rc->run_flags |= RRDCALC_FLAG_RUNNABLE;
- if (unlikely(rc->calculation)) {
- worker_is_busy(WORKER_HEALTH_JOB_CALC_EVAL);
+ // ------------------------------------------------------------
+ // if there is database lookup, do it
- if (unlikely(!expression_evaluate(rc->calculation))) {
- // calculation failed
- rc->value = NAN;
- rc->run_flags |= RRDCALC_FLAG_CALC_ERROR;
+ if (unlikely(RRDCALC_HAS_DB_LOOKUP(rc))) {
+ worker_is_busy(WORKER_HEALTH_JOB_DB_QUERY);
- debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' failed: %s",
- rrdhost_hostname(host), rrdcalc_chart_name(rc), rrdcalc_name(rc),
- rc->calculation->parsed_as, buffer_tostring(rc->calculation->error_msg)
- );
- } else {
- rc->run_flags &= ~RRDCALC_FLAG_CALC_ERROR;
+ /* time_t old_db_timestamp = rc->db_before; */
+ int value_is_null = 0;
- debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' gave value "
- NETDATA_DOUBLE_FORMAT
- ": %s (source: %s)", rrdhost_hostname(host), rrdcalc_chart_name(rc), rrdcalc_name(rc),
- rc->calculation->parsed_as, rc->calculation->result,
- buffer_tostring(rc->calculation->error_msg), rrdcalc_source(rc)
- );
+ int ret = rrdset2value_api_v1(rc->rrdset, NULL, &rc->value, rrdcalc_dimensions(rc), 1,
+ rc->after, rc->before, rc->group, NULL,
+ 0, rc->options,
+ &rc->db_after,&rc->db_before,
+ NULL, NULL, NULL,
+ &value_is_null, NULL, 0, 0,
+ QUERY_SOURCE_HEALTH, STORAGE_PRIORITY_LOW);
- rc->value = rc->calculation->result;
- }
- }
- }
- foreach_rrdcalc_in_rrdhost_done(rc);
+ if (unlikely(ret != 200)) {
+ // database lookup failed
+ rc->value = NAN;
+ rc->run_flags |= RRDCALC_FLAG_DB_ERROR;
- if (unlikely(runnable && !netdata_exit)) {
- foreach_rrdcalc_in_rrdhost_read(host, rc) {
- if (unlikely(!(rc->run_flags & RRDCALC_FLAG_RUNNABLE)))
- continue;
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database lookup returned error %d",
+ rrdhost_hostname(host), rrdcalc_chart_name(rc), rrdcalc_name(rc), ret
+ );
+ } else
+ rc->run_flags &= ~RRDCALC_FLAG_DB_ERROR;
- if (rc->run_flags & RRDCALC_FLAG_DISABLED) {
- continue;
+ if (unlikely(value_is_null)) {
+ // collected value is null
+ rc->value = NAN;
+ rc->run_flags |= RRDCALC_FLAG_DB_NAN;
+
+ debug(D_HEALTH,
+ "Health on host '%s', alarm '%s.%s': database lookup returned empty value (possibly value is not collected yet)",
+ rrdhost_hostname(host), rrdcalc_chart_name(rc), rrdcalc_name(rc)
+ );
+ } else
+ rc->run_flags &= ~RRDCALC_FLAG_DB_NAN;
+
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': database lookup gave value " NETDATA_DOUBLE_FORMAT,
+ rrdhost_hostname(host), rrdcalc_chart_name(rc), rrdcalc_name(rc), rc->value
+ );
}
- RRDCALC_STATUS warning_status = RRDCALC_STATUS_UNDEFINED;
- RRDCALC_STATUS critical_status = RRDCALC_STATUS_UNDEFINED;
- // --------------------------------------------------------
- // check the warning expression
+ // ------------------------------------------------------------
+ // if there is calculation expression, run it
- if (likely(rc->warning)) {
- worker_is_busy(WORKER_HEALTH_JOB_WARNING_EVAL);
+ if (unlikely(rc->calculation)) {
+ worker_is_busy(WORKER_HEALTH_JOB_CALC_EVAL);
- if (unlikely(!expression_evaluate(rc->warning))) {
+ if (unlikely(!expression_evaluate(rc->calculation))) {
// calculation failed
- rc->run_flags |= RRDCALC_FLAG_WARN_ERROR;
+ rc->value = NAN;
+ rc->run_flags |= RRDCALC_FLAG_CALC_ERROR;
- debug(D_HEALTH,
- "Health on host '%s', alarm '%s.%s': warning expression failed with error: %s",
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' failed: %s",
rrdhost_hostname(host), rrdcalc_chart_name(rc), rrdcalc_name(rc),
- buffer_tostring(rc->warning->error_msg)
+ rc->calculation->parsed_as, buffer_tostring(rc->calculation->error_msg)
);
} else {
- rc->run_flags &= ~RRDCALC_FLAG_WARN_ERROR;
- debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': warning expression gave value "
+ rc->run_flags &= ~RRDCALC_FLAG_CALC_ERROR;
+
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': expression '%s' gave value "
NETDATA_DOUBLE_FORMAT
- ": %s (source: %s)", rrdhost_hostname(host), rrdcalc_chart_name(rc),
- rrdcalc_name(rc), rc->warning->result, buffer_tostring(rc->warning->error_msg), rrdcalc_source(rc)
+ ": %s (source: %s)", rrdhost_hostname(host), rrdcalc_chart_name(rc), rrdcalc_name(rc),
+ rc->calculation->parsed_as, rc->calculation->result,
+ buffer_tostring(rc->calculation->error_msg), rrdcalc_source(rc)
);
- warning_status = rrdcalc_value2status(rc->warning->result);
+
+ rc->value = rc->calculation->result;
}
}
+ }
+ foreach_rrdcalc_in_rrdhost_done(rc);
- // --------------------------------------------------------
- // check the critical expression
+ if (unlikely(runnable && service_running(SERVICE_HEALTH))) {
+ foreach_rrdcalc_in_rrdhost_read(host, rc) {
+ if(unlikely(!service_running(SERVICE_HEALTH)))
+ break;
- if (likely(rc->critical)) {
- worker_is_busy(WORKER_HEALTH_JOB_CRITICAL_EVAL);
+ if (unlikely(!(rc->run_flags & RRDCALC_FLAG_RUNNABLE)))
+ continue;
- if (unlikely(!expression_evaluate(rc->critical))) {
- // calculation failed
- rc->run_flags |= RRDCALC_FLAG_CRIT_ERROR;
+ if (rc->run_flags & RRDCALC_FLAG_DISABLED) {
+ continue;
+ }
+ RRDCALC_STATUS warning_status = RRDCALC_STATUS_UNDEFINED;
+ RRDCALC_STATUS critical_status = RRDCALC_STATUS_UNDEFINED;
+
+ // --------------------------------------------------------
+ // check the warning expression
+
+ if (likely(rc->warning)) {
+ worker_is_busy(WORKER_HEALTH_JOB_WARNING_EVAL);
+
+ if (unlikely(!expression_evaluate(rc->warning))) {
+ // calculation failed
+ rc->run_flags |= RRDCALC_FLAG_WARN_ERROR;
+
+ debug(D_HEALTH,
+ "Health on host '%s', alarm '%s.%s': warning expression failed with error: %s",
+ rrdhost_hostname(host), rrdcalc_chart_name(rc), rrdcalc_name(rc),
+ buffer_tostring(rc->warning->error_msg)
+ );
+ } else {
+ rc->run_flags &= ~RRDCALC_FLAG_WARN_ERROR;
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': warning expression gave value "
+ NETDATA_DOUBLE_FORMAT
+ ": %s (source: %s)", rrdhost_hostname(host), rrdcalc_chart_name(rc),
+ rrdcalc_name(rc), rc->warning->result, buffer_tostring(rc->warning->error_msg), rrdcalc_source(rc)
+ );
+ warning_status = rrdcalc_value2status(rc->warning->result);
+ }
+ }
- debug(D_HEALTH,
- "Health on host '%s', alarm '%s.%s': critical expression failed with error: %s",
- rrdhost_hostname(host), rrdcalc_chart_name(rc), rrdcalc_name(rc),
- buffer_tostring(rc->critical->error_msg)
- );
- } else {
- rc->run_flags &= ~RRDCALC_FLAG_CRIT_ERROR;
- debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': critical expression gave value "
- NETDATA_DOUBLE_FORMAT
- ": %s (source: %s)", rrdhost_hostname(host), rrdcalc_chart_name(rc),
- rrdcalc_name(rc), rc->critical->result, buffer_tostring(rc->critical->error_msg),
- rrdcalc_source(rc)
- );
- critical_status = rrdcalc_value2status(rc->critical->result);
+ // --------------------------------------------------------
+ // check the critical expression
+
+ if (likely(rc->critical)) {
+ worker_is_busy(WORKER_HEALTH_JOB_CRITICAL_EVAL);
+
+ if (unlikely(!expression_evaluate(rc->critical))) {
+ // calculation failed
+ rc->run_flags |= RRDCALC_FLAG_CRIT_ERROR;
+
+ debug(D_HEALTH,
+ "Health on host '%s', alarm '%s.%s': critical expression failed with error: %s",
+ rrdhost_hostname(host), rrdcalc_chart_name(rc), rrdcalc_name(rc),
+ buffer_tostring(rc->critical->error_msg)
+ );
+ } else {
+ rc->run_flags &= ~RRDCALC_FLAG_CRIT_ERROR;
+ debug(D_HEALTH, "Health on host '%s', alarm '%s.%s': critical expression gave value "
+ NETDATA_DOUBLE_FORMAT
+ ": %s (source: %s)", rrdhost_hostname(host), rrdcalc_chart_name(rc),
+ rrdcalc_name(rc), rc->critical->result, buffer_tostring(rc->critical->error_msg),
+ rrdcalc_source(rc)
+ );
+ critical_status = rrdcalc_value2status(rc->critical->result);
+ }
}
- }
- // --------------------------------------------------------
- // decide the final alarm status
+ // --------------------------------------------------------
+ // decide the final alarm status
- RRDCALC_STATUS status = RRDCALC_STATUS_UNDEFINED;
+ RRDCALC_STATUS status = RRDCALC_STATUS_UNDEFINED;
- switch (warning_status) {
- case RRDCALC_STATUS_CLEAR:
- status = RRDCALC_STATUS_CLEAR;
- break;
+ switch (warning_status) {
+ case RRDCALC_STATUS_CLEAR:
+ status = RRDCALC_STATUS_CLEAR;
+ break;
- case RRDCALC_STATUS_RAISED:
- status = RRDCALC_STATUS_WARNING;
- break;
+ case RRDCALC_STATUS_RAISED:
+ status = RRDCALC_STATUS_WARNING;
+ break;
- default:
- break;
- }
+ default:
+ break;
+ }
- switch (critical_status) {
- case RRDCALC_STATUS_CLEAR:
- if (status == RRDCALC_STATUS_UNDEFINED)
- status = RRDCALC_STATUS_CLEAR;
- break;
+ switch (critical_status) {
+ case RRDCALC_STATUS_CLEAR:
+ if (status == RRDCALC_STATUS_UNDEFINED)
+ status = RRDCALC_STATUS_CLEAR;
+ break;
- case RRDCALC_STATUS_RAISED:
- status = RRDCALC_STATUS_CRITICAL;
- break;
+ case RRDCALC_STATUS_RAISED:
+ status = RRDCALC_STATUS_CRITICAL;
+ break;
- default:
- break;
- }
+ default:
+ break;
+ }
- // --------------------------------------------------------
- // check if the new status and the old differ
+ // --------------------------------------------------------
+ // check if the new status and the old differ
- if (status != rc->status) {
- worker_is_busy(WORKER_HEALTH_JOB_ALARM_LOG_ENTRY);
- int delay = 0;
+ if (status != rc->status) {
+ worker_is_busy(WORKER_HEALTH_JOB_ALARM_LOG_ENTRY);
+ int delay = 0;
- // apply trigger hysteresis
+ // apply trigger hysteresis
- if (now > rc->delay_up_to_timestamp) {
- rc->delay_up_current = rc->delay_up_duration;
- rc->delay_down_current = rc->delay_down_duration;
- rc->delay_last = 0;
- rc->delay_up_to_timestamp = 0;
- } else {
- rc->delay_up_current = (int) (rc->delay_up_current * rc->delay_multiplier);
- if (rc->delay_up_current > rc->delay_max_duration)
- rc->delay_up_current = rc->delay_max_duration;
+ if (now > rc->delay_up_to_timestamp) {
+ rc->delay_up_current = rc->delay_up_duration;
+ rc->delay_down_current = rc->delay_down_duration;
+ rc->delay_last = 0;
+ rc->delay_up_to_timestamp = 0;
+ } else {
+ rc->delay_up_current = (int) (rc->delay_up_current * rc->delay_multiplier);
+ if (rc->delay_up_current > rc->delay_max_duration)
+ rc->delay_up_current = rc->delay_max_duration;
- rc->delay_down_current = (int) (rc->delay_down_current * rc->delay_multiplier);
- if (rc->delay_down_current > rc->delay_max_duration)
- rc->delay_down_current = rc->delay_max_duration;
- }
+ rc->delay_down_current = (int) (rc->delay_down_current * rc->delay_multiplier);
+ if (rc->delay_down_current > rc->delay_max_duration)
+ rc->delay_down_current = rc->delay_max_duration;
+ }
- if (status > rc->status)
- delay = rc->delay_up_current;
- else
- delay = rc->delay_down_current;
-
- // COMMENTED: because we do need to send raising alarms
- // if(now + delay < rc->delay_up_to_timestamp)
- // delay = (int)(rc->delay_up_to_timestamp - now);
-
- rc->delay_last = delay;
- rc->delay_up_to_timestamp = now + delay;
-
- ALARM_ENTRY *ae = health_create_alarm_entry(
- host,
- rc->id,
- rc->next_event_id++,
- rc->config_hash_id,
- now,
- rc->name,
- rc->rrdset->id,
- rc->rrdset->context,
- rc->rrdset->family,
- rc->classification,
- rc->component,
- rc->type,
- rc->exec,
- rc->recipient,
- now - rc->last_status_change,
- rc->old_value,
- rc->value,
- rc->status,
- status,
- rc->source,
- rc->units,
- rc->info,
- rc->delay_last,
- (
- ((rc->options & RRDCALC_OPTION_NO_CLEAR_NOTIFICATION)? HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION : 0) |
- ((rc->run_flags & RRDCALC_FLAG_SILENCED)? HEALTH_ENTRY_FLAG_SILENCED : 0) |
- (rrdcalc_isrepeating(rc)?HEALTH_ENTRY_FLAG_IS_REPEATING:0)
- )
- );
-
- health_alarm_log_add_entry(host, ae);
-
- log_health("[%s]: Alert event for [%s.%s], value [%s], status [%s].", rrdhost_hostname(host), ae_chart_name(ae), ae_name(ae), ae_new_value_string(ae), rrdcalc_status2string(ae->new_status));
-
- rc->last_status_change = now;
- rc->old_status = rc->status;
- rc->status = status;
- }
+ if (status > rc->status)
+ delay = rc->delay_up_current;
+ else
+ delay = rc->delay_down_current;
+
+ // COMMENTED: because we do need to send raising alarms
+ // if(now + delay < rc->delay_up_to_timestamp)
+ // delay = (int)(rc->delay_up_to_timestamp - now);
+
+ rc->delay_last = delay;
+ rc->delay_up_to_timestamp = now + delay;
+
+ ALARM_ENTRY *ae = health_create_alarm_entry(
+ host,
+ rc->id,
+ rc->next_event_id++,
+ rc->config_hash_id,
+ now,
+ rc->name,
+ rc->rrdset->id,
+ rc->rrdset->context,
+ rc->rrdset->family,
+ rc->classification,
+ rc->component,
+ rc->type,
+ rc->exec,
+ rc->recipient,
+ now - rc->last_status_change,
+ rc->old_value,
+ rc->value,
+ rc->status,
+ status,
+ rc->source,
+ rc->units,
+ rc->info,
+ rc->delay_last,
+ (
+ ((rc->options & RRDCALC_OPTION_NO_CLEAR_NOTIFICATION)? HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION : 0) |
+ ((rc->run_flags & RRDCALC_FLAG_SILENCED)? HEALTH_ENTRY_FLAG_SILENCED : 0) |
+ (rrdcalc_isrepeating(rc)?HEALTH_ENTRY_FLAG_IS_REPEATING:0)
+ )
+ );
- rc->last_updated = now;
- rc->next_update = now + rc->update_every;
+ health_alarm_log_add_entry(host, ae);
- if (next_run > rc->next_update)
- next_run = rc->next_update;
- }
- foreach_rrdcalc_in_rrdhost_done(rc);
+ log_health("[%s]: Alert event for [%s.%s], value [%s], status [%s].", rrdhost_hostname(host), ae_chart_name(ae), ae_name(ae), ae_new_value_string(ae), rrdcalc_status2string(ae->new_status));
- // process repeating alarms
- foreach_rrdcalc_in_rrdhost_read(host, rc) {
- int repeat_every = 0;
- if(unlikely(rrdcalc_isrepeating(rc) && rc->delay_up_to_timestamp <= now)) {
- if(unlikely(rc->status == RRDCALC_STATUS_WARNING)) {
- rc->run_flags &= ~RRDCALC_FLAG_RUN_ONCE;
- repeat_every = rc->warn_repeat_every;
- } else if(unlikely(rc->status == RRDCALC_STATUS_CRITICAL)) {
- rc->run_flags &= ~RRDCALC_FLAG_RUN_ONCE;
- repeat_every = rc->crit_repeat_every;
- } else if(unlikely(rc->status == RRDCALC_STATUS_CLEAR)) {
- if(!(rc->run_flags & RRDCALC_FLAG_RUN_ONCE)) {
- if(rc->old_status == RRDCALC_STATUS_CRITICAL) {
- repeat_every = 1;
- } else if (rc->old_status == RRDCALC_STATUS_WARNING) {
- repeat_every = 1;
+ rc->last_status_change = now;
+ rc->old_status = rc->status;
+ rc->status = status;
+ }
+
+ rc->last_updated = now;
+ rc->next_update = now + rc->update_every;
+
+ if (next_run > rc->next_update)
+ next_run = rc->next_update;
+ }
+ foreach_rrdcalc_in_rrdhost_done(rc);
+
+ // process repeating alarms
+ foreach_rrdcalc_in_rrdhost_read(host, rc) {
+ if(unlikely(!service_running(SERVICE_HEALTH)))
+ break;
+
+ int repeat_every = 0;
+ if(unlikely(rrdcalc_isrepeating(rc) && rc->delay_up_to_timestamp <= now)) {
+ if(unlikely(rc->status == RRDCALC_STATUS_WARNING)) {
+ rc->run_flags &= ~RRDCALC_FLAG_RUN_ONCE;
+ repeat_every = rc->warn_repeat_every;
+ } else if(unlikely(rc->status == RRDCALC_STATUS_CRITICAL)) {
+ rc->run_flags &= ~RRDCALC_FLAG_RUN_ONCE;
+ repeat_every = rc->crit_repeat_every;
+ } else if(unlikely(rc->status == RRDCALC_STATUS_CLEAR)) {
+ if(!(rc->run_flags & RRDCALC_FLAG_RUN_ONCE)) {
+ if(rc->old_status == RRDCALC_STATUS_CRITICAL) {
+ repeat_every = 1;
+ } else if (rc->old_status == RRDCALC_STATUS_WARNING) {
+ repeat_every = 1;
+ }
}
}
+ } else {
+ continue;
}
- } else {
- continue;
- }
- if(unlikely(repeat_every > 0 && (rc->last_repeat + repeat_every) <= now)) {
- worker_is_busy(WORKER_HEALTH_JOB_ALARM_LOG_ENTRY);
- rc->last_repeat = now;
- if (likely(rc->times_repeat < UINT32_MAX)) rc->times_repeat++;
-
- ALARM_ENTRY *ae = health_create_alarm_entry(
- host,
- rc->id,
- rc->next_event_id++,
- rc->config_hash_id,
- now,
- rc->name,
- rc->rrdset->id,
- rc->rrdset->context,
- rc->rrdset->family,
- rc->classification,
- rc->component,
- rc->type,
- rc->exec,
- rc->recipient,
- now - rc->last_status_change,
- rc->old_value,
- rc->value,
- rc->old_status,
- rc->status,
- rc->source,
- rc->units,
- rc->info,
- rc->delay_last,
- (
- ((rc->options & RRDCALC_OPTION_NO_CLEAR_NOTIFICATION)? HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION : 0) |
- ((rc->run_flags & RRDCALC_FLAG_SILENCED)? HEALTH_ENTRY_FLAG_SILENCED : 0) |
- (rrdcalc_isrepeating(rc)?HEALTH_ENTRY_FLAG_IS_REPEATING:0)
- )
- );
-
- ae->last_repeat = rc->last_repeat;
- if (!(rc->run_flags & RRDCALC_FLAG_RUN_ONCE) && rc->status == RRDCALC_STATUS_CLEAR) {
- ae->flags |= HEALTH_ENTRY_RUN_ONCE;
+ if(unlikely(repeat_every > 0 && (rc->last_repeat + repeat_every) <= now)) {
+ worker_is_busy(WORKER_HEALTH_JOB_ALARM_LOG_ENTRY);
+ rc->last_repeat = now;
+ if (likely(rc->times_repeat < UINT32_MAX)) rc->times_repeat++;
+
+ ALARM_ENTRY *ae = health_create_alarm_entry(
+ host,
+ rc->id,
+ rc->next_event_id++,
+ rc->config_hash_id,
+ now,
+ rc->name,
+ rc->rrdset->id,
+ rc->rrdset->context,
+ rc->rrdset->family,
+ rc->classification,
+ rc->component,
+ rc->type,
+ rc->exec,
+ rc->recipient,
+ now - rc->last_status_change,
+ rc->old_value,
+ rc->value,
+ rc->old_status,
+ rc->status,
+ rc->source,
+ rc->units,
+ rc->info,
+ rc->delay_last,
+ (
+ ((rc->options & RRDCALC_OPTION_NO_CLEAR_NOTIFICATION)? HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION : 0) |
+ ((rc->run_flags & RRDCALC_FLAG_SILENCED)? HEALTH_ENTRY_FLAG_SILENCED : 0) |
+ (rrdcalc_isrepeating(rc)?HEALTH_ENTRY_FLAG_IS_REPEATING:0)
+ )
+ );
+
+ ae->last_repeat = rc->last_repeat;
+ if (!(rc->run_flags & RRDCALC_FLAG_RUN_ONCE) && rc->status == RRDCALC_STATUS_CLEAR) {
+ ae->flags |= HEALTH_ENTRY_RUN_ONCE;
+ }
+ rc->run_flags |= RRDCALC_FLAG_RUN_ONCE;
+ health_process_notifications(host, ae);
+ debug(D_HEALTH, "Notification sent for the repeating alarm %u.", ae->alarm_id);
+ health_alarm_wait_for_execution(ae);
+ health_alarm_log_free_one_nochecks_nounlink(ae);
}
- rc->run_flags |= RRDCALC_FLAG_RUN_ONCE;
- health_process_notifications(host, ae);
- debug(D_HEALTH, "Notification sent for the repeating alarm %u.", ae->alarm_id);
- health_alarm_wait_for_execution(ae);
- health_alarm_log_free_one_nochecks_nounlink(ae);
}
+ foreach_rrdcalc_in_rrdhost_done(rc);
}
- foreach_rrdcalc_in_rrdhost_done(rc);
- }
- if (unlikely(netdata_exit))
- break;
+ if (unlikely(!service_running(SERVICE_HEALTH)))
+ break;
- // execute notifications
- // and cleanup
- worker_is_busy(WORKER_HEALTH_JOB_ALARM_LOG_PROCESS);
- health_alarm_log_process(host);
+ // execute notifications
+ // and cleanup
+ worker_is_busy(WORKER_HEALTH_JOB_ALARM_LOG_PROCESS);
+ health_alarm_log_process(host);
- if (unlikely(netdata_exit)) {
- // wait for all notifications to finish before allowing health to be cleaned up
- ALARM_ENTRY *ae;
- while (NULL != (ae = alarm_notifications_in_progress.head)) {
- health_alarm_wait_for_execution(ae);
+ if (unlikely(!service_running(SERVICE_HEALTH))) {
+ // wait for all notifications to finish before allowing health to be cleaned up
+ ALARM_ENTRY *ae;
+ while (NULL != (ae = alarm_notifications_in_progress.head)) {
+ if(unlikely(!service_running(SERVICE_HEALTH)))
+ break;
+
+ health_alarm_wait_for_execution(ae);
+ }
+ break;
}
- break;
- }
+ } //for each host
+
+ rrd_unlock();
// wait for all notifications to finish before allowing health to be cleaned up
ALARM_ENTRY *ae;
while (NULL != (ae = alarm_notifications_in_progress.head)) {
+ if(unlikely(!service_running(SERVICE_HEALTH)))
+ break;
+
health_alarm_wait_for_execution(ae);
}
#ifdef ENABLE_ACLK
- if (netdata_cloud_setting && unlikely(host->aclk_alert_reloaded) && loop > (marked_aclk_reload_loop + 2)) {
- sql_queue_removed_alerts_to_aclk(host);
- host->aclk_alert_reloaded = 0;
+ if (netdata_cloud_setting && unlikely(aclk_alert_reloaded) && loop > (marked_aclk_reload_loop + 2)) {
+ rrdhost_foreach_read(host) {
+ if(unlikely(!service_running(SERVICE_HEALTH)))
+ break;
+
+ if (unlikely(!host->health.health_enabled))
+ continue;
+
+ sql_queue_removed_alerts_to_aclk(host);
+ }
+ aclk_alert_reloaded = 0;
marked_aclk_reload_loop = 0;
}
#endif
- if(unlikely(netdata_exit))
+ if(unlikely(!service_running(SERVICE_HEALTH)))
break;
- health_sleep(next_run, loop, host);
+ health_sleep(next_run, loop);
} // forever
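
A note on the re-indented repeating-alarm block above: while an alarm stays in WARNING or CRITICAL the RUN_ONCE flag is cleared and warn_repeat_every / crit_repeat_every selects the resend interval; on CLEAR a single extra notification is allowed only while RUN_ONCE is still unset, and the flag is set again right after each repeated notification goes out. The gating itself is plain bit masking and timestamp arithmetic; below is a minimal, self-contained C sketch of that pattern (the constant, timings and flow are illustrative, not Netdata internals).

    /* Illustrative sketch of the repeat gating; not Netdata code. */
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define FLAG_RUN_ONCE (1u << 0)          /* stand-in for RRDCALC_FLAG_RUN_ONCE */

    int main(void) {
        uint32_t run_flags = FLAG_RUN_ONCE;  /* a notification was already sent once */
        time_t now = time(NULL);
        time_t last_repeat = now - 120;      /* last notification went out 2 minutes ago */
        int repeat_every = 60;               /* e.g. warn_repeat_every, in seconds */

        run_flags &= ~FLAG_RUN_ONCE;         /* alarm is still raised: repeats are allowed */

        if (repeat_every > 0 && (last_repeat + repeat_every) <= now) {
            printf("send repeated notification\n");
            last_repeat = now;
            run_flags |= FLAG_RUN_ONCE;      /* a later CLEAR repeat stays suppressed until this is cleared again */
        }
        return 0;
    }
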
@@ -1554,28 +1561,13 @@ void *health_main(void *ptr) {
void health_add_host_labels(void) {
DICTIONARY *labels = localhost->rrdlabels;
+ // The source should be CONF, but when it is set, these labels are exported by default ('send configured labels' in exporting.conf).
+ // Their export seems to break exporting to Graphite, see https://github.com/netdata/netdata/issues/14084.
+
int is_ephemeral = appconfig_get_boolean(&netdata_config, CONFIG_SECTION_HEALTH, "is ephemeral", CONFIG_BOOLEAN_NO);
- rrdlabels_add(labels, "_is_ephemeral", is_ephemeral ? "true" : "false", RRDLABEL_SRC_CONFIG);
+ rrdlabels_add(labels, "_is_ephemeral", is_ephemeral ? "true" : "false", RRDLABEL_SRC_AUTO);
int has_unstable_connection = appconfig_get_boolean(&netdata_config, CONFIG_SECTION_HEALTH, "has unstable connection", CONFIG_BOOLEAN_NO);
- rrdlabels_add(labels, "_has_unstable_connection", has_unstable_connection ? "true" : "false", RRDLABEL_SRC_CONFIG);
+ rrdlabels_add(labels, "_has_unstable_connection", has_unstable_connection ? "true" : "false", RRDLABEL_SRC_AUTO);
}
-void health_thread_spawn(RRDHOST * host) {
- if(!host->health_spawn) {
- char tag[NETDATA_THREAD_TAG_MAX + 1];
- snprintfz(tag, NETDATA_THREAD_TAG_MAX, "HEALTH[%s]", rrdhost_hostname(host));
- struct health_state *health = callocz(1, sizeof(*health));
- health->host = host;
-
- if(netdata_thread_create(&host->health_thread, tag, NETDATA_THREAD_OPTION_JOINABLE, health_main, (void *) health)) {
- log_health("[%s]: Failed to create new thread for client.", rrdhost_hostname(host));
- error("HEALTH [%s]: Failed to create new thread for client.", rrdhost_hostname(host));
- }
- else {
- log_health("[%s]: Created new thread for client.", rrdhost_hostname(host));
- host->health_spawn = 1;
- host->aclk_alert_reloaded = 1;
- }
- }
-}
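
The deleted health_thread_spawn() above, together with struct health_state and the prototypes removed from health.h later in this patch and the rrd_unlock() / "for each host" lines added earlier, indicates that health evaluation now runs in a single thread that walks every host, detecting shutdown through service_running(SERVICE_HEALTH) at each cancellation point instead of the old netdata_exit flag. A minimal, self-contained sketch of that cancellation pattern, with an atomic flag standing in for the service check (none of these names are Netdata's API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool health_service_running = true;  /* stand-in for service_running(SERVICE_HEALTH) */

    static bool still_running(void) { return atomic_load(&health_service_running); }

    int main(void) {
        const char *hosts[] = { "localhost", "child-1", "child-2" };

        while (still_running()) {                                       /* the "forever" loop */
            for (size_t i = 0; i < sizeof hosts / sizeof hosts[0]; i++) {
                if (!still_running()) break;                            /* bail out promptly on shutdown */
                printf("evaluating alarms for %s\n", hosts[i]);
            }
            atomic_store(&health_service_running, false);               /* demo only: stop after one pass */
        }
        return 0;
    }
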
diff --git a/health/health.d/cgroups.conf b/health/health.d/cgroups.conf
index 4bfe38b65..08260ff6d 100644
--- a/health/health.d/cgroups.conf
+++ b/health/health.d/cgroups.conf
@@ -51,7 +51,7 @@ component: Network
lookup: average -1m unaligned of received
units: packets
every: 10s
- info: average number of packets received by the network interface $family over the last minute
+ info: average number of packets received by the network interface ${label:device} over the last minute
template: cgroup_10s_received_packets_storm
on: cgroup.net_packets
@@ -66,7 +66,7 @@ component: Network
warn: $this > (($status >= $WARNING)?(200):(5000))
crit: $this > (($status == $CRITICAL)?(5000):(6000))
options: no-clear-notification
- info: ratio of average number of received packets for the network interface $family over the last 10 seconds, \
+ info: ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, \
compared to the rate over the last minute
to: sysadmin
@@ -121,7 +121,7 @@ component: Network
lookup: average -1m unaligned of received
units: packets
every: 10s
- info: average number of packets received by the network interface $family over the last minute
+ info: average number of packets received by the network interface ${label:device} over the last minute
template: k8s_cgroup_10s_received_packets_storm
on: k8s.cgroup.net_packets
@@ -136,6 +136,6 @@ component: Network
warn: $this > (($status >= $WARNING)?(200):(5000))
crit: $this > (($status == $CRITICAL)?(5000):(6000))
options: no-clear-notification
- info: ratio of average number of received packets for the network interface $family over the last 10 seconds, \
+ info: ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, \
compared to the rate over the last minute
to: sysadmin
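
A recurring change in this and the following health.d files: alarm info strings switch from the $family placeholder to chart labels such as ${label:device} and ${label:mount_point}. Labels are resolved from the chart's own label set, so the rendered notification names the concrete object; with a hypothetical interface label of eth0, the packet-storm info above would read "ratio of average number of received packets for the network interface eth0 over the last 10 seconds, compared to the rate over the last minute". Presumably the family grouping is no longer guaranteed to match the specific device or mount point, which is what motivates the switch.
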
diff --git a/health/health.d/consul.conf b/health/health.d/consul.conf
new file mode 100644
index 000000000..dff6d2df3
--- /dev/null
+++ b/health/health.d/consul.conf
@@ -0,0 +1,159 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+ template: consul_license_expiration_time
+ on: consul.license_expiration_time
+ class: Errors
+ type: ServiceMesh
+component: Consul
+ calc: $license_expiration
+ every: 60m
+ units: seconds
+ warn: $this < 14*24*60*60
+ crit: $this < 7*24*60*60
+ info: Consul Enterprise licence expiration time on node ${label:node_name} datacenter ${label:datacenter}
+ to: sysadmin
+
+ template: consul_autopilot_health_status
+ on: consul.autopilot_health_status
+ class: Errors
+ type: ServiceMesh
+component: Consul
+ calc: $unhealthy
+ every: 10s
+ units: status
+ warn: $this == 1
+ delay: down 5m multiplier 1.5 max 1h
+ info: datacenter ${label:datacenter} cluster is unhealthy as reported by server ${label:node_name}
+ to: sysadmin
+
+ template: consul_autopilot_server_health_status
+ on: consul.autopilot_server_health_status
+ class: Errors
+ type: ServiceMesh
+component: Consul
+ calc: $unhealthy
+ every: 10s
+ units: status
+ warn: $this == 1
+ delay: down 5m multiplier 1.5 max 1h
+ info: server ${label:node_name} from datacenter ${label:datacenter} is unhealthy
+ to: sysadmin
+
+ template: consul_raft_leader_last_contact_time
+ on: consul.raft_leader_last_contact_time
+ class: Errors
+ type: ServiceMesh
+component: Consul
+ lookup: average -1m unaligned of quantile_0.5
+ every: 10s
+ units: milliseconds
+ warn: $this > (($status >= $WARNING) ? (150) : (200))
+ crit: $this > (($status == $CRITICAL) ? (200) : (500))
+ delay: down 5m multiplier 1.5 max 1h
+ info: median time elapsed since leader server ${label:node_name} datacenter ${label:datacenter} was last able to contact the follower nodes
+ to: sysadmin
+
+ template: consul_raft_leadership_transitions
+ on: consul.raft_leadership_transitions_rate
+ class: Errors
+ type: ServiceMesh
+component: Consul
+ lookup: sum -1m unaligned
+ every: 10s
+ units: transitions
+ warn: $this > 0
+ delay: down 5m multiplier 1.5 max 1h
+ info: there has been a leadership change and server ${label:node_name} datacenter ${label:datacenter} has become the leader
+ to: sysadmin
+
+ template: consul_raft_thread_main_saturation
+ on: consul.raft_thread_main_saturation_perc
+ class: Utilization
+ type: ServiceMesh
+component: Consul
+ lookup: average -1m unaligned of quantile_0.9
+ every: 10s
+ units: percentage
+ warn: $this > (($status >= $WARNING) ? (40) : (50))
+ delay: down 5m multiplier 1.5 max 1h
+ info: average saturation of the main Raft goroutine on server ${label:node_name} datacenter ${label:datacenter}
+ to: sysadmin
+
+ template: consul_raft_thread_fsm_saturation
+ on: consul.raft_thread_fsm_saturation_perc
+ class: Utilization
+ type: ServiceMesh
+component: Consul
+ lookup: average -1m unaligned of quantile_0.9
+ every: 10s
+ units: percentage
+ warn: $this > (($status >= $WARNING) ? (40) : (50))
+ delay: down 5m multiplier 1.5 max 1h
+ info: average saturation of the FSM Raft goroutine on server ${label:node_name} datacenter ${label:datacenter}
+ to: sysadmin
+
+ template: consul_client_rpc_requests_exceeded
+ on: consul.client_rpc_requests_exceeded_rate
+ class: Errors
+ type: ServiceMesh
+component: Consul
+ lookup: sum -1m unaligned
+ every: 10s
+ units: requests
+ warn: $this > (($status >= $WARNING) ? (0) : (5))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of rate-limited RPC requests made by server ${label:node_name} datacenter ${label:datacenter}
+ to: sysadmin
+
+ template: consul_client_rpc_requests_failed
+ on: consul.client_rpc_requests_failed_rate
+ class: Errors
+ type: ServiceMesh
+component: Consul
+ lookup: sum -1m unaligned
+ every: 10s
+ units: requests
+ warn: $this > (($status >= $WARNING) ? (0) : (5))
+ delay: down 5m multiplier 1.5 max 1h
+ info: number of failed RPC requests made by server ${label:node_name} datacenter ${label:datacenter}
+ to: sysadmin
+
+ template: consul_node_health_check_status
+ on: consul.node_health_check_status
+ class: Errors
+ type: ServiceMesh
+component: Consul
+ calc: $warning + $critical
+ every: 10s
+ units: status
+ warn: $this != nan AND $this != 0
+ delay: down 5m multiplier 1.5 max 1h
+ info: node health check ${label:check_name} has failed on server ${label:node_name} datacenter ${label:datacenter}
+ to: sysadmin
+
+ template: consul_service_health_check_status
+ on: consul.service_health_check_status
+ class: Errors
+ type: ServiceMesh
+component: Consul
+ calc: $warning + $critical
+ every: 10s
+ units: status
+ warn: $this == 1
+ delay: down 5m multiplier 1.5 max 1h
+ info: service health check ${label:check_name} for service ${label:service_name} has failed on server ${label:node_name} datacenter ${label:datacenter}
+ to: sysadmin
+
+ template: consul_gc_pause_time
+ on: consul.gc_pause_time
+ class: Errors
+ type: ServiceMesh
+component: Consul
+ lookup: sum -1m unaligned
+ every: 10s
+ units: seconds
+ warn: $this > (($status >= $WARNING) ? (1) : (2))
+ crit: $this > (($status >= $WARNING) ? (2) : (5))
+ delay: down 5m multiplier 1.5 max 1h
+ info: time spent in stop-the-world garbage collection pauses on server ${label:node_name} datacenter ${label:datacenter}
+ to: sysadmin
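
Most warn/crit lines in the new consul.conf (and in many of the existing files touched by this patch) use the (($status >= $WARNING) ? (low) : (high)) idiom: the alarm trips above the higher value but, once raised, only clears back below the lower one, which prevents flapping around a single threshold. A small, self-contained C illustration using the 150/200 ms pair from consul_raft_leader_last_contact_time (the sample latencies are made up):

    #include <stdio.h>

    int main(void) {
        int in_warning = 0;
        double samples[] = { 120, 180, 210, 180, 140 };  /* illustrative latencies, ms */

        for (int i = 0; i < 5; i++) {
            double threshold = in_warning ? 150 : 200;   /* (($status >= $WARNING) ? (150) : (200)) */
            in_warning = samples[i] > threshold;
            printf("%.0f ms -> %s\n", samples[i], in_warning ? "WARNING" : "CLEAR");
        }
        return 0;
    }

Note how the second 180 ms sample stays in WARNING because the release point has dropped to 150 ms.
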
diff --git a/health/health.d/disks.conf b/health/health.d/disks.conf
index 5daff61a1..fd207fbc1 100644
--- a/health/health.d/disks.conf
+++ b/health/health.d/disks.conf
@@ -23,7 +23,7 @@ component: Disk
warn: $this > (($status >= $WARNING ) ? (80) : (90))
crit: $this > (($status == $CRITICAL) ? (90) : (98))
delay: up 1m down 15m multiplier 1.5 max 1h
- info: disk $family space utilization
+ info: disk ${label:mount_point} space utilization
to: sysadmin
template: disk_inode_usage
@@ -40,7 +40,7 @@ component: Disk
warn: $this > (($status >= $WARNING) ? (80) : (90))
crit: $this > (($status == $CRITICAL) ? (90) : (98))
delay: up 1m down 15m multiplier 1.5 max 1h
- info: disk $family inode utilization
+ info: disk ${label:mount_point} inode utilization
to: sysadmin
@@ -147,7 +147,7 @@ component: Disk
every: 1m
warn: $this > 98 * (($status >= $WARNING) ? (0.7) : (1))
delay: down 15m multiplier 1.2 max 1h
- info: average percentage of time $family disk was busy over the last 10 minutes
+ info: average percentage of time ${label:device} disk was busy over the last 10 minutes
to: silent
@@ -169,5 +169,5 @@ component: Disk
every: 1m
warn: $this > 5000 * (($status >= $WARNING) ? (0.7) : (1))
delay: down 15m multiplier 1.2 max 1h
- info: average backlog size of the $family disk over the last 10 minutes
+ info: average backlog size of the ${label:device} disk over the last 10 minutes
to: silent
diff --git a/health/health.d/dns_query.conf b/health/health.d/dns_query.conf
index b9d6c2374..bf9397d85 100644
--- a/health/health.d/dns_query.conf
+++ b/health/health.d/dns_query.conf
@@ -10,5 +10,5 @@ component: DNS
every: 10s
warn: $this != nan && $this != 1
delay: up 30s down 5m multiplier 1.5 max 1h
- info: DNS request type $label:record_type to server $label:server is unsuccessful
+ info: DNS request type ${label:record_type} to server ${label:server} is unsuccessful
to: sysadmin
diff --git a/health/health.d/elasticsearch.conf b/health/health.d/elasticsearch.conf
new file mode 100644
index 000000000..47f8e1eb9
--- /dev/null
+++ b/health/health.d/elasticsearch.conf
@@ -0,0 +1,73 @@
+# you can disable an alarm notification by setting the 'to' line to: silent
+
+# 'red' is a threshold, can't lookup the 'red' dimension - using simple pattern is a workaround.
+
+ template: elasticsearch_cluster_health_status_red
+ on: elasticsearch.cluster_health_status
+ class: Errors
+ type: SearchEngine
+component: Elasticsearch
+ lookup: average -5s unaligned of *ed
+ every: 10s
+ units: status
+ warn: $this == 1
+ delay: down 5m multiplier 1.5 max 1h
+ info: cluster health status is red.
+ to: sysadmin
+
+# the idea of '-10m' is to handle yellow status after node restart,
+# (usually) no action is required because Elasticsearch will automatically restore the green status.
+ template: elasticsearch_cluster_health_status_yellow
+ on: elasticsearch.cluster_health_status
+ class: Errors
+ type: SearchEngine
+component: Elasticsearch
+ lookup: average -10m unaligned of yellow
+ every: 1m
+ units: status
+ warn: $this == 1
+ delay: down 5m multiplier 1.5 max 1h
+ info: cluster health status is yellow.
+ to: sysadmin
+
+ template: elasticsearch_node_index_health_red
+ on: elasticsearch.node_index_health
+ class: Errors
+ type: SearchEngine
+component: Elasticsearch
+ lookup: average -5s unaligned of *ed
+ every: 10s
+ units: status
+ warn: $this == 1
+ delay: down 5m multiplier 1.5 max 1h
+ info: node index ${label:index} health status is red.
+ to: sysadmin
+
+# don't convert 'lookup' value to seconds in 'calc' due to UI showing seconds as hh:mm:ss (0 as now).
+
+ template: elasticsearch_node_indices_search_time_query
+ on: elasticsearch.node_indices_search_time
+ class: Workload
+ type: SearchEngine
+component: Elasticsearch
+ lookup: average -10m unaligned of query
+ every: 10s
+ units: milliseconds
+ warn: $this > (($status >= $WARNING) ? (20 * 1000) : (30 * 1000))
+ delay: down 5m multiplier 1.5 max 1h
+ info: search performance is degraded, queries run slowly.
+ to: sysadmin
+
+ template: elasticsearch_node_indices_search_time_fetch
+ on: elasticsearch.node_indices_search_time
+ class: Workload
+ type: SearchEngine
+component: Elasticsearch
+ lookup: average -10m unaligned of fetch
+ every: 10s
+ units: milliseconds
+ warn: $this > (($status >= $WARNING) ? (3 * 1000) : (5 * 1000))
+ crit: $this > (($status == $CRITICAL) ? (5 * 1000) : (30 * 1000))
+ delay: down 5m multiplier 1.5 max 1h
+ info: search performance is degraded, fetches run slowly.
+ to: sysadmin
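
A note on the '*ed' lookups in the two status alarms above: the dimension of interest is literally named red, but red is also the name of the built-in threshold variable ($red), which is why the comment at the top of the file calls the simple pattern a workaround. Among the status dimensions (green, yellow, red) only red ends in "ed", so "average -5s unaligned of *ed" effectively averages just the red dimension.
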
diff --git a/health/health.d/fping.conf b/health/health.d/fping.conf
deleted file mode 100644
index bb22419fa..000000000
--- a/health/health.d/fping.conf
+++ /dev/null
@@ -1,64 +0,0 @@
-
- template: fping_last_collected_secs
- families: *
- on: fping.latency
- class: Latency
- type: Other
-component: Network
- calc: $now - $last_collected_t
- units: seconds ago
- every: 10s
- warn: $this > (($status >= $WARNING) ? ($update_every) : ( 5 * $update_every))
- crit: $this > (($status == $CRITICAL) ? ($update_every) : (60 * $update_every))
- delay: down 5m multiplier 1.5 max 1h
- info: number of seconds since the last successful data collection
- to: sysadmin
-
- template: fping_host_reachable
- families: *
- on: fping.latency
- class: Errors
- type: Other
-component: Network
- calc: $average != nan
- units: up/down
- every: 10s
- crit: $this == 0
- delay: down 30m multiplier 1.5 max 2h
- info: reachability status of the network host (0: unreachable, 1: reachable)
- to: sysadmin
-
- template: fping_host_latency
- families: *
- on: fping.latency
- class: Latency
- type: Other
-component: Network
- lookup: average -10s unaligned of average
- units: ms
- every: 10s
- green: 500
- red: 1000
- warn: $this > $green OR $max > $red
- crit: $this > $red
- delay: down 30m multiplier 1.5 max 2h
- info: average latency to the network host over the last 10 seconds
- to: sysadmin
-
- template: fping_packet_loss
- families: *
- on: fping.quality
- class: Errors
- type: System
-component: Network
- lookup: average -10m unaligned of returned
- calc: 100 - $this
- green: 1
- red: 10
- units: %
- every: 10s
- warn: $this > $green
- crit: $this > $red
- delay: down 30m multiplier 1.5 max 2h
- info: packet loss ratio to the network host over the last 10 minutes
- to: sysadmin
diff --git a/health/health.d/httpcheck.conf b/health/health.d/httpcheck.conf
index 599c47acc..2008b000d 100644
--- a/health/health.d/httpcheck.conf
+++ b/health/health.d/httpcheck.conf
@@ -10,7 +10,7 @@ component: HTTP endpoint
calc: ($this < 75) ? (0) : ($this)
every: 5s
units: up/down
- info: average ratio of successful HTTP requests over the last minute (at least 75%)
+ info: HTTP endpoint ${label:url} liveness status
to: silent
template: httpcheck_web_service_bad_content
@@ -25,8 +25,7 @@ component: HTTP endpoint
warn: $this >= 10 AND $this < 40
crit: $this >= 40
delay: down 5m multiplier 1.5 max 1h
- info: average ratio of HTTP responses with unexpected content over the last 5 minutes
- options: no-clear-notification
+ info: percentage of HTTP responses from ${label:url} with unexpected content in the last 5 minutes
to: webmaster
template: httpcheck_web_service_bad_status
@@ -41,8 +40,7 @@ component: HTTP endpoint
warn: $this >= 10 AND $this < 40
crit: $this >= 40
delay: down 5m multiplier 1.5 max 1h
- info: average ratio of HTTP responses with unexpected status over the last 5 minutes
- options: no-clear-notification
+ info: percentage of HTTP responses from ${label:url} with unexpected status in the last 5 minutes
to: webmaster
template: httpcheck_web_service_timeouts
@@ -54,9 +52,13 @@ component: HTTP endpoint
lookup: average -5m unaligned percentage of timeout
every: 10s
units: %
- info: average ratio of HTTP request timeouts over the last 5 minutes
+ warn: $this >= 10 AND $this < 40
+ crit: $this >= 40
+ delay: down 5m multiplier 1.5 max 1h
+ info: percentage of timed-out HTTP requests to ${label:url} in the last 5 minutes
+ to: webmaster
- template: httpcheck_no_web_service_connections
+ template: httpcheck_web_service_no_connection
families: *
on: httpcheck.status
class: Errors
@@ -65,48 +67,8 @@ component: HTTP endpoint
lookup: average -5m unaligned percentage of no_connection
every: 10s
units: %
- info: average ratio of failed requests during the last 5 minutes
-
-# combined timeout & no connection alarm
- template: httpcheck_web_service_unreachable
- families: *
- on: httpcheck.status
- class: Errors
- type: Web Server
-component: HTTP endpoint
- calc: ($httpcheck_no_web_service_connections >= $httpcheck_web_service_timeouts) ? ($httpcheck_no_web_service_connections) : ($httpcheck_web_service_timeouts)
- units: %
- every: 10s
- warn: ($httpcheck_no_web_service_connections >= 10 OR $httpcheck_web_service_timeouts >= 10) AND ($httpcheck_no_web_service_connections < 40 OR $httpcheck_web_service_timeouts < 40)
- crit: $httpcheck_no_web_service_connections >= 40 OR $httpcheck_web_service_timeouts >= 40
- delay: down 5m multiplier 1.5 max 1h
- info: ratio of failed requests either due to timeouts or no connection over the last 5 minutes
- options: no-clear-notification
- to: webmaster
-
- template: httpcheck_1h_web_service_response_time
- families: *
- on: httpcheck.responsetime
- class: Latency
- type: Other
-component: HTTP endpoint
- lookup: average -1h unaligned of time
- every: 30s
- units: ms
- info: average HTTP response time over the last hour
-
- template: httpcheck_web_service_slow
- families: *
- on: httpcheck.responsetime
- class: Latency
- type: Web Server
-component: HTTP endpoint
- lookup: average -3m unaligned of time
- units: ms
- every: 10s
- warn: ($this > ($httpcheck_1h_web_service_response_time * 2) )
- crit: ($this > ($httpcheck_1h_web_service_response_time * 3) )
+ warn: $this >= 10 AND $this < 40
+ crit: $this >= 40
delay: down 5m multiplier 1.5 max 1h
- info: average HTTP response time over the last 3 minutes, compared to the average over the last hour
- options: no-clear-notification
+ info: percentage of failed HTTP requests to ${label:url} in the last 5 minutes
to: webmaster
diff --git a/health/health.d/kubelet.conf b/health/health.d/kubelet.conf
index c2778cc5e..428b6ee91 100644
--- a/health/health.d/kubelet.conf
+++ b/health/health.d/kubelet.conf
@@ -9,7 +9,7 @@
class: Errors
type: Kubernetes
component: Kubelet
- calc: $kubelet_node_config_error
+ calc: $experiencing_error
units: bool
every: 10s
warn: $this == 1
@@ -20,12 +20,12 @@ component: Kubelet
# Failed Token() requests to the alternate token source
template: kubelet_token_requests
- lookup: sum -10s of token_fail_count
on: k8s_kubelet.kubelet_token_requests
class: Errors
type: Kubernetes
component: Kubelet
- units: failed requests
+ lookup: sum -10s of failed
+ units: requests
every: 10s
warn: $this > 0
delay: down 1m multiplier 1.5 max 2h
@@ -35,11 +35,11 @@ component: Kubelet
# Docker and runtime operation errors
template: kubelet_operations_error
- lookup: sum -1m
on: k8s_kubelet.kubelet_operations_errors
class: Errors
type: Kubernetes
component: Kubelet
+ lookup: sum -1m
units: errors
every: 10s
warn: $this > (($status >= $WARNING) ? (0) : (20))
@@ -67,7 +67,7 @@ component: Kubelet
class: Latency
type: Kubernetes
component: Kubelet
- lookup: average -1m unaligned of kubelet_pleg_relist_latency_05
+ lookup: average -1m unaligned of 0.5
units: microseconds
every: 10s
info: average Pod Lifecycle Event Generator relisting latency over the last minute (quantile 0.5)
@@ -77,7 +77,7 @@ component: Kubelet
class: Latency
type: Kubernetes
component: Kubelet
- lookup: average -10s unaligned of kubelet_pleg_relist_latency_05
+ lookup: average -10s unaligned of 0.5
calc: $this * 100 / (($kubelet_1m_pleg_relist_latency_quantile_05 < 1000)?(1000):($kubelet_1m_pleg_relist_latency_quantile_05))
every: 10s
units: %
@@ -95,7 +95,7 @@ component: Kubelet
class: Latency
type: Kubernetes
component: Kubelet
- lookup: average -1m unaligned of kubelet_pleg_relist_latency_09
+ lookup: average -1m unaligned of 0.9
units: microseconds
every: 10s
info: average Pod Lifecycle Event Generator relisting latency over the last minute (quantile 0.9)
@@ -105,7 +105,7 @@ component: Kubelet
class: Latency
type: Kubernetes
component: Kubelet
- lookup: average -10s unaligned of kubelet_pleg_relist_latency_09
+ lookup: average -10s unaligned of 0.9
calc: $this * 100 / (($kubelet_1m_pleg_relist_latency_quantile_09 < 1000)?(1000):($kubelet_1m_pleg_relist_latency_quantile_09))
every: 10s
units: %
@@ -123,7 +123,7 @@ component: Kubelet
class: Latency
type: Kubernetes
component: Kubelet
- lookup: average -1m unaligned of kubelet_pleg_relist_latency_099
+ lookup: average -1m unaligned of 0.99
units: microseconds
every: 10s
info: average Pod Lifecycle Event Generator relisting latency over the last minute (quantile 0.99)
@@ -133,7 +133,7 @@ component: Kubelet
class: Latency
type: Kubernetes
component: Kubelet
- lookup: average -10s unaligned of kubelet_pleg_relist_latency_099
+ lookup: average -10s unaligned of 0.99
calc: $this * 100 / (($kubelet_1m_pleg_relist_latency_quantile_099 < 1000)?(1000):($kubelet_1m_pleg_relist_latency_quantile_099))
every: 10s
units: %
diff --git a/health/health.d/load.conf b/health/health.d/load.conf
index 0bd872f85..75989c57f 100644
--- a/health/health.d/load.conf
+++ b/health/health.d/load.conf
@@ -11,7 +11,7 @@
component: Load
os: linux
hosts: *
- calc: ($active_processors == nan or $active_processors == inf or $active_processors < 2) ? ( 2 ) : ( $active_processors )
+ calc: ($active_processors == nan or $active_processors == 0) ? (nan) : ( ($active_processors < 2) ? ( 2 ) : ( $active_processors ) )
units: cpus
every: 1m
info: number of active CPU cores in the system
@@ -28,6 +28,7 @@ component: Load
os: linux
hosts: *
lookup: max -1m unaligned of load15
+ calc: ($load_cpu_number == nan) ? (nan) : ($this)
units: load
every: 1m
warn: ($this * 100 / $load_cpu_number) > (($status >= $WARNING) ? 175 : 200)
@@ -43,6 +44,7 @@ component: Load
os: linux
hosts: *
lookup: max -1m unaligned of load5
+ calc: ($load_cpu_number == nan) ? (nan) : ($this)
units: load
every: 1m
warn: ($this * 100 / $load_cpu_number) > (($status >= $WARNING) ? 350 : 400)
@@ -58,6 +60,7 @@ component: Load
os: linux
hosts: *
lookup: max -1m unaligned of load1
+ calc: ($load_cpu_number == nan) ? (nan) : ($this)
units: load
every: 1m
warn: ($this * 100 / $load_cpu_number) > (($status >= $WARNING) ? 700 : 800)
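
The three calc guards added to the load alarms above make the looked-up value nan outright whenever $load_cpu_number is unavailable, instead of letting the warn expression divide by nan (the companion change to load_cpu_number also drops the old fallback that silently assumed 2 CPUs when the count could not be read). Presumably the intent is for the alarm to report no data rather than quietly evaluate a comparison against nan, which under IEEE 754 is always false. A self-contained C illustration of that comparison behaviour (the values are made up):

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        double load15 = 8.0, cpu_count = NAN;   /* the CPU count could not be read */

        /* Without the guard: the division yields NaN and any comparison against it is false. */
        printf("warn fires? %s\n", (load15 * 100.0 / cpu_count) > 175.0 ? "yes" : "no");  /* prints "no" */

        /* With the guard: the value itself becomes NaN, an explicit "no data" signal. */
        double calc = isnan(cpu_count) ? NAN : load15;
        printf("guarded value: %s\n", isnan(calc) ? "nan" : "a number");                  /* prints "nan" */
        return 0;
    }
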
diff --git a/health/health.d/mdstat.conf b/health/health.d/mdstat.conf
index cedaa000e..ed980a26a 100644
--- a/health/health.d/mdstat.conf
+++ b/health/health.d/mdstat.conf
@@ -20,7 +20,7 @@ component: RAID
every: 10s
calc: $down
crit: $this > 0
- info: number of devices in the down state for the $family array. \
+ info: number of devices in the down state for the ${label:device} ${label:raid_level} array. \
Any number > 0 indicates that the array is degraded.
to: sysadmin
@@ -35,7 +35,7 @@ component: RAID
every: 60s
warn: $this > 1024
delay: up 30m
- info: number of unsynchronized blocks for the $family array
+ info: number of unsynchronized blocks for the ${label:device} ${label:raid_level} array
to: sysadmin
template: mdstat_nonredundant_last_collected
diff --git a/health/health.d/net.conf b/health/health.d/net.conf
index 9d5b3b8d3..a0723f303 100644
--- a/health/health.d/net.conf
+++ b/health/health.d/net.conf
@@ -15,7 +15,7 @@ component: Network
calc: ( $nic_speed_max > 0 ) ? ( $nic_speed_max) : ( nan )
units: Mbit
every: 10s
- info: network interface $family current speed
+ info: network interface ${label:device} current speed
template: 1m_received_traffic_overflow
on: net.net
@@ -31,7 +31,7 @@ component: Network
every: 10s
warn: $this > (($status >= $WARNING) ? (85) : (90))
delay: up 1m down 1m multiplier 1.5 max 1h
- info: average inbound utilization for the network interface $family over the last minute
+ info: average inbound utilization for the network interface ${label:device} over the last minute
to: sysadmin
template: 1m_sent_traffic_overflow
@@ -48,7 +48,7 @@ component: Network
every: 10s
warn: $this > (($status >= $WARNING) ? (85) : (90))
delay: up 1m down 1m multiplier 1.5 max 1h
- info: average outbound utilization for the network interface $family over the last minute
+ info: average outbound utilization for the network interface ${label:device} over the last minute
to: sysadmin
# -----------------------------------------------------------------------------
@@ -72,7 +72,7 @@ component: Network
lookup: sum -10m unaligned absolute of inbound
units: packets
every: 1m
- info: number of inbound dropped packets for the network interface $family in the last 10 minutes
+ info: number of inbound dropped packets for the network interface ${label:device} in the last 10 minutes
template: outbound_packets_dropped
on: net.drops
@@ -85,7 +85,7 @@ component: Network
lookup: sum -10m unaligned absolute of outbound
units: packets
every: 1m
- info: number of outbound dropped packets for the network interface $family in the last 10 minutes
+ info: number of outbound dropped packets for the network interface ${label:device} in the last 10 minutes
template: inbound_packets_dropped_ratio
on: net.packets
@@ -101,7 +101,7 @@ component: Network
every: 1m
warn: $this >= 2
delay: up 1m down 1h multiplier 1.5 max 2h
- info: ratio of inbound dropped packets for the network interface $family over the last 10 minutes
+ info: ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes
to: sysadmin
template: outbound_packets_dropped_ratio
@@ -118,7 +118,7 @@ component: Network
every: 1m
warn: $this >= 2
delay: up 1m down 1h multiplier 1.5 max 2h
- info: ratio of outbound dropped packets for the network interface $family over the last 10 minutes
+ info: ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes
to: sysadmin
template: wifi_inbound_packets_dropped_ratio
@@ -135,7 +135,7 @@ component: Network
every: 1m
warn: $this >= 10
delay: up 1m down 1h multiplier 1.5 max 2h
- info: ratio of inbound dropped packets for the network interface $family over the last 10 minutes
+ info: ratio of inbound dropped packets for the network interface ${label:device} over the last 10 minutes
to: sysadmin
template: wifi_outbound_packets_dropped_ratio
@@ -152,7 +152,7 @@ component: Network
every: 1m
warn: $this >= 10
delay: up 1m down 1h multiplier 1.5 max 2h
- info: ratio of outbound dropped packets for the network interface $family over the last 10 minutes
+ info: ratio of outbound dropped packets for the network interface ${label:device} over the last 10 minutes
to: sysadmin
# -----------------------------------------------------------------------------
@@ -171,7 +171,7 @@ component: Network
every: 1m
warn: $this >= 5
delay: down 1h multiplier 1.5 max 2h
- info: number of inbound errors for the network interface $family in the last 10 minutes
+ info: number of inbound errors for the network interface ${label:device} in the last 10 minutes
to: sysadmin
template: interface_outbound_errors
@@ -187,7 +187,7 @@ component: Network
every: 1m
warn: $this >= 5
delay: down 1h multiplier 1.5 max 2h
- info: number of outbound errors for the network interface $family in the last 10 minutes
+ info: number of outbound errors for the network interface ${label:device} in the last 10 minutes
to: sysadmin
# -----------------------------------------------------------------------------
@@ -211,7 +211,7 @@ component: Network
every: 1m
warn: $this > 0
delay: down 1h multiplier 1.5 max 2h
- info: number of FIFO errors for the network interface $family in the last 10 minutes
+ info: number of FIFO errors for the network interface ${label:device} in the last 10 minutes
to: sysadmin
# -----------------------------------------------------------------------------
@@ -234,7 +234,7 @@ component: Network
lookup: average -1m unaligned of received
units: packets
every: 10s
- info: average number of packets received by the network interface $family over the last minute
+ info: average number of packets received by the network interface ${label:device} over the last minute
template: 10s_received_packets_storm
on: net.packets
@@ -251,6 +251,6 @@ component: Network
warn: $this > (($status >= $WARNING)?(200):(5000))
crit: $this > (($status == $CRITICAL)?(5000):(6000))
options: no-clear-notification
- info: ratio of average number of received packets for the network interface $family over the last 10 seconds, \
+ info: ratio of average number of received packets for the network interface ${label:device} over the last 10 seconds, \
compared to the rate over the last minute
to: sysadmin
diff --git a/health/health.d/nvme.conf b/health/health.d/nvme.conf
index 5f729d52b..b7c0e6fd4 100644
--- a/health/health.d/nvme.conf
+++ b/health/health.d/nvme.conf
@@ -11,5 +11,5 @@ component: Disk
every: 10s
crit: $this != nan AND $this != 0
delay: down 5m multiplier 1.5 max 2h
- info: NVMe device $label:device has critical warnings
+ info: NVMe device ${label:device} has critical warnings
to: sysadmin
diff --git a/health/health.d/ping.conf b/health/health.d/ping.conf
index cbe7c30c9..fa8213ad3 100644
--- a/health/health.d/ping.conf
+++ b/health/health.d/ping.conf
@@ -12,7 +12,7 @@ component: Network
every: 10s
crit: $this == 0
delay: down 30m multiplier 1.5 max 2h
- info: network host $label:host reachability status
+ info: network host ${label:host} reachability status
to: sysadmin
template: ping_packet_loss
@@ -29,7 +29,7 @@ component: Network
warn: $this > $green
crit: $this > $red
delay: down 30m multiplier 1.5 max 2h
- info: packet loss percentage to the network host $label:host over the last 10 minutes
+ info: packet loss percentage to the network host ${label:host} over the last 10 minutes
to: sysadmin
template: ping_host_latency
@@ -46,5 +46,5 @@ component: Network
warn: $this > $green OR $max > $red
crit: $this > $red
delay: down 30m multiplier 1.5 max 2h
- info: average latency to the network host $label:host over the last 10 seconds
+ info: average latency to the network host ${label:host} over the last 10 seconds
to: sysadmin
diff --git a/health/health.d/portcheck.conf b/health/health.d/portcheck.conf
index 8cbd7729c..e8908404c 100644
--- a/health/health.d/portcheck.conf
+++ b/health/health.d/portcheck.conf
@@ -10,7 +10,7 @@ component: TCP endpoint
calc: ($this < 75) ? (0) : ($this)
every: 5s
units: up/down
- info: average ratio of successful connections over the last minute (at least 75%)
+ info: TCP host ${label:host} port ${label:port} liveness status
to: silent
template: portcheck_connection_timeouts
@@ -25,7 +25,7 @@ component: TCP endpoint
warn: $this >= 10 AND $this < 40
crit: $this >= 40
delay: down 5m multiplier 1.5 max 1h
- info: average ratio of timeouts over the last 5 minutes
+ info: percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes
to: sysadmin
template: portcheck_connection_fails
@@ -40,5 +40,5 @@ component: TCP endpoint
warn: $this >= 10 AND $this < 40
crit: $this >= 40
delay: down 5m multiplier 1.5 max 1h
- info: average ratio of failed connections over the last 5 minutes
+ info: percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes
to: sysadmin
diff --git a/health/health.d/postgres.conf b/health/health.d/postgres.conf
index 66d034cfe..67b25673b 100644
--- a/health/health.d/postgres.conf
+++ b/health/health.d/postgres.conf
@@ -58,7 +58,7 @@ component: PostgreSQL
warn: $this < (($status >= $WARNING) ? (70) : (60))
crit: $this < (($status == $CRITICAL) ? (60) : (50))
delay: down 15m multiplier 1.5 max 1h
- info: average cache hit ratio in db $label:database over the last minute
+ info: average cache hit ratio in db ${label:database} over the last minute
to: dba
template: postgres_db_transactions_rollback_ratio
@@ -72,7 +72,7 @@ component: PostgreSQL
every: 1m
warn: $this > (($status >= $WARNING) ? (0) : (2))
delay: down 15m multiplier 1.5 max 1h
- info: average aborted transactions percentage in db $label:database over the last five minutes
+ info: average aborted transactions percentage in db ${label:database} over the last five minutes
to: dba
template: postgres_db_deadlocks_rate
@@ -86,7 +86,7 @@ component: PostgreSQL
every: 1m
warn: $this > (($status >= $WARNING) ? (0) : (10))
delay: down 15m multiplier 1.5 max 1h
- info: number of deadlocks detected in db $label:database in the last minute
+ info: number of deadlocks detected in db ${label:database} in the last minute
to: dba
# Table alarms
@@ -104,7 +104,7 @@ component: PostgreSQL
warn: $this < (($status >= $WARNING) ? (70) : (60))
crit: $this < (($status == $CRITICAL) ? (60) : (50))
delay: down 15m multiplier 1.5 max 1h
- info: average cache hit ratio in db $label:database table $label:table over the last minute
+ info: average cache hit ratio in db ${label:database} table ${label:table} over the last minute
to: dba
template: postgres_table_index_cache_io_ratio
@@ -120,7 +120,7 @@ component: PostgreSQL
warn: $this < (($status >= $WARNING) ? (70) : (60))
crit: $this < (($status == $CRITICAL) ? (60) : (50))
delay: down 15m multiplier 1.5 max 1h
- info: average index cache hit ratio in db $label:database table $label:table over the last minute
+ info: average index cache hit ratio in db ${label:database} table ${label:table} over the last minute
to: dba
template: postgres_table_toast_cache_io_ratio
@@ -136,7 +136,7 @@ component: PostgreSQL
warn: $this < (($status >= $WARNING) ? (70) : (60))
crit: $this < (($status == $CRITICAL) ? (60) : (50))
delay: down 15m multiplier 1.5 max 1h
- info: average TOAST hit ratio in db $label:database table $label:table over the last minute
+ info: average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute
to: dba
template: postgres_table_toast_index_cache_io_ratio
@@ -152,7 +152,7 @@ component: PostgreSQL
warn: $this < (($status >= $WARNING) ? (70) : (60))
crit: $this < (($status == $CRITICAL) ? (60) : (50))
delay: down 15m multiplier 1.5 max 1h
- info: average index TOAST hit ratio in db $label:database table $label:table over the last minute
+ info: average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute
to: dba
template: postgres_table_bloat_size_perc
@@ -161,13 +161,13 @@ component: PostgreSQL
type: Database
component: PostgreSQL
hosts: *
- calc: $bloat
+ calc: ($table_size > (1024 * 1024 * 100)) ? ($bloat) : (0)
units: %
every: 1m
warn: $this > (($status >= $WARNING) ? (60) : (70))
crit: $this > (($status == $CRITICAL) ? (70) : (80))
delay: down 15m multiplier 1.5 max 1h
- info: bloat size percentage in db $label:database table $label:table
+ info: bloat size percentage in db ${label:database} table ${label:table}
to: dba
template: postgres_table_last_autovacuum_time
@@ -180,7 +180,7 @@ component: PostgreSQL
units: seconds
every: 1m
warn: $this != nan AND $this > (60 * 60 * 24 * 7)
- info: time elapsed since db $label:database table $label:table was vacuumed by the autovacuum daemon
+ info: time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon
to: dba
template: postgres_table_last_autoanalyze_time
@@ -193,7 +193,7 @@ component: PostgreSQL
units: seconds
every: 1m
warn: $this != nan AND $this > (60 * 60 * 24 * 7)
- info: time elapsed since db $label:database table $label:table was analyzed by the autovacuum daemon
+ info: time elapsed since db ${label:database} table ${label:table} was analyzed by the autovacuum daemon
to: dba
# Index alarms
@@ -204,11 +204,11 @@ component: PostgreSQL
type: Database
component: PostgreSQL
hosts: *
- calc: $bloat
+ calc: ($index_size > (1024 * 1024 * 10)) ? ($bloat) : (0)
units: %
every: 1m
warn: $this > (($status >= $WARNING) ? (60) : (70))
crit: $this > (($status == $CRITICAL) ? (70) : (80))
delay: down 15m multiplier 1.5 max 1h
- info: bloat size percentage in db $label:database table $label:table index $label:index
+ info: bloat size percentage in db ${label:database} table ${label:table} index ${label:index}
to: dba
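
A note on the two bloat calc changes above: the table alarm now reports $bloat only when $table_size exceeds 1024 * 1024 * 100 bytes = 104,857,600 bytes (100 MiB), and the index alarm only when $index_size exceeds 1024 * 1024 * 10 bytes (10 MiB); below those sizes the calc evaluates to 0 and the alarm stays clear. Presumably this is meant to suppress noisy readings on small relations, where a handful of dead pages already shows up as a large bloat percentage.
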
diff --git a/health/health.d/zfs.conf b/health/health.d/zfs.conf
index 785838d47..7f8ea2793 100644
--- a/health/health.d/zfs.conf
+++ b/health/health.d/zfs.conf
@@ -24,7 +24,7 @@ component: File system
every: 10s
warn: $this > 0
delay: down 1m multiplier 1.5 max 1h
- info: ZFS pool $family state is degraded
+ info: ZFS pool ${label:pool} state is degraded
to: sysadmin
template: zfs_pool_state_crit
@@ -37,5 +37,5 @@ component: File system
every: 10s
crit: $this > 0
delay: down 1m multiplier 1.5 max 1h
- info: ZFS pool $family state is faulted or unavail
+ info: ZFS pool ${label:pool} state is faulted or unavail
to: sysadmin
diff --git a/health/health.h b/health/health.h
index 15d8326ee..50c3e3452 100644
--- a/health/health.h
+++ b/health/health.h
@@ -31,6 +31,7 @@ extern unsigned int default_health_enabled;
#define HEALTH_SILENCERS_MAX_FILE_LEN 10000
extern char *silencers_filename;
+extern SIMPLE_PATTERN *conf_enabled_alarms;
void health_init(void);
@@ -48,9 +49,6 @@ int health_alarm_log_open(RRDHOST *host);
void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae);
void health_alarm_log_load(RRDHOST *host);
-void health_thread_spawn(RRDHOST *host);
-void health_thread_stop(RRDHOST *host);
-
ALARM_ENTRY* health_create_alarm_entry(
RRDHOST *host,
uint32_t alarm_id,
@@ -79,11 +77,6 @@ ALARM_ENTRY* health_create_alarm_entry(
void health_alarm_log_add_entry(RRDHOST *host, ALARM_ENTRY *ae);
-struct health_state {
- RRDHOST *host;
- netdata_thread_t thread;
-};
-
void health_readdir(RRDHOST *host, const char *user_path, const char *stock_path, const char *subpath);
char *health_user_config_dir(void);
char *health_stock_config_dir(void);
diff --git a/health/health_config.c b/health/health_config.c
index f9decfad5..55d5e10eb 100644
--- a/health/health_config.c
+++ b/health/health_config.c
@@ -553,33 +553,37 @@ static int health_readfile(const char *filename, void *data) {
rt = NULL;
}
- rc = callocz(1, sizeof(RRDCALC));
- rc->next_event_id = 1;
-
- {
- char *tmp = strdupz(value);
- if(rrdvar_fix_name(tmp))
- error("Health configuration renamed alarm '%s' to '%s'", value, tmp);
-
- rc->name = string_strdupz(tmp);
- freez(tmp);
- }
-
- rc->source = health_source_file(line, filename);
- rc->green = NAN;
- rc->red = NAN;
- rc->value = NAN;
- rc->old_value = NAN;
- rc->delay_multiplier = 1.0;
- rc->old_status = RRDCALC_STATUS_UNINITIALIZED;
- rc->warn_repeat_every = host->health_default_warn_repeat_every;
- rc->crit_repeat_every = host->health_default_crit_repeat_every;
- if (alert_cfg)
- alert_config_free(alert_cfg);
- alert_cfg = callocz(1, sizeof(struct alert_config));
-
- alert_cfg->alarm = string_dup(rc->name);
- ignore_this = 0;
+ if (simple_pattern_matches(conf_enabled_alarms, value)) {
+ rc = callocz(1, sizeof(RRDCALC));
+ rc->next_event_id = 1;
+
+ {
+ char *tmp = strdupz(value);
+ if(rrdvar_fix_name(tmp))
+ error("Health configuration renamed alarm '%s' to '%s'", value, tmp);
+
+ rc->name = string_strdupz(tmp);
+ freez(tmp);
+ }
+
+ rc->source = health_source_file(line, filename);
+ rc->green = NAN;
+ rc->red = NAN;
+ rc->value = NAN;
+ rc->old_value = NAN;
+ rc->delay_multiplier = 1.0;
+ rc->old_status = RRDCALC_STATUS_UNINITIALIZED;
+ rc->warn_repeat_every = host->health.health_default_warn_repeat_every;
+ rc->crit_repeat_every = host->health.health_default_crit_repeat_every;
+ if (alert_cfg)
+ alert_config_free(alert_cfg);
+ alert_cfg = callocz(1, sizeof(struct alert_config));
+
+ alert_cfg->alarm = string_dup(rc->name);
+ ignore_this = 0;
+ } else {
+ rc = NULL;
+ }
}
else if(hash == hash_template && !strcasecmp(key, HEALTH_TEMPLATE_KEY)) {
if(rc) {
@@ -599,29 +603,33 @@ static int health_readfile(const char *filename, void *data) {
rrdcalctemplate_add_from_config(host, rt);
}
- rt = callocz(1, sizeof(RRDCALCTEMPLATE));
+ if (simple_pattern_matches(conf_enabled_alarms, value)) {
+ rt = callocz(1, sizeof(RRDCALCTEMPLATE));
- {
- char *tmp = strdupz(value);
- if(rrdvar_fix_name(tmp))
- error("Health configuration renamed template '%s' to '%s'", value, tmp);
-
- rt->name = string_strdupz(tmp);
- freez(tmp);
- }
+ {
+ char *tmp = strdupz(value);
+ if(rrdvar_fix_name(tmp))
+ error("Health configuration renamed template '%s' to '%s'", value, tmp);
- rt->source = health_source_file(line, filename);
- rt->green = NAN;
- rt->red = NAN;
- rt->delay_multiplier = (float)1.0;
- rt->warn_repeat_every = host->health_default_warn_repeat_every;
- rt->crit_repeat_every = host->health_default_crit_repeat_every;
- if (alert_cfg)
- alert_config_free(alert_cfg);
- alert_cfg = callocz(1, sizeof(struct alert_config));
+ rt->name = string_strdupz(tmp);
+ freez(tmp);
+ }
- alert_cfg->template_key = string_dup(rt->name);
- ignore_this = 0;
+ rt->source = health_source_file(line, filename);
+ rt->green = NAN;
+ rt->red = NAN;
+ rt->delay_multiplier = (float)1.0;
+ rt->warn_repeat_every = host->health.health_default_warn_repeat_every;
+ rt->crit_repeat_every = host->health.health_default_crit_repeat_every;
+ if (alert_cfg)
+ alert_config_free(alert_cfg);
+ alert_cfg = callocz(1, sizeof(struct alert_config));
+
+ alert_cfg->template_key = string_dup(rt->name);
+ ignore_this = 0;
+ } else {
+ rt = NULL;
+ }
}
else if(hash == hash_os && !strcasecmp(key, HEALTH_OS_KEY)) {
char *os_match = value;
@@ -1163,7 +1171,8 @@ void sql_refresh_hashes(void)
}
void health_readdir(RRDHOST *host, const char *user_path, const char *stock_path, const char *subpath) {
- if(unlikely(!host->health_enabled) && !rrdhost_flag_check(host, RRDHOST_FLAG_INITIALIZED_HEALTH)) {
+ if(unlikely((!host->health.health_enabled) && !rrdhost_flag_check(host, RRDHOST_FLAG_INITIALIZED_HEALTH)) ||
+ !service_running(SERVICE_HEALTH)) {
debug(D_HEALTH, "CONFIG health is not enabled for host '%s'", rrdhost_hostname(host));
return;
}
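
The health_config.c hunks above wrap the allocation of each RRDCALC and RRDCALCTEMPLATE in a simple_pattern_matches(conf_enabled_alarms, value) check, so alarms and templates whose names do not match the configured pattern are never created (rc or rt is left NULL and the rest of that stanza is ignored). A minimal, self-contained sketch of that kind of name filter; POSIX fnmatch(3) stands in for Netdata's SIMPLE_PATTERN matcher, and the pattern list and alarm names are invented for the example:

    #include <fnmatch.h>
    #include <stdio.h>

    int main(void) {
        /* stand-in for the pattern behind conf_enabled_alarms */
        const char *patterns[]   = { "disk_*", "consul_*" };
        const char *candidates[] = { "disk_space_usage", "consul_gc_pause_time", "load_average_15" };

        for (int i = 0; i < 3; i++) {
            int keep = 0;
            for (int p = 0; p < 2; p++)
                if (fnmatch(patterns[p], candidates[i], 0) == 0) keep = 1;

            /* a match is parsed as usual; anything else is skipped, as if rc/rt were NULL */
            printf("%-22s -> %s\n", candidates[i], keep ? "parsed" : "skipped");
        }
        return 0;
    }
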
diff --git a/health/health_json.c b/health/health_json.c
index 2dd59fd46..8cabaa0bf 100644
--- a/health/health_json.c
+++ b/health/health_json.c
@@ -75,8 +75,8 @@ void health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, RRDHOST *host)
, (ae->flags & HEALTH_ENTRY_FLAG_UPDATED)?"true":"false"
, (unsigned long)ae->exec_run_timestamp
, (ae->flags & HEALTH_ENTRY_FLAG_EXEC_FAILED)?"true":"false"
- , ae->exec?ae_exec(ae):string2str(host->health_default_exec)
- , ae->recipient?ae_recipient(ae):string2str(host->health_default_recipient)
+ , ae->exec?ae_exec(ae):string2str(host->health.health_default_exec)
+ , ae->recipient?ae_recipient(ae):string2str(host->health.health_default_recipient)
, ae->exec_code
, ae_source(ae)
, edit_command
@@ -219,8 +219,8 @@ static inline void health_rrdcalc2json_nolock(RRDHOST *host, BUFFER *wb, RRDCALC
, (rc->rrdset)?"true":"false"
, (rc->run_flags & RRDCALC_FLAG_DISABLED)?"true":"false"
, (rc->run_flags & RRDCALC_FLAG_SILENCED)?"true":"false"
- , rc->exec?rrdcalc_exec(rc):string2str(host->health_default_exec)
- , rc->recipient?rrdcalc_recipient(rc):string2str(host->health_default_recipient)
+ , rc->exec?rrdcalc_exec(rc):string2str(host->health.health_default_exec)
+ , rc->recipient?rrdcalc_recipient(rc):string2str(host->health.health_default_recipient)
, rrdcalc_source(rc)
, rrdcalc_units(rc)
, rrdcalc_info(rc)
@@ -372,7 +372,7 @@ void health_alarms2json(RRDHOST *host, BUFFER *wb, int all) {
"\n\t\"alarms\": {\n",
rrdhost_hostname(host),
(host->health_log.next_log_id > 0)?(host->health_log.next_log_id - 1):0,
- host->health_enabled?"true":"false",
+ host->health.health_enabled?"true":"false",
(unsigned long)now_realtime_sec());
health_alarms2json_fill_alarms(host, wb, all, health_rrdcalc2json_nolock);
diff --git a/health/health_log.c b/health/health_log.c
index 8105e01ae..d3417493b 100644
--- a/health/health_log.c
+++ b/health/health_log.c
@@ -3,149 +3,10 @@
#include "health.h"
// ----------------------------------------------------------------------------
-// health alarm log load/save
-// no need for locking - only one thread is reading / writing the alarms log
-
-inline int health_alarm_log_open(RRDHOST *host) {
- if(host->health_log_fp)
- fclose(host->health_log_fp);
-
- host->health_log_fp = fopen(host->health_log_filename, "a");
-
- if(host->health_log_fp) {
- if (setvbuf(host->health_log_fp, NULL, _IOLBF, 0) != 0)
- error("HEALTH [%s]: cannot set line buffering on health log file '%s'.", rrdhost_hostname(host), host->health_log_filename);
- return 0;
- }
-
- error("HEALTH [%s]: cannot open health log file '%s'. Health data will be lost in case of netdata or server crash.", rrdhost_hostname(host), host->health_log_filename);
- return -1;
-}
-
-static inline void health_alarm_log_close(RRDHOST *host) {
- if(host->health_log_fp) {
- fclose(host->health_log_fp);
- host->health_log_fp = NULL;
- }
-}
-
-static inline void health_log_rotate(RRDHOST *host) {
- static size_t rotate_every = 0;
-
- if(unlikely(rotate_every == 0)) {
- rotate_every = (size_t)config_get_number(CONFIG_SECTION_HEALTH, "rotate log every lines", 2000);
- if(rotate_every < 100) rotate_every = 100;
- }
-
- if(unlikely(host->health_log_entries_written > rotate_every)) {
- if(unlikely(host->health_log_fp)) {
- health_alarm_log_close(host);
-
- char old_filename[FILENAME_MAX + 1];
- snprintfz(old_filename, FILENAME_MAX, "%s.old", host->health_log_filename);
-
- if(unlink(old_filename) == -1 && errno != ENOENT)
- error("HEALTH [%s]: cannot remove old alarms log file '%s'", rrdhost_hostname(host), old_filename);
-
- if(link(host->health_log_filename, old_filename) == -1 && errno != ENOENT)
- error("HEALTH [%s]: cannot move file '%s' to '%s'.", rrdhost_hostname(host), host->health_log_filename, old_filename);
-
- if(unlink(host->health_log_filename) == -1 && errno != ENOENT)
- error("HEALTH [%s]: cannot remove old alarms log file '%s'", rrdhost_hostname(host), host->health_log_filename);
-
- // open it with truncate
- host->health_log_fp = fopen(host->health_log_filename, "w");
-
- if(host->health_log_fp)
- fclose(host->health_log_fp);
- else
- error("HEALTH [%s]: cannot truncate health log '%s'", rrdhost_hostname(host), host->health_log_filename);
-
- host->health_log_fp = NULL;
-
- host->health_log_entries_written = 0;
- health_alarm_log_open(host);
- }
- }
-}
-
-inline void health_label_log_save(RRDHOST *host) {
- health_log_rotate(host);
-
- if(unlikely(host->health_log_fp)) {
- BUFFER *wb = buffer_create(1024);
-
- rrdlabels_to_buffer(localhost->rrdlabels, wb, "", "=", "", "\t ", NULL, NULL, NULL, NULL);
- char *write = (char *) buffer_tostring(wb);
-
- if (unlikely(fprintf(host->health_log_fp, "L\t%s", write) < 0))
- error("HEALTH [%s]: failed to save alarm log entry to '%s'. Health data may be lost in case of abnormal restart.",
- rrdhost_hostname(host), host->health_log_filename);
- else
- host->health_log_entries_written++;
-
- buffer_free(wb);
- }
-}
inline void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae) {
- health_log_rotate(host);
- if(unlikely(host->health_log_fp)) {
- if(unlikely(fprintf(host->health_log_fp
- , "%c\t%s"
- "\t%08x\t%08x\t%08x\t%08x\t%08x"
- "\t%08x\t%08x\t%08x"
- "\t%08x\t%08x\t%08x"
- "\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s"
- "\t%d\t%d\t%d\t%d"
- "\t" NETDATA_DOUBLE_FORMAT_AUTO "\t" NETDATA_DOUBLE_FORMAT_AUTO
- "\t%016"PRIx64""
- "\t%s\t%s\t%s"
- "\n"
- , (ae->flags & HEALTH_ENTRY_FLAG_SAVED)?'U':'A'
- , rrdhost_hostname(host)
-
- , ae->unique_id
- , ae->alarm_id
- , ae->alarm_event_id
- , ae->updated_by_id
- , ae->updates_id
-
- , (uint32_t)ae->when
- , (uint32_t)ae->duration
- , (uint32_t)ae->non_clear_duration
- , (uint32_t)ae->flags
- , (uint32_t)ae->exec_run_timestamp
- , (uint32_t)ae->delay_up_to_timestamp
-
- , ae_name(ae)
- , ae_chart_name(ae)
- , ae_family(ae)
- , ae_exec(ae)
- , ae_recipient(ae)
- , ae_source(ae)
- , ae_units(ae)
- , ae_info(ae)
- , ae->exec_code
- , ae->new_status
- , ae->old_status
- , ae->delay
-
- , ae->new_value
- , ae->old_value
- , (uint64_t)ae->last_repeat
- , (ae->classification)?ae_classification(ae):"Unknown"
- , (ae->component)?ae_component(ae):"Unknown"
- , (ae->type)?ae_type(ae):"Unknown"
- ) < 0))
- error("HEALTH [%s]: failed to save alarm log entry to '%s'. Health data may be lost in case of abnormal restart.", rrdhost_hostname(host), host->health_log_filename);
- else {
- ae->flags |= HEALTH_ENTRY_FLAG_SAVED;
- host->health_log_entries_written++;
- }
- }else
- sql_health_alarm_log_save(host, ae);
+ sql_health_alarm_log_save(host, ae);
#ifdef ENABLE_ACLK
if (netdata_cloud_setting) {
@@ -154,291 +15,6 @@ inline void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae) {
#endif
}
-static uint32_t is_valid_alarm_id(RRDHOST *host, const char *chart, const char *name, uint32_t alarm_id)
-{
- STRING *chart_string = string_strdupz(chart);
- STRING *name_string = string_strdupz(name);
-
- uint32_t ret = 1;
-
- ALARM_ENTRY *ae;
- for(ae = host->health_log.alarms; ae ;ae = ae->next) {
- if (unlikely(ae->alarm_id == alarm_id && (!(chart_string == ae->chart && name_string == ae->name)))) {
- ret = 0;
- break;
- }
- }
-
- string_freez(chart_string);
- string_freez(name_string);
-
- return ret;
-}
-
-static inline ssize_t health_alarm_log_read(RRDHOST *host, FILE *fp, const char *filename) {
- errno = 0;
-
- char *s, *buf = mallocz(65536 + 1);
- size_t line = 0, len = 0;
- ssize_t loaded = 0, updated = 0, errored = 0, duplicate = 0;
-
- DICTIONARY *all_rrdcalcs = dictionary_create(
- DICT_OPTION_NAME_LINK_DONT_CLONE | DICT_OPTION_VALUE_LINK_DONT_CLONE | DICT_OPTION_DONT_OVERWRITE_VALUE);
- RRDCALC *rc;
- foreach_rrdcalc_in_rrdhost_read(host, rc) {
- dictionary_set(all_rrdcalcs, rrdcalc_name(rc), rc, sizeof(*rc));
- }
- foreach_rrdcalc_in_rrdhost_done(rc);
-
- netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
-
- while((s = fgets_trim_len(buf, 65536, fp, &len))) {
- host->health_log_entries_written++;
- line++;
-
- int max_entries = 33, entries = 0;
- char *pointers[max_entries];
-
- pointers[entries++] = s++;
- while(*s) {
- if(unlikely(*s == '\t')) {
- *s = '\0';
- pointers[entries++] = ++s;
- if(entries >= max_entries) {
- error("HEALTH [%s]: line %zu of file '%s' has more than %d entries. Ignoring excessive entries.", rrdhost_hostname(host), line, filename, max_entries);
- break;
- }
- }
- else s++;
- }
-
- if(likely(*pointers[0] == 'L'))
- continue;
-
- if(likely(*pointers[0] == 'U' || *pointers[0] == 'A')) {
- ALARM_ENTRY *ae = NULL;
-
- if(entries < 27) {
- error("HEALTH [%s]: line %zu of file '%s' should have at least 27 entries, but it has %d. Ignoring it.", rrdhost_hostname(host), line, filename, entries);
- errored++;
- continue;
- }
-
- // check that we have valid ids
- uint32_t unique_id = (uint32_t)strtoul(pointers[2], NULL, 16);
- if(!unique_id) {
- error("HEALTH [%s]: line %zu of file '%s' states alarm entry with invalid unique id %u (%s). Ignoring it.", rrdhost_hostname(host), line, filename, unique_id, pointers[2]);
- errored++;
- continue;
- }
-
- uint32_t alarm_id = (uint32_t)strtoul(pointers[3], NULL, 16);
- if(!alarm_id) {
- error("HEALTH [%s]: line %zu of file '%s' states alarm entry for invalid alarm id %u (%s). Ignoring it.", rrdhost_hostname(host), line, filename, alarm_id, pointers[3]);
- errored++;
- continue;
- }
-
- // Check if we got last_repeat field
- time_t last_repeat = 0;
- if(entries > 27) {
- char* alarm_name = pointers[13];
- last_repeat = (time_t)strtoul(pointers[27], NULL, 16);
-
- rc = dictionary_get(all_rrdcalcs, alarm_name);
- if(unlikely(rc)) {
- if (rrdcalc_isrepeating(rc)) {
- rc->last_repeat = last_repeat;
- // We iterate through repeating alarm entries only to
- // find the latest last_repeat timestamp. Otherwise,
- // there is no need to keep them in memory.
- continue;
- }
- }
- }
-
- if(unlikely(*pointers[0] == 'A')) {
- // make sure it is properly numbered
- if(unlikely(host->health_log.alarms && unique_id < host->health_log.alarms->unique_id)) {
- error( "HEALTH [%s]: line %zu of file '%s' has alarm log entry %u in wrong order. Ignoring it."
- , rrdhost_hostname(host), line, filename, unique_id);
- errored++;
- continue;
- }
-
- ae = callocz(1, sizeof(ALARM_ENTRY));
- }
- else if(unlikely(*pointers[0] == 'U')) {
- // find the original
- for(ae = host->health_log.alarms; ae ; ae = ae->next) {
- if(unlikely(unique_id == ae->unique_id)) {
- if(unlikely(*pointers[0] == 'A')) {
- error("HEALTH [%s]: line %zu of file '%s' adds duplicate alarm log entry %u. Using the later."
- , rrdhost_hostname(host), line, filename, unique_id);
- *pointers[0] = 'U';
- duplicate++;
- }
- break;
- }
- else if(unlikely(unique_id > ae->unique_id)) {
- // no need to continue
- // the linked list is sorted
- ae = NULL;
- break;
- }
- }
- }
-
- // if not found, skip this line
- if(unlikely(!ae)) {
- // error("HEALTH [%s]: line %zu of file '%s' updates alarm log entry with unique id %u, but it is not found.", host->hostname, line, filename, unique_id);
- continue;
- }
-
- // check for a possible host mismatch
- //if(strcmp(pointers[1], host->hostname))
- // error("HEALTH [%s]: line %zu of file '%s' provides an alarm for host '%s' but this is named '%s'.", host->hostname, line, filename, pointers[1], host->hostname);
-
- ae->unique_id = unique_id;
- if (!is_valid_alarm_id(host, pointers[14], pointers[13], alarm_id)) {
- STRING *chart = string_strdupz(pointers[14]);
- STRING *name = string_strdupz(pointers[13]);
- alarm_id = rrdcalc_get_unique_id(host, chart, name, NULL);
- string_freez(chart);
- string_freez(name);
- }
- ae->alarm_id = alarm_id;
- ae->alarm_event_id = (uint32_t)strtoul(pointers[4], NULL, 16);
- ae->updated_by_id = (uint32_t)strtoul(pointers[5], NULL, 16);
- ae->updates_id = (uint32_t)strtoul(pointers[6], NULL, 16);
-
- ae->when = (uint32_t)strtoul(pointers[7], NULL, 16);
- ae->duration = (uint32_t)strtoul(pointers[8], NULL, 16);
- ae->non_clear_duration = (uint32_t)strtoul(pointers[9], NULL, 16);
-
- ae->flags = (uint32_t)strtoul(pointers[10], NULL, 16);
- ae->flags |= HEALTH_ENTRY_FLAG_SAVED;
-
- ae->exec_run_timestamp = (uint32_t)strtoul(pointers[11], NULL, 16);
- ae->delay_up_to_timestamp = (uint32_t)strtoul(pointers[12], NULL, 16);
-
- string_freez(ae->name);
- ae->name = string_strdupz(pointers[13]);
-
- string_freez(ae->chart);
- ae->chart = string_strdupz(pointers[14]);
-
- string_freez(ae->family);
- ae->family = string_strdupz(pointers[15]);
-
- string_freez(ae->exec);
- ae->exec = string_strdupz(pointers[16]);
-
- string_freez(ae->recipient);
- ae->recipient = string_strdupz(pointers[17]);
-
- string_freez(ae->source);
- ae->source = string_strdupz(pointers[18]);
-
- string_freez(ae->units);
- ae->units = string_strdupz(pointers[19]);
-
- string_freez(ae->info);
- ae->info = string_strdupz(pointers[20]);
-
- ae->exec_code = str2i(pointers[21]);
- ae->new_status = str2i(pointers[22]);
- ae->old_status = str2i(pointers[23]);
- ae->delay = str2i(pointers[24]);
-
- ae->new_value = str2l(pointers[25]);
- ae->old_value = str2l(pointers[26]);
-
- ae->last_repeat = last_repeat;
-
- if (likely(entries > 30)) {
- string_freez(ae->classification);
- ae->classification = string_strdupz(pointers[28]);
-
- string_freez(ae->component);
- ae->component = string_strdupz(pointers[29]);
-
- string_freez(ae->type);
- ae->type = string_strdupz(pointers[30]);
- }
-
- char value_string[100 + 1];
- string_freez(ae->old_value_string);
- string_freez(ae->new_value_string);
- ae->old_value_string = string_strdupz(format_value_and_unit(value_string, 100, ae->old_value, ae_units(ae), -1));
- ae->new_value_string = string_strdupz(format_value_and_unit(value_string, 100, ae->new_value, ae_units(ae), -1));
-
- // add it to host if not already there
- if(unlikely(*pointers[0] == 'A')) {
- ae->next = host->health_log.alarms;
- host->health_log.alarms = ae;
- sql_health_alarm_log_insert(host, ae);
- loaded++;
- }
- else {
- sql_health_alarm_log_update(host, ae);
- updated++;
- }
-
- if(unlikely(ae->unique_id > host->health_max_unique_id))
- host->health_max_unique_id = ae->unique_id;
-
- if(unlikely(ae->alarm_id >= host->health_max_alarm_id))
- host->health_max_alarm_id = ae->alarm_id;
- }
- else {
- error("HEALTH [%s]: line %zu of file '%s' is invalid (unrecognized entry type '%s').", rrdhost_hostname(host), line, filename, pointers[0]);
- errored++;
- }
- }
-
- netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
-
- dictionary_destroy(all_rrdcalcs);
- all_rrdcalcs = NULL;
-
- freez(buf);
-
- if(!host->health_max_unique_id) host->health_max_unique_id = (uint32_t)now_realtime_sec();
- if(!host->health_max_alarm_id) host->health_max_alarm_id = (uint32_t)now_realtime_sec();
-
- host->health_log.next_log_id = host->health_max_unique_id + 1;
- if (unlikely(!host->health_log.next_alarm_id || host->health_log.next_alarm_id <= host->health_max_alarm_id))
- host->health_log.next_alarm_id = host->health_max_alarm_id + 1;
-
- debug(D_HEALTH, "HEALTH [%s]: loaded file '%s' with %zd new alarm entries, updated %zd alarms, errors %zd entries, duplicate %zd", rrdhost_hostname(host), filename, loaded, updated, errored, duplicate);
- return loaded;
-}
-
-inline void health_alarm_log_load(RRDHOST *host) {
- health_alarm_log_close(host);
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s.old", host->health_log_filename);
- FILE *fp = fopen(filename, "r");
- if(!fp)
- error("HEALTH [%s]: cannot open health file: %s", rrdhost_hostname(host), filename);
- else {
- health_alarm_log_read(host, fp, filename);
- fclose(fp);
- }
-
- host->health_log_entries_written = 0;
- fp = fopen(host->health_log_filename, "r");
- if(!fp)
- error("HEALTH [%s]: cannot open health file: %s", rrdhost_hostname(host), host->health_log_filename);
- else {
- health_alarm_log_read(host, fp, host->health_log_filename);
- fclose(fp);
- }
-}
-
-
// ----------------------------------------------------------------------------
// health alarm log management
diff --git a/health/notifications/README.md b/health/notifications/README.md
index 0bd6c7649..c59fecced 100644
--- a/health/notifications/README.md
+++ b/health/notifications/README.md
@@ -1,7 +1,11 @@
# Alarm notifications
diff --git a/health/notifications/alarm-notify.sh.in b/health/notifications/alarm-notify.sh.in
index 3edf3d083..0090427a0 100755
--- a/health/notifications/alarm-notify.sh.in
+++ b/health/notifications/alarm-notify.sh.in
@@ -18,7 +18,7 @@
# - emails by @ktsaou
# - slack.com notifications by @ktsaou
# - alerta.io notifications by @kattunga
-# - discordapp.com notifications by @lowfive
+# - discord.com notifications by @lowfive
# - pushover.net notifications by @ktsaou
# - pushbullet.com push notifications by Tiago Peralta @tperalta82 #1070
# - telegram.org notifications by @hashworks #1002
@@ -484,53 +484,105 @@ msteams_migration
# filter a recipient based on alarm event severity
filter_recipient_by_criticality() {
- local method="${1}" x="${2}" r s
- shift
-
- r="${x/|*/}" # the recipient
- s="${x/*|/}" # the severity required for notifying this recipient
+ local method="${1}" recipient_arg="${2}"
+ local tracking_dir tracking_file modifier modifiers recipient="${recipient_arg/|*/}"
+ local mod_critical=0 mod_noclear=0 mod_nowarn=0
# no severity filtering for this person
- [ "${r}" = "${s}" ] && return 0
+ [ "${recipient}" = "${recipient_arg}" ] && return 0
+
+ # find out which modifiers are set
+ modifiers="${recipient_arg#*|}"
+ modifiers="${modifiers//|/ }" # replace pipes with spaces
+ modifiers="${modifiers,,}" # lowercase
+ for modifier in ${modifiers}; do
+ case "${modifier}" in
+ critical) mod_critical=1 ;;
+ noclear) mod_noclear=1 ;;
+ nowarn) mod_nowarn=1 ;;
+
+ *)
+ error "SEVERITY FILTERING for ${recipient_arg} VIA ${method}: invalid modifier '${modifier}'."
+ # invalid modifier, always send notification
+ return 0
+ ;;
+ esac
+ done
- # the severity is invalid
- s="${s^^}"
- if [ "${s}" != "CRITICAL" ]; then
- error "SEVERITY FILTERING for ${x} VIA ${method}: invalid severity '${s,,}', only 'critical' is supported."
- return 0
- fi
+ # set status tracking directory/file var
+ tracking_dir="${NETDATA_CACHE_DIR}/alarm-notify/${method}/${recipient}"
+ tracking_file="${tracking_dir}/${alarm_id}"
- # create the status tracking directory for this user
- [ ! -d "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}" ] &&
- mkdir -p "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}"
+ # create the status tracking directory for this user if "critical" modifier is set
+ [ "${mod_critical}" == "1" ] && [ ! -d "${tracking_dir}" ] && mkdir -p "${tracking_dir}"
case "${status}" in
- CRITICAL)
- # make sure he will get future notifications for this alarm too
- touch "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}"
- debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: the alarm is CRITICAL (will now receive next status change)"
- return 0
- ;;
-
- WARNING)
- if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ]; then
- # we do not remove the file, so that he will get future notifications of this alarm
- debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm in the past (will still receive next status change)"
- return 0
- fi
- ;;
+ CRITICAL)
+ # "critical" modifier set, create tracking file for future status changes
+ if [ "${mod_critical}" == "1" ]; then
+ touch "${tracking_file}"
+ debug "SEVERITY FILTERING for ${recipient_arg} VIA ${method}: ALLOW: the alarm is CRITICAL (will now receive next status change)"
+ return 0
+ fi
- *)
- if [ -f "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}" ]; then
- # remove the file, so that he will only receive notifications for CRITICAL states for this alarm
- rm "${NETDATA_CACHE_DIR}/alarm-notify/${method}/${r}/${alarm_id}"
- debug "SEVERITY FILTERING for ${x} VIA ${method}: ALLOW: recipient has been notified for this alarm (will only receive CRITICAL notifications from now on)"
+ # always send CRITICAL notification
+ debug "SEVERITY FILTERING for ${recipient_arg} VIA ${method}: ALLOW: the alarm is CRITICAL"
return 0
- fi
- ;;
+ ;;
+
+ WARNING)
+ # "nowarn" modifier set, block notification
+ if [ "${mod_nowarn}" == "1" ]; then
+ debug "SEVERITY FILTERING for ${recipient_arg} VIA ${method}: BLOCK: recipient should not receive this notification (nowarn modifier set)"
+ return 1
+ fi
+
+ # "critical" modifier not set, send notification
+ if [ "${mod_critical}" == "0" ]; then
+ debug "SEVERITY FILTERING for ${recipient_arg} VIA ${method}: ALLOW: the alarm is WARNING"
+ return 0
+ fi
+
+ # "critical" modifier set, send notification if tracking file exists
+ if [ "${mod_critical}" == "1" ] && [ -f "${tracking_file}" ]; then
+ debug "SEVERITY FILTERING for ${recipient_arg} VIA ${method}: ALLOW: recipient has been notified for this alarm in the past (will still receive next status change)"
+ return 0
+ fi
+ ;;
+
+ CLEAR)
+ # remove tracking file
+ [ -f "${tracking_file}" ] && rm "${tracking_file}"
+
+ # "noclear" modifier set, block notification
+ if [ "${mod_noclear}" == "1" ]; then
+ debug "SEVERITY FILTERING for ${recipient_arg} VIA ${method}: BLOCK: recipient should not receive this notification (noclear modifier set)"
+ return 1
+ fi
+
+ # "critical" modifier not set, send notification
+ if [ "${mod_critical}" == "0" ]; then
+ debug "SEVERITY FILTERING for ${recipient_arg} VIA ${method}: ALLOW: the alarm is CLEAR"
+ return 0
+ fi
+
+ # "critical" modifier set, send notification if tracking file exists
+ if [ "${mod_critical}" == "1" ] && [ -f "${tracking_file}" ]; then
+ debug "SEVERITY FILTERING for ${recipient_arg} VIA ${method}: ALLOW: recipient has been notified for this alarm in the past (no status change will be sent from now)"
+ return 0
+ fi
+ ;;
+
+ *)
+ # "critical" modifier set, send notification if tracking file exists
+ if [ "${mod_critical}" == "1" ] && [ -f "${tracking_file}" ]; then
+ debug "SEVERITY FILTERING for ${recipient_arg} VIA ${method}: ALLOW: recipient has been notified for this alarm in the past (will still receive next status change)"
+ return 0
+ fi
+ ;;
esac
- debug "SEVERITY FILTERING for ${x} VIA ${method}: BLOCK: recipient should not receive this notification"
+ debug "SEVERITY FILTERING for ${recipient_arg} VIA ${method}: BLOCK: recipient should not receive this notification"
return 1
}
@@ -1480,10 +1532,12 @@ send_slack() {
"fields": [
{
"title": "${chart}",
+ "value": "chart",
"short": true
},
{
"title": "${family}",
+ "value": "family",
"short": true
}
],
diff --git a/health/notifications/alerta/README.md b/health/notifications/alerta/README.md
index 9603aae01..5ecf55eea 100644
--- a/health/notifications/alerta/README.md
+++ b/health/notifications/alerta/README.md
@@ -1,7 +1,12 @@
# alerta.io
diff --git a/health/notifications/awssns/README.md b/health/notifications/awssns/README.md
index fc4a665e9..97768399e 100644
--- a/health/notifications/awssns/README.md
+++ b/health/notifications/awssns/README.md
@@ -1,7 +1,12 @@
# Amazon SNS
diff --git a/health/notifications/custom/README.md b/health/notifications/custom/README.md
index edc42623d..df8f88e40 100644
--- a/health/notifications/custom/README.md
+++ b/health/notifications/custom/README.md
@@ -1,6 +1,11 @@
# Custom
@@ -8,8 +13,8 @@ custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notificat
Netdata allows you to send custom notifications to any endpoint you choose.
To configure custom notifications, you will need to customize `health_alarm_notify.conf`. Open the file for editing
-using [`edit-config`](/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) from the [Netdata config
-directory](/docs/configure/nodes.md#the-netdata-config-directory), which is typically at `/etc/netdata`.
+using [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) from the [Netdata config
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory), which is typically at `/etc/netdata`.
You can look at the other senders in `/usr/libexec/netdata/plugins.d/alarm-notify.sh` for examples of how to modify the `custom_sender()` function in `health_alarm_notify.conf`.
diff --git a/health/notifications/discord/README.md b/health/notifications/discord/README.md
index 568d03bc3..b4cbce533 100644
--- a/health/notifications/discord/README.md
+++ b/health/notifications/discord/README.md
@@ -1,9 +1,14 @@
-# Discordapp.com
+# Discord.com
This is what you will get:
@@ -11,7 +16,7 @@ This is what you will get:
You need:
-1. The **incoming webhook URL** as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks). You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).
+1. The **incoming webhook URL** as given by Discord. Create a webhook by following the official [Discord documentation](https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks). You can use the same on all your Netdata servers (or you can have multiple if you like - your decision).
2. One or more Discord channels to post the messages to.
Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system run `/etc/netdata/edit-config health_alarm_notify.conf`), like this:
@@ -27,8 +32,8 @@ Set them in `/etc/netdata/health_alarm_notify.conf` (to edit it on your system r
SEND_DISCORD="YES"
# Create a webhook by following the official documentation -
-# https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks
-DISCORD_WEBHOOK_URL="https://discordapp.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+# https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks
+DISCORD_WEBHOOK_URL="https://discord.com/api/webhooks/XXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
# if a role's recipients are not configured, a notification will be send to
# this discord channel (empty = do not send a notification for unconfigured
@@ -45,6 +50,4 @@ role_recipients_discord[dba]="databases systems"
role_recipients_discord[webmaster]="marketing development"
```
-The keywords `systems`, `databases`, `marketing`, `development` are discordapp.com channels (they should already exist within your discord server).
-
-
+The keywords `systems`, `databases`, `marketing`, `development` are discord.com channels (they should already exist within your discord server).
diff --git a/health/notifications/dynatrace/README.md b/health/notifications/dynatrace/README.md
index 3f8ad85b6..a36683933 100644
--- a/health/notifications/dynatrace/README.md
+++ b/health/notifications/dynatrace/README.md
@@ -1,6 +1,11 @@
# Dynatrace
diff --git a/health/notifications/email/README.md b/health/notifications/email/README.md
index 3dc84dd40..01dfd0e6f 100644
--- a/health/notifications/email/README.md
+++ b/health/notifications/email/README.md
@@ -1,6 +1,11 @@
# Email
diff --git a/health/notifications/flock/README.md b/health/notifications/flock/README.md
index b9e0025b3..175f8a466 100644
--- a/health/notifications/flock/README.md
+++ b/health/notifications/flock/README.md
@@ -1,6 +1,11 @@
# Flock
diff --git a/health/notifications/gotify/README.md b/health/notifications/gotify/README.md
index c253c845c..d01502b65 100644
--- a/health/notifications/gotify/README.md
+++ b/health/notifications/gotify/README.md
@@ -3,6 +3,10 @@ title: "Send notifications to Gotify"
description: "Send alerts to your Gotify instance when an alert gets triggered in Netdata."
sidebar_label: "Gotify"
custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/gotify/README.md
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Setup/Notification/Agent"
+learn_autogeneration_metadata: "{'part_of_cloud': False, 'part_of_agent': True}"
-->
# Send notifications to Gotify
@@ -21,7 +25,7 @@ You can generate a new token in the Gotify Web UI.
To set up Gotify in Netdata:
1. Switch to your [config
-directory](/docs/configure/nodes.md) and edit the file `health_alarm_notify.conf` using the edit config script.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md) and edit the file `health_alarm_notify.conf` using the edit config script.
```bash
./edit-config health_alarm_notify.conf
diff --git a/health/notifications/hangouts/README.md b/health/notifications/hangouts/README.md
index 7554b39cd..45da1bfa0 100644
--- a/health/notifications/hangouts/README.md
+++ b/health/notifications/hangouts/README.md
@@ -2,7 +2,11 @@
title: "Send notifications to Google Hangouts"
description: "Send alerts to Send notifications to Google Hangouts any time an anomaly or performance issue strikes a node in your infrastructure."
sidebar_label: "Google Hangouts"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/hangouts/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/health/notifications/hangouts/README.md"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Setup/Notification/Agent"
+learn_autogeneration_metadata: "{'part_of_cloud': False, 'part_of_agent': True}"
-->
# Send notifications to Google Hangouts
diff --git a/health/notifications/health_alarm_notify.conf b/health/notifications/health_alarm_notify.conf
index 52de86645..4878661aa 100755
--- a/health/notifications/health_alarm_notify.conf
+++ b/health/notifications/health_alarm_notify.conf
@@ -9,7 +9,7 @@
# - messages to your slack team (slack.com),
# - messages to your alerta server (alerta.io),
# - messages to your flock team (flock.com),
-# - messages to your discord guild (discordapp.com),
+# - messages to your discord guild (discord.com),
# - messages to your telegram chat / group chat (telegram.org)
# - sms messages to your cell phone or any sms enabled device (twilio.com)
# - sms messages to your cell phone or any sms enabled device (messagebird.com)
@@ -160,7 +160,11 @@ sendsms=""
# - pagerduty.com (pd) services
# - irc channels
#
-# You can append |critical to limit the notifications to be sent.
+# You can append modifiers to limit the notifications to be sent:
+# |critical - Send critical notifications and following status changes until
+# the alarm is cleared.
+# |nowarn - Do not send warning notifications.
+# |noclear - Do not send clear notifications.
#
# In these examples, the first recipient receives all the alarms
# while the second one receives only notifications for alarms that
@@ -182,6 +186,11 @@ sendsms=""
# irc : " |critical"
# hangouts : "alarms disasters|critical"
#
+# You can append multiple modifiers. In this example, the recipient receives
+# notifications for critical alarms and following status changes except clear
+# notifications.
+# email : "user1@example.com|critical|noclear"
+#
# If a recipient is set to empty string, the default recipient of the given
# notification method (email, pushover, telegram, slack, alerta, etc) will be used.
# To disable a notification, use the recipient called: disabled
@@ -579,7 +588,7 @@ DEFAULT_RECIPIENT_FLOCK=""
#------------------------------------------------------------------------------
-# discord (discordapp.com) global notification options
+# discord (discord.com) global notification options
# multiple recipients can be given like this:
# "CHANNEL1 CHANNEL2 ..."
@@ -588,7 +597,7 @@ DEFAULT_RECIPIENT_FLOCK=""
SEND_DISCORD="YES"
# Create a webhook by following the official documentation -
-# https://support.discordapp.com/hc/en-us/articles/228383668-Intro-to-Webhooks
+# https://support.discord.com/hc/en-us/articles/228383668-Intro-to-Webhooks
DISCORD_WEBHOOK_URL=""
# if a role's recipients are not configured, a notification will be send to
diff --git a/health/notifications/irc/README.md b/health/notifications/irc/README.md
index 21c998d11..a4877f48a 100644
--- a/health/notifications/irc/README.md
+++ b/health/notifications/irc/README.md
@@ -1,6 +1,11 @@
# IRC
diff --git a/health/notifications/kavenegar/README.md b/health/notifications/kavenegar/README.md
index 6123eb901..443fcdba4 100644
--- a/health/notifications/kavenegar/README.md
+++ b/health/notifications/kavenegar/README.md
@@ -1,6 +1,11 @@
# Kavenegar
diff --git a/health/notifications/matrix/README.md b/health/notifications/matrix/README.md
index 8eeecf55d..80e22da37 100644
--- a/health/notifications/matrix/README.md
+++ b/health/notifications/matrix/README.md
@@ -2,7 +2,11 @@
title: "Send Netdata notifications to Matrix network rooms"
description: "Stay aware of warning or critical anomalies by sending health alarms to Matrix network rooms with Netdata's health monitoring watchdog."
sidebar_label: "Matrix"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/matrix/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/health/notifications/matrix/README.md"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Setup/Notification/Agent"
+learn_autogeneration_metadata: "{'part_of_cloud': False, 'part_of_agent': True}"
-->
# Matrix
diff --git a/health/notifications/messagebird/README.md b/health/notifications/messagebird/README.md
index f70e86c68..014301985 100644
--- a/health/notifications/messagebird/README.md
+++ b/health/notifications/messagebird/README.md
@@ -1,6 +1,11 @@
# Messagebird
diff --git a/health/notifications/msteams/README.md b/health/notifications/msteams/README.md
index c9a13bac9..75e652a72 100644
--- a/health/notifications/msteams/README.md
+++ b/health/notifications/msteams/README.md
@@ -1,6 +1,11 @@
# Microsoft Teams
diff --git a/health/notifications/opsgenie/README.md b/health/notifications/opsgenie/README.md
index 640fcd42a..20f14b396 100644
--- a/health/notifications/opsgenie/README.md
+++ b/health/notifications/opsgenie/README.md
@@ -2,7 +2,11 @@
title: "Send notifications to Opsgenie"
description: "Send alerts to your Opsgenie incident response account any time an anomaly or performance issue strikes a node in your infrastructure."
sidebar_label: "Opsgenie"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/opsgenie/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/health/notifications/opsgenie/README.md"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Setup/Notification/Agent"
+learn_autogeneration_metadata: "{'part_of_cloud': False, 'part_of_agent': True}"
-->
# Send notifications to Opsgenie
@@ -13,9 +17,9 @@ incidents.
The first step is to create a [Netdata integration](https://docs.opsgenie.com/docs/api-integration) in the
[Opsgenie](https://www.atlassian.com/software/opsgenie) dashboard. After this, you need to edit
-`health_alarm_notify.conf` on your system, by running the following from your [config
-directory](/docs/configure/nodes.md):
-
+`health_alarm_notify.conf` on your system, by running the following from
+your [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md):
+
```bash
./edit-config health_alarm_notify.conf
```
@@ -56,7 +60,7 @@ message:
2020-09-03 23:07:00: alarm-notify.sh: ERROR: failed to send opsgenie notification for: hades test.chart.test_alarm is CRITICAL, with HTTP error code 401.
```
-You can find more details about the Opsgenie error codes in their [response
-docs](https://docs.opsgenie.com/docs/response).
+You can find more details about the Opsgenie error codes in
+their [response docs](https://docs.opsgenie.com/docs/response).
diff --git a/health/notifications/pagerduty/README.md b/health/notifications/pagerduty/README.md
index 30db6379c..c6190e83f 100644
--- a/health/notifications/pagerduty/README.md
+++ b/health/notifications/pagerduty/README.md
@@ -2,7 +2,11 @@
title: "Send alert notifications to PagerDuty"
description: "Send alerts to your PagerDuty dashboard any time an anomaly or performance issue strikes a node in your infrastructure."
sidebar_label: "PagerDuty"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/pagerduty/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/health/notifications/pagerduty/README.md"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Setup/Notification/Agent"
+learn_autogeneration_metadata: "{'part_of_cloud': False, 'part_of_agent': True}"
-->
# Send alert notifications to PagerDuty
@@ -14,7 +18,7 @@ resolution times.
## What you need to get started
-- An installation of the open-source [Netdata](/docs/get-started.mdx) monitoring agent.
+- An installation of the open-source [Netdata](https://github.com/netdata/netdata/blob/master/docs/get-started.mdx) monitoring agent.
- An installation of the [PagerDuty agent](https://www.pagerduty.com/docs/guides/agent-install-guide/) on the node
running Netdata.
- A PagerDuty `Generic API` service using either the `Events API v2` or `Events API v1`.
@@ -25,8 +29,8 @@ resolution times.
to PagerDuty. Click **Use our API directly** and select either `Events API v2` or `Events API v1`. Once you finish
creating the service, click on the **Integrations** tab to find your **Integration Key**.
-Navigate to the [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory) and use
-[`edit-config`](/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) to open
+Navigate to the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory) and use
+[`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) to open
`health_alarm_notify.conf`.
```bash
@@ -59,5 +63,5 @@ sudo su -s /bin/bash netdata
Aside from the three values set in `health_alarm_notify.conf`, there is no further configuration required to send alert
notifications to PagerDuty.
-To configure individual alarms, read our [alert configuration](/docs/monitor/configure-alarms.md) doc or
-the [health entity reference](/health/REFERENCE.md) doc.
+To configure individual alarms, read our [alert configuration](https://github.com/netdata/netdata/blob/master/docs/monitor/configure-alarms.md) doc or
+the [health entity reference](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md) doc.
diff --git a/health/notifications/prowl/README.md b/health/notifications/prowl/README.md
index dc136820c..8656c1314 100644
--- a/health/notifications/prowl/README.md
+++ b/health/notifications/prowl/README.md
@@ -1,6 +1,11 @@
# Prowl
diff --git a/health/notifications/pushbullet/README.md b/health/notifications/pushbullet/README.md
index 194050bc1..17ed93646 100644
--- a/health/notifications/pushbullet/README.md
+++ b/health/notifications/pushbullet/README.md
@@ -1,6 +1,11 @@
# PushBullet
diff --git a/health/notifications/pushover/README.md b/health/notifications/pushover/README.md
index 1e50f7140..4d5ea5a96 100644
--- a/health/notifications/pushover/README.md
+++ b/health/notifications/pushover/README.md
@@ -1,6 +1,11 @@
# PushOver
diff --git a/health/notifications/rocketchat/README.md b/health/notifications/rocketchat/README.md
index 96d6160b2..0f7867d0f 100644
--- a/health/notifications/rocketchat/README.md
+++ b/health/notifications/rocketchat/README.md
@@ -1,6 +1,11 @@
# Rocket.Chat
diff --git a/health/notifications/slack/README.md b/health/notifications/slack/README.md
index ad36ce34a..ad9a21346 100644
--- a/health/notifications/slack/README.md
+++ b/health/notifications/slack/README.md
@@ -1,6 +1,11 @@
# Slack
diff --git a/health/notifications/smstools3/README.md b/health/notifications/smstools3/README.md
index 6618dfa18..9535c9549 100644
--- a/health/notifications/smstools3/README.md
+++ b/health/notifications/smstools3/README.md
@@ -1,6 +1,11 @@
# SMS Server Tools 3
diff --git a/health/notifications/stackpulse/README.md b/health/notifications/stackpulse/README.md
index c478fd584..25266e822 100644
--- a/health/notifications/stackpulse/README.md
+++ b/health/notifications/stackpulse/README.md
@@ -2,7 +2,11 @@
title: "Send notifications to StackPulse"
description: "Send alerts to your StackPulse Netdata integration any time an anomaly or performance issue strikes a node in your infrastructure."
sidebar_label: "StackPulse"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/health/notifications/stackpulse/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/health/notifications/stackpulse/README.md"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Setup/Notification/Agent"
+learn_autogeneration_metadata: "{'part_of_cloud': False, 'part_of_agent': True}"
-->
# Send notifications to StackPulse
@@ -40,7 +44,7 @@ STACKPULSE_WEBHOOK="https://hooks.stackpulse.io/v1/webhooks/YOUR_UNIQUE_ID"
```
4. Now restart Netdata using `sudo systemctl restart netdata`, or the [appropriate
- method](/docs/configure/start-stop-restart.md) for your system. When your node creates an alarm, you can see the
+ method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system. When your node creates an alarm, you can see the
associated notification on your StackPulse Administration Portal
## React to alarms with playbooks
diff --git a/health/notifications/syslog/README.md b/health/notifications/syslog/README.md
index 8b7863a1a..3527decc4 100644
--- a/health/notifications/syslog/README.md
+++ b/health/notifications/syslog/README.md
@@ -1,6 +1,11 @@
# Syslog
diff --git a/health/notifications/telegram/README.md b/health/notifications/telegram/README.md
index 2a2ed5623..f80a2838d 100644
--- a/health/notifications/telegram/README.md
+++ b/health/notifications/telegram/README.md
@@ -1,6 +1,11 @@
# Telegram
diff --git a/health/notifications/twilio/README.md b/health/notifications/twilio/README.md
index b563c66c1..470b2413b 100644
--- a/health/notifications/twilio/README.md
+++ b/health/notifications/twilio/README.md
@@ -1,6 +1,11 @@
# Twilio
diff --git a/health/notifications/web/README.md b/health/notifications/web/README.md
index 185843af5..b4afd9ea7 100644
--- a/health/notifications/web/README.md
+++ b/health/notifications/web/README.md
@@ -1,9 +1,14 @@
-# Dashboard
+# Pop up notifications
The Netdata dashboard shows HTML notifications, when it is open.
diff --git a/libnetdata/Makefile.am b/libnetdata/Makefile.am
index 1208d16c2..b81d620ba 100644
--- a/libnetdata/Makefile.am
+++ b/libnetdata/Makefile.am
@@ -5,7 +5,7 @@ MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
SUBDIRS = \
adaptive_resortable_list \
- arrayalloc \
+ aral \
avl \
buffer \
clocks \
@@ -15,6 +15,7 @@ SUBDIRS = \
ebpf \
eval \
json \
+ july \
health \
locks \
log \
diff --git a/libnetdata/README.md b/libnetdata/README.md
index fe0690d68..1424faf6c 100644
--- a/libnetdata/README.md
+++ b/libnetdata/README.md
@@ -1,6 +1,10 @@
# libnetdata
diff --git a/libnetdata/adaptive_resortable_list/README.md b/libnetdata/adaptive_resortable_list/README.md
index 9eb942bc8..957578487 100644
--- a/libnetdata/adaptive_resortable_list/README.md
+++ b/libnetdata/adaptive_resortable_list/README.md
@@ -1,6 +1,10 @@
# Adaptive Re-sortable List (ARL)
diff --git a/libnetdata/aral/Makefile.am b/libnetdata/aral/Makefile.am
new file mode 100644
index 000000000..161784b8f
--- /dev/null
+++ b/libnetdata/aral/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/aral/README.md b/libnetdata/aral/README.md
new file mode 100644
index 000000000..3b0f5bbd6
--- /dev/null
+++ b/libnetdata/aral/README.md
@@ -0,0 +1,173 @@
+
+
+# Array Allocator
+
+Come on! Array allocators are embedded in libc! Why do we need such a thing in Netdata?
+
+Well, we have a couple of problems to solve:
+
+1. **Fragmentation** - It is important for Netdata to keep its overall memory footprint as low as possible. libc does an amazing job when the same thread allocates and frees some memory. But it simply cannot do better when memory is allocated and freed randomly across threads, without knowing the specifics of the application.
+2. **Speed** - Especially when allocations and de-allocations happen across threads, the speed penalty is tremendous.
+
+In Netdata we have a few moments that are very tough. Imagine collecting 1 million metrics per second. You have a buffer for each metric and append new points to it. This works beautifully, of course! But then, when the buffers get full, imagine the situation: you suddenly need 1 million buffers, at once!
+
+To solve this problem we first spread out the buffers. So, the first time each metric asks for a buffer, it gets a smaller one. We added logic there to spread them as evenly as possible across time. Solved? Not exactly!
+
+We have 3 tiers for each metric. For the metrics of tier 0 (per second resolution) we have a max buffer of 1024 points and every new metric gets a random size between 3 points and 1024. So they are distributed across time. For 1 million metrics, we have about 1000 buffers being created every second.
+
+But at some point, the end of the minute will come, and suddenly all the metrics will need a new buffer for tier 1 (per minute). Oops! We will spread tier 1 buffers across time too, but the first minute is a tough one. We really need 1 million buffers instantly.
+
+And if that minute happens to also be the beginning of an hour... tier 2 (per hour) kicks in. For that instant we are going to need 2 million buffers instantly.
+
+The problem becomes even bigger when we collect 2, or even 10 million metrics...
+
+To solve it, Netdata uses a special implementation of an array allocator that is tightly integrated with the structures we need.
+
+## Features
+
+1. Malloc, or MMAP modes. File-based MMAP is also supported, to put the data in file-backed shared memory.
+2. Fully asynchronous operations. There are just a couple of points where spin-locks protect a few counters and pointers.
+3. An optional defragmenter that, once enabled, makes free operations slower while it tries to maintain a sorted list of fragments to offer first during allocations. The defragmenter can be enabled / disabled at run time. It can hurt performance on applications with an intense turnaround of allocations, like the Netdata dbengine caches, so it is disabled by default.
+4. Without the defragmenter enabled, ARAL still tries to keep pages full, but the depth of the search is limited to 3 pages (so, a page with a free slot will either become 1st, 2nd, or 3rd). At the same time, during allocations, ARAL will evaluate the first 2 pages to find the fuller of the two, and use it for the new allocation.
+
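+Below is a minimal usage sketch, to show how the features above surface in the API. The `aral_create()` parameter list shown here is an assumption based on how the allocator is configured internally; check `libnetdata/aral/aral.h` for the authoritative prototypes:
+
+```
+#include "libnetdata/libnetdata.h"   // assumed to be built inside the netdata tree
+
+struct my_element {
+    int id;
+    double value;
+};
+
+static void example(void) {
+    // one allocator per fixed-size object type, malloc mode (no mmap file)
+    ARAL *ar = aral_create(
+        "my-elements",               // name used in logs and statistics
+        sizeof(struct my_element),   // element size
+        0,                           // initial page elements (0 = default)
+        65536,                       // max page size, in bytes
+        NULL,                        // optional shared struct aral_statistics
+        NULL, NULL,                  // mmap filename and cache dir (unused here)
+        false,                       // mmap disabled
+        false);                      // lockless disabled (thread-safe)
+
+    struct my_element *e = aral_mallocz(ar);   // get a slot from a page
+    e->value = 1.0;
+    aral_freez(ar, e);                         // return the slot to its page
+
+    aral_destroy(ar);                          // release all pages
+}
+```
+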
+## How it works
+
+Allocations are organized in pages. Pages have a minimum size (a system page, usually 4KB) and a maximum defined for each different kind of object.
+
+Initially every page is free. When an allocation request is made, the free space is split: the first element is reserved and the rest remains free space.
+
+This continues until the page gets full, at which point a new page is allocated and the process is repeated.
+
+Each allocation returned has a pointer appended to it. The pointer points to the page the allocation belongs to.
+
+When a pointer is freed, the page it belongs to is identified, its space is marked free, and it is prepended to a singly linked list that resides in the page itself. So, each page has its own list of free slots to use.
+
+Pages themselves are kept on another linked list. This is a doubly linked list, with the pages that have free space at its beginning and the full pages at its end.
+
+When the defragmenter is enabled, the pages doubly linked list is also kept sorted: the fewer the free slots on a page, the earlier in the list the page will be, except if it does not have any free slots, in which case it goes to the end. So, the defragmenter tries to keep pages full.
+
+When a page is entirely free, it is given back to the system immediately. There is no caching of free pages.
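+
+The per-page free list described above boils down to a few lines of pointer arithmetic. This is a simplified sketch of the split that the allocation path performs (locking, statistics and internal checks stripped out; see `aral_mallocz_internal()` further down in `aral.c` for the real thing):
+
+```
+#include <stddef.h>
+#include <stdint.h>
+
+typedef struct aral_free {
+    size_t size;              // bytes covered by this free fragment
+    struct aral_free *next;   // next free fragment on the same page
+} ARAL_FREE;
+
+// reserve one element_size slot from the first free fragment of a page
+static void *page_take_slot(ARAL_FREE **free_list, size_t element_size) {
+    ARAL_FREE *fr = *free_list;                 // first free fragment
+
+    if(fr->size - element_size < element_size) {
+        // the remainder cannot hold another element: consume the fragment
+        *free_list = fr->next;
+    }
+    else {
+        // split: the remainder becomes the new first fragment
+        ARAL_FREE *rest = (ARAL_FREE *)((uint8_t *)fr + element_size);
+        rest->size = fr->size - element_size;
+        rest->next = fr->next;
+        *free_list = rest;
+    }
+
+    return fr;                                  // the slot handed to the caller
+}
+```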
+
+
+Parallelism is achieved like this:
+
+When some threads are waiting for a page to be allocated, free operations are allowed. If a free operation happens before a new page is allocated, any waiting thread will get the slot that is freed on another page.
+
+Free operations happen in parallel, even for the same page. There is a spin-lock on each page to protect the base pointer of the page's singly linked list of free slots. But this is instant. All preparative work happens lockless; then, to add the free slot to the page, the page spinlock is acquired, the free slot is prepended to the linked list on the page, and the spinlock is released. Such free operations on different pages are totally parallel.
+
+Once the free operation on a page has finished, the pages doubly linked list spinlock is acquired to put the page first on that linked list. If the defragmenter is enabled, the spinlock is retained for a little longer, to find the exact position of the page in the linked list.
+
+During allocations, the reverse order is used. First get the pages doubly linked list spinlock, get the first page and decrement its free slots counter, then release the spinlock. If the first page does not have any free slots, a page allocation is spawned, without any locks acquired. All threads are spinning waiting for a page with free slots, either from the newly allocated one or from a free operation that may happen in parallel.
+
+Once a page is acquired, each thread locks its own page to get the first free slot and releases the lock immediately. This is guaranteed to succeed, because when the page was given to that thread its free slots counter was decremented. So, there is a free slot for every thread that got that page. All preparative work to return a pointer to the caller is done lock free. Allocations on different pages are done in parallel, without any interference between them.
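+
+Putting the two paths together, the lock ordering looks roughly like this (a simplified outline of the description above, not the actual code; see `aral_acquire_a_free_slot()` and the free path in `aral.c`):
+
+```
+// allocation (aral_mallocz):
+//   lock the ARAL pages list
+//     pick the first page that has free elements (or create a new page),
+//     decrement its free counter - this reserves a slot for this thread
+//   unlock the ARAL pages list
+//   lock that page's free-list spinlock
+//     pop / split the first free fragment of the page
+//   unlock the page's free-list spinlock
+//
+// free (aral_freez):
+//   find the page from the pointer stored right after the element
+//   lock the page's free-list spinlock
+//     prepend the slot to the page's free list
+//   unlock the page's free-list spinlock
+//   lock the ARAL pages list
+//     increment the page's free counter and move it towards the front,
+//     or give the page back to the system if it became entirely free
+//   unlock the ARAL pages list
+```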
+
+
+## What to expect
+
+Systems not designed for parallelism achieve their top performance single threaded. The single threaded speed is the baseline. Adding more threads makes them slower.
+
+The baseline for ARAL is the following: the included stress test, running single threaded:
+
+```
+Running stress test of 1 threads, with 10000 elements each, for 5 seconds...
+2023-01-29 17:04:50: netdata INFO : TH[0] : set name of thread 1314983 to TH[0]
+ARAL executes 12.27 M malloc and 12.26 M free operations/s
+ARAL executes 12.29 M malloc and 12.29 M free operations/s
+ARAL executes 12.30 M malloc and 12.30 M free operations/s
+ARAL executes 12.30 M malloc and 12.29 M free operations/s
+ARAL executes 12.29 M malloc and 12.29 M free operations/s
+Waiting the threads to finish...
+2023-01-29 17:04:55: netdata INFO : MAIN : ARAL: did 61487356 malloc, 61487356 free, using 1 threads, in 5003808 usecs
+```
+
+The same test with 2 threads, both threads on the same ARAL of course. As you can see, performance improved:
+
+```
+Running stress test of 2 threads, with 10000 elements each, for 5 seconds...
+2023-01-29 17:05:25: netdata INFO : TH[0] : set name of thread 1315537 to TH[0]
+2023-01-29 17:05:25: netdata INFO : TH[1] : set name of thread 1315538 to TH[1]
+ARAL executes 17.75 M malloc and 17.73 M free operations/s
+ARAL executes 17.93 M malloc and 17.93 M free operations/s
+ARAL executes 18.17 M malloc and 18.18 M free operations/s
+ARAL executes 18.33 M malloc and 18.32 M free operations/s
+ARAL executes 18.36 M malloc and 18.36 M free operations/s
+Waiting the threads to finish...
+2023-01-29 17:05:30: netdata INFO : MAIN : ARAL: did 90976190 malloc, 90976190 free, using 2 threads, in 5029462 usecs
+```
+
+The same test with 4 threads:
+
+```
+Running stress test of 4 threads, with 10000 elements each, for 5 seconds...
+2023-01-29 17:10:12: netdata INFO : TH[0] : set name of thread 1319552 to TH[0]
+2023-01-29 17:10:12: netdata INFO : TH[1] : set name of thread 1319553 to TH[1]
+2023-01-29 17:10:12: netdata INFO : TH[2] : set name of thread 1319554 to TH[2]
+2023-01-29 17:10:12: netdata INFO : TH[3] : set name of thread 1319555 to TH[3]
+ARAL executes 19.95 M malloc and 19.91 M free operations/s
+ARAL executes 20.08 M malloc and 20.08 M free operations/s
+ARAL executes 20.85 M malloc and 20.85 M free operations/s
+ARAL executes 20.84 M malloc and 20.84 M free operations/s
+ARAL executes 21.37 M malloc and 21.37 M free operations/s
+Waiting the threads to finish...
+2023-01-29 17:10:17: netdata INFO : MAIN : ARAL: did 103549747 malloc, 103549747 free, using 4 threads, in 5023325 usecs
+```
+
+The same with 8 threads:
+
+```
+Running stress test of 8 threads, with 10000 elements each, for 5 seconds...
+2023-01-29 17:07:06: netdata INFO : TH[0] : set name of thread 1317608 to TH[0]
+2023-01-29 17:07:06: netdata INFO : TH[1] : set name of thread 1317609 to TH[1]
+2023-01-29 17:07:06: netdata INFO : TH[2] : set name of thread 1317610 to TH[2]
+2023-01-29 17:07:06: netdata INFO : TH[3] : set name of thread 1317611 to TH[3]
+2023-01-29 17:07:06: netdata INFO : TH[4] : set name of thread 1317612 to TH[4]
+2023-01-29 17:07:06: netdata INFO : TH[5] : set name of thread 1317613 to TH[5]
+2023-01-29 17:07:06: netdata INFO : TH[6] : set name of thread 1317614 to TH[6]
+2023-01-29 17:07:06: netdata INFO : TH[7] : set name of thread 1317615 to TH[7]
+ARAL executes 15.73 M malloc and 15.66 M free operations/s
+ARAL executes 13.95 M malloc and 13.94 M free operations/s
+ARAL executes 15.59 M malloc and 15.58 M free operations/s
+ARAL executes 15.49 M malloc and 15.49 M free operations/s
+ARAL executes 16.16 M malloc and 16.16 M free operations/s
+Waiting the threads to finish...
+2023-01-29 17:07:11: netdata INFO : MAIN : ARAL: did 78427750 malloc, 78427750 free, using 8 threads, in 5088591 usecs
+```
+
+The same with 16 threads:
+
+```
+Running stress test of 16 threads, with 10000 elements each, for 5 seconds...
+2023-01-29 17:08:04: netdata INFO : TH[0] : set name of thread 1318663 to TH[0]
+2023-01-29 17:08:04: netdata INFO : TH[1] : set name of thread 1318664 to TH[1]
+2023-01-29 17:08:04: netdata INFO : TH[2] : set name of thread 1318665 to TH[2]
+2023-01-29 17:08:04: netdata INFO : TH[3] : set name of thread 1318666 to TH[3]
+2023-01-29 17:08:04: netdata INFO : TH[4] : set name of thread 1318667 to TH[4]
+2023-01-29 17:08:04: netdata INFO : TH[5] : set name of thread 1318668 to TH[5]
+2023-01-29 17:08:04: netdata INFO : TH[6] : set name of thread 1318669 to TH[6]
+2023-01-29 17:08:04: netdata INFO : TH[7] : set name of thread 1318670 to TH[7]
+2023-01-29 17:08:04: netdata INFO : TH[8] : set name of thread 1318671 to TH[8]
+2023-01-29 17:08:04: netdata INFO : TH[9] : set name of thread 1318672 to TH[9]
+2023-01-29 17:08:04: netdata INFO : TH[10] : set name of thread 1318673 to TH[10]
+2023-01-29 17:08:04: netdata INFO : TH[11] : set name of thread 1318674 to TH[11]
+2023-01-29 17:08:04: netdata INFO : TH[12] : set name of thread 1318675 to TH[12]
+2023-01-29 17:08:04: netdata INFO : TH[13] : set name of thread 1318676 to TH[13]
+2023-01-29 17:08:04: netdata INFO : TH[14] : set name of thread 1318677 to TH[14]
+2023-01-29 17:08:04: netdata INFO : TH[15] : set name of thread 1318678 to TH[15]
+ARAL executes 11.77 M malloc and 11.62 M free operations/s
+ARAL executes 12.80 M malloc and 12.81 M free operations/s
+ARAL executes 13.26 M malloc and 13.25 M free operations/s
+ARAL executes 13.30 M malloc and 13.29 M free operations/s
+ARAL executes 13.23 M malloc and 13.25 M free operations/s
+Waiting the threads to finish...
+2023-01-29 17:08:09: netdata INFO : MAIN : ARAL: did 65302122 malloc, 65302122 free, using 16 threads, in 5066009 usecs
+```
+
+As you can see, the top performance is with 4 threads, almost double the single-threaded speed.
+The 16-thread performance is still better than single threaded, despite the intense concurrency.
diff --git a/libnetdata/aral/aral.c b/libnetdata/aral/aral.c
new file mode 100644
index 000000000..4505ee0f2
--- /dev/null
+++ b/libnetdata/aral/aral.c
@@ -0,0 +1,1081 @@
+#include "../libnetdata.h"
+#include "aral.h"
+
+#ifdef NETDATA_TRACE_ALLOCATIONS
+#define TRACE_ALLOCATIONS_FUNCTION_DEFINITION_PARAMS , const char *file, const char *function, size_t line
+#define TRACE_ALLOCATIONS_FUNCTION_CALL_PARAMS , file, function, line
+#else
+#define TRACE_ALLOCATIONS_FUNCTION_DEFINITION_PARAMS
+#define TRACE_ALLOCATIONS_FUNCTION_CALL_PARAMS
+#endif
+
+#define ARAL_FREE_PAGES_DELTA_TO_REARRANGE_LIST 5
+
+// max file size
+#define ARAL_MAX_PAGE_SIZE_MMAP (1*1024*1024*1024)
+
+// max malloc size
+// optimal at current versions of libc is up to 256k
+// ideal to have the same overhead as libc is 4k
+#define ARAL_MAX_PAGE_SIZE_MALLOC (65*1024)
+
+typedef struct aral_free {
+ size_t size;
+ struct aral_free *next;
+} ARAL_FREE;
+
+typedef struct aral_page {
+ size_t size; // the allocation size of the page
+ const char *filename;
+ uint8_t *data;
+
+ uint32_t free_elements_to_move_first;
+ uint32_t max_elements; // the number of elements that can fit on this page
+
+ struct {
+ uint32_t used_elements; // the number of used elements on this page
+ uint32_t free_elements; // the number of free elements on this page
+
+ struct aral_page *prev; // the prev page on the list
+ struct aral_page *next; // the next page on the list
+ } aral_lock;
+
+ struct {
+ SPINLOCK spinlock;
+ ARAL_FREE *list;
+ } free;
+
+} ARAL_PAGE;
+
+typedef enum {
+ ARAL_LOCKLESS = (1 << 0),
+ ARAL_DEFRAGMENT = (1 << 1),
+ ARAL_ALLOCATED_STATS = (1 << 2),
+} ARAL_OPTIONS;
+
+struct aral {
+ struct {
+ char name[ARAL_MAX_NAME + 1];
+
+ ARAL_OPTIONS options;
+
+ size_t element_size; // calculated to take into account ARAL overheads
+ size_t max_allocation_size; // calculated in bytes
+ size_t max_page_elements; // calculated
+ size_t page_ptr_offset; // calculated
+ size_t natural_page_size; // calculated
+
+ size_t initial_page_elements;
+ size_t requested_element_size;
+ size_t requested_max_page_size;
+
+ struct {
+ bool enabled;
+ const char *filename;
+ char **cache_dir;
+ } mmap;
+ } config;
+
+ struct {
+ SPINLOCK spinlock;
+ size_t file_number; // for mmap
+ struct aral_page *pages; // linked list of pages
+
+ size_t user_malloc_operations;
+ size_t user_free_operations;
+ size_t defragment_operations;
+ size_t defragment_linked_list_traversals;
+ } aral_lock;
+
+ struct {
+ SPINLOCK spinlock;
+ size_t allocating_elements; // currently allocating elements
+ size_t allocation_size; // current / next allocation size
+ } adders;
+
+ struct {
+ size_t allocators; // the number of threads currently trying to allocate memory
+ } atomic;
+
+ struct aral_statistics *stats;
+};
+
+size_t aral_structures_from_stats(struct aral_statistics *stats) {
+ return __atomic_load_n(&stats->structures.allocated_bytes, __ATOMIC_RELAXED);
+}
+
+size_t aral_overhead_from_stats(struct aral_statistics *stats) {
+ return __atomic_load_n(&stats->malloc.allocated_bytes, __ATOMIC_RELAXED) -
+ __atomic_load_n(&stats->malloc.used_bytes, __ATOMIC_RELAXED);
+}
+
+size_t aral_overhead(ARAL *ar) {
+ return aral_overhead_from_stats(ar->stats);
+}
+
+size_t aral_structures(ARAL *ar) {
+ return aral_structures_from_stats(ar->stats);
+}
+
+struct aral_statistics *aral_statistics(ARAL *ar) {
+ return ar->stats;
+}
+
+#define ARAL_NATURAL_ALIGNMENT (sizeof(uintptr_t) * 2)
+static inline size_t natural_alignment(size_t size, size_t alignment) {
+ if(unlikely(size % alignment))
+ size = size + alignment - (size % alignment);
+
+ return size;
+}
+
+// round a page size up to a multiple of the system (natural) page size,
+// then down to a multiple of the element size, so every page holds an
+// integral number of elements
+static size_t aral_align_alloc_size(ARAL *ar, uint64_t size) {
+ if(size % ar->config.natural_page_size)
+ size += ar->config.natural_page_size - (size % ar->config.natural_page_size) ;
+
+ if(size % ar->config.element_size)
+ size -= size % ar->config.element_size;
+
+ return size;
+}
+
+static inline void aral_lock(ARAL *ar) {
+ if(likely(!(ar->config.options & ARAL_LOCKLESS)))
+ netdata_spinlock_lock(&ar->aral_lock.spinlock);
+}
+
+static inline void aral_unlock(ARAL *ar) {
+ if(likely(!(ar->config.options & ARAL_LOCKLESS)))
+ netdata_spinlock_unlock(&ar->aral_lock.spinlock);
+}
+
+static inline void aral_page_free_lock(ARAL *ar, ARAL_PAGE *page) {
+ if(likely(!(ar->config.options & ARAL_LOCKLESS)))
+ netdata_spinlock_lock(&page->free.spinlock);
+}
+
+static inline void aral_page_free_unlock(ARAL *ar, ARAL_PAGE *page) {
+ if(likely(!(ar->config.options & ARAL_LOCKLESS)))
+ netdata_spinlock_unlock(&page->free.spinlock);
+}
+
+static inline bool aral_adders_trylock(ARAL *ar) {
+ if(likely(!(ar->config.options & ARAL_LOCKLESS)))
+ return netdata_spinlock_trylock(&ar->adders.spinlock);
+
+ return true;
+}
+
+static inline void aral_adders_lock(ARAL *ar) {
+ if(likely(!(ar->config.options & ARAL_LOCKLESS)))
+ netdata_spinlock_lock(&ar->adders.spinlock);
+}
+
+static inline void aral_adders_unlock(ARAL *ar) {
+ if(likely(!(ar->config.options & ARAL_LOCKLESS)))
+ netdata_spinlock_unlock(&ar->adders.spinlock);
+}
+
+static void aral_delete_leftover_files(const char *name, const char *path, const char *required_prefix) {
+ DIR *dir = opendir(path);
+ if(!dir) return;
+
+ char full_path[FILENAME_MAX + 1];
+ size_t len = strlen(required_prefix);
+
+ struct dirent *de = NULL;
+ while((de = readdir(dir))) {
+ if(de->d_type == DT_DIR)
+ continue;
+
+ if(strncmp(de->d_name, required_prefix, len) != 0)
+ continue;
+
+ snprintfz(full_path, FILENAME_MAX, "%s/%s", path, de->d_name);
+ info("ARAL: '%s' removing left-over file '%s'", name, full_path);
+ if(unlikely(unlink(full_path) == -1))
+ error("ARAL: '%s' cannot delete file '%s'", name, full_path);
+ }
+
+ closedir(dir);
+}
+
+// ----------------------------------------------------------------------------
+// check a free slot
+
+#ifdef NETDATA_INTERNAL_CHECKS
+static inline void aral_free_validate_internal_check(ARAL *ar, ARAL_FREE *fr) {
+ if(unlikely(fr->size < ar->config.element_size))
+ fatal("ARAL: '%s' free item of size %zu, less than the expected element size %zu",
+ ar->config.name, fr->size, ar->config.element_size);
+
+ if(unlikely(fr->size % ar->config.element_size))
+ fatal("ARAL: '%s' free item of size %zu is not multiple to element size %zu",
+ ar->config.name, fr->size, ar->config.element_size);
+}
+#else
+#define aral_free_validate_internal_check(ar, fr) debug_dummy()
+#endif
+
+// ----------------------------------------------------------------------------
+// find the page a pointer belongs to
+
+#ifdef NETDATA_INTERNAL_CHECKS
+static inline ARAL_PAGE *find_page_with_allocation_internal_check(ARAL *ar, void *ptr) {
+ aral_lock(ar);
+
+ uintptr_t seeking = (uintptr_t)ptr;
+ ARAL_PAGE *page;
+
+ for(page = ar->aral_lock.pages; page ; page = page->aral_lock.next) {
+ if(unlikely(seeking >= (uintptr_t)page->data && seeking < (uintptr_t)page->data + page->size))
+ break;
+ }
+
+ aral_unlock(ar);
+
+ return page;
+}
+#endif
+
+// ----------------------------------------------------------------------------
+// find a page with a free slot (there shouldn't be any)
+
+#ifdef NETDATA_ARAL_INTERNAL_CHECKS
+static inline ARAL_PAGE *find_page_with_free_slots_internal_check___with_aral_lock(ARAL *ar) {
+ ARAL_PAGE *page;
+
+ for(page = ar->aral_lock.pages; page ; page = page->next) {
+ if(page->aral_lock.free_elements)
+ break;
+
+ internal_fatal(page->size - page->aral_lock.used_elements * ar->config.element_size >= ar->config.element_size,
+ "ARAL: '%s' a page is marked full, but it is not!", ar->config.name);
+
+ internal_fatal(page->size < page->aral_lock.used_elements * ar->config.element_size,
+ "ARAL: '%s' a page has been overflown!", ar->config.name);
+ }
+
+ return page;
+}
+#endif
+
+// return the size to use for the next page allocation and, if not yet at the
+// configured maximum, double the stored size for the pages that follow
+size_t aral_next_allocation_size___adders_lock_needed(ARAL *ar) {
+ size_t size = ar->adders.allocation_size;
+
+ if(size > ar->config.max_allocation_size)
+ size = ar->config.max_allocation_size;
+ else
+ ar->adders.allocation_size = aral_align_alloc_size(ar, (uint64_t)ar->adders.allocation_size * 2);
+
+ return size;
+}
+
+static ARAL_PAGE *aral_create_page___no_lock_needed(ARAL *ar, size_t size TRACE_ALLOCATIONS_FUNCTION_DEFINITION_PARAMS) {
+ ARAL_PAGE *page = callocz(1, sizeof(ARAL_PAGE));
+ netdata_spinlock_init(&page->free.spinlock);
+ page->size = size;
+ page->max_elements = page->size / ar->config.element_size;
+ page->aral_lock.free_elements = page->max_elements;
+ page->free_elements_to_move_first = page->max_elements / 4;
+ if(unlikely(page->free_elements_to_move_first < 1))
+ page->free_elements_to_move_first = 1;
+
+ __atomic_add_fetch(&ar->stats->structures.allocations, 1, __ATOMIC_RELAXED);
+ __atomic_add_fetch(&ar->stats->structures.allocated_bytes, sizeof(ARAL_PAGE), __ATOMIC_RELAXED);
+
+ if(unlikely(ar->config.mmap.enabled)) {
+ ar->aral_lock.file_number++;
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/array_alloc.mmap/%s.%zu", *ar->config.mmap.cache_dir, ar->config.mmap.filename, ar->aral_lock.file_number);
+ page->filename = strdupz(filename);
+ page->data = netdata_mmap(page->filename, page->size, MAP_SHARED, 0, false, NULL);
+ if (unlikely(!page->data))
+ fatal("ARAL: '%s' cannot allocate aral buffer of size %zu on filename '%s'",
+ ar->config.name, page->size, page->filename);
+ __atomic_add_fetch(&ar->stats->mmap.allocations, 1, __ATOMIC_RELAXED);
+ __atomic_add_fetch(&ar->stats->mmap.allocated_bytes, page->size, __ATOMIC_RELAXED);
+ }
+ else {
+#ifdef NETDATA_TRACE_ALLOCATIONS
+ page->data = mallocz_int(page->size TRACE_ALLOCATIONS_FUNCTION_CALL_PARAMS);
+#else
+ page->data = mallocz(page->size);
+#endif
+ __atomic_add_fetch(&ar->stats->malloc.allocations, 1, __ATOMIC_RELAXED);
+ __atomic_add_fetch(&ar->stats->malloc.allocated_bytes, page->size, __ATOMIC_RELAXED);
+ }
+
+ // link the free space to its page
+ ARAL_FREE *fr = (ARAL_FREE *)page->data;
+ fr->size = page->size;
+ fr->next = NULL;
+ page->free.list = fr;
+
+ aral_free_validate_internal_check(ar, fr);
+
+ return page;
+}
+
+void aral_del_page___no_lock_needed(ARAL *ar, ARAL_PAGE *page TRACE_ALLOCATIONS_FUNCTION_DEFINITION_PARAMS) {
+
+ // free it
+ if (ar->config.mmap.enabled) {
+ netdata_munmap(page->data, page->size);
+
+        if (unlikely(unlink(page->filename) == -1))
+ error("Cannot delete file '%s'", page->filename);
+
+ freez((void *)page->filename);
+
+ __atomic_sub_fetch(&ar->stats->mmap.allocations, 1, __ATOMIC_RELAXED);
+ __atomic_sub_fetch(&ar->stats->mmap.allocated_bytes, page->size, __ATOMIC_RELAXED);
+ }
+ else {
+#ifdef NETDATA_TRACE_ALLOCATIONS
+ freez_int(page->data TRACE_ALLOCATIONS_FUNCTION_CALL_PARAMS);
+#else
+ freez(page->data);
+#endif
+ __atomic_sub_fetch(&ar->stats->malloc.allocations, 1, __ATOMIC_RELAXED);
+ __atomic_sub_fetch(&ar->stats->malloc.allocated_bytes, page->size, __ATOMIC_RELAXED);
+ }
+
+ freez(page);
+
+ __atomic_sub_fetch(&ar->stats->structures.allocations, 1, __ATOMIC_RELAXED);
+ __atomic_sub_fetch(&ar->stats->structures.allocated_bytes, sizeof(ARAL_PAGE), __ATOMIC_RELAXED);
+}
+
+static inline void aral_insert_not_linked_page_with_free_items_to_proper_position___aral_lock_needed(ARAL *ar, ARAL_PAGE *page) {
+ ARAL_PAGE *first = ar->aral_lock.pages;
+
+ if (page->aral_lock.free_elements <= page->free_elements_to_move_first ||
+ !first ||
+ !first->aral_lock.free_elements ||
+ page->aral_lock.free_elements <= first->aral_lock.free_elements + ARAL_FREE_PAGES_DELTA_TO_REARRANGE_LIST) {
+ // first position
+ DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(ar->aral_lock.pages, page, aral_lock.prev, aral_lock.next);
+ }
+ else {
+ ARAL_PAGE *second = first->aral_lock.next;
+
+ if (!second ||
+ !second->aral_lock.free_elements ||
+ page->aral_lock.free_elements <= second->aral_lock.free_elements)
+ // second position
+ DOUBLE_LINKED_LIST_INSERT_ITEM_AFTER_UNSAFE(ar->aral_lock.pages, first, page, aral_lock.prev, aral_lock.next);
+ else
+ // third position
+ DOUBLE_LINKED_LIST_INSERT_ITEM_AFTER_UNSAFE(ar->aral_lock.pages, second, page, aral_lock.prev, aral_lock.next);
+ }
+}
+
+static inline ARAL_PAGE *aral_acquire_a_free_slot(ARAL *ar TRACE_ALLOCATIONS_FUNCTION_DEFINITION_PARAMS) {
+ __atomic_add_fetch(&ar->atomic.allocators, 1, __ATOMIC_RELAXED);
+ aral_lock(ar);
+
+ ARAL_PAGE *page = ar->aral_lock.pages;
+
+ while(!page || !page->aral_lock.free_elements) {
+#ifdef NETDATA_ARAL_INTERNAL_CHECKS
+ internal_fatal(find_page_with_free_slots_internal_check___with_aral_lock(ar), "ARAL: '%s' found page with free slot!", ar->config.name);
+#endif
+ aral_unlock(ar);
+
+ if(aral_adders_trylock(ar)) {
+ if(ar->adders.allocating_elements < __atomic_load_n(&ar->atomic.allocators, __ATOMIC_RELAXED)) {
+
+ size_t size = aral_next_allocation_size___adders_lock_needed(ar);
+ ar->adders.allocating_elements += size / ar->config.element_size;
+ aral_adders_unlock(ar);
+
+ page = aral_create_page___no_lock_needed(ar, size TRACE_ALLOCATIONS_FUNCTION_CALL_PARAMS);
+
+ aral_lock(ar);
+ aral_insert_not_linked_page_with_free_items_to_proper_position___aral_lock_needed(ar, page);
+
+ aral_adders_lock(ar);
+ ar->adders.allocating_elements -= size / ar->config.element_size;
+ aral_adders_unlock(ar);
+
+ // we have a page that is entirely free
+ // and only aral_lock() is held, so
+ // break the loop
+ break;
+ }
+
+ aral_adders_unlock(ar);
+ }
+
+ aral_lock(ar);
+ page = ar->aral_lock.pages;
+ }
+
+ __atomic_sub_fetch(&ar->atomic.allocators, 1, __ATOMIC_RELAXED);
+
+ // we have a page
+ // and aral locked
+
+ {
+ ARAL_PAGE *first = ar->aral_lock.pages;
+ ARAL_PAGE *second = first->aral_lock.next;
+
+ if (!second ||
+ !second->aral_lock.free_elements ||
+ first->aral_lock.free_elements <= second->aral_lock.free_elements + ARAL_FREE_PAGES_DELTA_TO_REARRANGE_LIST)
+ page = first;
+ else {
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(ar->aral_lock.pages, second, aral_lock.prev, aral_lock.next);
+ DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(ar->aral_lock.pages, second, aral_lock.prev, aral_lock.next);
+ page = second;
+ }
+ }
+
+ internal_fatal(!page || !page->aral_lock.free_elements,
+ "ARAL: '%s' selected page does not have a free slot in it",
+ ar->config.name);
+
+ internal_fatal(page->max_elements != page->aral_lock.used_elements + page->aral_lock.free_elements,
+ "ARAL: '%s' page element counters do not match, "
+ "page says it can handle %zu elements, "
+ "but there are %zu used and %zu free items, "
+ "total %zu items",
+ ar->config.name,
+ (size_t)page->max_elements,
+ (size_t)page->aral_lock.used_elements, (size_t)page->aral_lock.free_elements,
+ (size_t)page->aral_lock.used_elements + (size_t)page->aral_lock.free_elements
+ );
+
+ ar->aral_lock.user_malloc_operations++;
+
+ // acquire a slot for the caller
+ page->aral_lock.used_elements++;
+ if(--page->aral_lock.free_elements == 0) {
+ // we are done with this page
+ // move the full page last
+ // so that pages with free items remain first in the list
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(ar->aral_lock.pages, page, aral_lock.prev, aral_lock.next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(ar->aral_lock.pages, page, aral_lock.prev, aral_lock.next);
+ }
+
+ aral_unlock(ar);
+
+ return page;
+}
+
+void *aral_mallocz_internal(ARAL *ar TRACE_ALLOCATIONS_FUNCTION_DEFINITION_PARAMS) {
+
+ ARAL_PAGE *page = aral_acquire_a_free_slot(ar TRACE_ALLOCATIONS_FUNCTION_CALL_PARAMS);
+
+ aral_page_free_lock(ar, page);
+
+ internal_fatal(!page->free.list,
+ "ARAL: '%s' free item to use, cannot be NULL.", ar->config.name);
+
+ internal_fatal(page->free.list->size < ar->config.element_size,
+ "ARAL: '%s' free item size %zu, cannot be smaller than %zu",
+ ar->config.name, page->free.list->size, ar->config.element_size);
+
+ ARAL_FREE *found_fr = page->free.list;
+
+ // check if the remaining size (after we use this slot) is not enough for another element
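+ // Rough illustration (numbers are only an example): with an element size of
+ // 32 bytes, a 96-byte free entry is split - the caller gets the first 32
+ // bytes and a 64-byte free entry remains - while a 48-byte entry is handed
+ // out whole, because the 16 bytes left over could not hold another element.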
+ if(unlikely(found_fr->size - ar->config.element_size < ar->config.element_size)) {
+ // we can use the entire free space entry
+
+ page->free.list = found_fr->next;
+ }
+ else {
+ // we can split the free space entry
+
+ uint8_t *data = (uint8_t *)found_fr;
+ ARAL_FREE *fr = (ARAL_FREE *)&data[ar->config.element_size];
+ fr->size = found_fr->size - ar->config.element_size;
+
+ // link the free slot first in the page
+ fr->next = found_fr->next;
+ page->free.list = fr;
+
+ aral_free_validate_internal_check(ar, fr);
+ }
+
+ aral_page_free_unlock(ar, page);
+
+ // put the page pointer after the element
+ uint8_t *data = (uint8_t *)found_fr;
+ ARAL_PAGE **page_ptr = (ARAL_PAGE **)&data[ar->config.page_ptr_offset];
+ *page_ptr = page;
+
+ if(unlikely(ar->config.mmap.enabled))
+ __atomic_add_fetch(&ar->stats->mmap.used_bytes, ar->config.element_size, __ATOMIC_RELAXED);
+ else
+ __atomic_add_fetch(&ar->stats->malloc.used_bytes, ar->config.element_size, __ATOMIC_RELAXED);
+
+ return (void *)found_fr;
+}
+
+static inline ARAL_PAGE *aral_ptr_to_page___must_NOT_have_aral_lock(ARAL *ar, void *ptr) {
+ // given a data pointer we returned before,
+ // find the ARAL_PAGE it belongs to
+
+ uint8_t *data = (uint8_t *)ptr;
+ ARAL_PAGE **page_ptr = (ARAL_PAGE **)&data[ar->config.page_ptr_offset];
+ ARAL_PAGE *page = *page_ptr;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ // make it NULL so that we will fail on double free
+ // do not enable this on production, because the MMAP file
+ // will need to be saved again!
+ *page_ptr = NULL;
+#endif
+
+#ifdef NETDATA_ARAL_INTERNAL_CHECKS
+ {
+ // find the page this pointer belongs to
+ ARAL_PAGE *page2 = find_page_with_allocation_internal_check(ar, ptr);
+
+ internal_fatal(page != page2,
+ "ARAL: '%s' page pointers do not match!",
+ ar->config.name);
+
+ internal_fatal(!page2,
+ "ARAL: '%s' free of pointer %p is not in ARAL address space.",
+ ar->config.name, ptr);
+ }
+#endif
+
+ internal_fatal(!page,
+ "ARAL: '%s' possible corruption or double free of pointer %p",
+ ar->config.name, ptr);
+
+ return page;
+}
+
+static void aral_defrag_sorted_page_position___aral_lock_needed(ARAL *ar, ARAL_PAGE *page) {
+ ARAL_PAGE *tmp;
+
+ int action = 0; (void)action;
+ size_t move_later = 0, move_earlier = 0;
+
+ for(tmp = page->aral_lock.next ;
+ tmp && tmp->aral_lock.free_elements && tmp->aral_lock.free_elements < page->aral_lock.free_elements ;
+ tmp = tmp->aral_lock.next)
+ move_later++;
+
+ if(!tmp && page->aral_lock.next) {
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(ar->aral_lock.pages, page, aral_lock.prev, aral_lock.next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(ar->aral_lock.pages, page, aral_lock.prev, aral_lock.next);
+ action = 1;
+ }
+ else if(tmp != page->aral_lock.next) {
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(ar->aral_lock.pages, page, aral_lock.prev, aral_lock.next);
+ DOUBLE_LINKED_LIST_INSERT_ITEM_BEFORE_UNSAFE(ar->aral_lock.pages, tmp, page, aral_lock.prev, aral_lock.next);
+ action = 2;
+ }
+ else {
+ for(tmp = (page == ar->aral_lock.pages) ? NULL : page->aral_lock.prev ;
+ tmp && (!tmp->aral_lock.free_elements || tmp->aral_lock.free_elements > page->aral_lock.free_elements);
+ tmp = (tmp == ar->aral_lock.pages) ? NULL : tmp->aral_lock.prev)
+ move_earlier++;
+
+ if(!tmp) {
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(ar->aral_lock.pages, page, aral_lock.prev, aral_lock.next);
+ DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(ar->aral_lock.pages, page, aral_lock.prev, aral_lock.next);
+ action = 3;
+ }
+ else if(tmp != page->aral_lock.prev){
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(ar->aral_lock.pages, page, aral_lock.prev, aral_lock.next);
+ DOUBLE_LINKED_LIST_INSERT_ITEM_AFTER_UNSAFE(ar->aral_lock.pages, tmp, page, aral_lock.prev, aral_lock.next);
+ action = 4;
+ }
+ }
+
+ ar->aral_lock.defragment_operations++;
+ ar->aral_lock.defragment_linked_list_traversals += move_earlier + move_later;
+
+ internal_fatal(page->aral_lock.next && page->aral_lock.next->aral_lock.free_elements && page->aral_lock.next->aral_lock.free_elements < page->aral_lock.free_elements,
+ "ARAL: '%s' item should be later in the list", ar->config.name);
+
+ internal_fatal(page != ar->aral_lock.pages && (!page->aral_lock.prev->aral_lock.free_elements || page->aral_lock.prev->aral_lock.free_elements > page->aral_lock.free_elements),
+ "ARAL: '%s' item should be earlier in the list", ar->config.name);
+}
+
+static inline void aral_move_page_with_free_list___aral_lock_needed(ARAL *ar, ARAL_PAGE *page) {
+ if(unlikely(page == ar->aral_lock.pages))
+ // we are the first already
+ return;
+
+ if(likely(!(ar->config.options & ARAL_DEFRAGMENT))) {
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(ar->aral_lock.pages, page, aral_lock.prev, aral_lock.next);
+ aral_insert_not_linked_page_with_free_items_to_proper_position___aral_lock_needed(ar, page);
+ }
+ else
+ aral_defrag_sorted_page_position___aral_lock_needed(ar, page);
+}
+
+void aral_freez_internal(ARAL *ar, void *ptr TRACE_ALLOCATIONS_FUNCTION_DEFINITION_PARAMS) {
+ if(unlikely(!ptr)) return;
+
+ // get the page pointer
+ ARAL_PAGE *page = aral_ptr_to_page___must_NOT_have_aral_lock(ar, ptr);
+
+ if(unlikely(ar->config.mmap.enabled))
+ __atomic_sub_fetch(&ar->stats->mmap.used_bytes, ar->config.element_size, __ATOMIC_RELAXED);
+ else
+ __atomic_sub_fetch(&ar->stats->malloc.used_bytes, ar->config.element_size, __ATOMIC_RELAXED);
+
+ // make this element available
+ ARAL_FREE *fr = (ARAL_FREE *)ptr;
+ fr->size = ar->config.element_size;
+
+ aral_page_free_lock(ar, page);
+ fr->next = page->free.list;
+ page->free.list = fr;
+ aral_page_free_unlock(ar, page);
+
+ aral_lock(ar);
+
+ internal_fatal(!page->aral_lock.used_elements,
+ "ARAL: '%s' pointer %p is inside a page without any active allocations.",
+ ar->config.name, ptr);
+
+ internal_fatal(page->max_elements != page->aral_lock.used_elements + page->aral_lock.free_elements,
+ "ARAL: '%s' page element counters do not match, "
+ "page says it can handle %zu elements, "
+ "but there are %zu used and %zu free items, "
+ "total %zu items",
+ ar->config.name,
+ (size_t)page->max_elements,
+ (size_t)page->aral_lock.used_elements, (size_t)page->aral_lock.free_elements,
+ (size_t)page->aral_lock.used_elements + (size_t)page->aral_lock.free_elements
+ );
+
+ page->aral_lock.used_elements--;
+ page->aral_lock.free_elements++;
+
+ ar->aral_lock.user_free_operations++;
+
+ // if the page is empty, release it
+ if(unlikely(!page->aral_lock.used_elements)) {
+ bool is_this_page_the_last_one = ar->aral_lock.pages == page && !page->aral_lock.next;
+
+ if(!is_this_page_the_last_one)
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(ar->aral_lock.pages, page, aral_lock.prev, aral_lock.next);
+
+ aral_unlock(ar);
+
+ if(!is_this_page_the_last_one)
+ aral_del_page___no_lock_needed(ar, page TRACE_ALLOCATIONS_FUNCTION_CALL_PARAMS);
+ }
+ else {
+ aral_move_page_with_free_list___aral_lock_needed(ar, page);
+ aral_unlock(ar);
+ }
+}
+
+void aral_destroy_internal(ARAL *ar TRACE_ALLOCATIONS_FUNCTION_DEFINITION_PARAMS) {
+ aral_lock(ar);
+
+ ARAL_PAGE *page;
+ while((page = ar->aral_lock.pages)) {
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(ar->aral_lock.pages, page, aral_lock.prev, aral_lock.next);
+ aral_del_page___no_lock_needed(ar, page TRACE_ALLOCATIONS_FUNCTION_CALL_PARAMS);
+ }
+
+ aral_unlock(ar);
+
+ if(ar->config.options & ARAL_ALLOCATED_STATS)
+ freez(ar->stats);
+
+ freez(ar);
+}
+
+size_t aral_element_size(ARAL *ar) {
+ return ar->config.requested_element_size;
+}
+
+ARAL *aral_create(const char *name, size_t element_size, size_t initial_page_elements, size_t max_page_size,
+ struct aral_statistics *stats, const char *filename, char **cache_dir, bool mmap, bool lockless) {
+ ARAL *ar = callocz(1, sizeof(ARAL));
+ ar->config.options = (lockless) ? ARAL_LOCKLESS : 0;
+ ar->config.requested_element_size = element_size;
+ ar->config.initial_page_elements = initial_page_elements;
+ ar->config.requested_max_page_size = max_page_size;
+ ar->config.mmap.filename = filename;
+ ar->config.mmap.cache_dir = cache_dir;
+ ar->config.mmap.enabled = mmap;
+ strncpyz(ar->config.name, name, ARAL_MAX_NAME);
+ netdata_spinlock_init(&ar->aral_lock.spinlock);
+
+ if(stats) {
+ ar->stats = stats;
+ ar->config.options &= ~ARAL_ALLOCATED_STATS;
+ }
+ else {
+ ar->stats = callocz(1, sizeof(struct aral_statistics));
+ ar->config.options |= ARAL_ALLOCATED_STATS;
+ }
+
+ long int page_size = sysconf(_SC_PAGE_SIZE);
+ if (unlikely(page_size == -1))
+ ar->config.natural_page_size = 4096;
+ else
+ ar->config.natural_page_size = page_size;
+
+ // we need to add a page pointer after the element
+ // so, first align the element size to the pointer size
+ ar->config.element_size = natural_alignment(ar->config.requested_element_size, sizeof(uintptr_t));
+
+ // then add the size of a pointer to it
+ ar->config.element_size += sizeof(uintptr_t);
+
+ // make sure it is at least what we need for an ARAL_FREE slot
+ if (ar->config.element_size < sizeof(ARAL_FREE))
+ ar->config.element_size = sizeof(ARAL_FREE);
+
+ // and finally align it to the natural alignment
+ ar->config.element_size = natural_alignment(ar->config.element_size, ARAL_NATURAL_ALIGNMENT);
+
+ ar->config.max_page_elements = ar->config.requested_max_page_size / ar->config.element_size;
+
+ // we write the page pointer just after each element
+ ar->config.page_ptr_offset = ar->config.element_size - sizeof(uintptr_t);
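+ // Worked example (illustrative only - it assumes 64-bit pointers, a 16-byte
+ // ARAL_NATURAL_ALIGNMENT and an ARAL_FREE holding just a size and a next
+ // pointer): a requested element size of 20 becomes 24 after pointer
+ // alignment, 32 after appending the page pointer, and stays 32 after the
+ // ARAL_FREE and natural alignment checks, so page_ptr_offset is 24 and the
+ // page pointer lives in the last 8 bytes of every 32-byte slot.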
+
+ if(ar->config.requested_element_size + sizeof(uintptr_t) > ar->config.element_size)
+ fatal("ARAL: '%s' failed to calculate properly page_ptr_offset: "
+ "element size %zu, sizeof(uintptr_t) %zu, natural alignment %zu, "
+ "final element size %zu, page_ptr_offset %zu",
+ ar->config.name, ar->config.requested_element_size, sizeof(uintptr_t), ARAL_NATURAL_ALIGNMENT,
+ ar->config.element_size, ar->config.page_ptr_offset);
+
+ //info("ARAL: element size %zu, sizeof(uintptr_t) %zu, natural alignment %zu, final element size %zu, page_ptr_offset %zu",
+ // ar->element_size, sizeof(uintptr_t), ARAL_NATURAL_ALIGNMENT, ar->internal.element_size, ar->internal.page_ptr_offset);
+
+
+ if (ar->config.initial_page_elements < 2)
+ ar->config.initial_page_elements = 2;
+
+ if(ar->config.mmap.enabled && (!ar->config.mmap.cache_dir || !*ar->config.mmap.cache_dir)) {
+ error("ARAL: '%s' mmap cache directory is not configured properly, disabling mmap.", ar->config.name);
+ ar->config.mmap.enabled = false;
+ internal_fatal(true, "ARAL: '%s' mmap cache directory is not configured properly", ar->config.name);
+ }
+
+ uint64_t max_alloc_size;
+ if(!ar->config.max_page_elements)
+ max_alloc_size = ar->config.mmap.enabled ? ARAL_MAX_PAGE_SIZE_MMAP : ARAL_MAX_PAGE_SIZE_MALLOC;
+ else
+ max_alloc_size = ar->config.max_page_elements * ar->config.element_size;
+
+ ar->config.max_allocation_size = aral_align_alloc_size(ar, max_alloc_size);
+ ar->adders.allocation_size = aral_align_alloc_size(ar, (uint64_t)ar->config.element_size * ar->config.initial_page_elements);
+ ar->aral_lock.pages = NULL;
+ ar->aral_lock.file_number = 0;
+
+ if(ar->config.mmap.enabled) {
+ char directory_name[FILENAME_MAX + 1];
+ snprintfz(directory_name, FILENAME_MAX, "%s/array_alloc.mmap", *ar->config.mmap.cache_dir);
+ int r = mkdir(directory_name, 0775);
+ if (r != 0 && errno != EEXIST)
+ fatal("Cannot create directory '%s'", directory_name);
+
+ char file[FILENAME_MAX + 1];
+ snprintfz(file, FILENAME_MAX, "%s.", ar->config.mmap.filename);
+ aral_delete_leftover_files(ar->config.name, directory_name, file);
+ }
+
+ internal_error(true,
+ "ARAL: '%s' "
+ "element size %zu (requested %zu bytes), "
+ "min elements per page %zu (requested %zu), "
+ "max elements per page %zu, "
+ "max page size %zu bytes (requested %zu) "
+ , ar->config.name
+ , ar->config.element_size, ar->config.requested_element_size
+ , ar->adders.allocation_size / ar->config.element_size, ar->config.initial_page_elements
+ , ar->config.max_allocation_size / ar->config.element_size
+ , ar->config.max_allocation_size, ar->config.requested_max_page_size
+ );
+
+ __atomic_add_fetch(&ar->stats->structures.allocations, 1, __ATOMIC_RELAXED);
+ __atomic_add_fetch(&ar->stats->structures.allocated_bytes, sizeof(ARAL), __ATOMIC_RELAXED);
+ return ar;
+}
+
+// ----------------------------------------------------------------------------
+// global aral caching
+
+#define ARAL_BY_SIZE_MAX_SIZE 1024
+
+struct aral_by_size {
+ ARAL *ar;
+ int32_t refcount;
+};
+
+struct {
+ struct aral_statistics shared_statistics;
+ SPINLOCK spinlock;
+ struct aral_by_size array[ARAL_BY_SIZE_MAX_SIZE + 1];
+} aral_by_size_globals = {};
+
+struct aral_statistics *aral_by_size_statistics(void) {
+ return &aral_by_size_globals.shared_statistics;
+}
+
+size_t aral_by_size_structures(void) {
+ return aral_structures_from_stats(&aral_by_size_globals.shared_statistics);
+}
+
+size_t aral_by_size_overhead(void) {
+ return aral_overhead_from_stats(&aral_by_size_globals.shared_statistics);
+}
+
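+// aral_by_size_acquire() hands out one shared, reference-counted ARAL per
+// element size (for sizes up to ARAL_BY_SIZE_MAX_SIZE), so independent
+// callers asking for the same size share pages and statistics. A usage
+// sketch (the struct name is illustrative):
+//
+//     ARAL *ar = aral_by_size_acquire(sizeof(struct my_item));
+//     struct my_item *it = aral_mallocz(ar);
+//     // ... use it ...
+//     aral_freez(ar, it);
+//     aral_by_size_release(ar);
+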
+ARAL *aral_by_size_acquire(size_t size) {
+ netdata_spinlock_lock(&aral_by_size_globals.spinlock);
+
+ ARAL *ar = NULL;
+
+ if(size <= ARAL_BY_SIZE_MAX_SIZE && aral_by_size_globals.array[size].ar) {
+ ar = aral_by_size_globals.array[size].ar;
+ aral_by_size_globals.array[size].refcount++;
+
+ internal_fatal(aral_element_size(ar) != size, "ARAL BY SIZE: aral has size %zu but we want %zu",
+ aral_element_size(ar), size);
+ }
+
+ if(!ar) {
+ char buf[30 + 1];
+ snprintf(buf, 30, "size-%zu", size);
+ ar = aral_create(buf,
+ size,
+ 0,
+ 65536 * ((size / 150) + 1),
+ &aral_by_size_globals.shared_statistics,
+ NULL, NULL, false, false);
+
+ if(size <= ARAL_BY_SIZE_MAX_SIZE) {
+ aral_by_size_globals.array[size].ar = ar;
+ aral_by_size_globals.array[size].refcount = 1;
+ }
+ }
+
+ netdata_spinlock_unlock(&aral_by_size_globals.spinlock);
+
+ return ar;
+}
+
+void aral_by_size_release(ARAL *ar) {
+ size_t size = aral_element_size(ar);
+
+ if(size <= ARAL_BY_SIZE_MAX_SIZE) {
+ netdata_spinlock_lock(&aral_by_size_globals.spinlock);
+
+ internal_fatal(aral_by_size_globals.array[size].ar != ar,
+ "ARAL BY SIZE: aral pointers do not match");
+
+ if(aral_by_size_globals.array[size].refcount <= 0)
+ fatal("ARAL BY SIZE: double release detected");
+
+ aral_by_size_globals.array[size].refcount--;
+ if(!aral_by_size_globals.array[size].refcount) {
+ aral_destroy(aral_by_size_globals.array[size].ar);
+ aral_by_size_globals.array[size].ar = NULL;
+ }
+
+ netdata_spinlock_unlock(&aral_by_size_globals.spinlock);
+ }
+ else
+ aral_destroy(ar);
+}
+
+// ----------------------------------------------------------------------------
+// unittest
+
+struct aral_unittest_config {
+ bool single_threaded;
+ bool stop;
+ ARAL *ar;
+ size_t elements;
+ size_t threads;
+ int errors;
+};
+
+static void *aral_test_thread(void *ptr) {
+ struct aral_unittest_config *auc = ptr;
+ ARAL *ar = auc->ar;
+ size_t elements = auc->elements;
+
+ void **pointers = callocz(elements, sizeof(void *));
+
+ do {
+ for (size_t i = 0; i < elements; i++) {
+ pointers[i] = aral_mallocz(ar);
+ }
+
+ for (size_t div = 5; div >= 2; div--) {
+ for (size_t i = 0; i < elements / div; i++) {
+ aral_freez(ar, pointers[i]);
+ pointers[i] = NULL;
+ }
+
+ for (size_t i = 0; i < elements / div; i++) {
+ pointers[i] = aral_mallocz(ar);
+ }
+ }
+
+ for (size_t step = 50; step >= 10; step -= 10) {
+ for (size_t i = 0; i < elements; i += step) {
+ aral_freez(ar, pointers[i]);
+ pointers[i] = NULL;
+ }
+
+ for (size_t i = 0; i < elements; i += step) {
+ pointers[i] = aral_mallocz(ar);
+ }
+ }
+
+ for (size_t i = 0; i < elements; i++) {
+ aral_freez(ar, pointers[i]);
+ pointers[i] = NULL;
+ }
+
+ if (auc->single_threaded && ar->aral_lock.pages && ar->aral_lock.pages->aral_lock.used_elements) {
+ fprintf(stderr, "\n\nARAL leftovers detected (1)\n\n");
+ __atomic_add_fetch(&auc->errors, 1, __ATOMIC_RELAXED);
+ }
+
+ if(!auc->single_threaded && __atomic_load_n(&auc->stop, __ATOMIC_RELAXED))
+ break;
+
+ for (size_t i = 0; i < elements; i++) {
+ pointers[i] = aral_mallocz(ar);
+ }
+
+ size_t increment = elements / ar->config.max_page_elements;
+ if(!increment) increment = 1; // avoid an endless loop when elements < max_page_elements
+ for (size_t all = increment; all <= elements / 2; all += increment) {
+
+ size_t to_free = (all % ar->config.max_page_elements) + 1;
+ size_t step = elements / to_free;
+ if(!step) step = 1;
+
+ // fprintf(stderr, "all %zu, to free %zu, step %zu\n", all, to_free, step);
+
+ size_t free_list[to_free];
+ for (size_t i = 0; i < to_free; i++) {
+ size_t pos = step * i;
+ aral_freez(ar, pointers[pos]);
+ pointers[pos] = NULL;
+ free_list[i] = pos;
+ }
+
+ for (size_t i = 0; i < to_free; i++) {
+ size_t pos = free_list[i];
+ pointers[pos] = aral_mallocz(ar);
+ }
+ }
+
+ for (size_t i = 0; i < elements; i++) {
+ aral_freez(ar, pointers[i]);
+ pointers[i] = NULL;
+ }
+
+ if (auc->single_threaded && ar->aral_lock.pages && ar->aral_lock.pages->aral_lock.used_elements) {
+ fprintf(stderr, "\n\nARAL leftovers detected (2)\n\n");
+ __atomic_add_fetch(&auc->errors, 1, __ATOMIC_RELAXED);
+ }
+
+ } while(!auc->single_threaded && !__atomic_load_n(&auc->stop, __ATOMIC_RELAXED));
+
+ freez(pointers);
+
+ return ptr;
+}
+
+int aral_stress_test(size_t threads, size_t elements, size_t seconds) {
+ fprintf(stderr, "Running stress test of %zu threads, with %zu elements each, for %zu seconds...\n",
+ threads, elements, seconds);
+
+ struct aral_unittest_config auc = {
+ .single_threaded = false,
+ .threads = threads,
+ .ar = aral_create("aral-stress-test", 20, 0, 8192, NULL, "aral-stress-test", NULL, false, false),
+ .elements = elements,
+ .errors = 0,
+ };
+
+ usec_t started_ut = now_monotonic_usec();
+ netdata_thread_t thread_ptrs[threads];
+
+ for(size_t i = 0; i < threads ; i++) {
+ char tag[NETDATA_THREAD_NAME_MAX + 1];
+ snprintfz(tag, NETDATA_THREAD_NAME_MAX, "TH[%zu]", i);
+ netdata_thread_create(&thread_ptrs[i], tag,
+ NETDATA_THREAD_OPTION_JOINABLE | NETDATA_THREAD_OPTION_DONT_LOG,
+ aral_test_thread, &auc);
+ }
+
+ size_t malloc_done = 0;
+ size_t free_done = 0;
+ size_t countdown = seconds;
+ while(countdown-- > 0) {
+ sleep_usec(1 * USEC_PER_SEC);
+ aral_lock(auc.ar);
+ size_t m = auc.ar->aral_lock.user_malloc_operations;
+ size_t f = auc.ar->aral_lock.user_free_operations;
+ aral_unlock(auc.ar);
+ fprintf(stderr, "ARAL executes %0.2f M malloc and %0.2f M free operations/s\n",
+ (double)(m - malloc_done) / 1000000.0, (double)(f - free_done) / 1000000.0);
+ malloc_done = m;
+ free_done = f;
+ }
+
+ __atomic_store_n(&auc.stop, true, __ATOMIC_RELAXED);
+
+// fprintf(stderr, "Cancelling the threads...\n");
+// for(size_t i = 0; i < threads ; i++) {
+// netdata_thread_cancel(thread_ptrs[i]);
+// }
+
+ fprintf(stderr, "Waiting for the threads to finish...\n");
+ for(size_t i = 0; i < threads ; i++) {
+ netdata_thread_join(thread_ptrs[i], NULL);
+ }
+
+ usec_t ended_ut = now_monotonic_usec();
+
+ if (auc.ar->aral_lock.pages && auc.ar->aral_lock.pages->aral_lock.used_elements) {
+ fprintf(stderr, "\n\nARAL leftovers detected (3)\n\n");
+ __atomic_add_fetch(&auc.errors, 1, __ATOMIC_RELAXED);
+ }
+
+ info("ARAL: did %zu malloc, %zu free, "
+ "using %zu threads, in %llu usecs",
+ auc.ar->aral_lock.user_malloc_operations,
+ auc.ar->aral_lock.user_free_operations,
+ threads,
+ ended_ut - started_ut);
+
+ aral_destroy(auc.ar);
+
+ return auc.errors;
+}
+
+int aral_unittest(size_t elements) {
+ char *cache_dir = "/tmp/";
+
+ struct aral_unittest_config auc = {
+ .single_threaded = true,
+ .threads = 1,
+ .ar = aral_create("aral-test", 20, 0, 8192, NULL, "aral-test", &cache_dir, false, false),
+ .elements = elements,
+ .errors = 0,
+ };
+
+ aral_test_thread(&auc);
+
+ aral_destroy(auc.ar);
+
+ int errors = aral_stress_test(2, elements, 5);
+
+ return auc.errors + errors;
+}
diff --git a/libnetdata/aral/aral.h b/libnetdata/aral/aral.h
new file mode 100644
index 000000000..96f5a9c44
--- /dev/null
+++ b/libnetdata/aral/aral.h
@@ -0,0 +1,69 @@
+
+#ifndef ARAL_H
+#define ARAL_H 1
+
+#include "../libnetdata.h"
+
+#define ARAL_MAX_NAME 23
+
+typedef struct aral ARAL;
+
+struct aral_statistics {
+ struct {
+ size_t allocations;
+ size_t allocated_bytes;
+ } structures;
+
+ struct {
+ size_t allocations;
+ size_t allocated_bytes;
+ size_t used_bytes;
+ } malloc;
+
+ struct {
+ size_t allocations;
+ size_t allocated_bytes;
+ size_t used_bytes;
+ } mmap;
+};
+
+ARAL *aral_create(const char *name, size_t element_size, size_t initial_page_elements, size_t max_page_size,
+ struct aral_statistics *stats, const char *filename, char **cache_dir, bool mmap, bool lockless);
+size_t aral_element_size(ARAL *ar);
+size_t aral_overhead(ARAL *ar);
+size_t aral_structures(ARAL *ar);
+struct aral_statistics *aral_statistics(ARAL *ar);
+size_t aral_structures_from_stats(struct aral_statistics *stats);
+size_t aral_overhead_from_stats(struct aral_statistics *stats);
+
+ARAL *aral_by_size_acquire(size_t size);
+void aral_by_size_release(ARAL *ar);
+size_t aral_by_size_structures(void);
+size_t aral_by_size_overhead(void);
+struct aral_statistics *aral_by_size_statistics(void);
+
+int aral_unittest(size_t elements);
+
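+// Typical usage, sketched after the unit tests in aral.c (the name, the
+// element type and the page size below are illustrative, not required):
+//
+//     ARAL *ar = aral_create("my-objects", sizeof(struct my_object),
+//                            0, 65536, NULL, NULL, NULL, false, false);
+//     struct my_object *o = aral_mallocz(ar);
+//     // ... use o ...
+//     aral_freez(ar, o);
+//     aral_destroy(ar);
+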
+#ifdef NETDATA_TRACE_ALLOCATIONS
+
+#define aral_mallocz(ar) aral_mallocz_internal(ar, __FILE__, __FUNCTION__, __LINE__)
+#define aral_freez(ar, ptr) aral_freez_internal(ar, ptr, __FILE__, __FUNCTION__, __LINE__)
+#define aral_destroy(ar) aral_destroy_internal(ar, __FILE__, __FUNCTION__, __LINE__)
+
+void *aral_mallocz_internal(ARAL *ar, const char *file, const char *function, size_t line);
+void aral_freez_internal(ARAL *ar, void *ptr, const char *file, const char *function, size_t line);
+void aral_destroy_internal(ARAL *ar, const char *file, const char *function, size_t line);
+
+#else // NETDATA_TRACE_ALLOCATIONS
+
+#define aral_mallocz(ar) aral_mallocz_internal(ar)
+#define aral_freez(ar, ptr) aral_freez_internal(ar, ptr)
+#define aral_destroy(ar) aral_destroy_internal(ar)
+
+void *aral_mallocz_internal(ARAL *ar);
+void aral_freez_internal(ARAL *ar, void *ptr);
+void aral_destroy_internal(ARAL *ar);
+
+#endif // NETDATA_TRACE_ALLOCATIONS
+
+#endif // ARAL_H
diff --git a/libnetdata/arrayalloc/Makefile.am b/libnetdata/arrayalloc/Makefile.am
deleted file mode 100644
index 161784b8f..000000000
--- a/libnetdata/arrayalloc/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
diff --git a/libnetdata/arrayalloc/README.md b/libnetdata/arrayalloc/README.md
deleted file mode 100644
index 2f21bf3ff..000000000
--- a/libnetdata/arrayalloc/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
-# Array Allocator
-
diff --git a/libnetdata/arrayalloc/arrayalloc.c b/libnetdata/arrayalloc/arrayalloc.c
deleted file mode 100644
index f337279ae..000000000
--- a/libnetdata/arrayalloc/arrayalloc.c
+++ /dev/null
@@ -1,489 +0,0 @@
-#include "../libnetdata.h"
-#include "arrayalloc.h"
-#include "daemon/common.h"
-
-// max file size
-#define ARAL_MAX_PAGE_SIZE_MMAP (1*1024*1024*1024)
-
-// max malloc size
-// optimal at current versions of libc is up to 256k
-// ideal to have the same overhead as libc is 4k
-#define ARAL_MAX_PAGE_SIZE_MALLOC (64*1024)
-
-typedef struct arrayalloc_free {
- size_t size;
- struct arrayalloc_page *page;
- struct arrayalloc_free *next;
-} ARAL_FREE;
-
-typedef struct arrayalloc_page {
- const char *filename;
- size_t size; // the total size of the page
- size_t used_elements; // the total number of used elements on this page
- uint8_t *data;
- ARAL_FREE *free_list;
- struct arrayalloc_page *prev; // the prev page on the list
- struct arrayalloc_page *next; // the next page on the list
-} ARAL_PAGE;
-
-#define ARAL_NATURAL_ALIGNMENT (sizeof(uintptr_t) * 2)
-static inline size_t natural_alignment(size_t size, size_t alignment) {
- if(unlikely(size % alignment))
- size = size + alignment - (size % alignment);
-
- return size;
-}
-
-static void arrayalloc_delete_leftover_files(const char *path, const char *required_prefix) {
- DIR *dir = opendir(path);
- if(!dir) return;
-
- char fullpath[FILENAME_MAX + 1];
- size_t len = strlen(required_prefix);
-
- struct dirent *de = NULL;
- while((de = readdir(dir))) {
- if(de->d_type == DT_DIR)
- continue;
-
- if(strncmp(de->d_name, required_prefix, len) != 0)
- continue;
-
- snprintfz(fullpath, FILENAME_MAX, "%s/%s", path, de->d_name);
- info("ARRAYALLOC: removing left-over file '%s'", fullpath);
- if(unlikely(unlink(fullpath) == -1))
- error("Cannot delete file '%s'", fullpath);
- }
-
- closedir(dir);
-}
-
-// ----------------------------------------------------------------------------
-// arrayalloc_init()
-
-static void arrayalloc_init(ARAL *ar) {
- static netdata_mutex_t mutex = NETDATA_MUTEX_INITIALIZER;
- netdata_mutex_lock(&mutex);
-
- if(!ar->internal.initialized) {
- netdata_mutex_init(&ar->internal.mutex);
-
- long int page_size = sysconf(_SC_PAGE_SIZE);
- if (unlikely(page_size == -1))
- ar->internal.natural_page_size = 4096;
- else
- ar->internal.natural_page_size = page_size;
-
- // we need to add a page pointer after the element
- // so, first align the element size to the pointer size
- ar->internal.element_size = natural_alignment(ar->requested_element_size, sizeof(uintptr_t));
-
- // then add the size of a pointer to it
- ar->internal.element_size += sizeof(uintptr_t);
-
- // make sure it is at least what we need for an ARAL_FREE slot
- if (ar->internal.element_size < sizeof(ARAL_FREE))
- ar->internal.element_size = sizeof(ARAL_FREE);
-
- // and finally align it to the natural alignment
- ar->internal.element_size = natural_alignment(ar->internal.element_size, ARAL_NATURAL_ALIGNMENT);
-
- // we write the page pointer just after each element
- ar->internal.page_ptr_offset = ar->internal.element_size - sizeof(uintptr_t);
-
- if(ar->requested_element_size + sizeof(uintptr_t) > ar->internal.element_size)
- fatal("ARRAYALLOC: failed to calculate properly page_ptr_offset: element size %zu, sizeof(uintptr_t) %zu, natural alignment %zu, final element size %zu, page_ptr_offset %zu",
- ar->requested_element_size, sizeof(uintptr_t), ARAL_NATURAL_ALIGNMENT, ar->internal.element_size, ar->internal.page_ptr_offset);
-
- //info("ARRAYALLOC: element size %zu, sizeof(uintptr_t) %zu, natural alignment %zu, final element size %zu, page_ptr_offset %zu",
- // ar->element_size, sizeof(uintptr_t), ARAL_NATURAL_ALIGNMENT, ar->internal.element_size, ar->internal.page_ptr_offset);
-
- if (ar->initial_elements < 10)
- ar->initial_elements = 10;
-
- ar->internal.mmap = (ar->use_mmap && ar->cache_dir && *ar->cache_dir) ? true : false;
- ar->internal.max_alloc_size = ar->internal.mmap ? ARAL_MAX_PAGE_SIZE_MMAP : ARAL_MAX_PAGE_SIZE_MALLOC;
-
- if(ar->internal.max_alloc_size % ar->internal.natural_page_size)
- ar->internal.max_alloc_size += ar->internal.natural_page_size - (ar->internal.max_alloc_size % ar->internal.natural_page_size) ;
-
- if(ar->internal.max_alloc_size % ar->internal.element_size)
- ar->internal.max_alloc_size -= ar->internal.max_alloc_size % ar->internal.element_size;
-
- ar->internal.pages = NULL;
- ar->internal.allocation_multiplier = 1;
- ar->internal.file_number = 0;
-
- if(ar->internal.mmap) {
- char directory_name[FILENAME_MAX + 1];
- snprintfz(directory_name, FILENAME_MAX, "%s/array_alloc.mmap", *ar->cache_dir);
- int r = mkdir(directory_name, 0775);
- if (r != 0 && errno != EEXIST)
- fatal("Cannot create directory '%s'", directory_name);
-
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s.", ar->filename);
- arrayalloc_delete_leftover_files(directory_name, filename);
- }
-
- ar->internal.initialized = true;
- }
-
- netdata_mutex_unlock(&mutex);
-}
-
-// ----------------------------------------------------------------------------
-// check a free slot
-
-#ifdef NETDATA_INTERNAL_CHECKS
-static inline void arrayalloc_free_validate_internal_check(ARAL *ar, ARAL_FREE *fr) {
- if(fr->size < ar->internal.element_size)
- fatal("ARRAYALLOC: free item of size %zu, less than the expected element size %zu", fr->size, ar->internal.element_size);
-
- if(fr->size % ar->internal.element_size)
- fatal("ARRAYALLOC: free item of size %zu is not multiple to element size %zu", fr->size, ar->internal.element_size);
-}
-#else
-#define arrayalloc_free_validate_internal_check(ar, fr) debug_dummy()
-#endif
-
-// ----------------------------------------------------------------------------
-// find the page a pointer belongs to
-
-#ifdef NETDATA_INTERNAL_CHECKS
-static inline ARAL_PAGE *find_page_with_allocation_internal_check(ARAL *ar, void *ptr) {
- uintptr_t seeking = (uintptr_t)ptr;
- ARAL_PAGE *page;
-
- for(page = ar->internal.pages; page ; page = page->next) {
- if(unlikely(seeking >= (uintptr_t)page->data && seeking < (uintptr_t)page->data + page->size))
- break;
- }
-
- return page;
-}
-#endif
-
-// ----------------------------------------------------------------------------
-// find a page with a free slot (there shouldn't be any)
-
-#ifdef NETDATA_INTERNAL_CHECKS
-static inline ARAL_PAGE *find_page_with_free_slots_internal_check(ARAL *ar) {
- ARAL_PAGE *page;
-
- for(page = ar->internal.pages; page ; page = page->next) {
- if(page->free_list)
- break;
-
- internal_fatal(page->size - page->used_elements * ar->internal.element_size >= ar->internal.element_size,
- "ARRAYALLOC: a page is marked full, but it is not!");
-
- internal_fatal(page->size < page->used_elements * ar->internal.element_size,
- "ARRAYALLOC: a page has been overflown!");
- }
-
- return page;
-}
-#endif
-
-#ifdef NETDATA_TRACE_ALLOCATIONS
-static void arrayalloc_add_page(ARAL *ar, const char *file, const char *function, size_t line) {
-#else
-static void arrayalloc_add_page(ARAL *ar) {
-#endif
- if(unlikely(!ar->internal.initialized))
- arrayalloc_init(ar);
-
- ARAL_PAGE *page = callocz(1, sizeof(ARAL_PAGE));
- page->size = ar->initial_elements * ar->internal.element_size * ar->internal.allocation_multiplier;
- if(page->size > ar->internal.max_alloc_size)
- page->size = ar->internal.max_alloc_size;
- else
- ar->internal.allocation_multiplier *= 2;
-
- if(ar->internal.mmap) {
- ar->internal.file_number++;
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/array_alloc.mmap/%s.%zu", *ar->cache_dir, ar->filename, ar->internal.file_number);
- page->filename = strdupz(filename);
- page->data = netdata_mmap(page->filename, page->size, MAP_SHARED, 0);
- if (unlikely(!page->data))
- fatal("Cannot allocate arrayalloc buffer of size %zu on filename '%s'", page->size, page->filename);
- }
- else {
-#ifdef NETDATA_TRACE_ALLOCATIONS
- page->data = mallocz_int(page->size, file, function, line);
-#else
- page->data = mallocz(page->size);
-#endif
- }
-
- // link the free space to its page
- ARAL_FREE *fr = (ARAL_FREE *)page->data;
- fr->size = page->size;
- fr->page = page;
- fr->next = NULL;
- page->free_list = fr;
-
- // link the new page at the front of the list of pages
- DOUBLE_LINKED_LIST_PREPEND_UNSAFE(ar->internal.pages, page, prev, next);
-
- arrayalloc_free_validate_internal_check(ar, fr);
-}
-
-static void arrayalloc_lock(ARAL *ar) {
- if(!ar->internal.lockless)
- netdata_mutex_lock(&ar->internal.mutex);
-}
-
-static void arrayalloc_unlock(ARAL *ar) {
- if(!ar->internal.lockless)
- netdata_mutex_unlock(&ar->internal.mutex);
-}
-
-ARAL *arrayalloc_create(size_t element_size, size_t elements, const char *filename, char **cache_dir, bool mmap) {
- ARAL *ar = callocz(1, sizeof(ARAL));
- ar->requested_element_size = element_size;
- ar->initial_elements = elements;
- ar->filename = filename;
- ar->cache_dir = cache_dir;
- ar->use_mmap = mmap;
- return ar;
-}
-
-#ifdef NETDATA_TRACE_ALLOCATIONS
-void *arrayalloc_mallocz_int(ARAL *ar, const char *file, const char *function, size_t line) {
-#else
-void *arrayalloc_mallocz(ARAL *ar) {
-#endif
- if(unlikely(!ar->internal.initialized))
- arrayalloc_init(ar);
-
- arrayalloc_lock(ar);
-
- if(unlikely(!ar->internal.pages || !ar->internal.pages->free_list)) {
- internal_fatal(find_page_with_free_slots_internal_check(ar) != NULL,
- "ARRAYALLOC: first page does not have any free slots, but there is another that has!");
-
-#ifdef NETDATA_TRACE_ALLOCATIONS
- arrayalloc_add_page(ar, file, function, line);
-#else
- arrayalloc_add_page(ar);
-#endif
- }
-
- ARAL_PAGE *page = ar->internal.pages;
- ARAL_FREE *found_fr = page->free_list;
-
- internal_fatal(!found_fr,
- "ARRAYALLOC: free item to use, cannot be NULL.");
-
- internal_fatal(found_fr->size < ar->internal.element_size,
- "ARRAYALLOC: free item size %zu, cannot be smaller than %zu",
- found_fr->size, ar->internal.element_size);
-
- if(unlikely(found_fr->size - ar->internal.element_size < ar->internal.element_size)) {
- // we can use the entire free space entry
-
- page->free_list = found_fr->next;
-
- if(unlikely(!page->free_list)) {
- // we are done with this page
- // move the full page last
- // so that pages with free items remain first in the list
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(ar->internal.pages, page, prev, next);
- DOUBLE_LINKED_LIST_APPEND_UNSAFE(ar->internal.pages, page, prev, next);
- }
- }
- else {
- // we can split the free space entry
-
- uint8_t *data = (uint8_t *)found_fr;
- ARAL_FREE *fr = (ARAL_FREE *)&data[ar->internal.element_size];
- fr->page = page;
- fr->size = found_fr->size - ar->internal.element_size;
-
- // link the free slot first in the page
- fr->next = found_fr->next;
- page->free_list = fr;
-
- arrayalloc_free_validate_internal_check(ar, fr);
- }
-
- page->used_elements++;
-
- // put the page pointer after the element
- uint8_t *data = (uint8_t *)found_fr;
- ARAL_PAGE **page_ptr = (ARAL_PAGE **)&data[ar->internal.page_ptr_offset];
- *page_ptr = page;
-
- arrayalloc_unlock(ar);
- return (void *)found_fr;
-}
-
-#ifdef NETDATA_TRACE_ALLOCATIONS
-void arrayalloc_freez_int(ARAL *ar, void *ptr, const char *file, const char *function, size_t line) {
-#else
-void arrayalloc_freez(ARAL *ar, void *ptr) {
-#endif
- if(unlikely(!ptr)) return;
- arrayalloc_lock(ar);
-
- // get the page pointer
- ARAL_PAGE *page;
- {
- uint8_t *data = (uint8_t *)ptr;
- ARAL_PAGE **page_ptr = (ARAL_PAGE **)&data[ar->internal.page_ptr_offset];
- page = *page_ptr;
-
-#ifdef NETDATA_INTERNAL_CHECKS
- // make it NULL so that we will fail on double free
- // do not enable this on production, because the MMAP file
- // will need to be saved again!
- *page_ptr = NULL;
-#endif
- }
-
-#ifdef NETDATA_ARRAYALLOC_INTERNAL_CHECKS
- {
- // find the page ptr belongs
- ARAL_PAGE *page2 = find_page_with_allocation_internal_check(ar, ptr);
-
- if(unlikely(page != page2))
- fatal("ARRAYALLOC: page pointers do not match!");
-
- if (unlikely(!page2))
- fatal("ARRAYALLOC: free of pointer %p is not in arrayalloc address space.", ptr);
- }
-#endif
-
- if(unlikely(!page))
- fatal("ARRAYALLOC: possible corruption or double free of pointer %p", ptr);
-
- if (unlikely(!page->used_elements))
- fatal("ARRAYALLOC: free of pointer %p is inside a page without any active allocations.", ptr);
-
- page->used_elements--;
-
- // make this element available
- ARAL_FREE *fr = (ARAL_FREE *)ptr;
- fr->page = page;
- fr->size = ar->internal.element_size;
- fr->next = page->free_list;
- page->free_list = fr;
-
- // if the page is empty, release it
- if(!page->used_elements) {
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(ar->internal.pages, page, prev, next);
-
- // free it
- if(ar->internal.mmap) {
- netdata_munmap(page->data, page->size);
- if (unlikely(unlink(page->filename) == 1))
- error("Cannot delete file '%s'", page->filename);
- freez((void *)page->filename);
- }
- else {
-#ifdef NETDATA_TRACE_ALLOCATIONS
- freez_int(page->data, file, function, line);
-#else
- freez(page->data);
-#endif
- }
-
- freez(page);
- }
- else if(page != ar->internal.pages) {
- // move the page with free item first
- // so that the next allocation will use this page
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(ar->internal.pages, page, prev, next);
- DOUBLE_LINKED_LIST_PREPEND_UNSAFE(ar->internal.pages, page, prev, next);
- }
-
- arrayalloc_unlock(ar);
-}
-
-int aral_unittest(size_t elements) {
- char *cache_dir = "/tmp/";
- ARAL *ar = arrayalloc_create(20, 10, "test-aral", &cache_dir, false);
-
- void *pointers[elements];
-
- for(size_t i = 0; i < elements ;i++) {
- pointers[i] = arrayalloc_mallocz(ar);
- }
-
- for(size_t div = 5; div >= 2 ;div--) {
- for (size_t i = 0; i < elements / div; i++) {
- arrayalloc_freez(ar, pointers[i]);
- }
-
- for (size_t i = 0; i < elements / div; i++) {
- pointers[i] = arrayalloc_mallocz(ar);
- }
- }
-
- for(size_t step = 50; step >= 10 ;step -= 10) {
- for (size_t i = 0; i < elements; i += step) {
- arrayalloc_freez(ar, pointers[i]);
- }
-
- for (size_t i = 0; i < elements; i += step) {
- pointers[i] = arrayalloc_mallocz(ar);
- }
- }
-
- for(size_t i = 0; i < elements ;i++) {
- arrayalloc_freez(ar, pointers[i]);
- }
-
- if(ar->internal.pages) {
- fprintf(stderr, "ARAL leftovers detected (1)");
- return 1;
- }
-
- size_t ops = 0;
- size_t increment = elements / 10;
- size_t allocated = 0;
- for(size_t all = increment; all <= elements ; all += increment) {
-
- for(; allocated < all ; allocated++) {
- pointers[allocated] = arrayalloc_mallocz(ar);
- ops++;
- }
-
- size_t to_free = now_realtime_usec() % all;
- size_t free_list[to_free];
- for(size_t i = 0; i < to_free ;i++) {
- size_t pos;
- do {
- pos = now_realtime_usec() % all;
- } while(!pointers[pos]);
-
- arrayalloc_freez(ar, pointers[pos]);
- pointers[pos] = NULL;
- free_list[i] = pos;
- ops++;
- }
-
- for(size_t i = 0; i < to_free ;i++) {
- size_t pos = free_list[i];
- pointers[pos] = arrayalloc_mallocz(ar);
- ops++;
- }
- }
-
- for(size_t i = 0; i < allocated - 1 ;i++) {
- arrayalloc_freez(ar, pointers[i]);
- ops++;
- }
-
- arrayalloc_freez(ar, pointers[allocated - 1]);
-
- if(ar->internal.pages) {
- fprintf(stderr, "ARAL leftovers detected (2)");
- return 1;
- }
-
- return 0;
-}
diff --git a/libnetdata/arrayalloc/arrayalloc.h b/libnetdata/arrayalloc/arrayalloc.h
deleted file mode 100644
index cf80b73fd..000000000
--- a/libnetdata/arrayalloc/arrayalloc.h
+++ /dev/null
@@ -1,48 +0,0 @@
-
-#ifndef ARRAYALLOC_H
-#define ARRAYALLOC_H 1
-
-#include "../libnetdata.h"
-
-typedef struct arrayalloc {
- size_t requested_element_size;
- size_t initial_elements;
- const char *filename;
- char **cache_dir;
- bool use_mmap;
-
- // private members - do not touch
- struct {
- bool mmap;
- bool lockless;
- bool initialized;
- size_t element_size;
- size_t page_ptr_offset;
- size_t file_number;
- size_t natural_page_size;
- size_t allocation_multiplier;
- size_t max_alloc_size;
- netdata_mutex_t mutex;
- struct arrayalloc_page *pages;
- } internal;
-} ARAL;
-
-ARAL *arrayalloc_create(size_t element_size, size_t elements, const char *filename, char **cache_dir, bool mmap);
-int aral_unittest(size_t elements);
-
-#ifdef NETDATA_TRACE_ALLOCATIONS
-
-#define arrayalloc_mallocz(ar) arrayalloc_mallocz_int(ar, __FILE__, __FUNCTION__, __LINE__)
-#define arrayalloc_freez(ar, ptr) arrayalloc_freez_int(ar, ptr, __FILE__, __FUNCTION__, __LINE__)
-
-void *arrayalloc_mallocz_int(ARAL *ar, const char *file, const char *function, size_t line);
-void arrayalloc_freez_int(ARAL *ar, void *ptr, const char *file, const char *function, size_t line);
-
-#else // NETDATA_TRACE_ALLOCATIONS
-
-void *arrayalloc_mallocz(ARAL *ar);
-void arrayalloc_freez(ARAL *ar, void *ptr);
-
-#endif // NETDATA_TRACE_ALLOCATIONS
-
-#endif // ARRAYALLOC_H
diff --git a/libnetdata/avl/README.md b/libnetdata/avl/README.md
index 36392bd79..2b03fec4a 100644
--- a/libnetdata/avl/README.md
+++ b/libnetdata/avl/README.md
@@ -1,6 +1,10 @@
# AVL
diff --git a/libnetdata/buffer/README.md b/libnetdata/buffer/README.md
index c5f66e6e3..6a84fd8a3 100644
--- a/libnetdata/buffer/README.md
+++ b/libnetdata/buffer/README.md
@@ -1,6 +1,10 @@
# BUFFER
diff --git a/libnetdata/buffer/buffer.c b/libnetdata/buffer/buffer.c
index d0940588f..eeb283209 100644
--- a/libnetdata/buffer/buffer.c
+++ b/libnetdata/buffer/buffer.c
@@ -442,28 +442,28 @@ void buffer_date(BUFFER *wb, int year, int month, int day, int hours, int minute
buffer_need_bytes(wb, 36);
char *b = &wb->buffer[wb->len];
- char *p = b;
-
- *p++ = '0' + year / 1000; year %= 1000;
- *p++ = '0' + year / 100; year %= 100;
- *p++ = '0' + year / 10;
- *p++ = '0' + year % 10;
- *p++ = '-';
- *p++ = '0' + month / 10;
- *p++ = '0' + month % 10;
- *p++ = '-';
- *p++ = '0' + day / 10;
- *p++ = '0' + day % 10;
- *p++ = ' ';
- *p++ = '0' + hours / 10;
- *p++ = '0' + hours % 10;
- *p++ = ':';
- *p++ = '0' + minutes / 10;
- *p++ = '0' + minutes % 10;
- *p++ = ':';
- *p++ = '0' + seconds / 10;
- *p++ = '0' + seconds % 10;
- *p = '\0';
+ char *p = b;
+
+ *p++ = '0' + year / 1000; year %= 1000;
+ *p++ = '0' + year / 100; year %= 100;
+ *p++ = '0' + year / 10;
+ *p++ = '0' + year % 10;
+ *p++ = '-';
+ *p++ = '0' + month / 10;
+ *p++ = '0' + month % 10;
+ *p++ = '-';
+ *p++ = '0' + day / 10;
+ *p++ = '0' + day % 10;
+ *p++ = ' ';
+ *p++ = '0' + hours / 10;
+ *p++ = '0' + hours % 10;
+ *p++ = ':';
+ *p++ = '0' + minutes / 10;
+ *p++ = '0' + minutes % 10;
+ *p++ = ':';
+ *p++ = '0' + seconds / 10;
+ *p++ = '0' + seconds % 10;
+ *p = '\0';
wb->len += (size_t)(p - b);
@@ -472,7 +472,7 @@ void buffer_date(BUFFER *wb, int year, int month, int day, int hours, int minute
buffer_overflow_check(wb);
}
-BUFFER *buffer_create(size_t size)
+BUFFER *buffer_create(size_t size, size_t *statistics)
{
BUFFER *b;
@@ -483,9 +483,13 @@ BUFFER *buffer_create(size_t size)
b->buffer[0] = '\0';
b->size = size;
b->contenttype = CT_TEXT_PLAIN;
+ b->statistics = statistics;
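+ // when not NULL, statistics points to an atomic counter that tracks the
+ // full footprint of this buffer (struct, data and the overflow guard);
+ // callers that do not need accounting can pass NULL, e.g.
+ // buffer_create(4096, NULL).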
buffer_overflow_init(b);
buffer_overflow_check(b);
+ if(b->statistics)
+ __atomic_add_fetch(b->statistics, b->size + sizeof(BUFFER) + sizeof(BUFFER_OVERFLOW_EOF) + 2, __ATOMIC_RELAXED);
+
return(b);
}
@@ -496,6 +500,9 @@ void buffer_free(BUFFER *b) {
debug(D_WEB_BUFFER, "Freeing web buffer of size %zu.", b->size);
+ if(b->statistics)
+ __atomic_sub_fetch(b->statistics, b->size + sizeof(BUFFER) + sizeof(BUFFER_OVERFLOW_EOF) + 2, __ATOMIC_RELAXED);
+
freez(b->buffer);
freez(b);
}
@@ -510,9 +517,7 @@ void buffer_increase(BUFFER *b, size_t free_size_required) {
size_t minimum = WEB_DATA_LENGTH_INCREASE_STEP;
if(minimum > wanted) wanted = minimum;
- size_t optimal = b->size;
- if(b->size > 5*1024*1024) optimal = b->size / 2;
-
+ size_t optimal = (b->size > 5*1024*1024) ? b->size / 2 : b->size;
if(optimal > wanted) wanted = optimal;
debug(D_WEB_BUFFER, "Increasing data buffer from size %zu to %zu.", b->size, b->size + wanted);
@@ -520,6 +525,9 @@ void buffer_increase(BUFFER *b, size_t free_size_required) {
b->buffer = reallocz(b->buffer, b->size + wanted + sizeof(BUFFER_OVERFLOW_EOF) + 2);
b->size += wanted;
+ if(b->statistics)
+ __atomic_add_fetch(b->statistics, wanted, __ATOMIC_RELAXED);
+
buffer_overflow_init(b);
buffer_overflow_check(b);
}
diff --git a/libnetdata/buffer/buffer.h b/libnetdata/buffer/buffer.h
index ce6f52899..0fa3495b4 100644
--- a/libnetdata/buffer/buffer.h
+++ b/libnetdata/buffer/buffer.h
@@ -15,6 +15,7 @@ typedef struct web_buffer {
uint8_t options; // options related to the content
time_t date; // the timestamp this content has been generated
time_t expires; // the timestamp this content expires
+ size_t *statistics;
} BUFFER;
// options
@@ -61,7 +62,7 @@ void buffer_rrd_value(BUFFER *wb, NETDATA_DOUBLE value);
void buffer_date(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds);
void buffer_jsdate(BUFFER *wb, int year, int month, int day, int hours, int minutes, int seconds);
-BUFFER *buffer_create(size_t size);
+BUFFER *buffer_create(size_t size, size_t *statistics);
void buffer_free(BUFFER *b);
void buffer_increase(BUFFER *b, size_t free_size_required);
diff --git a/libnetdata/circular_buffer/README.md b/libnetdata/circular_buffer/README.md
index 4482173d7..23980dff3 100644
--- a/libnetdata/circular_buffer/README.md
+++ b/libnetdata/circular_buffer/README.md
@@ -1,6 +1,10 @@
# Circular Buffer
diff --git a/libnetdata/circular_buffer/circular_buffer.c b/libnetdata/circular_buffer/circular_buffer.c
index c791b420b..b2bded179 100644
--- a/libnetdata/circular_buffer/circular_buffer.c
+++ b/libnetdata/circular_buffer/circular_buffer.c
@@ -1,16 +1,24 @@
#include "../libnetdata.h"
-struct circular_buffer *cbuffer_new(size_t initial, size_t max) {
- struct circular_buffer *result = mallocz(sizeof(*result));
- result->size = initial;
- result->data = mallocz(initial);
- result->write = 0;
- result->read = 0;
- result->max_size = max;
- return result;
+struct circular_buffer *cbuffer_new(size_t initial, size_t max, size_t *statistics) {
+ struct circular_buffer *buf = mallocz(sizeof(struct circular_buffer));
+ buf->size = initial;
+ buf->data = mallocz(initial);
+ buf->write = 0;
+ buf->read = 0;
+ buf->max_size = max;
+ buf->statistics = statistics;
+
+ if(buf->statistics)
+ __atomic_add_fetch(buf->statistics, sizeof(struct circular_buffer) + buf->size, __ATOMIC_RELAXED);
+
+ return buf;
}
void cbuffer_free(struct circular_buffer *buf) {
+ if(buf && buf->statistics)
+ __atomic_sub_fetch(buf->statistics, sizeof(struct circular_buffer) + buf->size, __ATOMIC_RELAXED);
+
freez(buf->data);
freez(buf);
}
@@ -19,6 +27,8 @@ static int cbuffer_realloc_unsafe(struct circular_buffer *buf) {
// Check that we can grow
if (buf->size >= buf->max_size)
return 1;
+
+ size_t old_size = buf->size;
size_t new_size = buf->size * 2;
if (new_size > buf->max_size)
new_size = buf->max_size;
@@ -43,6 +53,10 @@ static int cbuffer_realloc_unsafe(struct circular_buffer *buf) {
freez(buf->data);
buf->data = new_data;
buf->size = new_size;
+
+ if(buf->statistics)
+ __atomic_add_fetch(buf->statistics, new_size - old_size, __ATOMIC_RELAXED);
+
return 0;
}
diff --git a/libnetdata/circular_buffer/circular_buffer.h b/libnetdata/circular_buffer/circular_buffer.h
index 8c42aa807..9d29a84d7 100644
--- a/libnetdata/circular_buffer/circular_buffer.h
+++ b/libnetdata/circular_buffer/circular_buffer.h
@@ -5,10 +5,11 @@
struct circular_buffer {
size_t size, write, read, max_size;
+ size_t *statistics;
char *data;
};
-struct circular_buffer *cbuffer_new(size_t initial, size_t max);
+struct circular_buffer *cbuffer_new(size_t initial, size_t max, size_t *statistics);
void cbuffer_free(struct circular_buffer *buf);
int cbuffer_add_unsafe(struct circular_buffer *buf, const char *d, size_t d_len);
void cbuffer_remove_unsafe(struct circular_buffer *buf, size_t num);
diff --git a/libnetdata/clocks/clocks.c b/libnetdata/clocks/clocks.c
index cabc0000e..19c66f0a5 100644
--- a/libnetdata/clocks/clocks.c
+++ b/libnetdata/clocks/clocks.c
@@ -189,9 +189,13 @@ void sleep_to_absolute_time(usec_t usec) {
.tv_nsec = (suseconds_t)((usec % USEC_PER_SEC) * NSEC_PER_USEC)
};
+ errno = 0;
int ret = 0;
while( (ret = clock_nanosleep(clock, TIMER_ABSTIME, &req, NULL)) != 0 ) {
- if(ret == EINTR) continue;
+ if(ret == EINTR) {
+ errno = 0;
+ continue;
+ }
else {
if (ret == EINVAL) {
if (!einval_printed) {
@@ -296,7 +300,9 @@ usec_t heartbeat_next(heartbeat_t *hb, usec_t tick) {
if(unlikely(hb->randomness > tick / 2)) {
// TODO: The heartbeat tick should be specified at the heartbeat_init() function
usec_t tmp = (now_realtime_usec() * clock_realtime_resolution) % (tick / 2);
- info("heartbeat randomness of %llu is too big for a tick of %llu - setting it to %llu", hb->randomness, tick, tmp);
+
+ error_limit_static_global_var(erl, 10, 0);
+ error_limit(&erl, "heartbeat randomness of %llu is too big for a tick of %llu - setting it to %llu", hb->randomness, tick, tmp);
hb->randomness = tmp;
}
@@ -311,7 +317,7 @@ usec_t heartbeat_next(heartbeat_t *hb, usec_t tick) {
// sleep_usec() has a loop to guarantee we will sleep for at least the requested time.
// According the specs, when we sleep for a relative time, clock adjustments should not affect the duration
// we sleep.
- sleep_usec(next - now);
+ sleep_usec_with_now(next - now, now);
now = now_realtime_usec();
dt = now - hb->realtime;
@@ -322,11 +328,13 @@ usec_t heartbeat_next(heartbeat_t *hb, usec_t tick) {
if(unlikely(now < next)) {
errno = 0;
- error("heartbeat clock: woke up %llu microseconds earlier than expected (can be due to the CLOCK_REALTIME set to the past).", next - now);
+ error_limit_static_global_var(erl, 10, 0);
+ error_limit(&erl, "heartbeat clock: woke up %llu microseconds earlier than expected (can be due to the CLOCK_REALTIME set to the past).", next - now);
}
else if(unlikely(now - next > tick / 2)) {
errno = 0;
- error("heartbeat clock: woke up %llu microseconds later than expected (can be due to system load or the CLOCK_REALTIME set to the future).", now - next);
+ error_limit_static_global_var(erl, 10, 0);
+ error_limit(&erl, "heartbeat clock: woke up %llu microseconds later than expected (can be due to system load or the CLOCK_REALTIME set to the future).", now - next);
}
if(unlikely(!hb->realtime)) {
@@ -338,7 +346,7 @@ usec_t heartbeat_next(heartbeat_t *hb, usec_t tick) {
return dt;
}
-void sleep_usec(usec_t usec) {
+void sleep_usec_with_now(usec_t usec, usec_t started_ut) {
// we expect microseconds (1.000.000 per second)
// but timespec is nanoseconds (1.000.000.000 per second)
struct timespec rem = { 0, 0 }, req = {
@@ -346,21 +354,37 @@ void sleep_usec(usec_t usec) {
.tv_nsec = (suseconds_t) ((usec % USEC_PER_SEC) * NSEC_PER_USEC)
};
-#ifdef __linux__
- while (clock_nanosleep(CLOCK_REALTIME, 0, &req, &rem) != 0) {
-#else
+ // make sure errno is not EINTR
+ errno = 0;
+
+ if(!started_ut)
+ started_ut = now_realtime_usec();
+
+ usec_t end_ut = started_ut + usec;
+
while (nanosleep(&req, &rem) != 0) {
-#endif
if (likely(errno == EINTR && (rem.tv_sec || rem.tv_nsec))) {
req = rem;
rem = (struct timespec){ 0, 0 };
+
+ // break an infinite loop
+ errno = 0;
+
+ usec_t now_ut = now_realtime_usec();
+ if(now_ut >= end_ut)
+ break;
+
+ usec_t remaining_ut = (usec_t)req.tv_sec * USEC_PER_SEC + (usec_t)req.tv_nsec / NSEC_PER_USEC;
+ usec_t check_ut = now_ut - started_ut;
+ if(remaining_ut > check_ut) {
+ req = (struct timespec){
+ .tv_sec = (time_t) ( check_ut / USEC_PER_SEC),
+ .tv_nsec = (suseconds_t) ((check_ut % USEC_PER_SEC) * NSEC_PER_USEC)
+ };
+ }
}
else {
-#ifdef __linux__
- error("Cannot clock_nanosleep(CLOCK_REALTIME) for %llu microseconds.", usec);
-#else
error("Cannot nanosleep() for %llu microseconds.", usec);
-#endif
break;
}
}
diff --git a/libnetdata/clocks/clocks.h b/libnetdata/clocks/clocks.h
index 7738a2c8e..b050b6254 100644
--- a/libnetdata/clocks/clocks.h
+++ b/libnetdata/clocks/clocks.h
@@ -141,7 +141,8 @@ usec_t heartbeat_next(heartbeat_t *hb, usec_t tick);
void heartbeat_statistics(usec_t *min_ptr, usec_t *max_ptr, usec_t *average_ptr, size_t *count_ptr);
-void sleep_usec(usec_t usec);
+void sleep_usec_with_now(usec_t usec, usec_t started_ut);
+#define sleep_usec(usec) sleep_usec_with_now(usec, 0);
void clocks_init(void);
diff --git a/libnetdata/completion/completion.c b/libnetdata/completion/completion.c
index b5ac86e4f..6257e0299 100644
--- a/libnetdata/completion/completion.c
+++ b/libnetdata/completion/completion.c
@@ -5,6 +5,7 @@
void completion_init(struct completion *p)
{
p->completed = 0;
+ p->completed_jobs = 0;
fatal_assert(0 == uv_cond_init(&p->cond));
fatal_assert(0 == uv_mutex_init(&p->mutex));
}
@@ -32,3 +33,32 @@ void completion_mark_complete(struct completion *p)
uv_cond_broadcast(&p->cond);
uv_mutex_unlock(&p->mutex);
}
+
+unsigned completion_wait_for_a_job(struct completion *p, unsigned completed_jobs)
+{
+ uv_mutex_lock(&p->mutex);
+ while (0 == p->completed && p->completed_jobs <= completed_jobs) {
+ uv_cond_wait(&p->cond, &p->mutex);
+ }
+ completed_jobs = p->completed_jobs;
+ uv_mutex_unlock(&p->mutex);
+
+ return completed_jobs;
+}
+
+void completion_mark_complete_a_job(struct completion *p)
+{
+ uv_mutex_lock(&p->mutex);
+ p->completed_jobs++;
+ uv_cond_broadcast(&p->cond);
+ uv_mutex_unlock(&p->mutex);
+}
+
+bool completion_is_done(struct completion *p)
+{
+ bool ret;
+ uv_mutex_lock(&p->mutex);
+ ret = p->completed;
+ uv_mutex_unlock(&p->mutex);
+ return ret;
+}
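+
+// A sketch of how the job counters above can be combined (illustrative, not
+// taken from a specific caller): workers call completion_mark_complete_a_job()
+// after every unit of work, the producer calls completion_mark_complete() once
+// everything has been dispatched, and a coordinating thread drains them with:
+//
+//     unsigned seen = 0;
+//     while(!completion_is_done(&c))
+//         seen = completion_wait_for_a_job(&c, seen);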
diff --git a/libnetdata/completion/completion.h b/libnetdata/completion/completion.h
index 667360a42..723f73688 100644
--- a/libnetdata/completion/completion.h
+++ b/libnetdata/completion/completion.h
@@ -9,6 +9,7 @@ struct completion {
uv_mutex_t mutex;
uv_cond_t cond;
volatile unsigned completed;
+ volatile unsigned completed_jobs;
};
void completion_init(struct completion *p);
@@ -19,4 +20,8 @@ void completion_wait_for(struct completion *p);
void completion_mark_complete(struct completion *p);
+unsigned completion_wait_for_a_job(struct completion *p, unsigned completed_jobs);
+void completion_mark_complete_a_job(struct completion *p);
+bool completion_is_done(struct completion *p);
+
#endif /* NETDATA_COMPLETION_H */
diff --git a/libnetdata/config/README.md b/libnetdata/config/README.md
index 2eccf7a21..c34cf9255 100644
--- a/libnetdata/config/README.md
+++ b/libnetdata/config/README.md
@@ -1,6 +1,10 @@
# Netdata ini config files
diff --git a/libnetdata/dictionary/README.md b/libnetdata/dictionary/README.md
index 6d7e55392..508c4e031 100644
--- a/libnetdata/dictionary/README.md
+++ b/libnetdata/dictionary/README.md
@@ -1,5 +1,9 @@
# Dictionaries
diff --git a/libnetdata/dictionary/dictionary.c b/libnetdata/dictionary/dictionary.c
index 0277e067f..061b671ab 100644
--- a/libnetdata/dictionary/dictionary.c
+++ b/libnetdata/dictionary/dictionary.c
@@ -143,6 +143,8 @@ struct dictionary {
DICT_OPTIONS options; // the configuration flags of the dictionary (they never change - no atomics)
DICT_FLAGS flags; // run time flags for the dictionary (they change all the time - atomics needed)
+ ARAL *value_aral;
+
struct { // support for multiple indexing engines
Pvoid_t JudyHSArray; // the hash table
netdata_rwlock_t rwlock; // protect the index
@@ -179,7 +181,9 @@ struct dictionary {
#endif
};
+// ----------------------------------------------------------------------------
// forward definitions of functions used in reverse order in the code
+
static void garbage_collect_pending_deletes(DICTIONARY *dict);
static inline void item_linked_list_remove(DICTIONARY *dict, DICTIONARY_ITEM *item);
static size_t dict_item_free_with_hooks(DICTIONARY *dict, DICTIONARY_ITEM *item);
@@ -260,7 +264,7 @@ static inline void pointer_del(DICTIONARY *dict __maybe_unused, DICTIONARY_ITEM
static inline void DICTIONARY_STATS_PLUS_MEMORY(DICTIONARY *dict, size_t key_size, size_t item_size, size_t value_size) {
if(key_size)
- __atomic_fetch_add(&dict->stats->memory.indexed, (long)key_size, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&dict->stats->memory.index, (long)JUDYHS_INDEX_SIZE_ESTIMATE(key_size), __ATOMIC_RELAXED);
if(item_size)
__atomic_fetch_add(&dict->stats->memory.dict, (long)item_size, __ATOMIC_RELAXED);
@@ -270,7 +274,7 @@ static inline void DICTIONARY_STATS_PLUS_MEMORY(DICTIONARY *dict, size_t key_siz
}
static inline void DICTIONARY_STATS_MINUS_MEMORY(DICTIONARY *dict, size_t key_size, size_t item_size, size_t value_size) {
if(key_size)
- __atomic_fetch_sub(&dict->stats->memory.indexed, (long)key_size, __ATOMIC_RELAXED);
+ __atomic_fetch_sub(&dict->stats->memory.index, (long)JUDYHS_INDEX_SIZE_ESTIMATE(key_size), __ATOMIC_RELAXED);
if(item_size)
__atomic_fetch_sub(&dict->stats->memory.dict, (long)item_size, __ATOMIC_RELAXED);
@@ -380,7 +384,7 @@ size_t dictionary_referenced_items(DICTIONARY *dict) {
long int dictionary_stats_for_registry(DICTIONARY *dict) {
if(unlikely(!dict)) return 0;
- return (dict->stats->memory.indexed + dict->stats->memory.dict);
+ return (dict->stats->memory.index + dict->stats->memory.dict);
}
void dictionary_version_increment(DICTIONARY *dict) {
__atomic_fetch_add(&dict->version, 1, __ATOMIC_SEQ_CST);
@@ -789,7 +793,7 @@ static void garbage_collect_pending_deletes(DICTIONARY *dict) {
// we didn't get a reference
if(item_is_not_referenced_and_can_be_removed(dict, item)) {
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(dict->items.list, item, prev, next);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(dict->items.list, item, prev, next);
dict_item_free_with_hooks(dict, item);
deleted++;
@@ -1167,9 +1171,9 @@ static inline void item_linked_list_add(DICTIONARY *dict, DICTIONARY_ITEM *item)
ll_recursive_lock(dict, DICTIONARY_LOCK_WRITE);
if(dict->options & DICT_OPTION_ADD_IN_FRONT)
- DOUBLE_LINKED_LIST_PREPEND_UNSAFE(dict->items.list, item, prev, next);
+ DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(dict->items.list, item, prev, next);
else
- DOUBLE_LINKED_LIST_APPEND_UNSAFE(dict->items.list, item, prev, next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(dict->items.list, item, prev, next);
#ifdef NETDATA_INTERNAL_CHECKS
item->ll_adder_pid = gettid();
@@ -1186,7 +1190,7 @@ static inline void item_linked_list_add(DICTIONARY *dict, DICTIONARY_ITEM *item)
static inline void item_linked_list_remove(DICTIONARY *dict, DICTIONARY_ITEM *item) {
ll_recursive_lock(dict, DICTIONARY_LOCK_WRITE);
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(dict->items.list, item, prev, next);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(dict->items.list, item, prev, next);
#ifdef NETDATA_INTERNAL_CHECKS
item->ll_remover_pid = gettid();
@@ -1234,11 +1238,45 @@ static inline size_t item_get_name_len(const DICTIONARY_ITEM *item) {
return strlen(item->caller_name);
}
+static ARAL *dict_items_aral = NULL;
+static ARAL *dict_shared_items_aral = NULL;
+
+void dictionary_static_items_aral_init(void) {
+ static SPINLOCK spinlock;
+
+ if(unlikely(!dict_items_aral || !dict_shared_items_aral)) {
+ netdata_spinlock_lock(&spinlock);
+
+ // we have to check again
+ if(!dict_items_aral)
+ dict_items_aral = aral_create(
+ "dict-items",
+ sizeof(DICTIONARY_ITEM),
+ 0,
+ 65536,
+ aral_by_size_statistics(),
+ NULL, NULL, false, false);
+
+ // we have to check again
+ if(!dict_shared_items_aral)
+ dict_shared_items_aral = aral_create(
+ "dict-shared-items",
+ sizeof(DICTIONARY_ITEM_SHARED),
+ 0,
+ 65536,
+ aral_by_size_statistics(),
+ NULL, NULL, false, false);
+
+ netdata_spinlock_unlock(&spinlock);
+ }
+}
+
static DICTIONARY_ITEM *dict_item_create(DICTIONARY *dict __maybe_unused, size_t *allocated_bytes, DICTIONARY_ITEM *master_item) {
DICTIONARY_ITEM *item;
size_t size = sizeof(DICTIONARY_ITEM);
- item = callocz(1, size);
+ item = aral_mallocz(dict_items_aral);
+ memset(item, 0, sizeof(DICTIONARY_ITEM));
#ifdef NETDATA_INTERNAL_CHECKS
item->creator_pid = gettid();
@@ -1257,7 +1295,9 @@ static DICTIONARY_ITEM *dict_item_create(DICTIONARY *dict __maybe_unused, size_t
}
else {
size = sizeof(DICTIONARY_ITEM_SHARED);
- item->shared = callocz(1, size);
+ item->shared = aral_mallocz(dict_shared_items_aral);
+ memset(item->shared, 0, sizeof(DICTIONARY_ITEM_SHARED));
+
item->shared->links = 1;
*allocated_bytes += size;
}
@@ -1268,20 +1308,39 @@ static DICTIONARY_ITEM *dict_item_create(DICTIONARY *dict __maybe_unused, size_t
return item;
}
-static void *dict_item_value_create(void *value, size_t value_len) {
+static inline void *dict_item_value_mallocz(DICTIONARY *dict, size_t value_len) {
+ if(dict->value_aral) {
+ internal_fatal(aral_element_size(dict->value_aral) != value_len,
+ "DICTIONARY: item value size %zu does not match the configured fixed one %zu",
+ value_len, aral_element_size(dict->value_aral));
+ return aral_mallocz(dict->value_aral);
+ }
+ else
+ return mallocz(value_len);
+}
+
+static inline void dict_item_value_freez(DICTIONARY *dict, void *ptr) {
+ if(dict->value_aral)
+ aral_freez(dict->value_aral, ptr);
+ else
+ freez(ptr);
+}
+
+static void *dict_item_value_create(DICTIONARY *dict, void *value, size_t value_len) {
void *ptr = NULL;
if(likely(value_len)) {
if (likely(value)) {
// a value has been supplied
// copy it
- ptr = mallocz(value_len);
+ ptr = dict_item_value_mallocz(dict, value_len);
memcpy(ptr, value, value_len);
}
else {
// no value has been supplied
// allocate a clear memory block
- ptr = callocz(1, value_len);
+ ptr = dict_item_value_mallocz(dict, value_len);
+ memset(ptr, 0, value_len);
}
}
// else
@@ -1320,7 +1379,7 @@ static DICTIONARY_ITEM *dict_item_create_with_hooks(DICTIONARY *dict, const char
if(unlikely(dict->options & DICT_OPTION_VALUE_LINK_DONT_CLONE))
item->shared->value = value;
else
- item->shared->value = dict_item_value_create(value, value_len);
+ item->shared->value = dict_item_value_create(dict, value, value_len);
item->shared->value_len = value_len;
value_size += value_len;
@@ -1360,7 +1419,7 @@ static void dict_item_reset_value_with_hooks(DICTIONARY *dict, DICTIONARY_ITEM *
void *old_value = item->shared->value;
void *new_value = NULL;
if(value_len) {
- new_value = mallocz(value_len);
+ new_value = dict_item_value_mallocz(dict, value_len);
if(value) memcpy(new_value, value, value_len);
else memset(new_value, 0, value_len);
}
@@ -1368,7 +1427,7 @@ static void dict_item_reset_value_with_hooks(DICTIONARY *dict, DICTIONARY_ITEM *
item->shared->value_len = value_len;
debug(D_DICTIONARY, "Dictionary: freeing old value of '%s'", item_get_name(item));
- freez(old_value);
+ dict_item_value_freez(dict, old_value);
}
dictionary_execute_insert_callback(dict, item, constructor_data);
@@ -1391,17 +1450,18 @@ static size_t dict_item_free_with_hooks(DICTIONARY *dict, DICTIONARY_ITEM *item)
if(unlikely(!(dict->options & DICT_OPTION_VALUE_LINK_DONT_CLONE))) {
debug(D_DICTIONARY, "Dictionary freeing value of '%s'", item_get_name(item));
- freez(item->shared->value);
+ dict_item_value_freez(dict, item->shared->value);
item->shared->value = NULL;
}
value_size += item->shared->value_len;
- freez(item->shared);
+ aral_freez(dict_shared_items_aral, item->shared);
item->shared = NULL;
item_size += sizeof(DICTIONARY_ITEM_SHARED);
}
- freez(item);
+ aral_freez(dict_items_aral, item);
+
item_size += sizeof(DICTIONARY_ITEM);
DICTIONARY_STATS_MINUS_MEMORY(dict, key_size, item_size, value_size);
@@ -1749,6 +1809,9 @@ static bool dictionary_free_all_resources(DICTIONARY *dict, size_t *mem, bool fo
dict_size += sizeof(DICTIONARY);
DICTIONARY_STATS_MINUS_MEMORY(dict, 0, sizeof(DICTIONARY), 0);
+ if(dict->value_aral)
+ aral_by_size_release(dict->value_aral);
+
freez(dict);
internal_error(
@@ -1934,19 +1997,34 @@ static bool api_is_name_good_with_trace(DICTIONARY *dict __maybe_unused, const c
// ----------------------------------------------------------------------------
// API - dictionary management
-static DICTIONARY *dictionary_create_internal(DICT_OPTIONS options, struct dictionary_stats *stats) {
+static DICTIONARY *dictionary_create_internal(DICT_OPTIONS options, struct dictionary_stats *stats, size_t fixed_size) {
cleanup_destroyed_dictionaries();
DICTIONARY *dict = callocz(1, sizeof(DICTIONARY));
dict->options = options;
dict->stats = stats;
+ if((dict->options & DICT_OPTION_FIXED_SIZE) && !fixed_size) {
+ dict->options &= ~DICT_OPTION_FIXED_SIZE;
+ internal_fatal(true, "DICTIONARY: requested fixed size dictionary, without setting the size");
+ }
+ if(!(dict->options & DICT_OPTION_FIXED_SIZE) && fixed_size) {
+ dict->options |= DICT_OPTION_FIXED_SIZE;
+ internal_fatal(true, "DICTIONARY: set a fixed size for the items, without setting DICT_OPTION_FIXED_SIZE flag");
+ }
+
+ if(dict->options & DICT_OPTION_FIXED_SIZE)
+ dict->value_aral = aral_by_size_acquire(fixed_size);
+ else
+ dict->value_aral = NULL;
+
size_t dict_size = 0;
dict_size += sizeof(DICTIONARY);
dict_size += dictionary_locks_init(dict);
dict_size += reference_counter_init(dict);
dict_size += hashtable_init_unsafe(dict);
+ dictionary_static_items_aral_init();
pointer_index_init(dict);
DICTIONARY_STATS_PLUS_MEMORY(dict, 0, dict_size, 0);
@@ -1955,12 +2033,12 @@ static DICTIONARY *dictionary_create_internal(DICT_OPTIONS options, struct dicti
}
#ifdef NETDATA_INTERNAL_CHECKS
-DICTIONARY *dictionary_create_advanced_with_trace(DICT_OPTIONS options, struct dictionary_stats *stats, const char *function, size_t line, const char *file) {
+DICTIONARY *dictionary_create_advanced_with_trace(DICT_OPTIONS options, struct dictionary_stats *stats, size_t fixed_size, const char *function, size_t line, const char *file) {
#else
-DICTIONARY *dictionary_create_advanced(DICT_OPTIONS options, struct dictionary_stats *stats) {
+DICTIONARY *dictionary_create_advanced(DICT_OPTIONS options, struct dictionary_stats *stats, size_t fixed_size) {
#endif
- DICTIONARY *dict = dictionary_create_internal(options, stats?stats:&dictionary_stats_category_other);
+ DICTIONARY *dict = dictionary_create_internal(options, stats?stats:&dictionary_stats_category_other, fixed_size);
#ifdef NETDATA_INTERNAL_CHECKS
dict->creation_function = function;
@@ -1978,7 +2056,9 @@ DICTIONARY *dictionary_create_view_with_trace(DICTIONARY *master, const char *fu
DICTIONARY *dictionary_create_view(DICTIONARY *master) {
#endif
- DICTIONARY *dict = dictionary_create_internal(master->options, master->stats);
+ DICTIONARY *dict = dictionary_create_internal(master->options, master->stats,
+ master->value_aral ? aral_element_size(master->value_aral) : 0);
+
dict->master = master;
dictionary_hooks_allocate(master);
@@ -3295,7 +3375,7 @@ static int dictionary_unittest_view_threads() {
// threads testing of dictionary
struct dictionary_stats stats_master = {};
struct dictionary_stats stats_view = {};
- tv.master = dictionary_create_advanced(DICT_OPTION_NAME_LINK_DONT_CLONE | DICT_OPTION_DONT_OVERWRITE_VALUE, &stats_master);
+ tv.master = dictionary_create_advanced(DICT_OPTION_NAME_LINK_DONT_CLONE | DICT_OPTION_DONT_OVERWRITE_VALUE, &stats_master, 0);
tv.view = dictionary_create_view(tv.master);
tv.view->stats = &stats_view;
@@ -3388,7 +3468,7 @@ static int dictionary_unittest_view_threads() {
size_t dictionary_unittest_views(void) {
size_t errors = 0;
struct dictionary_stats stats = {};
- DICTIONARY *master = dictionary_create_advanced(DICT_OPTION_NONE, &stats);
+ DICTIONARY *master = dictionary_create_advanced(DICT_OPTION_NONE, &stats, 0);
DICTIONARY *view = dictionary_create_view(master);
fprintf(stderr, "\n\nChecking dictionary views...\n");
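
The accounting change above stops counting raw key bytes and instead estimates the JudyHS index footprint via `JUDYHS_INDEX_SIZE_ESTIMATE()`, which this patch adds to libnetdata.h. A quick worked example of that estimate on a 64-bit build (`sizeof(Word_t) == 8`):

```c
// JUDYHS_INDEX_SIZE_ESTIMATE(key_bytes) = ((key_bytes + sizeof(Word_t) - 1) / sizeof(Word_t)) * 4
//
//   5-byte key  -> (5  + 7) / 8 = 1 word  -> estimated  4 bytes of index overhead
//  20-byte key  -> (20 + 7) / 8 = 3 words -> estimated 12 bytes of index overhead
```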
diff --git a/libnetdata/dictionary/dictionary.h b/libnetdata/dictionary/dictionary.h
index 0e7b3d39f..58220def0 100644
--- a/libnetdata/dictionary/dictionary.h
+++ b/libnetdata/dictionary/dictionary.h
@@ -53,6 +53,7 @@ typedef enum dictionary_options {
DICT_OPTION_NAME_LINK_DONT_CLONE = (1 << 2), // don't copy the name, just point to the one provided (default: copy)
DICT_OPTION_DONT_OVERWRITE_VALUE = (1 << 3), // don't overwrite values of dictionary items (default: overwrite)
DICT_OPTION_ADD_IN_FRONT = (1 << 4), // add dictionary items at the front of the linked list (default: at the end)
+ DICT_OPTION_FIXED_SIZE = (1 << 5), // the items of the dictionary have a fixed size
} DICT_OPTIONS;
struct dictionary_stats {
@@ -91,7 +92,7 @@ struct dictionary_stats {
// memory
struct {
- long indexed; // bytes of keys indexed (indication of the index size)
+ long index; // bytes of keys indexed (indication of the index size)
long values; // bytes of caller structures
long dict; // bytes of the structures dictionary needs
} memory;
@@ -107,12 +108,12 @@ struct dictionary_stats {
// Create a dictionary
#ifdef NETDATA_INTERNAL_CHECKS
-#define dictionary_create(options) dictionary_create_advanced_with_trace(options, NULL, __FUNCTION__, __LINE__, __FILE__)
-#define dictionary_create_advanced(options, stats) dictionary_create_advanced_with_trace(options, stats, __FUNCTION__, __LINE__, __FILE__)
-DICTIONARY *dictionary_create_advanced_with_trace(DICT_OPTIONS options, struct dictionary_stats *stats, const char *function, size_t line, const char *file);
+#define dictionary_create(options) dictionary_create_advanced_with_trace(options, NULL, 0, __FUNCTION__, __LINE__, __FILE__)
+#define dictionary_create_advanced(options, stats, fixed_size) dictionary_create_advanced_with_trace(options, stats, fixed_size, __FUNCTION__, __LINE__, __FILE__)
+DICTIONARY *dictionary_create_advanced_with_trace(DICT_OPTIONS options, struct dictionary_stats *stats, size_t fixed_size, const char *function, size_t line, const char *file);
#else
-#define dictionary_create(options) dictionary_create_advanced(options, NULL);
-DICTIONARY *dictionary_create_advanced(DICT_OPTIONS options, struct dictionary_stats *stats);
+#define dictionary_create(options) dictionary_create_advanced(options, NULL, 0);
+DICTIONARY *dictionary_create_advanced(DICT_OPTIONS options, struct dictionary_stats *stats, size_t fixed_size);
#endif
// Create a view on a dictionary
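
With the new `fixed_size` argument, callers that store same-sized values can ask the dictionary to serve them from an ARAL pool instead of individual mallocs. A minimal sketch, assuming an illustrative value struct and the pre-existing `dictionary_set()` helper (not shown in this hunk):

```c
// minimal sketch; struct my_entry and the key are illustrative,
// dictionary_set() is the pre-existing insert helper (not part of this hunk)
struct my_entry {
    uint32_t hits;
    usec_t last_seen_ut;
};

static DICTIONARY *create_fixed_size_dict(void) {
    DICTIONARY *dict = dictionary_create_advanced(
        DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE,
        NULL,                        // use the default stats category
        sizeof(struct my_entry));    // every value must be exactly this size

    struct my_entry e = { .hits = 1, .last_seen_ut = 0 };
    dictionary_set(dict, "example-key", &e, sizeof(e));
    return dict;
}
```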
diff --git a/libnetdata/ebpf/README.md b/libnetdata/ebpf/README.md
index 534867f31..c2dabe102 100644
--- a/libnetdata/ebpf/README.md
+++ b/libnetdata/ebpf/README.md
@@ -1,5 +1,13 @@
+# eBPF library
+
+Netdata's eBPF library supports the [eBPF collector](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/README.md).
diff --git a/libnetdata/ebpf/ebpf.c b/libnetdata/ebpf/ebpf.c
index 382485e5f..7cad59785 100644
--- a/libnetdata/ebpf/ebpf.c
+++ b/libnetdata/ebpf/ebpf.c
@@ -809,7 +809,7 @@ static void ebpf_select_mode_string(char *output, size_t len, netdata_run_mode_t
*
* Convert the string given as argument to value present in enum.
*
- * @param str value read from configuraion file.
+ * @param str value read from configuration file.
*
* @return It returns the value to be used.
*/
@@ -901,7 +901,7 @@ netdata_ebpf_program_loaded_t ebpf_convert_core_type(char *str, netdata_run_mode
/**
* Adjust Thread Load
*
- * Adjust thread configuraton according specified load.
+ * Adjust thread configuration according specified load.
*
* @param mod the main structure that will be adjusted.
* @param file the btf file used with thread.
@@ -1060,7 +1060,7 @@ static netdata_ebpf_load_mode_t ebpf_select_load_mode(struct btf *btf_file, netd
* Update configuration for a specific thread.
*
* @param modules structure that will be updated
- * @oaram origin specify the configuration file loaded
+ * @param origin specify the configuration file loaded
* @param btf_file a pointer to the loaded btf file.
* @param is_rhf is Red Hat family?
*/
@@ -1124,7 +1124,7 @@ void ebpf_update_module(ebpf_module_t *em, struct btf *btf_file, int kver, int i
error("Cannot load the ebpf configuration file %s", em->config_file);
return;
}
- // If user defined data globaly, we will have here EBPF_LOADED_FROM_USER, we need to consider this, to avoid
+ // If user defined data globally, we will have here EBPF_LOADED_FROM_USER, we need to consider this, to avoid
// forcing users to configure thread by thread.
origin = (!(em->load & NETDATA_EBPF_LOAD_SOURCE)) ? EBPF_LOADED_FROM_STOCK : em->load & NETDATA_EBPF_LOAD_SOURCE;
} else
@@ -1139,7 +1139,7 @@ void ebpf_update_module(ebpf_module_t *em, struct btf *btf_file, int kver, int i
* Apps and cgroup has internal cleanup that needs attaching tracers to release_task, to avoid overload the function
* we will enable this integration by default, if and only if, we are running with trampolines.
*
- * @param em a poiter to the main thread structure.
+ * @param em a pointer to the main thread structure.
* @param mode is the mode used with different
*/
void ebpf_adjust_apps_cgroup(ebpf_module_t *em, netdata_ebpf_program_loaded_t mode)
@@ -1160,7 +1160,8 @@ void ebpf_adjust_apps_cgroup(ebpf_module_t *em, netdata_ebpf_program_loaded_t mo
* Helper used to get address from /proc/kallsym
*
* @param fa address structure
- * @param fd file descriptor loaded inside kernel.
+ * @param fd file descriptor loaded inside kernel. If a negative value is given,
+ * the function only checks that the address exists and does not update the hash table.
*/
void ebpf_load_addresses(ebpf_addresses_t *fa, int fd)
{
@@ -1182,11 +1183,15 @@ void ebpf_load_addresses(ebpf_addresses_t *fa, int fd)
char *fcnt = procfile_lineword(ff, l, 2);
uint32_t hash = simple_hash(fcnt);
if (fa->hash == hash && !strcmp(fcnt, fa->function)) {
- char addr[128];
- snprintf(addr, 127, "0x%s", procfile_lineword(ff, l, 0));
- fa->addr = (unsigned long) strtoul(addr, NULL, 16);
- uint32_t key = 0;
- bpf_map_update_elem(fd, &key, &fa->addr, BPF_ANY);
+ if (fd > 0) {
+ char addr[128];
+ snprintf(addr, 127, "0x%s", procfile_lineword(ff, l, 0));
+ fa->addr = (unsigned long) strtoul(addr, NULL, 16);
+ uint32_t key = 0;
+ bpf_map_update_elem(fd, &key, &fa->addr, BPF_ANY);
+ } else
+ fa->addr = 1;
+ break;
}
}
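
With the change above, passing a negative file descriptor turns `ebpf_load_addresses()` into a pure existence probe: it breaks out as soon as the symbol is found and never touches a BPF map. A minimal sketch, assuming an illustrative symbol name:

```c
// minimal sketch; the probed symbol is illustrative
static bool kernel_symbol_exists(void) {
    ebpf_addresses_t fa = {
        .function = "vfs_write",               // illustrative symbol
        .hash     = simple_hash("vfs_write"),
        .addr     = 0
    };

    ebpf_load_addresses(&fa, -1);   // negative fd: probe only, no bpf_map_update_elem()
    return fa.addr != 0;
}
```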
diff --git a/libnetdata/ebpf/ebpf.h b/libnetdata/ebpf/ebpf.h
index 5cff5134f..cf3fa7ccd 100644
--- a/libnetdata/ebpf/ebpf.h
+++ b/libnetdata/ebpf/ebpf.h
@@ -206,7 +206,7 @@ typedef struct ebpf_specify_name {
typedef enum netdata_ebpf_load_mode {
EBPF_LOAD_LEGACY = 1<<0, // Select legacy mode, this means we will load binaries
- EBPF_LOAD_CORE = 1<<1, // When CO-RE is used, it is necessary to use the souce code
+ EBPF_LOAD_CORE = 1<<1, // When CO-RE is used, it is necessary to use the source code
EBPF_LOAD_PLAY_DICE = 1<<2, // Take a look on environment and choose the best option
EBPF_LOADED_FROM_STOCK = 1<<3, // Configuration loaded from Stock file
EBPF_LOADED_FROM_USER = 1<<4 // Configuration loaded from user
diff --git a/libnetdata/eval/eval.c b/libnetdata/eval/eval.c
index 0e429a08c..c7570bd2f 100644
--- a/libnetdata/eval/eval.c
+++ b/libnetdata/eval/eval.c
@@ -1126,7 +1126,7 @@ EVAL_EXPRESSION *expression_parse(const char *string, const char **failed_at, in
return NULL;
}
- BUFFER *out = buffer_create(1024);
+ BUFFER *out = buffer_create(1024, NULL);
print_parsed_as_node(out, op, &err);
if(err != EVAL_ERROR_OK) {
error("failed to re-generate expression '%s' with reason: %s", string, expression_strerror(err));
@@ -1141,7 +1141,7 @@ EVAL_EXPRESSION *expression_parse(const char *string, const char **failed_at, in
exp->parsed_as = strdupz(buffer_tostring(out));
buffer_free(out);
- exp->error_msg = buffer_create(100);
+ exp->error_msg = buffer_create(100, NULL);
exp->nodes = (void *)op;
return exp;
diff --git a/libnetdata/json/README.md b/libnetdata/json/README.md
index 2e04b8b6b..e772f114d 100644
--- a/libnetdata/json/README.md
+++ b/libnetdata/json/README.md
@@ -1,6 +1,10 @@
# json
diff --git a/libnetdata/json/json.c b/libnetdata/json/json.c
index d5f62edaf..532b677ce 100644
--- a/libnetdata/json/json.c
+++ b/libnetdata/json/json.c
@@ -90,7 +90,7 @@ jsmntok_t *json_tokenise(char *js, size_t len, size_t *count)
*/
int json_callback_print(JSON_ENTRY *e)
{
- BUFFER *wb=buffer_create(300);
+ BUFFER *wb=buffer_create(300, NULL);
buffer_sprintf(wb,"%s = ", e->name);
char txt[50];
diff --git a/libnetdata/july/Makefile.am b/libnetdata/july/Makefile.am
new file mode 100644
index 000000000..161784b8f
--- /dev/null
+++ b/libnetdata/july/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/july/README.md b/libnetdata/july/README.md
new file mode 100644
index 000000000..df2a3d38c
--- /dev/null
+++ b/libnetdata/july/README.md
@@ -0,0 +1,14 @@
+
+
+
+# July
+
+An interface similar to `Judy` that uses minimal allocations (which can be cached)
+for items that are mostly appended (with only a few insertions in the middle).
+
diff --git a/libnetdata/july/july.c b/libnetdata/july/july.c
new file mode 100644
index 000000000..0ad5f13e5
--- /dev/null
+++ b/libnetdata/july/july.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "july.h"
+
+#define JULYL_MIN_ENTRIES 10
+
+struct JulyL_item {
+ Word_t index;
+ void *value;
+};
+
+struct JulyL {
+ size_t entries;
+ size_t used;
+
+ // statistics
+ size_t bytes;
+ size_t bytes_moved;
+ size_t reallocs;
+
+ struct {
+ struct JulyL *prev;
+ struct JulyL *next;
+ } cache;
+
+ struct JulyL_item array[];
+};
+
+// ----------------------------------------------------------------------------
+// JulyL cache
+
+static struct {
+ struct {
+ SPINLOCK spinlock;
+ struct JulyL *available_items;
+ size_t available;
+ } protected;
+
+ struct {
+ size_t bytes;
+ size_t allocated;
+ size_t bytes_moved;
+ size_t reallocs;
+ } atomics;
+} julyl_globals = {
+ .protected = {
+ .spinlock = NETDATA_SPINLOCK_INITIALIZER,
+ .available_items = NULL,
+ .available = 0,
+ },
+ .atomics = {
+ .bytes = 0,
+ .allocated = 0,
+ .bytes_moved = 0,
+ .reallocs = 0,
+ },
+};
+
+void julyl_cleanup1(void) {
+ struct JulyL *item = NULL;
+
+ if(!netdata_spinlock_trylock(&julyl_globals.protected.spinlock))
+ return;
+
+ if(julyl_globals.protected.available_items && julyl_globals.protected.available > 10) {
+ item = julyl_globals.protected.available_items;
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(julyl_globals.protected.available_items, item, cache.prev, cache.next);
+ julyl_globals.protected.available--;
+ }
+
+ netdata_spinlock_unlock(&julyl_globals.protected.spinlock);
+
+ if(item) {
+ size_t bytes = item->bytes;
+ freez(item);
+ __atomic_sub_fetch(&julyl_globals.atomics.bytes, bytes, __ATOMIC_RELAXED);
+ __atomic_sub_fetch(&julyl_globals.atomics.allocated, 1, __ATOMIC_RELAXED);
+ }
+}
+
+struct JulyL *julyl_get(void) {
+ struct JulyL *j;
+
+ netdata_spinlock_lock(&julyl_globals.protected.spinlock);
+
+ j = julyl_globals.protected.available_items;
+ if(likely(j)) {
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(julyl_globals.protected.available_items, j, cache.prev, cache.next);
+ julyl_globals.protected.available--;
+ }
+
+ netdata_spinlock_unlock(&julyl_globals.protected.spinlock);
+
+ if(unlikely(!j)) {
+ size_t bytes = sizeof(struct JulyL) + JULYL_MIN_ENTRIES * sizeof(struct JulyL_item);
+ j = mallocz(bytes);
+ j->bytes = bytes;
+ j->entries = JULYL_MIN_ENTRIES;
+ __atomic_add_fetch(&julyl_globals.atomics.bytes, bytes, __ATOMIC_RELAXED);
+ __atomic_add_fetch(&julyl_globals.atomics.allocated, 1, __ATOMIC_RELAXED);
+ }
+
+ j->used = 0;
+ j->bytes_moved = 0;
+ j->reallocs = 0;
+ j->cache.next = j->cache.prev = NULL;
+ return j;
+}
+
+static void julyl_release(struct JulyL *j) {
+ if(unlikely(!j)) return;
+
+ __atomic_add_fetch(&julyl_globals.atomics.bytes_moved, j->bytes_moved, __ATOMIC_RELAXED);
+ __atomic_add_fetch(&julyl_globals.atomics.reallocs, j->reallocs, __ATOMIC_RELAXED);
+
+ netdata_spinlock_lock(&julyl_globals.protected.spinlock);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(julyl_globals.protected.available_items, j, cache.prev, cache.next);
+ julyl_globals.protected.available++;
+ netdata_spinlock_unlock(&julyl_globals.protected.spinlock);
+}
+
+size_t julyl_cache_size(void) {
+ return __atomic_load_n(&julyl_globals.atomics.bytes, __ATOMIC_RELAXED);
+}
+
+size_t julyl_bytes_moved(void) {
+ return __atomic_load_n(&julyl_globals.atomics.bytes_moved, __ATOMIC_RELAXED);
+}
+
+// ----------------------------------------------------------------------------
+// JulyL
+
+size_t JulyLGet_binary_search_position_of_index(const struct JulyL *July, Word_t Index) {
+ // return the position of the first item >= Index
+
+ size_t left = 0;
+ size_t right = July->used;
+ while(left < right) {
+ size_t middle = (left + right) >> 1;
+
+ if(July->array[middle].index > Index)
+ right = middle;
+
+ else
+ left = middle + 1;
+ }
+
+ internal_fatal(left > July->used, "JULY: invalid position returned");
+
+ if(left > 0 && July->array[left - 1].index == Index)
+ return left - 1;
+
+ internal_fatal( (left < July->used && July->array[left].index < Index) ||
+ (left > 0 && July->array[left - 1].index >= Index)
+ , "JULY: wrong item returned");
+
+ return left;
+}
+
+PPvoid_t JulyLGet(Pcvoid_t PArray, Word_t Index, PJError_t PJError __maybe_unused) {
+ const struct JulyL *July = PArray;
+ if(!July)
+ return NULL;
+
+ size_t pos = JulyLGet_binary_search_position_of_index(July, Index);
+
+ if(unlikely(pos >= July->used || July->array[pos].index != Index))
+ return NULL;
+
+ return (PPvoid_t)&July->array[pos].value;
+}
+
+PPvoid_t JulyLIns(PPvoid_t PPArray, Word_t Index, PJError_t PJError __maybe_unused) {
+ struct JulyL *July = *PPArray;
+ if(unlikely(!July)) {
+ July = julyl_get();
+ July->used = 0;
+ *PPArray = July;
+ }
+
+ size_t pos = JulyLGet_binary_search_position_of_index(July, Index);
+
+ if((pos == July->used || July->array[pos].index != Index)) {
+ // we have to add this entry
+
+ if (unlikely(July->used == July->entries)) {
+ // we have to expand the array
+ size_t bytes = sizeof(struct JulyL) + July->entries * 2 * sizeof(struct JulyL_item);
+ __atomic_add_fetch(&julyl_globals.atomics.bytes, bytes - July->bytes, __ATOMIC_RELAXED);
+ July = reallocz(July, bytes);
+ July->bytes = bytes;
+ July->entries *= 2;
+ July->reallocs++;
+ *PPArray = July;
+ }
+
+ if (unlikely(pos != July->used)) {
+ // we have to shift some members to make room
+ size_t size = (July->used - pos) * sizeof(struct JulyL_item);
+ memmove(&July->array[pos + 1], &July->array[pos], size);
+ July->bytes_moved += size;
+ }
+
+ July->used++;
+ July->array[pos].value = NULL;
+ July->array[pos].index = Index;
+ }
+
+ return &July->array[pos].value;
+}
+
+PPvoid_t JulyLFirst(Pcvoid_t PArray, Word_t *Index, PJError_t PJError __maybe_unused) {
+ const struct JulyL *July = PArray;
+ if(!July)
+ return NULL;
+
+ size_t pos = JulyLGet_binary_search_position_of_index(July, *Index);
+ // pos is >= Index
+
+ if(unlikely(pos == July->used))
+ return NULL;
+
+ *Index = July->array[pos].index;
+ return (PPvoid_t)&July->array[pos].value;
+}
+
+PPvoid_t JulyLNext(Pcvoid_t PArray, Word_t *Index, PJError_t PJError __maybe_unused) {
+ const struct JulyL *July = PArray;
+ if(!July)
+ return NULL;
+
+ size_t pos = JulyLGet_binary_search_position_of_index(July, *Index);
+ // pos is >= Index
+
+ if(unlikely(pos == July->used))
+ return NULL;
+
+ if(July->array[pos].index == *Index) {
+ pos++;
+
+ if(unlikely(pos == July->used))
+ return NULL;
+ }
+
+ *Index = July->array[pos].index;
+ return (PPvoid_t)&July->array[pos].value;
+}
+
+PPvoid_t JulyLLast(Pcvoid_t PArray, Word_t *Index, PJError_t PJError __maybe_unused) {
+ const struct JulyL *July = PArray;
+ if(!July)
+ return NULL;
+
+ size_t pos = JulyLGet_binary_search_position_of_index(July, *Index);
+ // pos is >= Index
+
+ if(pos > 0 && (pos == July->used || July->array[pos].index > *Index))
+ pos--;
+
+ if(unlikely(pos == 0 && July->array[0].index > *Index))
+ return NULL;
+
+ *Index = July->array[pos].index;
+ return (PPvoid_t)&July->array[pos].value;
+}
+
+PPvoid_t JulyLPrev(Pcvoid_t PArray, Word_t *Index, PJError_t PJError __maybe_unused) {
+ const struct JulyL *July = PArray;
+ if(!July)
+ return NULL;
+
+ size_t pos = JulyLGet_binary_search_position_of_index(July, *Index);
+ // pos is >= Index
+
+ if(unlikely(pos == 0 || July->used == 0))
+ return NULL;
+
+ // get the previous one
+ pos--;
+
+ *Index = July->array[pos].index;
+ return (PPvoid_t)&July->array[pos].value;
+}
+
+Word_t JulyLFreeArray(PPvoid_t PPArray, PJError_t PJError __maybe_unused) {
+ struct JulyL *July = *PPArray;
+ if(unlikely(!July))
+ return 0;
+
+ size_t bytes = July->bytes;
+ julyl_release(July);
+ *PPArray = NULL;
+ return bytes;
+}
+
+// ----------------------------------------------------------------------------
+// unittest
+
+#define item_index(i) (((i) * 2) + 100)
+
+int julytest(void) {
+ Word_t entries = 10000;
+ Pvoid_t array = NULL;
+
+ // test additions
+ for(Word_t i = 0; i < entries ;i++) {
+ Pvoid_t *PValue = JulyLIns(&array, item_index(i), PJE0);
+ if(!PValue)
+ fatal("JULY: cannot insert item %lu", item_index(i));
+
+ *PValue = (void *)(item_index(i));
+ }
+
+ // test successful finds
+ for(Word_t i = 0; i < entries ;i++) {
+ Pvoid_t *PValue = JulyLGet(array, item_index(i), PJE0);
+ if(!PValue)
+ fatal("JULY: cannot find item %lu", item_index(i));
+
+ if(*PValue != (void *)(item_index(i)))
+ fatal("JULY: item %lu has the value %lu", item_index(i), (unsigned long)(*PValue));
+ }
+
+ // test finding the first item
+ for(Word_t i = 0; i < entries ;i++) {
+ Word_t index = item_index(i);
+ Pvoid_t *PValue = JulyLFirst(array, &index, PJE0);
+ if(!PValue)
+ fatal("JULY: cannot find first item %lu", item_index(i));
+
+ if(*PValue != (void *)(item_index(i)))
+ fatal("JULY: item %lu has the value %lu", item_index(i), (unsigned long)(*PValue));
+
+ if(index != item_index(i))
+ fatal("JULY: item %lu has index %lu", item_index(i), index);
+ }
+
+ // test finding the next item
+ for(Word_t i = 0; i < entries - 1 ;i++) {
+ Word_t index = item_index(i);
+ Pvoid_t *PValue = JulyLNext(array, &index, PJE0);
+ if(!PValue)
+ fatal("JULY: cannot find next item %lu", item_index(i));
+
+ if(*PValue != (void *)(item_index(i + 1)))
+ fatal("JULY: item %lu next has the value %lu", item_index(i), (unsigned long)(*PValue));
+
+ if(index != item_index(i + 1))
+ fatal("JULY: item %lu next has index %lu", item_index(i), index);
+ }
+
+ // test finding the last item
+ for(Word_t i = 0; i < entries ;i++) {
+ Word_t index = item_index(i);
+ Pvoid_t *PValue = JulyLLast(array, &index, PJE0);
+ if(!PValue)
+ fatal("JULY: cannot find last item %lu", item_index(i));
+
+ if(*PValue != (void *)(item_index(i)))
+ fatal("JULY: item %lu has the value %lu", item_index(i), (unsigned long)(*PValue));
+
+ if(index != item_index(i))
+ fatal("JULY: item %lu has index %lu", item_index(i), index);
+ }
+
+ // test finding the prev item
+ for(Word_t i = 1; i < entries ;i++) {
+ Word_t index = item_index(i);
+ Pvoid_t *PValue = JulyLPrev(array, &index, PJE0);
+ if(!PValue)
+ fatal("JULY: cannot find prev item %lu", item_index(i));
+
+ if(*PValue != (void *)(item_index(i - 1)))
+ fatal("JULY: item %lu prev has the value %lu", item_index(i), (unsigned long)(*PValue));
+
+ if(index != item_index(i - 1))
+ fatal("JULY: item %lu prev has index %lu", item_index(i), index);
+ }
+
+ // test full traversal forward
+ {
+ Word_t i = 0;
+ Word_t index = 0;
+ bool first = true;
+ Pvoid_t *PValue;
+ while((PValue = JulyLFirstThenNext(array, &index, &first))) {
+ if(*PValue != (void *)(item_index(i)))
+ fatal("JULY: item %lu traversal has the value %lu", item_index(i), (unsigned long)(*PValue));
+
+ if(index != item_index(i))
+ fatal("JULY: item %lu traversal has index %lu", item_index(i), index);
+
+ i++;
+ }
+
+ if(i != entries)
+ fatal("JULY: expected to forward traverse %lu entries, but traversed %lu", entries, i);
+ }
+
+ // test full traversal backward
+ {
+ Word_t i = 0;
+ Word_t index = (Word_t)(-1);
+ bool first = true;
+ Pvoid_t *PValue;
+ while((PValue = JulyLLastThenPrev(array, &index, &first))) {
+ if(*PValue != (void *)(item_index(entries - i - 1)))
+ fatal("JULY: item %lu traversal has the value %lu", item_index(i), (unsigned long)(*PValue));
+
+ if(index != item_index(entries - i - 1))
+ fatal("JULY: item %lu traversal has index %lu", item_index(i), index);
+
+ i++;
+ }
+
+ if(i != entries)
+ fatal("JULY: expected to back traverse %lu entries, but traversed %lu", entries, i);
+ }
+
+ // test finding non-existing first item
+ for(Word_t i = 0; i < entries ;i++) {
+ Word_t index = item_index(i) - 1;
+ Pvoid_t *PValue = JulyLFirst(array, &index, PJE0);
+ if(!PValue)
+ fatal("JULY: cannot find first item %lu", item_index(i) - 1);
+
+ if(*PValue != (void *)(item_index(i)))
+ fatal("JULY: item %lu has the value %lu", item_index(i), (unsigned long)(*PValue));
+
+ if(index != item_index(i))
+ fatal("JULY: item %lu has index %lu", item_index(i), index);
+ }
+
+ // test finding non-existing last item
+ for(Word_t i = 0; i < entries ;i++) {
+ Word_t index = item_index(i) + 1;
+ Pvoid_t *PValue = JulyLLast(array, &index, PJE0);
+ if(!PValue)
+ fatal("JULY: cannot find last item %lu", item_index(i) + 1);
+
+ if(*PValue != (void *)(item_index(i)))
+ fatal("JULY: item %lu has the value %lu", item_index(i), (unsigned long)(*PValue));
+
+ if(index != item_index(i))
+ fatal("JULY: item %lu has index %lu", item_index(i), index);
+ }
+
+ JulyLFreeArray(&array, PJE0);
+
+ return 0;
+}
+
+
diff --git a/libnetdata/july/july.h b/libnetdata/july/july.h
new file mode 100644
index 000000000..672ed44e4
--- /dev/null
+++ b/libnetdata/july/july.h
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_JULY_H
+#define NETDATA_JULY_H 1
+
+#include "../libnetdata.h"
+
+// #define PDC_USE_JULYL 1
+
+PPvoid_t JulyLGet(Pcvoid_t PArray, Word_t Index, PJError_t PJError);
+PPvoid_t JulyLIns(PPvoid_t PPArray, Word_t Index, PJError_t PJError);
+PPvoid_t JulyLFirst(Pcvoid_t PArray, Word_t *Index, PJError_t PJError);
+PPvoid_t JulyLNext(Pcvoid_t PArray, Word_t *Index, PJError_t PJError);
+PPvoid_t JulyLLast(Pcvoid_t PArray, Word_t *Index, PJError_t PJError);
+PPvoid_t JulyLPrev(Pcvoid_t PArray, Word_t *Index, PJError_t PJError);
+Word_t JulyLFreeArray(PPvoid_t PPArray, PJError_t PJError);
+
+static inline PPvoid_t JulyLFirstThenNext(Pcvoid_t PArray, Word_t * PIndex, bool *first) {
+ if(unlikely(*first)) {
+ *first = false;
+ return JulyLFirst(PArray, PIndex, PJE0);
+ }
+
+ return JulyLNext(PArray, PIndex, PJE0);
+}
+
+static inline PPvoid_t JulyLLastThenPrev(Pcvoid_t PArray, Word_t * PIndex, bool *first) {
+ if(unlikely(*first)) {
+ *first = false;
+ return JulyLLast(PArray, PIndex, PJE0);
+ }
+
+ return JulyLPrev(PArray, PIndex, PJE0);
+}
+
+void julyl_cleanup1(void);
+size_t julyl_cache_size(void);
+size_t julyl_bytes_moved(void);
+
+#endif // NETDATA_JULY_H
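
JulyL keeps the JudyL calling convention, so call sites can switch between the two implementations with a macro. A minimal sketch of the intended append-mostly usage; the stored values are illustrative:

```c
// minimal sketch; the stored pointers are illustrative
static void july_example(void) {
    Pvoid_t array = NULL;
    Pvoid_t *PValue;

    PValue = JulyLIns(&array, 10, PJE0);   // first insertion allocates from the cache
    *PValue = (void *)0x1;

    PValue = JulyLIns(&array, 20, PJE0);   // appended at the end, no memmove() needed
    *PValue = (void *)0x2;

    Word_t index = 0;
    bool first = true;
    while ((PValue = JulyLFirstThenNext(array, &index, &first))) {
        // visits index 10, then 20, in ascending order
    }

    JulyLFreeArray(&array, PJE0);          // returns the array to the global cache
}
```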
diff --git a/libnetdata/libnetdata.c b/libnetdata/libnetdata.c
index cc04a97eb..f6b6b026a 100644
--- a/libnetdata/libnetdata.c
+++ b/libnetdata/libnetdata.c
@@ -21,6 +21,81 @@ int enable_ksm = 0;
volatile sig_atomic_t netdata_exit = 0;
const char *program_version = VERSION;
+#define MAX_JUDY_SIZE_TO_ARAL 24
+static bool judy_sizes_config[MAX_JUDY_SIZE_TO_ARAL + 1] = {
+ [3] = true,
+ [4] = true,
+ [5] = true,
+ [6] = true,
+ [7] = true,
+ [8] = true,
+ [10] = true,
+ [11] = true,
+ [15] = true,
+ [23] = true,
+};
+static ARAL *judy_sizes_aral[MAX_JUDY_SIZE_TO_ARAL + 1] = {};
+
+struct aral_statistics judy_sizes_aral_statistics = {};
+
+void aral_judy_init(void) {
+ for(size_t Words = 0; Words <= MAX_JUDY_SIZE_TO_ARAL; Words++)
+ if(judy_sizes_config[Words]) {
+ char buf[30+1];
+ snprintfz(buf, 30, "judy-%zu", Words * sizeof(Word_t));
+ judy_sizes_aral[Words] = aral_create(
+ buf,
+ Words * sizeof(Word_t),
+ 0,
+ 65536,
+ &judy_sizes_aral_statistics,
+ NULL, NULL, false, false);
+ }
+}
+
+size_t judy_aral_overhead(void) {
+ return aral_overhead_from_stats(&judy_sizes_aral_statistics);
+}
+
+size_t judy_aral_structures(void) {
+ return aral_structures_from_stats(&judy_sizes_aral_statistics);
+}
+
+static ARAL *judy_size_aral(Word_t Words) {
+ if(Words <= MAX_JUDY_SIZE_TO_ARAL && judy_sizes_aral[Words])
+ return judy_sizes_aral[Words];
+
+ return NULL;
+}
+
+inline Word_t JudyMalloc(Word_t Words) {
+ Word_t Addr;
+
+ ARAL *ar = judy_size_aral(Words);
+ if(ar)
+ Addr = (Word_t) aral_mallocz(ar);
+ else
+ Addr = (Word_t) mallocz(Words * sizeof(Word_t));
+
+ return(Addr);
+}
+
+inline void JudyFree(void * PWord, Word_t Words) {
+ ARAL *ar = judy_size_aral(Words);
+ if(ar)
+ aral_freez(ar, PWord);
+ else
+ freez(PWord);
+}
+
+Word_t JudyMallocVirtual(Word_t Words) {
+ return JudyMalloc(Words);
+}
+
+void JudyFreeVirtual(void * PWord, Word_t Words) {
+ JudyFree(PWord, Words);
+}
+
// ----------------------------------------------------------------------------
// memory allocation functions that handle failures
@@ -150,27 +225,6 @@ void posix_memfree(void *ptr) {
libc_free(ptr);
}
-Word_t JudyMalloc(Word_t Words) {
- Word_t Addr;
-
- Addr = (Word_t) mallocz(Words * sizeof(Word_t));
- return(Addr);
-}
-void JudyFree(void * PWord, Word_t Words) {
- (void)Words;
- freez(PWord);
-}
-Word_t JudyMallocVirtual(Word_t Words) {
- Word_t Addr;
-
- Addr = (Word_t) mallocz(Words * sizeof(Word_t));
- return(Addr);
-}
-void JudyFreeVirtual(void * PWord, Word_t Words) {
- (void)Words;
- freez(PWord);
-}
-
#define MALLOC_ALIGNMENT (sizeof(uintptr_t) * 2)
#define size_t_atomic_count(op, var, size) __atomic_## op ##_fetch(&(var), size, __ATOMIC_RELAXED)
#define size_t_atomic_bytes(op, var, size) __atomic_## op ##_fetch(&(var), ((size) % MALLOC_ALIGNMENT)?((size) + MALLOC_ALIGNMENT - ((size) % MALLOC_ALIGNMENT)):(size), __ATOMIC_RELAXED)
@@ -1176,7 +1230,7 @@ static int memory_file_open(const char *filename, size_t size) {
return fd;
}
-static inline int madvise_sequential(void *mem, size_t len) {
+inline int madvise_sequential(void *mem, size_t len) {
static int logger = 1;
int ret = madvise(mem, len, MADV_SEQUENTIAL);
@@ -1184,7 +1238,15 @@ static inline int madvise_sequential(void *mem, size_t len) {
return ret;
}
-static inline int madvise_dontfork(void *mem, size_t len) {
+inline int madvise_random(void *mem, size_t len) {
+ static int logger = 1;
+ int ret = madvise(mem, len, MADV_RANDOM);
+
+ if (ret != 0 && logger-- > 0) error("madvise(MADV_RANDOM) failed.");
+ return ret;
+}
+
+inline int madvise_dontfork(void *mem, size_t len) {
static int logger = 1;
int ret = madvise(mem, len, MADV_DONTFORK);
@@ -1192,7 +1254,7 @@ static inline int madvise_dontfork(void *mem, size_t len) {
return ret;
}
-static inline int madvise_willneed(void *mem, size_t len) {
+inline int madvise_willneed(void *mem, size_t len) {
static int logger = 1;
int ret = madvise(mem, len, MADV_WILLNEED);
@@ -1200,24 +1262,27 @@ static inline int madvise_willneed(void *mem, size_t len) {
return ret;
}
+inline int madvise_dontneed(void *mem, size_t len) {
+ static int logger = 1;
+ int ret = madvise(mem, len, MADV_DONTNEED);
+
+ if (ret != 0 && logger-- > 0) error("madvise(MADV_DONTNEED) failed.");
+ return ret;
+}
+
+inline int madvise_dontdump(void *mem __maybe_unused, size_t len __maybe_unused) {
#if __linux__
-static inline int madvise_dontdump(void *mem, size_t len) {
static int logger = 1;
int ret = madvise(mem, len, MADV_DONTDUMP);
if (ret != 0 && logger-- > 0) error("madvise(MADV_DONTDUMP) failed.");
return ret;
-}
#else
-static inline int madvise_dontdump(void *mem, size_t len) {
- UNUSED(mem);
- UNUSED(len);
-
return 0;
-}
#endif
+}
-static inline int madvise_mergeable(void *mem, size_t len) {
+inline int madvise_mergeable(void *mem __maybe_unused, size_t len __maybe_unused) {
#ifdef MADV_MERGEABLE
static int logger = 1;
int ret = madvise(mem, len, MADV_MERGEABLE);
@@ -1225,14 +1290,12 @@ static inline int madvise_mergeable(void *mem, size_t len) {
if (ret != 0 && logger-- > 0) error("madvise(MADV_MERGEABLE) failed.");
return ret;
#else
- UNUSED(mem);
- UNUSED(len);
-
return 0;
#endif
}
-void *netdata_mmap(const char *filename, size_t size, int flags, int ksm) {
+void *netdata_mmap(const char *filename, size_t size, int flags, int ksm, bool read_only, int *open_fd)
+{
// info("netdata_mmap('%s', %zu", filename, size);
// MAP_SHARED is used in memory mode map
@@ -1271,7 +1334,7 @@ void *netdata_mmap(const char *filename, size_t size, int flags, int ksm) {
fd_for_mmap = -1;
}
- mem = mmap(NULL, size, PROT_READ | PROT_WRITE, flags, fd_for_mmap, 0);
+ mem = mmap(NULL, size, read_only ? PROT_READ : PROT_READ | PROT_WRITE, flags, fd_for_mmap, 0);
if (mem != MAP_FAILED) {
#ifdef NETDATA_TRACE_ALLOCATIONS
@@ -1288,15 +1351,20 @@ void *netdata_mmap(const char *filename, size_t size, int flags, int ksm) {
else info("Cannot seek to beginning of file '%s'.", filename);
}
- madvise_sequential(mem, size);
+ // madvise_sequential(mem, size);
madvise_dontfork(mem, size);
madvise_dontdump(mem, size);
- if(flags & MAP_SHARED) madvise_willneed(mem, size);
+ // if(flags & MAP_SHARED) madvise_willneed(mem, size);
if(ksm) madvise_mergeable(mem, size);
}
cleanup:
- if(fd != -1) close(fd);
+ if(fd != -1) {
+ if (open_fd)
+ *open_fd = fd;
+ else
+ close(fd);
+ }
if(mem == MAP_FAILED) return NULL;
errno = 0;
return mem;
@@ -1934,3 +2002,70 @@ bool run_command_and_copy_output_to_stdout(const char *command, int max_line_len
netdata_pclose(NULL, fp, pid);
return true;
}
+
+void for_each_open_fd(OPEN_FD_ACTION action, OPEN_FD_EXCLUDE excluded_fds){
+ int fd;
+
+ switch(action){
+ case OPEN_FD_ACTION_CLOSE:
+ if(!(excluded_fds & OPEN_FD_EXCLUDE_STDIN)) (void)close(STDIN_FILENO);
+ if(!(excluded_fds & OPEN_FD_EXCLUDE_STDOUT)) (void)close(STDOUT_FILENO);
+ if(!(excluded_fds & OPEN_FD_EXCLUDE_STDERR)) (void)close(STDERR_FILENO);
+ break;
+ case OPEN_FD_ACTION_FD_CLOEXEC:
+ if(!(excluded_fds & OPEN_FD_EXCLUDE_STDIN)) (void)fcntl(STDIN_FILENO, F_SETFD, FD_CLOEXEC);
+ if(!(excluded_fds & OPEN_FD_EXCLUDE_STDOUT)) (void)fcntl(STDOUT_FILENO, F_SETFD, FD_CLOEXEC);
+ if(!(excluded_fds & OPEN_FD_EXCLUDE_STDERR)) (void)fcntl(STDERR_FILENO, F_SETFD, FD_CLOEXEC);
+ break;
+ default:
+ break; // do nothing
+ }
+
+#if defined(HAVE_CLOSE_RANGE)
+ if(close_range(STDERR_FILENO + 1, ~0U, (action == OPEN_FD_ACTION_FD_CLOEXEC ? CLOSE_RANGE_CLOEXEC : 0)) == 0) return;
+ error("close_range() failed, will try to close fds manually");
+#endif
+
+ DIR *dir = opendir("/proc/self/fd");
+ if (dir == NULL) {
+ struct rlimit rl;
+ int open_max = -1;
+
+ if(getrlimit(RLIMIT_NOFILE, &rl) == 0 && rl.rlim_max != RLIM_INFINITY) open_max = rl.rlim_max;
+#ifdef _SC_OPEN_MAX
+ else open_max = sysconf(_SC_OPEN_MAX);
+#endif
+
+ if (open_max == -1) open_max = 65535; // 65535 arbitrary default if everything else fails
+
+ for (fd = STDERR_FILENO + 1; fd < open_max; fd++) {
+ switch(action){
+ case OPEN_FD_ACTION_CLOSE:
+ if(fd_is_valid(fd)) (void)close(fd);
+ break;
+ case OPEN_FD_ACTION_FD_CLOEXEC:
+ (void)fcntl(fd, F_SETFD, FD_CLOEXEC);
+ break;
+ default:
+ break; // do nothing
+ }
+ }
+ } else {
+ struct dirent *entry;
+ while ((entry = readdir(dir)) != NULL) {
+ fd = str2i(entry->d_name);
+ if(unlikely((fd == STDIN_FILENO ) || (fd == STDOUT_FILENO) || (fd == STDERR_FILENO) )) continue;
+ switch(action){
+ case OPEN_FD_ACTION_CLOSE:
+ if(fd_is_valid(fd)) (void)close(fd);
+ break;
+ case OPEN_FD_ACTION_FD_CLOEXEC:
+ (void)fcntl(fd, F_SETFD, FD_CLOEXEC);
+ break;
+ default:
+ break; // do nothing
+ }
+ }
+ closedir(dir);
+ }
+}
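
`for_each_open_fd()` centralizes the fork/exec hygiene that was previously open-coded in each spawner. A minimal sketch of the close-on-fork pattern; the exec'ed command is illustrative:

```c
// minimal sketch; the exec'ed command is illustrative
static void spawn_external_collector(void) {
    pid_t pid = fork();
    if (pid == 0) {
        // child: keep only stdin/stdout/stderr, close everything else
        for_each_open_fd(OPEN_FD_ACTION_CLOSE,
                         OPEN_FD_EXCLUDE_STDIN | OPEN_FD_EXCLUDE_STDOUT | OPEN_FD_EXCLUDE_STDERR);
        execlp("my-collector", "my-collector", (char *)NULL);
        _exit(127);   // exec failed
    }
    // parent continues; waitpid() handling omitted
}
```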
diff --git a/libnetdata/libnetdata.h b/libnetdata/libnetdata.h
index 58eaa9ded..c504bd4bd 100644
--- a/libnetdata/libnetdata.h
+++ b/libnetdata/libnetdata.h
@@ -11,10 +11,18 @@ extern "C" {
#include <config.h>
#endif
+#define JUDYHS_INDEX_SIZE_ESTIMATE(key_bytes) (((key_bytes) + sizeof(Word_t) - 1) / sizeof(Word_t) * 4)
+
#if defined(NETDATA_DEV_MODE) && !defined(NETDATA_INTERNAL_CHECKS)
#define NETDATA_INTERNAL_CHECKS 1
#endif
+#if SIZEOF_VOID_P == 4
+#define ENV32BIT 1
+#else
+#define ENV64BIT 1
+#endif
+
// NETDATA_TRACE_ALLOCATIONS does not work under musl libc, so don't enable it
//#if defined(NETDATA_INTERNAL_CHECKS) && !defined(NETDATA_TRACE_ALLOCATIONS)
//#define NETDATA_TRACE_ALLOCATIONS 1
@@ -217,6 +225,10 @@ extern "C" {
#define WARNUNUSED
#endif
+void aral_judy_init(void);
+size_t judy_aral_overhead(void);
+size_t judy_aral_structures(void);
+
#define ABS(x) (((x) < 0)? (-(x)) : (x))
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
@@ -225,8 +237,9 @@ extern "C" {
// ---------------------------------------------------------------------------------------------
// double linked list management
+// inspired by https://github.com/troydhanson/uthash/blob/master/src/utlist.h
-#define DOUBLE_LINKED_LIST_PREPEND_UNSAFE(head, item, prev, next) \
+#define DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(head, item, prev, next) \
do { \
(item)->next = (head); \
\
@@ -240,7 +253,7 @@ extern "C" {
(head) = (item); \
} while (0)
-#define DOUBLE_LINKED_LIST_APPEND_UNSAFE(head, item, prev, next) \
+#define DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(head, item, prev, next) \
do { \
if(likely(head)) { \
(item)->prev = (head)->prev; \
@@ -256,39 +269,97 @@ extern "C" {
\
} while (0)
-#define DOUBLE_LINKED_LIST_REMOVE_UNSAFE(head, item, prev, next) \
+#define DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(head, item, prev, next) \
do { \
fatal_assert((head) != NULL); \
fatal_assert((item)->prev != NULL); \
\
- if((item)->prev == (item)) { \
+ if((item)->prev == (item)) \
/* it is the only item in the list */ \
(head) = NULL; \
- } \
+ \
else if((item) == (head)) { \
/* it is the first item */ \
+ fatal_assert((item)->next != NULL); \
(item)->next->prev = (item)->prev; \
(head) = (item)->next; \
} \
else { \
+ /* it is any other item */ \
(item)->prev->next = (item)->next; \
- if ((item)->next) { \
+ \
+ if ((item)->next) \
(item)->next->prev = (item)->prev; \
- } \
- else { \
+ else \
(head)->prev = (item)->prev; \
- } \
} \
\
(item)->next = NULL; \
(item)->prev = NULL; \
} while (0)
+#define DOUBLE_LINKED_LIST_INSERT_ITEM_BEFORE_UNSAFE(head, existing, item, prev, next) \
+ do { \
+ if (existing) { \
+ fatal_assert((head) != NULL); \
+ fatal_assert((item) != NULL); \
+ \
+ (item)->next = (existing); \
+ (item)->prev = (existing)->prev; \
+ (existing)->prev = (item); \
+ \
+ if ((head) == (existing)) \
+ (head) = (item); \
+ else \
+ (item)->prev->next = (item); \
+ \
+ } \
+ else \
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(head, item, prev, next); \
+ \
+ } while (0)
+
+#define DOUBLE_LINKED_LIST_INSERT_ITEM_AFTER_UNSAFE(head, existing, item, prev, next) \
+ do { \
+ if (existing) { \
+ fatal_assert((head) != NULL); \
+ fatal_assert((item) != NULL); \
+ \
+ (item)->next = (existing)->next; \
+ (item)->prev = (existing); \
+ (existing)->next = (item); \
+ \
+ if ((item)->next) \
+ (item)->next->prev = (item); \
+ else \
+ (head)->prev = (item); \
+ } \
+ else \
+ DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(head, item, prev, next); \
+ \
+ } while (0)
+
+#define DOUBLE_LINKED_LIST_APPEND_LIST_UNSAFE(head, head2, prev, next) \
+ do { \
+ if (head2) { \
+ if (head) { \
+ __typeof(head2) _head2_last_item = (head2)->prev; \
+ \
+ (head2)->prev = (head)->prev; \
+ (head)->prev->next = (head2); \
+ \
+ (head)->prev = _head2_last_item; \
+ } \
+ else \
+ (head) = (head2); \
+ } \
+ } while (0)
+
#define DOUBLE_LINKED_LIST_FOREACH_FORWARD(head, var, prev, next) \
for ((var) = (head); (var) ; (var) = (var)->next)
#define DOUBLE_LINKED_LIST_FOREACH_BACKWARD(head, var, prev, next) \
- for ((var) = (head)?(head)->prev:NULL; (var) && (var) != (head)->prev ; (var) = (var)->prev)
+ for ((var) = (head) ? (head)->prev : NULL ; (var) ; (var) = ((var) == (head)) ? NULL : (var)->prev)
// ---------------------------------------------------------------------------------------------
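
The renamed `*_ITEM_*` macros above gain insert-before/after and list-append variants; a common use is keeping a list sorted on insert. A minimal sketch under that assumption; struct entry and its ordering key are illustrative:

```c
// minimal sketch; struct entry and its key are illustrative
struct entry {
    int key;
    struct entry *prev, *next;
};

static void sorted_insert(struct entry **head, struct entry *item) {
    struct entry *e;
    DOUBLE_LINKED_LIST_FOREACH_FORWARD(*head, e, prev, next) {
        if (e->key >= item->key)
            break;
    }
    // e == NULL appends at the tail; otherwise item lands just before e
    DOUBLE_LINKED_LIST_INSERT_ITEM_BEFORE_UNSAFE(*head, e, item, prev, next);
}
```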
@@ -301,6 +372,14 @@ char *mystrsep(char **ptr, char *s);
char *trim(char *s); // remove leading and trailing spaces; may return NULL
char *trim_all(char *buffer); // like trim(), but also remove duplicate spaces inside the string; may return NULL
+int madvise_sequential(void *mem, size_t len);
+int madvise_random(void *mem, size_t len);
+int madvise_dontfork(void *mem, size_t len);
+int madvise_willneed(void *mem, size_t len);
+int madvise_dontneed(void *mem, size_t len);
+int madvise_dontdump(void *mem, size_t len);
+int madvise_mergeable(void *mem, size_t len);
+
int vsnprintfz(char *dst, size_t n, const char *fmt, va_list args);
int snprintfz(char *dst, size_t n, const char *fmt, ...) PRINTFLIKE(3, 4);
@@ -335,7 +414,7 @@ void posix_memfree(void *ptr);
void json_escape_string(char *dst, const char *src, size_t size);
void json_fix_string(char *s);
-void *netdata_mmap(const char *filename, size_t size, int flags, int ksm);
+void *netdata_mmap(const char *filename, size_t size, int flags, int ksm, bool read_only, int *open_fd);
int netdata_munmap(void *ptr, size_t size);
int memory_file_save(const char *filename, void *mem, size_t size);
@@ -418,10 +497,22 @@ static inline char *get_word(char **words, size_t num_words, size_t index) {
bool run_command_and_copy_output_to_stdout(const char *command, int max_line_length);
+typedef enum {
+ OPEN_FD_ACTION_CLOSE,
+ OPEN_FD_ACTION_FD_CLOEXEC
+} OPEN_FD_ACTION;
+typedef enum {
+ OPEN_FD_EXCLUDE_STDIN = 0x01,
+ OPEN_FD_EXCLUDE_STDOUT = 0x02,
+ OPEN_FD_EXCLUDE_STDERR = 0x04
+} OPEN_FD_EXCLUDE;
+void for_each_open_fd(OPEN_FD_ACTION action, OPEN_FD_EXCLUDE excluded_fds);
+
void netdata_cleanup_and_exit(int ret) NORETURN;
void send_statistics(const char *action, const char *action_result, const char *action_data);
extern char *netdata_configured_host_prefix;
#include "libjudy/src/Judy.h"
+#include "july/july.h"
#include "os.h"
#include "storage_number/storage_number.h"
#include "threads/threads.h"
@@ -453,7 +544,7 @@ extern char *netdata_configured_host_prefix;
#include "json/json.h"
#include "health/health.h"
#include "string/utf8.h"
-#include "arrayalloc/arrayalloc.h"
+#include "libnetdata/aral/aral.h"
#include "onewayalloc/onewayalloc.h"
#include "worker_utilization/worker_utilization.h"
@@ -500,6 +591,76 @@ struct malloc_trace {
};
#endif // NETDATA_TRACE_ALLOCATIONS
+static inline PPvoid_t JudyLFirstThenNext(Pcvoid_t PArray, Word_t * PIndex, bool *first) {
+ if(unlikely(*first)) {
+ *first = false;
+ return JudyLFirst(PArray, PIndex, PJE0);
+ }
+
+ return JudyLNext(PArray, PIndex, PJE0);
+}
+
+static inline PPvoid_t JudyLLastThenPrev(Pcvoid_t PArray, Word_t * PIndex, bool *first) {
+ if(unlikely(*first)) {
+ *first = false;
+ return JudyLLast(PArray, PIndex, PJE0);
+ }
+
+ return JudyLPrev(PArray, PIndex, PJE0);
+}
+
+static inline size_t indexing_partition_old(Word_t ptr, Word_t modulo) {
+ size_t total = 0;
+
+ total += (ptr & 0xff) >> 0;
+ total += (ptr & 0xff00) >> 8;
+ total += (ptr & 0xff0000) >> 16;
+ total += (ptr & 0xff000000) >> 24;
+
+ if(sizeof(Word_t) > 4) {
+ total += (ptr & 0xff00000000) >> 32;
+ total += (ptr & 0xff0000000000) >> 40;
+ total += (ptr & 0xff000000000000) >> 48;
+ total += (ptr & 0xff00000000000000) >> 56;
+ }
+
+ return (total % modulo);
+}
+
+static uint32_t murmur32(uint32_t h) __attribute__((const));
+static inline uint32_t murmur32(uint32_t h) {
+ h ^= h >> 16;
+ h *= 0x85ebca6b;
+ h ^= h >> 13;
+ h *= 0xc2b2ae35;
+ h ^= h >> 16;
+
+ return h;
+}
+
+static uint64_t murmur64(uint64_t h) __attribute__((const));
+static inline uint64_t murmur64(uint64_t k) {
+ k ^= k >> 33;
+ k *= 0xff51afd7ed558ccdUL;
+ k ^= k >> 33;
+ k *= 0xc4ceb9fe1a85ec53UL;
+ k ^= k >> 33;
+
+ return k;
+}
+
+static inline size_t indexing_partition(Word_t ptr, Word_t modulo) __attribute__((const));
+static inline size_t indexing_partition(Word_t ptr, Word_t modulo) {
+ if(sizeof(Word_t) == 8) {
+ uint64_t hash = murmur64(ptr);
+ return hash % modulo;
+ }
+ else {
+ uint32_t hash = murmur32(ptr);
+ return hash % modulo;
+ }
+}
+
# ifdef __cplusplus
}
# endif
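
`indexing_partition()` replaces the old byte-summing scheme with a murmur finalizer, so pointer values (which share their low alignment bits) still spread evenly across partitions. A minimal sketch of partitioned locking built on it; the partition layout is illustrative:

```c
// minimal sketch; the partition layout is illustrative
#define PARTITIONS 8

static struct {
    SPINLOCK spinlock;
    Pvoid_t judyl;
} parts[PARTITIONS];

static inline size_t partition_of(void *ptr) {
    // the murmur finalizer mixes all bits of the pointer, so allocations that
    // share low (alignment) bits still land in different partitions
    return indexing_partition((Word_t)ptr, PARTITIONS);
}
```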
diff --git a/libnetdata/locks/README.md b/libnetdata/locks/README.md
index 9132edc43..8810e3d17 100644
--- a/libnetdata/locks/README.md
+++ b/libnetdata/locks/README.md
@@ -1,5 +1,10 @@
## How to trace netdata locks
diff --git a/libnetdata/locks/locks.c b/libnetdata/locks/locks.c
index f7191be52..e73456d70 100644
--- a/libnetdata/locks/locks.c
+++ b/libnetdata/locks/locks.c
@@ -15,8 +15,6 @@
#ifndef NETDATA_THREAD_LOCKS_ARRAY_SIZE
#define NETDATA_THREAD_LOCKS_ARRAY_SIZE 10
#endif
-static __thread netdata_rwlock_t *netdata_thread_locks[NETDATA_THREAD_LOCKS_ARRAY_SIZE];
-
#endif // NETDATA_TRACE_RWLOCKS
@@ -120,8 +118,6 @@ int __netdata_mutex_unlock(netdata_mutex_t *mutex) {
#ifdef NETDATA_TRACE_RWLOCKS
-#warning NETDATA_TRACE_RWLOCKS ENABLED - EXPECT A LOT OF OUTPUT
-
int netdata_mutex_init_debug(const char *file __maybe_unused, const char *function __maybe_unused,
const unsigned long line __maybe_unused, netdata_mutex_t *mutex) {
debug(D_LOCKS, "MUTEX_LOCK: netdata_mutex_init(%p) from %lu@%s, %s()", mutex, line, file, function);
@@ -283,12 +279,16 @@ int __netdata_rwlock_trywrlock(netdata_rwlock_t *rwlock) {
// https://www.youtube.com/watch?v=rmGJc9PXpuE&t=41s
void netdata_spinlock_init(SPINLOCK *spinlock) {
- *spinlock = NETDATA_SPINLOCK_INITIALIZER;
+ memset(spinlock, 0, sizeof(SPINLOCK));
}
void netdata_spinlock_lock(SPINLOCK *spinlock) {
static const struct timespec ns = { .tv_sec = 0, .tv_nsec = 1 };
+#ifdef NETDATA_INTERNAL_CHECKS
+ size_t spins = 0;
+#endif
+
netdata_thread_disable_cancelability();
for(int i = 1;
@@ -296,254 +296,104 @@ void netdata_spinlock_lock(SPINLOCK *spinlock) {
__atomic_test_and_set(&spinlock->locked, __ATOMIC_ACQUIRE)
; i++
) {
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ spins++;
+#endif
if(unlikely(i == 8)) {
i = 0;
nanosleep(&ns, NULL);
}
}
+
// we have the lock
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ spinlock->spins += spins;
+ spinlock->locker_pid = gettid();
+#endif
}
void netdata_spinlock_unlock(SPINLOCK *spinlock) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ spinlock->locker_pid = 0;
+#endif
__atomic_clear(&spinlock->locked, __ATOMIC_RELEASE);
netdata_thread_enable_cancelability();
}
-#ifdef NETDATA_TRACE_RWLOCKS
+bool netdata_spinlock_trylock(SPINLOCK *spinlock) {
+ netdata_thread_disable_cancelability();
-// ----------------------------------------------------------------------------
-// lockers list
+ if(!__atomic_load_n(&spinlock->locked, __ATOMIC_RELAXED) &&
+ !__atomic_test_and_set(&spinlock->locked, __ATOMIC_ACQUIRE))
+ // we got the lock
+ return true;
-void not_supported_by_posix_rwlocks(const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock, char locktype, const char *reason) {
- __netdata_mutex_lock(&rwlock->lockers_mutex);
- fprintf(stderr,
- "RW_LOCK FATAL ON LOCK %p: %d '%s' (function %s() %lu@%s) attempts to acquire a '%c' lock, but it is not supported by POSIX because: %s. At this attempt, the task is holding %zu rwlocks and %zu mutexes. There are %zu readers and %zu writers holding this lock:\n",
- rwlock,
- gettid(), netdata_thread_tag(),
- function, line, file,
- locktype,
- reason,
- netdata_locks_acquired_rwlocks, netdata_locks_acquired_mutexes,
- rwlock->readers, rwlock->writers);
-
- int i;
- usec_t now = now_monotonic_high_precision_usec();
- netdata_rwlock_locker *p;
- for(i = 1, p = rwlock->lockers; p ;p = p->next, i++) {
- fprintf(stderr,
- " => %i: RW_LOCK %p: process %d '%s' (function %s() %lu@%s) is having %zu '%c' lock for %llu usec.\n",
- i, rwlock,
- p->pid, p->tag,
- p->function, p->line, p->file,
- p->callers, p->lock,
- (now - p->start_s));
- }
- __netdata_mutex_unlock(&rwlock->lockers_mutex);
+ // we didn't get the lock
+ return false;
}
-static void log_rwlock_lockers(const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock, const char *reason, char locktype) {
-
- // this function can only be used by one thread at a time
- // because otherwise, the threads may deadlock waiting for each other
- static netdata_mutex_t log_lockers_mutex = NETDATA_MUTEX_INITIALIZER;
- __netdata_mutex_lock(&log_lockers_mutex);
-
- // now work on this locker
- __netdata_mutex_lock(&rwlock->lockers_mutex);
- fprintf(stderr,
- "RW_LOCK ON LOCK %p: %d '%s' (function %s() %lu@%s) %s a '%c' lock (while holding %zu rwlocks and %zu mutexes). There are %zu readers and %zu writers holding this lock:\n",
- rwlock,
- gettid(), netdata_thread_tag(),
- function, line, file,
- reason, locktype,
- netdata_locks_acquired_rwlocks, netdata_locks_acquired_mutexes,
- rwlock->readers, rwlock->writers);
-
- int i;
- usec_t now = now_monotonic_high_precision_usec();
- netdata_rwlock_locker *p;
- for(i = 1, p = rwlock->lockers; p ;p = p->next, i++) {
- fprintf(stderr,
- " => %i: RW_LOCK %p: process %d '%s' (function %s() %lu@%s) is having %zu '%c' lock for %llu usec.\n",
- i, rwlock,
- p->pid, p->tag,
- p->function, p->line, p->file,
- p->callers, p->lock,
- (now - p->start_s));
-
- if(p->all_caller_locks) {
- // find the lock in the netdata_thread_locks[]
- // and remove it
- int k;
- for(k = 0; k < NETDATA_THREAD_LOCKS_ARRAY_SIZE ;k++) {
- if (p->all_caller_locks[k] && p->all_caller_locks[k] != rwlock) {
-
- // lock the other lock lockers list
- __netdata_mutex_lock(&p->all_caller_locks[k]->lockers_mutex);
-
- // print the list of lockers of the other lock
- netdata_rwlock_locker *r;
- int j;
- for(j = 1, r = p->all_caller_locks[k]->lockers; r ;r = r->next, j++) {
- fprintf(
- stderr,
- " ~~~> %i: RW_LOCK %p: process %d '%s' (function %s() %lu@%s) is having %zu '%c' lock for %llu usec.\n",
- j,
- p->all_caller_locks[k],
- r->pid,
- r->tag,
- r->function,
- r->line,
- r->file,
- r->callers,
- r->lock,
- (now - r->start_s));
- }
-
- // unlock the other lock lockers list
- __netdata_mutex_unlock(&p->all_caller_locks[k]->lockers_mutex);
- }
- }
- }
+#ifdef NETDATA_TRACE_RWLOCKS
- }
- __netdata_mutex_unlock(&rwlock->lockers_mutex);
+// ----------------------------------------------------------------------------
+// lockers list
- // unlock this function for other threads
- __netdata_mutex_unlock(&log_lockers_mutex);
-}
-
-static netdata_rwlock_locker *add_rwlock_locker(const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock, char lock_type) {
- netdata_rwlock_locker *p = mallocz(sizeof(netdata_rwlock_locker));
- p->pid = gettid();
- p->tag = netdata_thread_tag();
- p->lock = lock_type;
- p->file = file;
- p->function = function;
- p->line = line;
- p->callers = 1;
- p->all_caller_locks = netdata_thread_locks;
- p->start_s = now_monotonic_high_precision_usec();
-
- // find a slot in the netdata_thread_locks[]
- int i;
- for(i = 0; i < NETDATA_THREAD_LOCKS_ARRAY_SIZE ;i++) {
- if (!netdata_thread_locks[i]) {
- netdata_thread_locks[i] = rwlock;
- break;
- }
- }
+static netdata_rwlock_locker *find_rwlock_locker(const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
+ pid_t pid = gettid();
+ netdata_rwlock_locker *locker = NULL;
__netdata_mutex_lock(&rwlock->lockers_mutex);
- p->next = rwlock->lockers;
- rwlock->lockers = p;
- if(lock_type == 'R') rwlock->readers++;
- if(lock_type == 'W') rwlock->writers++;
+ Pvoid_t *PValue = JudyLGet(rwlock->lockers_pid_JudyL, pid, PJE0);
+ if(PValue && *PValue)
+ locker = *PValue;
__netdata_mutex_unlock(&rwlock->lockers_mutex);
- return p;
+ return locker;
}
-static void remove_rwlock_locker(const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock, netdata_rwlock_locker *locker) {
- usec_t end_s = now_monotonic_high_precision_usec();
+static netdata_rwlock_locker *add_rwlock_locker(const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock, LOCKER_REQUEST lock_type) {
+ netdata_rwlock_locker *locker;
- if(locker->callers == 0)
- fprintf(stderr,
- "RW_LOCK ON LOCK %p: %d, '%s' (function %s() %lu@%s) callers should be positive but it is zero\n",
- rwlock,
- locker->pid, locker->tag,
- locker->function, locker->line, locker->file);
-
- if(locker->callers > 1 && locker->lock != 'R')
- fprintf(stderr,
- "RW_LOCK ON LOCK %p: %d, '%s' (function %s() %lu@%s) only 'R' locks support multiple holders, but here we have %zu callers holding a '%c' lock.\n",
- rwlock,
- locker->pid, locker->tag,
- locker->function, locker->line, locker->file,
- locker->callers, locker->lock);
-
- __netdata_mutex_lock(&rwlock->lockers_mutex);
- locker->callers--;
-
- if(!locker->callers) {
- int doit = 0;
-
- if (rwlock->lockers == locker) {
- rwlock->lockers = locker->next;
- doit = 1;
- } else {
- netdata_rwlock_locker *p;
- for (p = rwlock->lockers; p && p->next != locker; p = p->next)
- ;
- if (p && p->next == locker) {
- p->next = locker->next;
- doit = 1;
- }
- }
- if(doit) {
- if(locker->lock == 'R') rwlock->readers--;
- if(locker->lock == 'W') rwlock->writers--;
- }
+ locker = find_rwlock_locker(file, function, line, rwlock);
+ if(locker) {
+ locker->lock |= lock_type;
+ locker->refcount++;
+ }
+ else {
+ locker = mallocz(sizeof(netdata_rwlock_locker));
+ locker->pid = gettid();
+ locker->tag = netdata_thread_tag();
+ locker->refcount = 1;
+ locker->lock = lock_type;
+ locker->got_it = false;
+ locker->file = file;
+ locker->function = function;
+ locker->line = line;
- if(!doit) {
- fprintf(stderr,
- "RW_LOCK ON LOCK %p: %d, '%s' (function %s() %lu@%s) with %zu x '%c' lock is not found.\n",
- rwlock,
- locker->pid, locker->tag,
- locker->function, locker->line, locker->file,
- locker->callers, locker->lock);
- }
- else {
- // find the lock in the netdata_thread_locks[]
- // and remove it
- int i;
- for(i = 0; i < NETDATA_THREAD_LOCKS_ARRAY_SIZE ;i++) {
- if (netdata_thread_locks[i] == rwlock)
- netdata_thread_locks[i] = NULL;
- }
-
- if(end_s - locker->start_s >= NETDATA_TRACE_RWLOCKS_HOLD_TIME_TO_IGNORE_USEC)
- fprintf(stderr,
- "RW_LOCK ON LOCK %p: %d, '%s' (function %s() %lu@%s) holded a '%c' for %llu usec.\n",
- rwlock,
- locker->pid, locker->tag,
- locker->function, locker->line, locker->file,
- locker->lock, end_s - locker->start_s);
-
- freez(locker);
- }
+ __netdata_mutex_lock(&rwlock->lockers_mutex);
+ DOUBLE_LINKED_LIST_APPEND_UNSAFE(rwlock->lockers, locker, prev, next);
+ Pvoid_t *PValue = JudyLIns(&rwlock->lockers_pid_JudyL, locker->pid, PJE0);
+ *PValue = locker;
+ if (lock_type == RWLOCK_REQUEST_READ || lock_type == RWLOCK_REQUEST_TRYREAD) rwlock->readers++;
+ if (lock_type == RWLOCK_REQUEST_WRITE || lock_type == RWLOCK_REQUEST_TRYWRITE) rwlock->writers++;
+ __netdata_mutex_unlock(&rwlock->lockers_mutex);
}
- __netdata_mutex_unlock(&rwlock->lockers_mutex);
+ return locker;
}
-static netdata_rwlock_locker *find_rwlock_locker(const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
- pid_t pid = gettid();
- netdata_rwlock_locker *p;
-
+static void remove_rwlock_locker(const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock, netdata_rwlock_locker *locker) {
__netdata_mutex_lock(&rwlock->lockers_mutex);
- for(p = rwlock->lockers; p ;p = p->next) {
- if(p->pid == pid) break;
+ locker->refcount--;
+ if(!locker->refcount) {
+ DOUBLE_LINKED_LIST_REMOVE_UNSAFE(rwlock->lockers, locker, prev, next);
+ JudyLDel(&rwlock->lockers_pid_JudyL, locker->pid, PJE0);
+ if (locker->lock == RWLOCK_REQUEST_READ || locker->lock == RWLOCK_REQUEST_TRYREAD) rwlock->readers--;
+ else if (locker->lock == RWLOCK_REQUEST_WRITE || locker->lock == RWLOCK_REQUEST_TRYWRITE) rwlock->writers--;
+ freez(locker);
}
__netdata_mutex_unlock(&rwlock->lockers_mutex);
-
- return p;
-}
-
-static netdata_rwlock_locker *update_or_add_rwlock_locker(const char *file, const char *function, const unsigned long line, netdata_rwlock_t *rwlock, netdata_rwlock_locker *locker, char locktype) {
- if(!locker) {
- return add_rwlock_locker(file, function, line, rwlock, locktype);
- }
- else if(locker->lock == 'R' && locktype == 'R') {
- __netdata_mutex_lock(&rwlock->lockers_mutex);
- locker->callers++;
- __netdata_mutex_unlock(&rwlock->lockers_mutex);
- return locker;
- }
- else {
- not_supported_by_posix_rwlocks(file, function, line, rwlock, locktype, "DEADLOCK - WANTS TO CHANGE LOCK TYPE BUT ALREADY HAS THIS LOCKED");
- return locker;
- }
}
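
The rewritten tracking above keys each thread's locker by its pid in a JudyL array (lockers_pid_JudyL), so finding the current thread's entry no longer walks the linked list. A minimal sketch of that lookup pattern — illustrative only, using the same Judy calls as the code above:

#include <sys/types.h>
#include <Judy.h>

typedef struct my_item { pid_t pid; } my_item;

static Pvoid_t items_by_pid = NULL;                // JudyL array: pid -> my_item *

static void item_add(my_item *item) {
    Pvoid_t *PValue = JudyLIns(&items_by_pid, (Word_t)item->pid, PJE0);
    *PValue = item;                                // insert (or overwrite) the entry for this pid
}

static my_item *item_find(pid_t pid) {
    Pvoid_t *PValue = JudyLGet(items_by_pid, (Word_t)pid, PJE0);
    return (PValue && *PValue) ? *PValue : NULL;   // NULL when this pid has no entry
}

static void item_del(pid_t pid) {
    JudyLDel(&items_by_pid, (Word_t)pid, PJE0);    // returns 1 if an entry was removed, 0 otherwise
}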
// ----------------------------------------------------------------------------
@@ -551,84 +401,41 @@ static netdata_rwlock_locker *update_or_add_rwlock_locker(const char *file, cons
int netdata_rwlock_destroy_debug(const char *file __maybe_unused, const char *function __maybe_unused,
const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_destroy(%p) from %lu@%s, %s()", rwlock, line, file, function);
-
- if(rwlock->readers)
- error("RW_LOCK: destroying a rwlock with %zu readers in it", rwlock->readers);
- if(rwlock->writers)
- error("RW_LOCK: destroying a rwlock with %zu writers in it", rwlock->writers);
int ret = __netdata_rwlock_destroy(rwlock);
if(!ret) {
while (rwlock->lockers)
remove_rwlock_locker(file, function, line, rwlock, rwlock->lockers);
-
- if (rwlock->readers)
- error("RW_LOCK: internal error - empty rwlock with %zu readers in it", rwlock->readers);
- if (rwlock->writers)
- error("RW_LOCK: internal error - empty rwlock with %zu writers in it", rwlock->writers);
}
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_destroy(%p) = %d, from %lu@%s, %s()", rwlock, ret, line, file, function);
-
return ret;
}
int netdata_rwlock_init_debug(const char *file __maybe_unused, const char *function __maybe_unused,
const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_init(%p) from %lu@%s, %s()", rwlock, line, file, function);
int ret = __netdata_rwlock_init(rwlock);
if(!ret) {
__netdata_mutex_init(&rwlock->lockers_mutex);
+ rwlock->lockers_pid_JudyL = NULL;
rwlock->lockers = NULL;
rwlock->readers = 0;
rwlock->writers = 0;
}
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_init(%p) = %d, from %lu@%s, %s()", rwlock, ret, line, file, function);
-
return ret;
}
int netdata_rwlock_rdlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_rdlock(%p) from %lu@%s, %s()", rwlock, line, file, function);
-
- netdata_rwlock_locker *locker = find_rwlock_locker(file, function, line, rwlock);
-
-#ifdef NETDATA_TRACE_RWLOCKS_LOG_NESTED
- if(locker && locker->lock == 'R') {
- log_rwlock_lockers(file, function, line, rwlock, "NESTED READ LOCK REQUEST", 'R');
- }
-#endif // NETDATA_TRACE_RWLOCKS_LOG_NESTED
+ netdata_rwlock_locker *locker = add_rwlock_locker(file, function, line, rwlock, RWLOCK_REQUEST_READ);
- int log = 0;
- if(rwlock->writers) {
- log_rwlock_lockers(file, function, line, rwlock, "WANTS", 'R');
- log = 1;
- }
-
- usec_t start_s = now_monotonic_high_precision_usec();
int ret = __netdata_rwlock_rdlock(rwlock);
- usec_t end_s = now_monotonic_high_precision_usec();
-
- if(!ret) {
- locker = update_or_add_rwlock_locker(file, function, line, rwlock, locker, 'R');
- if(log) log_rwlock_lockers(file, function, line, rwlock, "GOT", 'R');
-
- }
-
- if(end_s - start_s >= NETDATA_TRACE_RWLOCKS_WAIT_TIME_TO_IGNORE_USEC)
- fprintf(stderr,
- "RW_LOCK ON LOCK %p: %d, '%s' (function %s() %lu@%s) WAITED for a READ lock for %llu usec.\n",
- rwlock,
- gettid(), netdata_thread_tag(),
- function, line, file,
- end_s - start_s);
-
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_rdlock(%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, end_s - start_s, line, file, function);
+ if(!ret)
+ locker->got_it = true;
+ else
+ remove_rwlock_locker(file, function, line, rwlock, locker);
return ret;
}
@@ -636,36 +443,13 @@ int netdata_rwlock_rdlock_debug(const char *file __maybe_unused, const char *fun
int netdata_rwlock_wrlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_wrlock(%p) from %lu@%s, %s()", rwlock, line, file, function);
-
- netdata_rwlock_locker *locker = find_rwlock_locker(file, function, line, rwlock);
- if(locker)
- not_supported_by_posix_rwlocks(file, function, line, rwlock, 'W', "DEADLOCK - WANTS A WRITE LOCK BUT ALREADY HAVE THIS LOCKED");
+ netdata_rwlock_locker *locker = add_rwlock_locker(file, function, line, rwlock, RWLOCK_REQUEST_WRITE);
- int log = 0;
- if(rwlock->readers) {
- log_rwlock_lockers(file, function, line, rwlock, "WANTS", 'W');
- log = 1;
- }
-
- usec_t start_s = now_monotonic_high_precision_usec();
int ret = __netdata_rwlock_wrlock(rwlock);
- usec_t end_s = now_monotonic_high_precision_usec();
-
- if(!ret){
- locker = update_or_add_rwlock_locker(file, function, line, rwlock, locker, 'W');
- if(log) log_rwlock_lockers(file, function, line, rwlock, "GOT", 'W');
- }
-
- if(end_s - start_s >= NETDATA_TRACE_RWLOCKS_WAIT_TIME_TO_IGNORE_USEC)
- fprintf(stderr,
- "RW_LOCK ON LOCK %p: %d, '%s' (function %s() %lu@%s) WAITED for a WRITE lock for %llu usec.\n",
- rwlock,
- gettid(), netdata_thread_tag(),
- function, line, file,
- end_s - start_s);
-
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_wrlock(%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, end_s - start_s, line, file, function);
+ if(!ret)
+ locker->got_it = true;
+ else
+ remove_rwlock_locker(file, function, line, rwlock, locker);
return ret;
}
@@ -673,83 +457,42 @@ int netdata_rwlock_wrlock_debug(const char *file __maybe_unused, const char *fun
int netdata_rwlock_unlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_unlock(%p) from %lu@%s, %s()", rwlock, line, file, function);
-
netdata_rwlock_locker *locker = find_rwlock_locker(file, function, line, rwlock);
+
if(unlikely(!locker))
- not_supported_by_posix_rwlocks(file, function, line, rwlock, 'U', "UNLOCK WITHOUT LOCK");
+ fatal("UNLOCK WITHOUT LOCK");
- usec_t start_s = now_monotonic_high_precision_usec();
int ret = __netdata_rwlock_unlock(rwlock);
- usec_t end_s = now_monotonic_high_precision_usec();
-
- if(end_s - start_s >= NETDATA_TRACE_RWLOCKS_WAIT_TIME_TO_IGNORE_USEC)
- fprintf(stderr,
- "RW_LOCK ON LOCK %p: %d, '%s' (function %s() %lu@%s) WAITED to UNLOCK for %llu usec.\n",
- rwlock,
- gettid(), netdata_thread_tag(),
- function, line, file,
- end_s - start_s);
-
- if(likely(!ret && locker)) remove_rwlock_locker(file, function, line, rwlock, locker);
-
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_unlock(%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, end_s - start_s, line, file, function);
+ if(likely(!ret))
+ remove_rwlock_locker(file, function, line, rwlock, locker);
return ret;
}
int netdata_rwlock_tryrdlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_tryrdlock(%p) from %lu@%s, %s()", rwlock, line, file, function);
- netdata_rwlock_locker *locker = find_rwlock_locker(file, function, line, rwlock);
- if(locker && locker->lock == 'W')
- not_supported_by_posix_rwlocks(file, function, line, rwlock, 'R', "DEADLOCK - WANTS A READ LOCK BUT IT HAS A WRITE LOCK ALREADY");
+ netdata_rwlock_locker *locker = add_rwlock_locker(file, function, line, rwlock, RWLOCK_REQUEST_TRYREAD);
- usec_t start_s = now_monotonic_high_precision_usec();
int ret = __netdata_rwlock_tryrdlock(rwlock);
- usec_t end_s = now_monotonic_high_precision_usec();
-
if(!ret)
- locker = update_or_add_rwlock_locker(file, function, line, rwlock, locker, 'R');
-
- if(end_s - start_s >= NETDATA_TRACE_RWLOCKS_WAIT_TIME_TO_IGNORE_USEC)
- fprintf(stderr,
- "RW_LOCK ON LOCK %p: %d, '%s' (function %s() %lu@%s) WAITED to TRYREAD for %llu usec.\n",
- rwlock,
- gettid(), netdata_thread_tag(),
- function, line, file,
- end_s - start_s);
-
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_tryrdlock(%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, end_s - start_s, line, file, function);
+ locker->got_it = true;
+ else
+ remove_rwlock_locker(file, function, line, rwlock, locker);
return ret;
}
int netdata_rwlock_trywrlock_debug(const char *file __maybe_unused, const char *function __maybe_unused,
const unsigned long line __maybe_unused, netdata_rwlock_t *rwlock) {
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_trywrlock(%p) from %lu@%s, %s()", rwlock, line, file, function);
- netdata_rwlock_locker *locker = find_rwlock_locker(file, function, line, rwlock);
- if(locker)
- not_supported_by_posix_rwlocks(file, function, line, rwlock, 'W', "ALREADY HAS THIS LOCK");
+ netdata_rwlock_locker *locker = add_rwlock_locker(file, function, line, rwlock, RWLOCK_REQUEST_TRYWRITE);
- usec_t start_s = now_monotonic_high_precision_usec();
int ret = __netdata_rwlock_trywrlock(rwlock);
- usec_t end_s = now_monotonic_high_precision_usec();
-
if(!ret)
- locker = update_or_add_rwlock_locker(file, function, line, rwlock, locker, 'W');
-
- if(end_s - start_s >= NETDATA_TRACE_RWLOCKS_WAIT_TIME_TO_IGNORE_USEC)
- fprintf(stderr,
- "RW_LOCK ON LOCK %p: %d, '%s' (function %s() %lu@%s) WAITED to TRYWRITE for %llu usec.\n",
- rwlock,
- gettid(), netdata_thread_tag(),
- function, line, file,
- end_s - start_s);
-
- debug(D_LOCKS, "RW_LOCK: netdata_rwlock_trywrlock(%p) = %d in %llu usec, from %lu@%s, %s()", rwlock, ret, end_s - start_s, line, file, function);
+ locker->got_it = true;
+ else
+ remove_rwlock_locker(file, function, line, rwlock, locker);
return ret;
}
diff --git a/libnetdata/locks/locks.h b/libnetdata/locks/locks.h
index 4d2d1655c..89b110d5e 100644
--- a/libnetdata/locks/locks.h
+++ b/libnetdata/locks/locks.h
@@ -11,24 +11,39 @@ typedef pthread_mutex_t netdata_mutex_t;
typedef struct netdata_spinlock {
bool locked;
+#ifdef NETDATA_INTERNAL_CHECKS
+ pid_t locker_pid;
+ size_t spins;
+#endif
} SPINLOCK;
-#define NETDATA_SPINLOCK_INITIALIZER (SPINLOCK){ .locked = false }
+
+#define NETDATA_SPINLOCK_INITIALIZER \
+ { .locked = false }
+
void netdata_spinlock_init(SPINLOCK *spinlock);
void netdata_spinlock_lock(SPINLOCK *spinlock);
void netdata_spinlock_unlock(SPINLOCK *spinlock);
+bool netdata_spinlock_trylock(SPINLOCK *spinlock);
#ifdef NETDATA_TRACE_RWLOCKS
+
+typedef enum {
+ RWLOCK_REQUEST_READ = (1 << 0),
+ RWLOCK_REQUEST_WRITE = (1 << 1),
+ RWLOCK_REQUEST_TRYREAD = (1 << 2),
+ RWLOCK_REQUEST_TRYWRITE = (1 << 3),
+} LOCKER_REQUEST;
+
typedef struct netdata_rwlock_locker {
+ LOCKER_REQUEST lock;
+ bool got_it;
pid_t pid;
+ size_t refcount;
const char *tag;
- char lock; // 'R', 'W'
const char *file;
const char *function;
unsigned long line;
- size_t callers;
- usec_t start_s;
- struct netdata_rwlock_t **all_caller_locks;
- struct netdata_rwlock_locker *next;
+ struct netdata_rwlock_locker *next, *prev;
} netdata_rwlock_locker;
typedef struct netdata_rwlock_t {
@@ -37,6 +52,7 @@ typedef struct netdata_rwlock_t {
size_t writers; // the number of writers on the lock
netdata_mutex_t lockers_mutex; // a mutex to protect the linked list of the lock holding threads
netdata_rwlock_locker *lockers; // the linked list of the lock holding threads
+ Pvoid_t lockers_pid_JudyL;
} netdata_rwlock_t;
#define NETDATA_RWLOCK_INITIALIZER { \
@@ -44,7 +60,8 @@ typedef struct netdata_rwlock_t {
.readers = 0, \
.writers = 0, \
.lockers_mutex = NETDATA_MUTEX_INITIALIZER, \
- .lockers = NULL \
+ .lockers = NULL, \
+ .lockers_pid_JudyL = NULL, \
}
#else // NETDATA_TRACE_RWLOCKS
diff --git a/libnetdata/log/README.md b/libnetdata/log/README.md
index a767dd446..5f9e5bc7b 100644
--- a/libnetdata/log/README.md
+++ b/libnetdata/log/README.md
@@ -1,5 +1,15 @@
+# Log
+
+The netdata log library provides debug, info, error and fatal logging.
+By default, netdata keeps an access log, an error log and a collector log.
+
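
A minimal sketch of using the library — illustrative only; it relies on the info()/error()/collector_*() macros declared in log.h later in this patch:

#include <errno.h>
#include "libnetdata.h"   // assumed umbrella header exposing the log.h macros

void example_logging(void) {
    info("agent thread started");                  // daemon messages: error log (or stderr if not redirected)
    collector_info("collector started");           // collector messages: collector log (the plugin's stderr)

    errno = EINVAL;
    collector_error("operation failed");           // error-level macros also append errno and its description

    // fatal() logs the message, reports it anonymously via send_statistics() and aborts the process:
    // fatal("unrecoverable condition");
}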
diff --git a/libnetdata/log/log.c b/libnetdata/log/log.c
index fb3b2d034..1dcdba9c2 100644
--- a/libnetdata/log/log.c
+++ b/libnetdata/log/log.c
@@ -14,6 +14,7 @@ uint64_t debug_flags = 0;
int access_log_syslog = 1;
int error_log_syslog = 1;
+int collector_log_syslog = 1;
int output_log_syslog = 1; // debug log
int health_log_syslog = 1;
@@ -23,11 +24,15 @@ FILE *stdaccess = NULL;
int stdhealth_fd = -1;
FILE *stdhealth = NULL;
+int stdcollector_fd = -1;
+FILE *stderror = NULL;
+
const char *stdaccess_filename = NULL;
const char *stderr_filename = NULL;
const char *stdout_filename = NULL;
const char *facility_log = NULL;
const char *stdhealth_filename = NULL;
+const char *stdcollector_filename = NULL;
#ifdef ENABLE_ACLK
const char *aclklog_filename = NULL;
@@ -573,8 +578,14 @@ void reopen_all_log_files() {
if(stdout_filename)
open_log_file(STDOUT_FILENO, stdout, stdout_filename, &output_log_syslog, 0, NULL);
- if(stderr_filename)
- open_log_file(STDERR_FILENO, stderr, stderr_filename, &error_log_syslog, 0, NULL);
+ if(stdcollector_filename)
+ open_log_file(STDERR_FILENO, stderr, stdcollector_filename, &collector_log_syslog, 0, NULL);
+
+ if(stderr_filename) {
+ log_lock();
+ stderror = open_log_file(stdcollector_fd, stderror, stderr_filename, &error_log_syslog, 1, &stdcollector_fd);
+ log_unlock();
+ }
#ifdef ENABLE_ACLK
if (aclklog_enabled)
@@ -593,7 +604,11 @@ void open_all_log_files() {
open_log_file(STDIN_FILENO, stdin, "/dev/null", NULL, 0, NULL);
open_log_file(STDOUT_FILENO, stdout, stdout_filename, &output_log_syslog, 0, NULL);
- open_log_file(STDERR_FILENO, stderr, stderr_filename, &error_log_syslog, 0, NULL);
+ open_log_file(STDERR_FILENO, stderr, stdcollector_filename, &collector_log_syslog, 0, NULL);
+
+ log_lock();
+ stderror = open_log_file(stdcollector_fd, NULL, stderr_filename, &error_log_syslog, 1, &stdcollector_fd);
+ log_unlock();
#ifdef ENABLE_ACLK
if(aclklog_enabled)
@@ -616,7 +631,9 @@ int error_log_limit(int reset) {
static time_t start = 0;
static unsigned long counter = 0, prevented = 0;
- // fprintf(stderr, "FLOOD: counter=%lu, allowed=%lu, backup=%lu, period=%llu\n", counter, error_log_errors_per_period, error_log_errors_per_period_backup, (unsigned long long)error_log_throttle_period);
+ FILE *fp = (!stderror) ? stderr : stderror;
+
+ // fprintf(fp, "FLOOD: counter=%lu, allowed=%lu, backup=%lu, period=%llu\n", counter, error_log_errors_per_period, error_log_errors_per_period_backup, (unsigned long long)error_log_throttle_period);
// do not throttle if the period is 0
if(error_log_throttle_period == 0)
@@ -638,7 +655,7 @@ int error_log_limit(int reset) {
char date[LOG_DATE_LENGTH];
log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
fprintf(
- stderr,
+ fp,
"%s: %s LOG FLOOD PROTECTION reset for process '%s' "
"(prevented %lu logs in the last %"PRId64" seconds).\n",
date,
@@ -661,7 +678,7 @@ int error_log_limit(int reset) {
char date[LOG_DATE_LENGTH];
log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
fprintf(
- stderr,
+ fp,
"%s: %s LOG FLOOD PROTECTION resuming logging from process '%s' "
"(prevented %lu logs in the last %"PRId64" seconds).\n",
date,
@@ -685,7 +702,7 @@ int error_log_limit(int reset) {
char date[LOG_DATE_LENGTH];
log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
fprintf(
- stderr,
+ fp,
"%s: %s LOG FLOOD PROTECTION too many logs (%lu logs in %"PRId64" seconds, threshold is set to %lu logs "
"in %"PRId64" seconds). Preventing more logs from process '%s' for %"PRId64" seconds.\n",
date,
@@ -758,9 +775,10 @@ void debug_int( const char *file, const char *function, const unsigned long line
// ----------------------------------------------------------------------------
// info log
-void info_int( const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... )
+void info_int( int is_collector, const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... )
{
va_list args;
+ FILE *fp = (is_collector || !stderror) ? stderr : stderror;
log_lock();
@@ -770,7 +788,7 @@ void info_int( const char *file __maybe_unused, const char *function __maybe_unu
return;
}
- if(error_log_syslog) {
+ if(collector_log_syslog) {
va_start( args, fmt );
vsyslog(LOG_INFO, fmt, args );
va_end( args );
@@ -781,14 +799,15 @@ void info_int( const char *file __maybe_unused, const char *function __maybe_unu
va_start( args, fmt );
#ifdef NETDATA_INTERNAL_CHECKS
- fprintf(stderr, "%s: %s INFO : %s : (%04lu@%-20.20s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function);
+ fprintf(fp, "%s: %s INFO : %s : (%04lu@%-20.20s:%-15.15s): ",
+ date, program_name, netdata_thread_tag(), line, file, function);
#else
- fprintf(stderr, "%s: %s INFO : %s : ", date, program_name, netdata_thread_tag());
+ fprintf(fp, "%s: %s INFO : %s : ", date, program_name, netdata_thread_tag());
#endif
- vfprintf( stderr, fmt, args );
+ vfprintf(fp, fmt, args );
va_end( args );
- fputc('\n', stderr);
+ fputc('\n', fp);
log_unlock();
}
@@ -819,6 +838,8 @@ static const char *strerror_result_string(const char *a, const char *b) { (void)
#endif
void error_limit_int(ERROR_LIMIT *erl, const char *prefix, const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... ) {
+ FILE *fp = (!stderror) ? stderr : stderror;
+
if(erl->sleep_ut)
sleep_usec(erl->sleep_ut);
@@ -842,7 +863,7 @@ void error_limit_int(ERROR_LIMIT *erl, const char *prefix, const char *file __ma
return;
}
- if(error_log_syslog) {
+ if(collector_log_syslog) {
va_start( args, fmt );
vsyslog(LOG_ERR, fmt, args );
va_end( args );
@@ -853,26 +874,29 @@ void error_limit_int(ERROR_LIMIT *erl, const char *prefix, const char *file __ma
va_start( args, fmt );
#ifdef NETDATA_INTERNAL_CHECKS
- fprintf(stderr, "%s: %s %-5.5s : %s : (%04lu@%-20.20s:%-15.15s): ", date, program_name, prefix, netdata_thread_tag(), line, file, function);
+ fprintf(fp, "%s: %s %-5.5s : %s : (%04lu@%-20.20s:%-15.15s): ",
+ date, program_name, prefix, netdata_thread_tag(), line, file, function);
#else
- fprintf(stderr, "%s: %s %-5.5s : %s : ", date, program_name, prefix, netdata_thread_tag());
+ fprintf(fp, "%s: %s %-5.5s : %s : ", date, program_name, prefix, netdata_thread_tag());
#endif
- vfprintf( stderr, fmt, args );
+ vfprintf(fp, fmt, args );
va_end( args );
if(erl->count > 1)
- fprintf(stderr, " (similar messages repeated %zu times in the last %llu secs)", erl->count, (unsigned long long)(erl->last_logged ? now - erl->last_logged : 0));
+ fprintf(fp, " (similar messages repeated %zu times in the last %llu secs)",
+ erl->count, (unsigned long long)(erl->last_logged ? now - erl->last_logged : 0));
if(erl->sleep_ut)
- fprintf(stderr, " (sleeping for %llu microseconds every time this happens)", erl->sleep_ut);
+ fprintf(fp, " (sleeping for %llu microseconds every time this happens)", erl->sleep_ut);
if(__errno) {
char buf[1024];
- fprintf(stderr, " (errno %d, %s)\n", __errno, strerror_result(strerror_r(__errno, buf, 1023), buf));
+ fprintf(fp,
+ " (errno %d, %s)\n", __errno, strerror_result(strerror_r(__errno, buf, 1023), buf));
errno = 0;
}
else
- fputc('\n', stderr);
+ fputc('\n', fp);
erl->last_logged = now;
erl->count = 0;
@@ -880,9 +904,10 @@ void error_limit_int(ERROR_LIMIT *erl, const char *prefix, const char *file __ma
log_unlock();
}
-void error_int(const char *prefix, const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... ) {
+void error_int(int is_collector, const char *prefix, const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... ) {
// save a copy of errno - just in case this function generates a new error
int __errno = errno;
+ FILE *fp = (is_collector || !stderror) ? stderr : stderror;
va_list args;
@@ -894,7 +919,7 @@ void error_int(const char *prefix, const char *file __maybe_unused, const char *
return;
}
- if(error_log_syslog) {
+ if(collector_log_syslog) {
va_start( args, fmt );
vsyslog(LOG_ERR, fmt, args );
va_end( args );
@@ -905,20 +930,22 @@ void error_int(const char *prefix, const char *file __maybe_unused, const char *
va_start( args, fmt );
#ifdef NETDATA_INTERNAL_CHECKS
- fprintf(stderr, "%s: %s %-5.5s : %s : (%04lu@%-20.20s:%-15.15s): ", date, program_name, prefix, netdata_thread_tag(), line, file, function);
+ fprintf(fp, "%s: %s %-5.5s : %s : (%04lu@%-20.20s:%-15.15s): ",
+ date, program_name, prefix, netdata_thread_tag(), line, file, function);
#else
- fprintf(stderr, "%s: %s %-5.5s : %s : ", date, program_name, prefix, netdata_thread_tag());
+ fprintf(fp, "%s: %s %-5.5s : %s : ", date, program_name, prefix, netdata_thread_tag());
#endif
- vfprintf( stderr, fmt, args );
+ vfprintf(fp, fmt, args );
va_end( args );
if(__errno) {
char buf[1024];
- fprintf(stderr, " (errno %d, %s)\n", __errno, strerror_result(strerror_r(__errno, buf, 1023), buf));
+ fprintf(fp,
+ " (errno %d, %s)\n", __errno, strerror_result(strerror_r(__errno, buf, 1023), buf));
errno = 0;
}
else
- fputc('\n', stderr);
+ fputc('\n', fp);
log_unlock();
}
@@ -933,23 +960,27 @@ static void crash_netdata(void) {
#ifdef HAVE_BACKTRACE
#define BT_BUF_SIZE 100
static void print_call_stack(void) {
+ FILE *fp = (!stderror) ? stderr : stderror;
+
int nptrs;
void *buffer[BT_BUF_SIZE];
nptrs = backtrace(buffer, BT_BUF_SIZE);
if(nptrs)
- backtrace_symbols_fd(buffer, nptrs, fileno(stderr));
+ backtrace_symbols_fd(buffer, nptrs, fileno(fp));
}
#endif
void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
+ FILE *fp = (!stderror) ? stderr : stderror;
+
// save a copy of errno - just in case this function generates a new error
int __errno = errno;
va_list args;
const char *thread_tag;
char os_threadname[NETDATA_THREAD_NAME_MAX + 1];
- if(error_log_syslog) {
+ if(collector_log_syslog) {
va_start( args, fmt );
vsyslog(LOG_CRIT, fmt, args );
va_end( args );
@@ -970,15 +1001,16 @@ void fatal_int( const char *file, const char *function, const unsigned long line
va_start( args, fmt );
#ifdef NETDATA_INTERNAL_CHECKS
- fprintf(stderr, "%s: %s FATAL : %s : (%04lu@%-20.20s:%-15.15s): ", date, program_name, thread_tag, line, file, function);
+ fprintf(fp,
+ "%s: %s FATAL : %s : (%04lu@%-20.20s:%-15.15s): ", date, program_name, thread_tag, line, file, function);
#else
- fprintf(stderr, "%s: %s FATAL : %s : ", date, program_name, thread_tag);
+ fprintf(fp, "%s: %s FATAL : %s : ", date, program_name, thread_tag);
#endif
- vfprintf( stderr, fmt, args );
+ vfprintf(fp, fmt, args );
va_end( args );
perror(" # ");
- fputc('\n', stderr);
+ fputc('\n', fp);
log_unlock();
@@ -986,7 +1018,15 @@ void fatal_int( const char *file, const char *function, const unsigned long line
snprintfz(action_data, 70, "%04lu@%-10.10s:%-15.15s/%d", line, file, function, __errno);
char action_result[60+1];
- snprintfz(action_result, 60, "%s:%s", program_name, strncmp(thread_tag, "STREAM_RECEIVER", strlen("STREAM_RECEIVER")) ? thread_tag : "[x]");
+ const char *tag_to_send = thread_tag;
+
+ // anonymize thread names
+ if(strncmp(thread_tag, THREAD_TAG_STREAM_RECEIVER, strlen(THREAD_TAG_STREAM_RECEIVER)) == 0)
+ tag_to_send = THREAD_TAG_STREAM_RECEIVER;
+ if(strncmp(thread_tag, THREAD_TAG_STREAM_SENDER, strlen(THREAD_TAG_STREAM_SENDER)) == 0)
+ tag_to_send = THREAD_TAG_STREAM_SENDER;
+
+ snprintfz(action_result, 60, "%s:%s", program_name, tag_to_send);
send_statistics("FATAL", action_result, action_data);
#ifdef HAVE_BACKTRACE
diff --git a/libnetdata/log/log.h b/libnetdata/log/log.h
index 11dab4c1d..3d9f0927d 100644
--- a/libnetdata/log/log.h
+++ b/libnetdata/log/log.h
@@ -61,10 +61,14 @@ extern FILE *stdaccess;
extern int stdhealth_fd;
extern FILE *stdhealth;
+extern int stdcollector_fd;
+extern FILE *stderror;
+
extern const char *stdaccess_filename;
extern const char *stderr_filename;
extern const char *stdout_filename;
extern const char *stdhealth_filename;
+extern const char *stdcollector_filename;
extern const char *facility_log;
#ifdef ENABLE_ACLK
@@ -106,7 +110,7 @@ typedef struct error_with_limit {
#ifdef NETDATA_INTERNAL_CHECKS
#define debug(type, args...) do { if(unlikely(debug_flags & type)) debug_int(__FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
-#define internal_error(condition, args...) do { if(unlikely(condition)) error_int("IERR", __FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
+#define internal_error(condition, args...) do { if(unlikely(condition)) error_int(0, "IERR", __FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
#define internal_fatal(condition, args...) do { if(unlikely(condition)) fatal_int(__FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
#else
#define debug(type, args...) debug_dummy()
@@ -114,17 +118,20 @@ typedef struct error_with_limit {
#define internal_fatal(args...) debug_dummy()
#endif
-#define info(args...) info_int(__FILE__, __FUNCTION__, __LINE__, ##args)
-#define infoerr(args...) error_int("INFO", __FILE__, __FUNCTION__, __LINE__, ##args)
-#define error(args...) error_int("ERROR", __FILE__, __FUNCTION__, __LINE__, ##args)
+#define info(args...) info_int(0, __FILE__, __FUNCTION__, __LINE__, ##args)
+#define collector_info(args...) info_int(1, __FILE__, __FUNCTION__, __LINE__, ##args)
+#define infoerr(args...) error_int(0, "INFO", __FILE__, __FUNCTION__, __LINE__, ##args)
+#define error(args...) error_int(0, "ERROR", __FILE__, __FUNCTION__, __LINE__, ##args)
+#define collector_infoerr(args...) error_int(1, "INFO", __FILE__, __FUNCTION__, __LINE__, ##args)
+#define collector_error(args...) error_int(1, "ERROR", __FILE__, __FUNCTION__, __LINE__, ##args)
#define error_limit(erl, args...) error_limit_int(erl, "ERROR", __FILE__, __FUNCTION__, __LINE__, ##args)
#define fatal(args...) fatal_int(__FILE__, __FUNCTION__, __LINE__, ##args)
#define fatal_assert(expr) ((expr) ? (void)(0) : fatal_int(__FILE__, __FUNCTION__, __LINE__, "Assertion `%s' failed", #expr))
void send_statistics(const char *action, const char *action_result, const char *action_data);
void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
-void info_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
-void error_int( const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(5, 6);
+void info_int( int is_collector, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(5, 6);
+void error_int( int is_collector, const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(6, 7);
void error_limit_int(ERROR_LIMIT *erl, const char *prefix, const char *file __maybe_unused, const char *function __maybe_unused, unsigned long line __maybe_unused, const char *fmt, ... ) PRINTFLIKE(6, 7);;
void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) NORETURN PRINTFLIKE(4, 5);
void log_access( const char *fmt, ... ) PRINTFLIKE(1, 2);
diff --git a/libnetdata/onewayalloc/README.md b/libnetdata/onewayalloc/README.md
index 1f459c263..3fa0d9fd3 100644
--- a/libnetdata/onewayalloc/README.md
+++ b/libnetdata/onewayalloc/README.md
@@ -1,6 +1,10 @@
# One Way Allocator
diff --git a/libnetdata/onewayalloc/onewayalloc.c b/libnetdata/onewayalloc/onewayalloc.c
index 59c3b6859..2f007b189 100644
--- a/libnetdata/onewayalloc/onewayalloc.c
+++ b/libnetdata/onewayalloc/onewayalloc.c
@@ -14,6 +14,12 @@ typedef struct owa_page {
struct owa_page *last; // the last page on the list - we currently allocate on this
} OWA_PAGE;
+static size_t onewayalloc_total_memory = 0;
+
+size_t onewayalloc_allocated_memory(void) {
+ return __atomic_load_n(&onewayalloc_total_memory, __ATOMIC_RELAXED);
+}
+
// allocations need to be aligned to CPU register width
// https://en.wikipedia.org/wiki/Data_structure_alignment
static inline size_t natural_alignment(size_t size) {
@@ -60,6 +66,7 @@ static OWA_PAGE *onewayalloc_create_internal(OWA_PAGE *head, size_t size_hint) {
// OWA_PAGE *page = (OWA_PAGE *)netdata_mmap(NULL, size, MAP_ANONYMOUS|MAP_PRIVATE, 0);
// if(unlikely(!page)) fatal("Cannot allocate onewayalloc buffer of size %zu", size);
OWA_PAGE *page = (OWA_PAGE *)mallocz(size);
+ __atomic_add_fetch(&onewayalloc_total_memory, size, __ATOMIC_RELAXED);
page->size = size;
page->offset = natural_alignment(sizeof(OWA_PAGE));
@@ -183,11 +190,17 @@ void onewayalloc_destroy(ONEWAYALLOC *owa) {
// head->stats_mallocs_made, head->stats_mallocs_size,
// head->stats_pages, head->stats_pages_size);
+ size_t total_size = 0;
OWA_PAGE *page = head;
while(page) {
+ total_size += page->size;
+
OWA_PAGE *p = page;
page = page->next;
+
// munmap(p, p->size);
freez(p);
}
+
+ __atomic_sub_fetch(&onewayalloc_total_memory, total_size, __ATOMIC_RELAXED);
}
diff --git a/libnetdata/onewayalloc/onewayalloc.h b/libnetdata/onewayalloc/onewayalloc.h
index e536e0542..a415b063b 100644
--- a/libnetdata/onewayalloc/onewayalloc.h
+++ b/libnetdata/onewayalloc/onewayalloc.h
@@ -16,4 +16,6 @@ void onewayalloc_freez(ONEWAYALLOC *owa, const void *ptr);
void *onewayalloc_doublesize(ONEWAYALLOC *owa, const void *src, size_t oldsize);
+size_t onewayalloc_allocated_memory(void);
+
#endif // ONEWAYALLOC_H
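
A minimal sketch of observing the new memory accounting — illustrative only; onewayalloc_create() and onewayalloc_mallocz() are assumed to be the allocator's usual entry points (they are not part of this hunk):

#include "libnetdata.h"   // assumed umbrella header exposing the onewayalloc API

void example_owa(void) {
    ONEWAYALLOC *owa = onewayalloc_create(0);      // assumed creator; 0 = default page size hint

    void *buffer = onewayalloc_mallocz(owa, 1024); // assumed allocator; memory is released all at once
    (void)buffer;

    info("onewayalloc currently holds %zu bytes", onewayalloc_allocated_memory());

    onewayalloc_destroy(owa);                      // frees every page and decrements the global counter
}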
diff --git a/libnetdata/os.c b/libnetdata/os.c
index 196288a6a..133c02248 100644
--- a/libnetdata/os.c
+++ b/libnetdata/os.c
@@ -6,61 +6,77 @@
// system functions
// to retrieve settings of the system
-int processors = 1;
-long get_system_cpus(void) {
- processors = 1;
+#define CPUS_FOR_COLLECTORS 0
+#define CPUS_FOR_NETDATA 1
-#ifdef __APPLE__
- int32_t tmp_processors;
+long get_system_cpus_with_cache(bool cache, bool for_netdata) {
+ static long processors[2] = { 0, 0 };
- if (unlikely(GETSYSCTL_BY_NAME("hw.logicalcpu", tmp_processors))) {
- error("Assuming system has %d processors.", processors);
- } else {
- processors = tmp_processors;
- }
+ int index = for_netdata ? CPUS_FOR_NETDATA : CPUS_FOR_COLLECTORS;
+
+ if(likely(cache && processors[index] > 0))
+ return processors[index];
+
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#if defined(__APPLE__)
+#define HW_CPU_NAME "hw.logicalcpu"
+#else
+#define HW_CPU_NAME "hw.ncpu"
+#endif
- return processors;
-#elif __FreeBSD__
int32_t tmp_processors;
+ bool error = false;
- if (unlikely(GETSYSCTL_BY_NAME("hw.ncpu", tmp_processors))) {
- error("Assuming system has %d processors.", processors);
- } else {
- processors = tmp_processors;
- }
+ if (unlikely(GETSYSCTL_BY_NAME(HW_CPU_NAME, tmp_processors)))
+ error = true;
+ else
+ processors[index] = tmp_processors;
+
+ if(processors[index] < 1) {
+ processors[index] = 1;
- return processors;
+ if(error)
+        error("Assuming system has %ld processors.", processors[index]);
+ }
+
+ return processors[index];
#else
char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/stat", netdata_configured_host_prefix);
+ snprintfz(filename, FILENAME_MAX, "%s/proc/stat",
+ (!for_netdata && netdata_configured_host_prefix) ? netdata_configured_host_prefix : "");
procfile *ff = procfile_open(filename, NULL, PROCFILE_FLAG_DEFAULT);
if(!ff) {
- error("Cannot open file '%s'. Assuming system has %d processors.", filename, processors);
- return processors;
+ processors[index] = 1;
+ error("Cannot open file '%s'. Assuming system has %ld processors.", filename, processors[index]);
+ return processors[index];
}
ff = procfile_readall(ff);
if(!ff) {
- error("Cannot open file '%s'. Assuming system has %d processors.", filename, processors);
- return processors;
+ processors[index] = 1;
+ error("Cannot open file '%s'. Assuming system has %ld processors.", filename, processors[index]);
+ return processors[index];
}
- processors = 0;
+ long tmp_processors = 0;
unsigned int i;
for(i = 0; i < procfile_lines(ff); i++) {
if(!procfile_linewords(ff, i)) continue;
- if(strncmp(procfile_lineword(ff, i, 0), "cpu", 3) == 0) processors++;
+ if(strncmp(procfile_lineword(ff, i, 0), "cpu", 3) == 0)
+ tmp_processors++;
}
- processors--;
- if(processors < 1) processors = 1;
-
procfile_close(ff);
- debug(D_SYSTEM, "System has %d processors.", processors);
- return processors;
+ processors[index] = --tmp_processors;
+
+ if(processors[index] < 1)
+ processors[index] = 1;
+
+ debug(D_SYSTEM, "System has %ld processors.", processors[index]);
+ return processors[index];
#endif /* __APPLE__, __FreeBSD__ */
}
@@ -90,7 +106,7 @@ pid_t get_system_pid_max(void) {
read = 1;
char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/sys/kernel/pid_max", netdata_configured_host_prefix);
+ snprintfz(filename, FILENAME_MAX, "%s/proc/sys/kernel/pid_max", netdata_configured_host_prefix?netdata_configured_host_prefix:"");
unsigned long long max = 0;
if(read_single_number_file(filename, &max) != 0) {
@@ -120,6 +136,56 @@ void get_system_HZ(void) {
system_hz = (unsigned int) ticks;
}
+static inline unsigned long cpuset_str2ul(char **s) {
+ unsigned long n = 0;
+ char c;
+ for(c = **s; c >= '0' && c <= '9' ; c = *(++*s)) {
+ n *= 10;
+ n += c - '0';
+ }
+ return n;
+}
+
+unsigned long read_cpuset_cpus(const char *filename, long system_cpus) {
+ static char *buf = NULL;
+ static size_t buf_size = 0;
+
+ if(!buf) {
+ buf_size = 100U + 6 * system_cpus; // taken from kernel/cgroup/cpuset.c
+ buf = mallocz(buf_size + 1);
+ }
+
+ int ret = read_file(filename, buf, buf_size);
+
+ if(!ret) {
+ char *s = buf;
+ unsigned long ncpus = 0;
+
+ // parse the cpuset string and calculate the number of cpus the cgroup is allowed to use
+ while(*s) {
+ unsigned long n = cpuset_str2ul(&s);
+ ncpus++;
+ if(*s == ',') {
+ s++;
+ continue;
+ }
+ if(*s == '-') {
+ s++;
+ unsigned long m = cpuset_str2ul(&s);
+ ncpus += m - n; // calculate the number of cpus in the region
+ }
+ s++;
+ }
+
+ if(!ncpus)
+ return 0;
+
+ return ncpus;
+ }
+
+ return 0;
+}
+
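
For example, with the usual cgroup cpuset syntax a file containing `0-3,8,10-11` is counted as 4 + 1 + 2 = 7 CPUs. A minimal calling sketch — illustrative only; the cgroup path is hypothetical:

#include "libnetdata.h"   // assumed umbrella header exposing read_cpuset_cpus() and get_system_cpus()

void example_cpuset(void) {
    long system_cpus = get_system_cpus();          // cached CPU count, used only to size the read buffer

    // hypothetical cgroup v2 path, for illustration only
    unsigned long allowed = read_cpuset_cpus("/sys/fs/cgroup/cpuset.cpus.effective", system_cpus);

    if(allowed)
        info("the cgroup may use %lu of the %ld system CPUs", allowed, system_cpus);
}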
// =====================================================================================================================
// FreeBSD
diff --git a/libnetdata/os.h b/libnetdata/os.h
index 67abf0be4..3cda79ed7 100644
--- a/libnetdata/os.h
+++ b/libnetdata/os.h
@@ -48,8 +48,10 @@ int getsysctl_by_name(const char *name, void *ptr, size_t len);
extern const char *os_type;
-extern int processors;
-long get_system_cpus(void);
+#define get_system_cpus() get_system_cpus_with_cache(true, false)
+#define get_system_cpus_uncached() get_system_cpus_with_cache(false, false)
+long get_system_cpus_with_cache(bool cache, bool for_netdata);
+unsigned long read_cpuset_cpus(const char *filename, long system_cpus);
extern pid_t pid_max;
pid_t get_system_pid_max(void);
diff --git a/libnetdata/popen/README.md b/libnetdata/popen/README.md
index db4aefaed..804690d13 100644
--- a/libnetdata/popen/README.md
+++ b/libnetdata/popen/README.md
@@ -1,5 +1,15 @@
+# popen
+
+Process management library: helpers to spawn child processes with `posix_spawn()` and to track their PIDs.
+
+
diff --git a/libnetdata/popen/popen.c b/libnetdata/popen/popen.c
index 57f957f63..5ed74ae95 100644
--- a/libnetdata/popen/popen.c
+++ b/libnetdata/popen/popen.c
@@ -43,7 +43,7 @@ static void netdata_popen_tracking_add_pid_unsafe(pid_t pid) {
mp = mallocz(sizeof(struct netdata_popen));
mp->pid = pid;
- DOUBLE_LINKED_LIST_PREPEND_UNSAFE(netdata_popen_root, mp, prev, next);
+ DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(netdata_popen_root, mp, prev, next);
}
// myp_del deletes pid if we're tracking.
@@ -61,7 +61,7 @@ static void netdata_popen_tracking_del_pid(pid_t pid) {
}
if(mp) {
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(netdata_popen_root, mp, prev, next);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(netdata_popen_root, mp, prev, next);
freez(mp);
}
else
@@ -96,7 +96,7 @@ void netdata_popen_tracking_cleanup(void) {
while(netdata_popen_root) {
struct netdata_popen *mp = netdata_popen_root;
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(netdata_popen_root, mp, prev, next);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(netdata_popen_root, mp, prev, next);
freez(mp);
}
@@ -163,8 +163,7 @@ static int popene_internal(volatile pid_t *pidptr, char **env, uint8_t flags, FI
posix_spawnattr_t attr;
posix_spawn_file_actions_t fa;
- int stdin_fd_to_exclude_from_closing = -1;
- int stdout_fd_to_exclude_from_closing = -1;
+ unsigned int fds_to_exclude_from_closing = OPEN_FD_EXCLUDE_STDERR;
if(posix_spawn_file_actions_init(&fa)) {
error("POPEN: posix_spawn_file_actions_init() failed.");
@@ -195,7 +194,7 @@ static int popene_internal(volatile pid_t *pidptr, char **env, uint8_t flags, FI
if (posix_spawn_file_actions_addopen(&fa, STDIN_FILENO, "/dev/null", O_RDONLY, 0)) {
error("POPEN: posix_spawn_file_actions_addopen() on stdin to /dev/null failed.");
// this is not a fatal error
- stdin_fd_to_exclude_from_closing = STDIN_FILENO;
+ fds_to_exclude_from_closing |= OPEN_FD_EXCLUDE_STDIN;
}
}
@@ -222,16 +221,13 @@ static int popene_internal(volatile pid_t *pidptr, char **env, uint8_t flags, FI
if (posix_spawn_file_actions_addopen(&fa, STDOUT_FILENO, "/dev/null", O_WRONLY, 0)) {
error("POPEN: posix_spawn_file_actions_addopen() on stdout to /dev/null failed.");
// this is not a fatal error
- stdout_fd_to_exclude_from_closing = STDOUT_FILENO;
+ fds_to_exclude_from_closing |= OPEN_FD_EXCLUDE_STDOUT;
}
}
if(flags & POPEN_FLAG_CLOSE_FD) {
// Mark all files to be closed by the exec() stage of posix_spawn()
- for(int i = (int)(sysconf(_SC_OPEN_MAX) - 1); i >= 0; i--) {
- if(likely(i != STDERR_FILENO && i != stdin_fd_to_exclude_from_closing && i != stdout_fd_to_exclude_from_closing))
- (void)fcntl(i, F_SETFD, FD_CLOEXEC);
- }
+ for_each_open_fd(OPEN_FD_ACTION_FD_CLOEXEC, fds_to_exclude_from_closing);
}
attr_rc = posix_spawnattr_init(&attr);
diff --git a/libnetdata/procfile/README.md b/libnetdata/procfile/README.md
index 65638030d..8610e77e5 100644
--- a/libnetdata/procfile/README.md
+++ b/libnetdata/procfile/README.md
@@ -1,6 +1,10 @@
# PROCFILE
@@ -28,7 +32,7 @@ For each iteration, the caller:
- calls `procfile_readall()` to read updated contents.
This call also rewinds (`lseek()` to 0) before reading it.
- For every file, a [BUFFER](/libnetdata/buffer/README.md) is used that is automatically adjusted to fit the entire
+ For every file, a [BUFFER](https://github.com/netdata/netdata/blob/master/libnetdata/buffer/README.md) is used that is automatically adjusted to fit the entire
file contents of the file. So the file is read with a single `read()` call (providing atomicity / consistency when
the data are read from the kernel).
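
A minimal sketch of the iteration the README describes — illustrative only; it combines the procfile calls that appear elsewhere in this patch:

#include "libnetdata.h"   // assumed umbrella header exposing the procfile API

void example_procfile(void) {
    procfile *ff = procfile_open("/proc/stat", NULL, PROCFILE_FLAG_DEFAULT);
    if(!ff) return;                                // errors are logged according to the PROCFILE_FLAG_* flags

    ff = procfile_readall(ff);                     // rewinds and reads the whole file with a single read()
    if(!ff) return;                                // on failure, the procfile has already been closed

    size_t i;
    for(i = 0; i < procfile_lines(ff); i++) {
        if(!procfile_linewords(ff, i)) continue;   // skip empty lines
        // procfile_lineword(ff, i, 0) is the first word of line i
    }

    procfile_close(ff);
}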
diff --git a/libnetdata/procfile/procfile.c b/libnetdata/procfile/procfile.c
index eb04316c3..cdf0f9723 100644
--- a/libnetdata/procfile/procfile.c
+++ b/libnetdata/procfile/procfile.c
@@ -296,7 +296,8 @@ procfile *procfile_readall(procfile *ff) {
debug(D_PROCFILE, "Reading file '%s', from position %zd with length %zd", procfile_filename(ff), s, (ssize_t)(ff->size - s));
r = read(ff->fd, &ff->data[s], ff->size - s);
if(unlikely(r == -1)) {
- if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd);
+ if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) collector_error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd);
+ else if(unlikely(ff->flags & PROCFILE_FLAG_ERROR_ON_ERROR_LOG)) error(PF_PREFIX ": Cannot read from file '%s' on fd %d", procfile_filename(ff), ff->fd);
procfile_close(ff);
return NULL;
}
@@ -306,7 +307,8 @@ procfile *procfile_readall(procfile *ff) {
// debug(D_PROCFILE, "Rewinding file '%s'", ff->filename);
if(unlikely(lseek(ff->fd, 0, SEEK_SET) == -1)) {
- if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff));
+ if(unlikely(!(ff->flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) collector_error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff));
+ else if(unlikely(ff->flags & PROCFILE_FLAG_ERROR_ON_ERROR_LOG)) error(PF_PREFIX ": Cannot rewind on file '%s'.", procfile_filename(ff));
procfile_close(ff);
return NULL;
}
@@ -403,7 +405,8 @@ procfile *procfile_open(const char *filename, const char *separators, uint32_t f
int fd = open(filename, procfile_open_flags, 0666);
if(unlikely(fd == -1)) {
- if(unlikely(!(flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) error(PF_PREFIX ": Cannot open file '%s'", filename);
+ if(unlikely(!(flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) collector_error(PF_PREFIX ": Cannot open file '%s'", filename);
+ else if(unlikely(flags & PROCFILE_FLAG_ERROR_ON_ERROR_LOG)) error(PF_PREFIX ": Cannot open file '%s'", filename);
return NULL;
}
diff --git a/libnetdata/procfile/procfile.h b/libnetdata/procfile/procfile.h
index cae4ad484..8db5b45f4 100644
--- a/libnetdata/procfile/procfile.h
+++ b/libnetdata/procfile/procfile.h
@@ -34,8 +34,9 @@ typedef struct {
// ----------------------------------------------------------------------------
// The procfile
-#define PROCFILE_FLAG_DEFAULT 0x00000000
-#define PROCFILE_FLAG_NO_ERROR_ON_FILE_IO 0x00000001
+#define PROCFILE_FLAG_DEFAULT             0x00000000 // on I/O errors, log to `collector.log`
+#define PROCFILE_FLAG_NO_ERROR_ON_FILE_IO 0x00000001 // on I/O errors, do not log anything
+#define PROCFILE_FLAG_ERROR_ON_ERROR_LOG  0x00000002 // on I/O errors, log to `error.log`
typedef enum __attribute__ ((__packed__)) procfile_separator {
PF_CHAR_IS_SEPARATOR,
diff --git a/libnetdata/required_dummies.h b/libnetdata/required_dummies.h
index ad1e8fb84..5a0d4e050 100644
--- a/libnetdata/required_dummies.h
+++ b/libnetdata/required_dummies.h
@@ -36,6 +36,7 @@ int health_variable_lookup(STRING *variable, struct rrdcalc *rc, NETDATA_DOUBLE
void rrdset_thread_rda_free(void){};
void sender_thread_buffer_free(void){};
void query_target_free(void){};
+void service_exits(void){};
// required by get_system_cpus()
char *netdata_configured_host_prefix = "";
diff --git a/libnetdata/simple_pattern/README.md b/libnetdata/simple_pattern/README.md
index cb377f84e..a0a7cf688 100644
--- a/libnetdata/simple_pattern/README.md
+++ b/libnetdata/simple_pattern/README.md
@@ -1,10 +1,14 @@
-# Netdata simple patterns
+# Simple patterns
Unix prefers regular expressions. But they are just too hard, too cryptic
to use, write and understand.
diff --git a/libnetdata/socket/security.c b/libnetdata/socket/security.c
index 88b3f6d93..7c5092150 100644
--- a/libnetdata/socket/security.c
+++ b/libnetdata/socket/security.c
@@ -310,7 +310,7 @@ int security_process_accept(SSL *ssl,int msg) {
int counter = 0;
while ((err = ERR_get_error()) != 0) {
ERR_error_string_n(err, buf, sizeof(buf));
- info("%d SSL Handshake error (%s) on socket %d ", counter++, ERR_error_string((long)SSL_get_error(ssl, test), NULL), sock);
+ error("%d SSL Handshake error (%s) on socket %d", counter++, ERR_error_string((long)SSL_get_error(ssl, test), NULL), sock);
}
return NETDATA_SSL_NO_HANDSHAKE;
}
diff --git a/libnetdata/socket/socket.c b/libnetdata/socket/socket.c
index 40271b623..69124b949 100644
--- a/libnetdata/socket/socket.c
+++ b/libnetdata/socket/socket.c
@@ -926,13 +926,17 @@ ssize_t netdata_ssl_read(SSL *ssl, void *buf, size_t num) {
int bytes, err, retries = 0;
//do {
- bytes = SSL_read(ssl, buf, (int)num);
- err = SSL_get_error(ssl, bytes);
- retries++;
- //} while (bytes <= 0 && (err == SSL_ERROR_WANT_READ));
+ bytes = SSL_read(ssl, buf, (int)num);
+ err = SSL_get_error(ssl, bytes);
+ retries++;
+ //} while (bytes <= 0 && err == SSL_ERROR_WANT_READ);
- if(unlikely(bytes <= 0))
- error("SSL_read() returned %d bytes, SSL error %d", bytes, err);
+ if(unlikely(bytes <= 0)) {
+ if (err == SSL_ERROR_WANT_WRITE || err == SSL_ERROR_WANT_READ) {
+ bytes = 0;
+ } else
+            error("SSL_read() returned %d bytes, SSL error %d", bytes, err);
+ }
if(retries > 1)
error_limit(&erl, "SSL_read() retried %d times", retries);
@@ -947,17 +951,21 @@ ssize_t netdata_ssl_write(SSL *ssl, const void *buf, size_t num) {
size_t total = 0;
//do {
- bytes = SSL_write(ssl, (uint8_t *)buf + total, (int)(num - total));
- err = SSL_get_error(ssl, bytes);
- retries++;
+ bytes = SSL_write(ssl, (uint8_t *)buf + total, (int)(num - total));
+ err = SSL_get_error(ssl, bytes);
+ retries++;
- if(bytes > 0)
- total += bytes;
+ if(bytes > 0)
+ total += bytes;
//} while ((bytes <= 0 && (err == SSL_ERROR_WANT_WRITE)) || (bytes > 0 && total < num));
- if(unlikely(bytes <= 0))
- error("SSL_write() returned %d bytes, SSL error %d", bytes, err);
+ if(unlikely(bytes <= 0)) {
+ if (err == SSL_ERROR_WANT_WRITE || err == SSL_ERROR_WANT_READ) {
+ bytes = 0;
+ } else
+ error("SSL_write() returned %d bytes, SSL error %d", bytes, err);
+ }
if(retries > 1)
error_limit(&erl, "SSL_write() retried %d times", retries);
@@ -1633,6 +1641,7 @@ void poll_events(LISTEN_SOCKETS *sockets
, int (*rcv_callback)(POLLINFO * /*pi*/, short int * /*events*/)
, int (*snd_callback)(POLLINFO * /*pi*/, short int * /*events*/)
, void (*tmr_callback)(void * /*timer_data*/)
+ , bool (*check_to_stop_callback)(void)
, SIMPLE_PATTERN *access_list
, int allow_dns
, void *data
@@ -1715,7 +1724,7 @@ void poll_events(LISTEN_SOCKETS *sockets
netdata_thread_cleanup_push(poll_events_cleanup, &p);
- while(!netdata_exit) {
+ while(!check_to_stop_callback()) {
if(unlikely(timer_usec)) {
now_usec = now_boottime_usec();
diff --git a/libnetdata/socket/socket.h b/libnetdata/socket/socket.h
index 282324273..9577453d5 100644
--- a/libnetdata/socket/socket.h
+++ b/libnetdata/socket/socket.h
@@ -10,18 +10,18 @@
#endif
typedef enum web_client_acl {
- WEB_CLIENT_ACL_NONE = 0,
- WEB_CLIENT_ACL_NOCHECK = 0,
- WEB_CLIENT_ACL_DASHBOARD = 1 << 0,
- WEB_CLIENT_ACL_REGISTRY = 1 << 1,
- WEB_CLIENT_ACL_BADGE = 1 << 2,
- WEB_CLIENT_ACL_MGMT = 1 << 3,
- WEB_CLIENT_ACL_STREAMING = 1 << 4,
- WEB_CLIENT_ACL_NETDATACONF = 1 << 5,
- WEB_CLIENT_ACL_SSL_OPTIONAL = 1 << 6,
- WEB_CLIENT_ACL_SSL_FORCE = 1 << 7,
- WEB_CLIENT_ACL_SSL_DEFAULT = 1 << 8,
- WEB_CLIENT_ACL_ACLK = 1 << 9,
+ WEB_CLIENT_ACL_NONE = (0),
+ WEB_CLIENT_ACL_NOCHECK = (0),
+ WEB_CLIENT_ACL_DASHBOARD = (1 << 0),
+ WEB_CLIENT_ACL_REGISTRY = (1 << 1),
+ WEB_CLIENT_ACL_BADGE = (1 << 2),
+ WEB_CLIENT_ACL_MGMT = (1 << 3),
+ WEB_CLIENT_ACL_STREAMING = (1 << 4),
+ WEB_CLIENT_ACL_NETDATACONF = (1 << 5),
+ WEB_CLIENT_ACL_SSL_OPTIONAL = (1 << 6),
+ WEB_CLIENT_ACL_SSL_FORCE = (1 << 7),
+ WEB_CLIENT_ACL_SSL_DEFAULT = (1 << 8),
+ WEB_CLIENT_ACL_ACLK = (1 << 9),
} WEB_CLIENT_ACL;
#define WEB_CLIENT_ACL_ALL 0xFFFF
@@ -202,6 +202,7 @@ void poll_events(LISTEN_SOCKETS *sockets
, int (*rcv_callback)(POLLINFO *pi, short int *events)
, int (*snd_callback)(POLLINFO *pi, short int *events)
, void (*tmr_callback)(void *timer_data)
+ , bool (*check_to_stop_callback)(void)
, SIMPLE_PATTERN *access_list
, int allow_dns
, void *data
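
poll_events() now receives its stop condition from the caller instead of testing the global netdata_exit flag inside the loop. A minimal sketch of such a callback — illustrative only:

#include "libnetdata.h"   // assumed umbrella header; netdata_exit is the agent's global shutdown flag

static bool web_server_should_stop(void) {
    return netdata_exit != 0;
}

// the function is then passed to poll_events() as the check_to_stop_callback
// argument, so each caller keeps control of its own shutdown policy.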
diff --git a/libnetdata/statistical/README.md b/libnetdata/statistical/README.md
index f254081d2..8fa101f0e 100644
--- a/libnetdata/statistical/README.md
+++ b/libnetdata/statistical/README.md
@@ -1,5 +1,12 @@
+# Statistical functions
+A library for fast calculation of statistical measures such as the average, median, etc.
diff --git a/libnetdata/storage_number/README.md b/libnetdata/storage_number/README.md
index 4cd19a98b..da2c3ccfd 100644
--- a/libnetdata/storage_number/README.md
+++ b/libnetdata/storage_number/README.md
@@ -1,6 +1,10 @@
# Netdata storage number
diff --git a/libnetdata/string/README.md b/libnetdata/string/README.md
index e73ab2696..b1c6e61c3 100644
--- a/libnetdata/string/README.md
+++ b/libnetdata/string/README.md
@@ -1,5 +1,10 @@
# STRING
@@ -17,4 +22,4 @@ index lookup to find it.
Once there is a `STRING *`, the actual `const char *` can be accessed with `string2str()`.
-All STRING should be constant. Changing the contents of a `const char *` that has been acquired by `string2str()` should never happen.
\ No newline at end of file
+All STRING should be constant. Changing the contents of a `const char *` that has been acquired by `string2str()` should never happen.
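
A minimal sketch of the interning API described above — illustrative only; string_strdupz() and string_freez() are assumed to be the usual acquire/release entry points (only string2str() is mentioned in this README):

#include "libnetdata.h"   // assumed umbrella header exposing STRING and string2str()

void example_string(void) {
    STRING *name = string_strdupz("system.cpu");   // assumed: acquire (or create) the interned entry

    const char *s = string2str(name);              // constant pointer, valid while a reference is held
    info("chart id is '%s'", s);

    string_freez(name);                            // assumed: drop the reference; the last one frees it
}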
diff --git a/libnetdata/string/string.c b/libnetdata/string/string.c
index d2db8aab4..4e232523c 100644
--- a/libnetdata/string/string.c
+++ b/libnetdata/string/string.c
@@ -56,14 +56,29 @@ static struct string_hashtable {
#define string_stats_atomic_decrement(var) __atomic_sub_fetch(&string_base.var, 1, __ATOMIC_RELAXED)
void string_statistics(size_t *inserts, size_t *deletes, size_t *searches, size_t *entries, size_t *references, size_t *memory, size_t *duplications, size_t *releases) {
- *inserts = string_base.inserts;
- *deletes = string_base.deletes;
- *searches = string_base.searches;
- *entries = (size_t)string_base.entries;
- *references = (size_t)string_base.active_references;
- *memory = (size_t)string_base.memory;
- *duplications = string_base.duplications;
- *releases = string_base.releases;
+ if(inserts)
+ *inserts = string_base.inserts;
+
+ if(deletes)
+ *deletes = string_base.deletes;
+
+ if(searches)
+ *searches = string_base.searches;
+
+ if(entries)
+ *entries = (size_t)string_base.entries;
+
+ if(references)
+ *references = (size_t)string_base.active_references;
+
+ if(memory)
+ *memory = (size_t)string_base.memory;
+
+ if(duplications)
+ *duplications = string_base.duplications;
+
+ if(releases)
+ *releases = string_base.releases;
}
#define string_entry_acquire(se) __atomic_add_fetch(&((se)->refcount), 1, __ATOMIC_SEQ_CST);
@@ -186,7 +201,7 @@ static inline STRING *string_index_insert(const char *str, size_t length) {
*ptr = string;
string_base.inserts++;
string_base.entries++;
- string_base.memory += (long)mem_size;
+ string_base.memory += (long)(mem_size + JUDYHS_INDEX_SIZE_ESTIMATE(length));
}
else {
// the item is already in the index
@@ -240,7 +255,7 @@ static inline void string_index_delete(STRING *string) {
size_t mem_size = sizeof(STRING) + string->length;
string_base.deletes++;
string_base.entries--;
- string_base.memory -= (long)mem_size;
+ string_base.memory -= (long)(mem_size + JUDYHS_INDEX_SIZE_ESTIMATE(string->length));
freez(string);
}
diff --git a/libnetdata/threads/README.md b/libnetdata/threads/README.md
index 75ab11b1e..71979feac 100644
--- a/libnetdata/threads/README.md
+++ b/libnetdata/threads/README.md
@@ -1,5 +1,12 @@
+# Threads
+Netdata uses a custom threads library that wraps POSIX threads, handling thread naming, stack size configuration and per-thread cleanup.
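
A minimal sketch of spawning a thread with this library — illustrative only; NETDATA_THREAD_OPTION_DEFAULT and the trailing parameters of netdata_thread_create() are assumed, since the hunk below only shows the start of its signature:

#include "libnetdata.h"   // assumed umbrella header exposing netdata_thread_t and netdata_thread_create()

static void *my_worker(void *arg) {
    (void)arg;
    info("worker started");
    return NULL;
}

void example_thread(void) {
    netdata_thread_t thread;

    // the tag names the thread in the logs; the option constant is assumed
    if(netdata_thread_create(&thread, "MY_WORKER", NETDATA_THREAD_OPTION_DEFAULT, my_worker, NULL) != 0)
        error("failed to spawn the MY_WORKER thread");
}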
diff --git a/libnetdata/threads/threads.c b/libnetdata/threads/threads.c
index 5c3d2675c..16de45fd1 100644
--- a/libnetdata/threads/threads.c
+++ b/libnetdata/threads/threads.c
@@ -2,8 +2,7 @@
#include "../libnetdata.h"
-static size_t default_stacksize = 0, wanted_stacksize = 0;
-static pthread_attr_t *attr = NULL;
+static pthread_attr_t *netdata_threads_attr = NULL;
// ----------------------------------------------------------------------------
// per thread data
@@ -69,46 +68,48 @@ size_t netdata_threads_init(void) {
// --------------------------------------------------------------------
// get the required stack size of the threads of netdata
- attr = callocz(1, sizeof(pthread_attr_t));
- i = pthread_attr_init(attr);
+ netdata_threads_attr = callocz(1, sizeof(pthread_attr_t));
+ i = pthread_attr_init(netdata_threads_attr);
if(i != 0)
fatal("pthread_attr_init() failed with code %d.", i);
- i = pthread_attr_getstacksize(attr, &default_stacksize);
+ size_t stacksize = 0;
+ i = pthread_attr_getstacksize(netdata_threads_attr, &stacksize);
if(i != 0)
fatal("pthread_attr_getstacksize() failed with code %d.", i);
else
- debug(D_OPTIONS, "initial pthread stack size is %zu bytes", default_stacksize);
+ debug(D_OPTIONS, "initial pthread stack size is %zu bytes", stacksize);
- return default_stacksize;
+ return stacksize;
}
// ----------------------------------------------------------------------------
// late initialization
void netdata_threads_init_after_fork(size_t stacksize) {
- wanted_stacksize = stacksize;
int i;
// ------------------------------------------------------------------------
- // set default pthread stack size
+ // set pthread stack size
- if(attr && default_stacksize < wanted_stacksize && wanted_stacksize > 0) {
- i = pthread_attr_setstacksize(attr, wanted_stacksize);
+ if(netdata_threads_attr && stacksize > (size_t)PTHREAD_STACK_MIN) {
+ i = pthread_attr_setstacksize(netdata_threads_attr, stacksize);
if(i != 0)
- fatal("pthread_attr_setstacksize() to %zu bytes, failed with code %d.", wanted_stacksize, i);
+ error("pthread_attr_setstacksize() to %zu bytes, failed with code %d.", stacksize, i);
else
- debug(D_SYSTEM, "Successfully set pthread stacksize to %zu bytes", wanted_stacksize);
+ info("Set threads stack size to %zu bytes", stacksize);
}
+ else
+ error("Invalid pthread stacksize %zu", stacksize);
}
-
// ----------------------------------------------------------------------------
// netdata_thread_create
-extern void rrdset_thread_rda_free(void);
-extern void sender_thread_buffer_free(void);
-extern void query_target_free(void);
+void rrdset_thread_rda_free(void);
+void sender_thread_buffer_free(void);
+void query_target_free(void);
+void service_exits(void);
static void thread_cleanup(void *ptr) {
if(netdata_thread != ptr) {
@@ -123,6 +124,8 @@ static void thread_cleanup(void *ptr) {
rrdset_thread_rda_free();
query_target_free();
thread_cache_destroy();
+ service_exits();
+ worker_unregister();
freez((void *)netdata_thread->tag);
netdata_thread->tag = NULL;
@@ -214,7 +217,7 @@ int netdata_thread_create(netdata_thread_t *thread, const char *tag, NETDATA_THR
info->start_routine = start_routine;
info->options = options;
- int ret = pthread_create(thread, attr, thread_start, info);
+ int ret = pthread_create(thread, netdata_threads_attr, thread_start, info);
if(ret != 0)
error("failed to create new thread for %s. pthread_create() failed with code %d", tag, ret);
diff --git a/libnetdata/url/README.md b/libnetdata/url/README.md
index bd289d955..cca6f8731 100644
--- a/libnetdata/url/README.md
+++ b/libnetdata/url/README.md
@@ -1,5 +1,14 @@
+# URL
+
+The URL library provides helpers commonly needed when handling URLs: conversion to and from hex,
+URL encoding/decoding, and query string parsing.
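As a quick illustration of the conversions mentioned above, here is a self-contained percent-encoding sketch; it deliberately avoids naming the library's own helpers, since their signatures are not part of this patch:

    // Generic percent-encoding illustration (not the libnetdata API): every byte
    // outside the unreserved set becomes '%' followed by two hex digits.
    // dst_size must be at least 1.
    #include <ctype.h>
    #include <stddef.h>
    static void percent_encode(const char *src, char *dst, size_t dst_size) {
        static const char hex[] = "0123456789ABCDEF";
        size_t j = 0;
        for(const unsigned char *p = (const unsigned char *)src; *p && j + 4 < dst_size; p++) {
            if(isalnum(*p) || *p == '-' || *p == '_' || *p == '.' || *p == '~')
                dst[j++] = (char)*p;
            else {
                dst[j++] = '%';
                dst[j++] = hex[*p >> 4];
                dst[j++] = hex[*p & 0x0F];
            }
        }
        dst[j] = '\0';
    }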
diff --git a/libnetdata/worker_utilization/worker_utilization.c b/libnetdata/worker_utilization/worker_utilization.c
index afaff209b..8028e3a21 100644
--- a/libnetdata/worker_utilization/worker_utilization.c
+++ b/libnetdata/worker_utilization/worker_utilization.c
@@ -52,6 +52,7 @@ struct workers_workname { // this is what we add to Ju
static struct workers_globals {
SPINLOCK spinlock;
Pvoid_t worknames_JudyHS;
+ size_t memory;
} workers_globals = { // workers globals, the base of all worknames
.spinlock = NETDATA_SPINLOCK_INITIALIZER, // a lock for the worknames index
@@ -60,6 +61,14 @@ static struct workers_globals {
static __thread struct worker *worker = NULL; // the current thread worker
+size_t workers_allocated_memory(void) {
+ netdata_spinlock_lock(&workers_globals.spinlock);
+ size_t memory = workers_globals.memory;
+ netdata_spinlock_unlock(&workers_globals.spinlock);
+
+ return memory;
+}
+
void worker_register(const char *name) {
if(unlikely(worker)) return;
@@ -76,20 +85,22 @@ void worker_register(const char *name) {
size_t name_size = strlen(name) + 1;
netdata_spinlock_lock(&workers_globals.spinlock);
- Pvoid_t *PValue = JudyHSGet(workers_globals.worknames_JudyHS, (void *)name, name_size);
- if(!PValue)
- PValue = JudyHSIns(&workers_globals.worknames_JudyHS, (void *)name, name_size, PJE0);
+ workers_globals.memory += sizeof(struct worker) + strlen(worker->tag) + 1 + strlen(worker->workname) + 1;
+
+ Pvoid_t *PValue = JudyHSIns(&workers_globals.worknames_JudyHS, (void *)name, name_size, PJE0);
struct workers_workname *workname = *PValue;
if(!workname) {
workname = mallocz(sizeof(struct workers_workname));
- workname->spinlock = NETDATA_SPINLOCK_INITIALIZER;
+ netdata_spinlock_init(&workname->spinlock);
workname->base = NULL;
*PValue = workname;
+
+ workers_globals.memory += sizeof(struct workers_workname) + JUDYHS_INDEX_SIZE_ESTIMATE(name_size);
}
netdata_spinlock_lock(&workname->spinlock);
- DOUBLE_LINKED_LIST_APPEND_UNSAFE(workname->base, worker, prev, next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(workname->base, worker, prev, next);
netdata_spinlock_unlock(&workname->spinlock);
netdata_spinlock_unlock(&workers_globals.spinlock);
@@ -130,14 +141,16 @@ void worker_unregister(void) {
if(PValue) {
struct workers_workname *workname = *PValue;
netdata_spinlock_lock(&workname->spinlock);
- DOUBLE_LINKED_LIST_REMOVE_UNSAFE(workname->base, worker, prev, next);
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(workname->base, worker, prev, next);
netdata_spinlock_unlock(&workname->spinlock);
if(!workname->base) {
JudyHSDel(&workers_globals.worknames_JudyHS, (void *) worker->workname, workname_size, PJE0);
freez(workname);
+ workers_globals.memory -= sizeof(struct workers_workname) + JUDYHS_INDEX_SIZE_ESTIMATE(workname_size);
}
}
+ workers_globals.memory -= sizeof(struct worker) + strlen(worker->tag) + 1 + strlen(worker->workname) + 1;
netdata_spinlock_unlock(&workers_globals.spinlock);
for(int i = 0; i < WORKER_UTILIZATION_MAX_JOB_TYPES ;i++) {
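The new accounting is read back through workers_allocated_memory(), which copies the counter while holding the same spinlock used by the register/unregister paths, so callers get a consistent snapshot:

    // Hedged usage sketch: take a snapshot of the workers' allocation
    // accounting; the spinlock is acquired inside the function.
    size_t workers_mem = workers_allocated_memory();
    info("worker utilization structures currently account for %zu bytes", workers_mem);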
diff --git a/libnetdata/worker_utilization/worker_utilization.h b/libnetdata/worker_utilization/worker_utilization.h
index f1412e6b4..6745a010b 100644
--- a/libnetdata/worker_utilization/worker_utilization.h
+++ b/libnetdata/worker_utilization/worker_utilization.h
@@ -15,6 +15,7 @@ typedef enum {
WORKER_METRIC_INCREMENTAL_TOTAL = 4,
} WORKER_METRIC_TYPE;
+size_t workers_allocated_memory(void);
void worker_register(const char *name);
void worker_register_job_name(size_t job_id, const char *name);
void worker_register_job_custom_metric(size_t job_id, const char *name, const char *units, WORKER_METRIC_TYPE type);
diff --git a/ml/ADCharts.cc b/ml/ADCharts.cc
index 00c593c0c..cbb13f5d1 100644
--- a/ml/ADCharts.cc
+++ b/ml/ADCharts.cc
@@ -3,55 +3,185 @@
#include "ADCharts.h"
#include "Config.h"
-void ml::updateDimensionsChart(RRDHOST *RH,
- collected_number NumTrainedDimensions,
- collected_number NumNormalDimensions,
- collected_number NumAnomalousDimensions) {
- static thread_local RRDSET *RS = nullptr;
- static thread_local RRDDIM *NumTotalDimensionsRD = nullptr;
- static thread_local RRDDIM *NumTrainedDimensionsRD = nullptr;
- static thread_local RRDDIM *NumNormalDimensionsRD = nullptr;
- static thread_local RRDDIM *NumAnomalousDimensionsRD = nullptr;
-
- if (!RS) {
- std::stringstream IdSS, NameSS;
+void ml::updateDimensionsChart(RRDHOST *RH, const MachineLearningStats &MLS) {
+ /*
+ * Machine learning status
+ */
+ {
+ static thread_local RRDSET *MachineLearningStatusRS = nullptr;
+
+ static thread_local RRDDIM *Enabled = nullptr;
+ static thread_local RRDDIM *DisabledUE = nullptr;
+ static thread_local RRDDIM *DisabledSP = nullptr;
+
+ if (!MachineLearningStatusRS) {
+ std::stringstream IdSS, NameSS;
+
+ IdSS << "machine_learning_status_on_" << localhost->machine_guid;
+ NameSS << "machine_learning_status_on_" << rrdhost_hostname(localhost);
+
+ MachineLearningStatusRS = rrdset_create(
+ RH,
+ "netdata", // type
+ IdSS.str().c_str(), // id
+ NameSS.str().c_str(), // name
+ NETDATA_ML_CHART_FAMILY, // family
+ "netdata.machine_learning_status", // ctx
+ "Machine learning status", // title
+ "dimensions", // units
+ NETDATA_ML_PLUGIN, // plugin
+ NETDATA_ML_MODULE_TRAINING, // module
+ NETDATA_ML_CHART_PRIO_MACHINE_LEARNING_STATUS, // priority
+ RH->rrd_update_every, // update_every
+ RRDSET_TYPE_LINE // chart_type
+ );
+ rrdset_flag_set(MachineLearningStatusRS, RRDSET_FLAG_ANOMALY_DETECTION);
+
+ Enabled = rrddim_add(MachineLearningStatusRS, "enabled", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ DisabledUE = rrddim_add(MachineLearningStatusRS, "disabled-ue", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ DisabledSP = rrddim_add(MachineLearningStatusRS, "disabled-sp", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(MachineLearningStatusRS, Enabled, MLS.NumMachineLearningStatusEnabled);
+ rrddim_set_by_pointer(MachineLearningStatusRS, DisabledUE, MLS.NumMachineLearningStatusDisabledUE);
+ rrddim_set_by_pointer(MachineLearningStatusRS, DisabledSP, MLS.NumMachineLearningStatusDisabledSP);
+
+ rrdset_done(MachineLearningStatusRS);
+ }
- IdSS << "dimensions_on_" << localhost->machine_guid;
- NameSS << "dimensions_on_" << localhost->hostname;
+ /*
+ * Metric type
+ */
+ {
+ static thread_local RRDSET *MetricTypesRS = nullptr;
+
+ static thread_local RRDDIM *Constant = nullptr;
+ static thread_local RRDDIM *Variable = nullptr;
+
+ if (!MetricTypesRS) {
+ std::stringstream IdSS, NameSS;
+
+ IdSS << "metric_types_on_" << localhost->machine_guid;
+ NameSS << "metric_types_on_" << rrdhost_hostname(localhost);
+
+ MetricTypesRS = rrdset_create(
+ RH,
+ "netdata", // type
+ IdSS.str().c_str(), // id
+ NameSS.str().c_str(), // name
+ NETDATA_ML_CHART_FAMILY, // family
+ "netdata.metric_types", // ctx
+ "Dimensions by metric type", // title
+ "dimensions", // units
+ NETDATA_ML_PLUGIN, // plugin
+ NETDATA_ML_MODULE_TRAINING, // module
+ NETDATA_ML_CHART_PRIO_METRIC_TYPES, // priority
+ RH->rrd_update_every, // update_every
+ RRDSET_TYPE_LINE // chart_type
+ );
+ rrdset_flag_set(MetricTypesRS, RRDSET_FLAG_ANOMALY_DETECTION);
+
+ Constant = rrddim_add(MetricTypesRS, "constant", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ Variable = rrddim_add(MetricTypesRS, "variable", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(MetricTypesRS, Constant, MLS.NumMetricTypeConstant);
+ rrddim_set_by_pointer(MetricTypesRS, Variable, MLS.NumMetricTypeVariable);
+
+ rrdset_done(MetricTypesRS);
+ }
- RS = rrdset_create(
- RH,
- "anomaly_detection", // type
- IdSS.str().c_str(), // id
- NameSS.str().c_str(), // name
- "dimensions", // family
- "anomaly_detection.dimensions", // ctx
- "Anomaly detection dimensions", // title
- "dimensions", // units
- "netdata", // plugin
- "ml", // module
- 39183, // priority
- RH->rrd_update_every, // update_every
- RRDSET_TYPE_LINE // chart_type
- );
- rrdset_flag_set(RS, RRDSET_FLAG_ANOMALY_DETECTION);
-
- NumTotalDimensionsRD = rrddim_add(RS, "total", NULL,
- 1, 1, RRD_ALGORITHM_ABSOLUTE);
- NumTrainedDimensionsRD = rrddim_add(RS, "trained", NULL,
- 1, 1, RRD_ALGORITHM_ABSOLUTE);
- NumNormalDimensionsRD = rrddim_add(RS, "normal", NULL,
- 1, 1, RRD_ALGORITHM_ABSOLUTE);
- NumAnomalousDimensionsRD = rrddim_add(RS, "anomalous", NULL,
- 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ /*
+ * Training status
+ */
+ {
+ static thread_local RRDSET *TrainingStatusRS = nullptr;
+
+ static thread_local RRDDIM *Untrained = nullptr;
+ static thread_local RRDDIM *PendingWithoutModel = nullptr;
+ static thread_local RRDDIM *Trained = nullptr;
+ static thread_local RRDDIM *PendingWithModel = nullptr;
+
+ if (!TrainingStatusRS) {
+ std::stringstream IdSS, NameSS;
+
+ IdSS << "training_status_on_" << localhost->machine_guid;
+ NameSS << "training_status_on_" << rrdhost_hostname(localhost);
+
+ TrainingStatusRS = rrdset_create(
+ RH,
+ "netdata", // type
+ IdSS.str().c_str(), // id
+ NameSS.str().c_str(), // name
+ NETDATA_ML_CHART_FAMILY, // family
+ "netdata.training_status", // ctx
+ "Training status of dimensions", // title
+ "dimensions", // units
+ NETDATA_ML_PLUGIN, // plugin
+ NETDATA_ML_MODULE_TRAINING, // module
+ NETDATA_ML_CHART_PRIO_TRAINING_STATUS, // priority
+ RH->rrd_update_every, // update_every
+ RRDSET_TYPE_LINE // chart_type
+ );
+
+ rrdset_flag_set(TrainingStatusRS, RRDSET_FLAG_ANOMALY_DETECTION);
+
+ Untrained = rrddim_add(TrainingStatusRS, "untrained", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ PendingWithoutModel = rrddim_add(TrainingStatusRS, "pending-without-model", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ Trained = rrddim_add(TrainingStatusRS, "trained", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ PendingWithModel = rrddim_add(TrainingStatusRS, "pending-with-model", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(TrainingStatusRS, Untrained, MLS.NumTrainingStatusUntrained);
+ rrddim_set_by_pointer(TrainingStatusRS, PendingWithoutModel, MLS.NumTrainingStatusPendingWithoutModel);
+ rrddim_set_by_pointer(TrainingStatusRS, Trained, MLS.NumTrainingStatusTrained);
+ rrddim_set_by_pointer(TrainingStatusRS, PendingWithModel, MLS.NumTrainingStatusPendingWithModel);
+
+ rrdset_done(TrainingStatusRS);
}
- rrddim_set_by_pointer(RS, NumTotalDimensionsRD, NumNormalDimensions + NumAnomalousDimensions);
- rrddim_set_by_pointer(RS, NumTrainedDimensionsRD, NumTrainedDimensions);
- rrddim_set_by_pointer(RS, NumNormalDimensionsRD, NumNormalDimensions);
- rrddim_set_by_pointer(RS, NumAnomalousDimensionsRD, NumAnomalousDimensions);
+ /*
+ * Prediction status
+ */
+ {
+ static thread_local RRDSET *PredictionRS = nullptr;
+
+ static thread_local RRDDIM *Anomalous = nullptr;
+ static thread_local RRDDIM *Normal = nullptr;
+
+ if (!PredictionRS) {
+ std::stringstream IdSS, NameSS;
+
+ IdSS << "dimensions_on_" << localhost->machine_guid;
+ NameSS << "dimensions_on_" << rrdhost_hostname(localhost);
+
+ PredictionRS = rrdset_create(
+ RH,
+ "anomaly_detection", // type
+ IdSS.str().c_str(), // id
+ NameSS.str().c_str(), // name
+ "dimensions", // family
+ "anomaly_detection.dimensions", // ctx
+ "Anomaly detection dimensions", // title
+ "dimensions", // units
+ NETDATA_ML_PLUGIN, // plugin
+ NETDATA_ML_MODULE_TRAINING, // module
+ ML_CHART_PRIO_DIMENSIONS, // priority
+ RH->rrd_update_every, // update_every
+ RRDSET_TYPE_LINE // chart_type
+ );
+ rrdset_flag_set(PredictionRS, RRDSET_FLAG_ANOMALY_DETECTION);
+
+ Anomalous = rrddim_add(PredictionRS, "anomalous", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ Normal = rrddim_add(PredictionRS, "normal", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(PredictionRS, Anomalous, MLS.NumAnomalousDimensions);
+ rrddim_set_by_pointer(PredictionRS, Normal, MLS.NumNormalDimensions);
+
+ rrdset_done(PredictionRS);
+ }
- rrdset_done(RS);
}
void ml::updateHostAndDetectionRateCharts(RRDHOST *RH, collected_number AnomalyRate) {
@@ -62,20 +192,20 @@ void ml::updateHostAndDetectionRateCharts(RRDHOST *RH, collected_number AnomalyR
std::stringstream IdSS, NameSS;
IdSS << "anomaly_rate_on_" << localhost->machine_guid;
- NameSS << "anomaly_rate_on_" << localhost->hostname;
+ NameSS << "anomaly_rate_on_" << rrdhost_hostname(localhost);
HostRateRS = rrdset_create(
- RH,
- "anomaly_detection", // type
+ RH,
+ "anomaly_detection", // type
IdSS.str().c_str(), // id
NameSS.str().c_str(), // name
"anomaly_rate", // family
"anomaly_detection.anomaly_rate", // ctx
"Percentage of anomalous dimensions", // title
"percentage", // units
- "netdata", // plugin
- "ml", // module
- 39184, // priority
+ NETDATA_ML_PLUGIN, // plugin
+ NETDATA_ML_MODULE_DETECTION, // module
+ ML_CHART_PRIO_ANOMALY_RATE, // priority
RH->rrd_update_every, // update_every
RRDSET_TYPE_LINE // chart_type
);
@@ -96,20 +226,20 @@ void ml::updateHostAndDetectionRateCharts(RRDHOST *RH, collected_number AnomalyR
std::stringstream IdSS, NameSS;
IdSS << "anomaly_detection_on_" << localhost->machine_guid;
- NameSS << "anomaly_detection_on_" << localhost->hostname;
+ NameSS << "anomaly_detection_on_" << rrdhost_hostname(localhost);
AnomalyDetectionRS = rrdset_create(
- RH,
- "anomaly_detection", // type
+ RH,
+ "anomaly_detection", // type
IdSS.str().c_str(), // id
NameSS.str().c_str(), // name
"anomaly_detection", // family
"anomaly_detection.detector_events", // ctx
"Anomaly detection events", // title
"percentage", // units
- "netdata", // plugin
- "ml", // module
- 39185, // priority
+ NETDATA_ML_PLUGIN, // plugin
+ NETDATA_ML_MODULE_DETECTION, // module
+ ML_CHART_PRIO_DETECTOR_EVENTS, // priority
RH->rrd_update_every, // update_every
RRDSET_TYPE_LINE // chart_type
);
@@ -141,93 +271,248 @@ void ml::updateHostAndDetectionRateCharts(RRDHOST *RH, collected_number AnomalyR
NULL /* group options */,
0, /* timeout */
0, /* tier */
- QUERY_SOURCE_ML
+ QUERY_SOURCE_ML,
+ STORAGE_PRIORITY_BEST_EFFORT
);
- if(R) {
- assert(R->d == 1 && R->n == 1 && R->rows == 1);
- static thread_local bool PrevAboveThreshold = false;
- bool AboveThreshold = R->v[0] >= Cfg.HostAnomalyRateThreshold;
- bool NewAnomalyEvent = AboveThreshold && !PrevAboveThreshold;
- PrevAboveThreshold = AboveThreshold;
+ if(R) {
+ if(R->d == 1 && R->n == 1 && R->rows == 1) {
+ static thread_local bool PrevAboveThreshold = false;
+ bool AboveThreshold = R->v[0] >= Cfg.HostAnomalyRateThreshold;
+ bool NewAnomalyEvent = AboveThreshold && !PrevAboveThreshold;
+ PrevAboveThreshold = AboveThreshold;
- rrddim_set_by_pointer(AnomalyDetectionRS, AboveThresholdRD, AboveThreshold);
- rrddim_set_by_pointer(AnomalyDetectionRS, NewAnomalyEventRD, NewAnomalyEvent);
- rrdset_done(AnomalyDetectionRS);
+ rrddim_set_by_pointer(AnomalyDetectionRS, AboveThresholdRD, AboveThreshold);
+ rrddim_set_by_pointer(AnomalyDetectionRS, NewAnomalyEventRD, NewAnomalyEvent);
+ rrdset_done(AnomalyDetectionRS);
+ }
rrdr_free(OWA, R);
}
+
onewayalloc_destroy(OWA);
}
-void ml::updateDetectionChart(RRDHOST *RH) {
- static thread_local RRDSET *RS = nullptr;
- static thread_local RRDDIM *UserRD, *SystemRD = nullptr;
-
- if (!RS) {
- std::stringstream IdSS, NameSS;
-
- IdSS << "prediction_stats_" << RH->machine_guid;
- NameSS << "prediction_stats_for_" << RH->hostname;
-
- RS = rrdset_create_localhost(
- "netdata", // type
- IdSS.str().c_str(), // id
- NameSS.str().c_str(), // name
- "ml", // family
- "netdata.prediction_stats", // ctx
- "Prediction thread CPU usage", // title
- "milliseconds/s", // units
- "netdata", // plugin
- "ml", // module
- 136000, // priority
- RH->rrd_update_every, // update_every
- RRDSET_TYPE_STACKED // chart_type
- );
-
- UserRD = rrddim_add(RS, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
- SystemRD = rrddim_add(RS, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+void ml::updateResourceUsageCharts(RRDHOST *RH, const struct rusage &PredictionRU, const struct rusage &TrainingRU) {
+ /*
+ * prediction rusage
+ */
+ {
+ static thread_local RRDSET *RS = nullptr;
+
+ static thread_local RRDDIM *User = nullptr;
+ static thread_local RRDDIM *System = nullptr;
+
+ if (!RS) {
+ std::stringstream IdSS, NameSS;
+
+ IdSS << "prediction_usage_for_" << RH->machine_guid;
+ NameSS << "prediction_usage_for_" << rrdhost_hostname(RH);
+
+ RS = rrdset_create_localhost(
+ "netdata", // type
+ IdSS.str().c_str(), // id
+ NameSS.str().c_str(), // name
+ NETDATA_ML_CHART_FAMILY, // family
+ "netdata.prediction_usage", // ctx
+ "Prediction resource usage", // title
+ "milliseconds/s", // units
+ NETDATA_ML_PLUGIN, // plugin
+ NETDATA_ML_MODULE_PREDICTION, // module
+ NETDATA_ML_CHART_PRIO_PREDICTION_USAGE, // priority
+ RH->rrd_update_every, // update_every
+ RRDSET_TYPE_STACKED // chart_type
+ );
+ rrdset_flag_set(RS, RRDSET_FLAG_ANOMALY_DETECTION);
+
+ User = rrddim_add(RS, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ System = rrddim_add(RS, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(RS, User, PredictionRU.ru_utime.tv_sec * 1000000ULL + PredictionRU.ru_utime.tv_usec);
+ rrddim_set_by_pointer(RS, System, PredictionRU.ru_stime.tv_sec * 1000000ULL + PredictionRU.ru_stime.tv_usec);
+
+ rrdset_done(RS);
}
- struct rusage TRU;
- getrusage(RUSAGE_THREAD, &TRU);
-
- rrddim_set_by_pointer(RS, UserRD, TRU.ru_utime.tv_sec * 1000000ULL + TRU.ru_utime.tv_usec);
- rrddim_set_by_pointer(RS, SystemRD, TRU.ru_stime.tv_sec * 1000000ULL + TRU.ru_stime.tv_usec);
- rrdset_done(RS);
+ /*
+ * training rusage
+ */
+ {
+ static thread_local RRDSET *RS = nullptr;
+
+ static thread_local RRDDIM *User = nullptr;
+ static thread_local RRDDIM *System = nullptr;
+
+ if (!RS) {
+ std::stringstream IdSS, NameSS;
+
+ IdSS << "training_usage_for_" << RH->machine_guid;
+ NameSS << "training_usage_for_" << rrdhost_hostname(RH);
+
+ RS = rrdset_create_localhost(
+ "netdata", // type
+ IdSS.str().c_str(), // id
+ NameSS.str().c_str(), // name
+ NETDATA_ML_CHART_FAMILY, // family
+ "netdata.training_usage", // ctx
+ "Training resource usage", // title
+ "milliseconds/s", // units
+ NETDATA_ML_PLUGIN, // plugin
+ NETDATA_ML_MODULE_TRAINING, // module
+ NETDATA_ML_CHART_PRIO_TRAINING_USAGE, // priority
+ RH->rrd_update_every, // update_every
+ RRDSET_TYPE_STACKED // chart_type
+ );
+ rrdset_flag_set(RS, RRDSET_FLAG_ANOMALY_DETECTION);
+
+ User = rrddim_add(RS, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ System = rrddim_add(RS, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(RS, User, TrainingRU.ru_utime.tv_sec * 1000000ULL + TrainingRU.ru_utime.tv_usec);
+ rrddim_set_by_pointer(RS, System, TrainingRU.ru_stime.tv_sec * 1000000ULL + TrainingRU.ru_stime.tv_usec);
+
+ rrdset_done(RS);
+ }
}
-void ml::updateTrainingChart(RRDHOST *RH, struct rusage *TRU) {
- static thread_local RRDSET *RS = nullptr;
- static thread_local RRDDIM *UserRD = nullptr;
- static thread_local RRDDIM *SystemRD = nullptr;
-
- if (!RS) {
- std::stringstream IdSS, NameSS;
-
- IdSS << "training_stats_" << RH->machine_guid;
- NameSS << "training_stats_for_" << RH->hostname;
-
- RS = rrdset_create_localhost(
- "netdata", // type
- IdSS.str().c_str(), // id
- NameSS.str().c_str(), // name
- "ml", // family
- "netdata.training_stats", // ctx
- "Training thread CPU usage", // title
- "milliseconds/s", // units
- "netdata", // plugin
- "ml", // module
- 136001, // priority
- RH->rrd_update_every, // update_every
- RRDSET_TYPE_STACKED // chart_type
- );
+void ml::updateTrainingStatisticsChart(RRDHOST *RH, const TrainingStats &TS) {
+ /*
+ * queue stats
+ */
+ {
+ static thread_local RRDSET *RS = nullptr;
+
+ static thread_local RRDDIM *QueueSize = nullptr;
+ static thread_local RRDDIM *PoppedItems = nullptr;
+
+ if (!RS) {
+ std::stringstream IdSS, NameSS;
+
+ IdSS << "queue_stats_on_" << localhost->machine_guid;
+ NameSS << "queue_stats_on_" << rrdhost_hostname(localhost);
+
+ RS = rrdset_create(
+ RH,
+ "netdata", // type
+ IdSS.str().c_str(), // id
+ NameSS.str().c_str(), // name
+ NETDATA_ML_CHART_FAMILY, // family
+ "netdata.queue_stats", // ctx
+ "Training queue stats", // title
+ "items", // units
+ NETDATA_ML_PLUGIN, // plugin
+ NETDATA_ML_MODULE_TRAINING, // module
+ NETDATA_ML_CHART_PRIO_QUEUE_STATS, // priority
+ RH->rrd_update_every, // update_every
+ RRDSET_TYPE_LINE // chart_type
+ );
+ rrdset_flag_set(RS, RRDSET_FLAG_ANOMALY_DETECTION);
+
+ QueueSize = rrddim_add(RS, "queue_size", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ PoppedItems = rrddim_add(RS, "popped_items", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(RS, QueueSize, TS.QueueSize);
+ rrddim_set_by_pointer(RS, PoppedItems, TS.NumPoppedItems);
+
+ rrdset_done(RS);
+ }
- UserRD = rrddim_add(RS, "user", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
- SystemRD = rrddim_add(RS, "system", NULL, 1, 1000, RRD_ALGORITHM_INCREMENTAL);
+ /*
+ * training stats
+ */
+ {
+ static thread_local RRDSET *RS = nullptr;
+
+ static thread_local RRDDIM *Allotted = nullptr;
+ static thread_local RRDDIM *Consumed = nullptr;
+ static thread_local RRDDIM *Remaining = nullptr;
+
+ if (!RS) {
+ std::stringstream IdSS, NameSS;
+
+ IdSS << "training_time_stats_on_" << localhost->machine_guid;
+ NameSS << "training_time_stats_on_" << rrdhost_hostname(localhost);
+
+ RS = rrdset_create(
+ RH,
+ "netdata", // type
+ IdSS.str().c_str(), // id
+ NameSS.str().c_str(), // name
+ NETDATA_ML_CHART_FAMILY, // family
+ "netdata.training_time_stats", // ctx
+ "Training time stats", // title
+ "milliseconds", // units
+ NETDATA_ML_PLUGIN, // plugin
+ NETDATA_ML_MODULE_TRAINING, // module
+ NETDATA_ML_CHART_PRIO_TRAINING_TIME_STATS, // priority
+ RH->rrd_update_every, // update_every
+ RRDSET_TYPE_LINE // chart_type
+ );
+ rrdset_flag_set(RS, RRDSET_FLAG_ANOMALY_DETECTION);
+
+ Allotted = rrddim_add(RS, "allotted", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ Consumed = rrddim_add(RS, "consumed", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ Remaining = rrddim_add(RS, "remaining", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(RS, Allotted, TS.AllottedUT);
+ rrddim_set_by_pointer(RS, Consumed, TS.ConsumedUT);
+ rrddim_set_by_pointer(RS, Remaining, TS.RemainingUT);
+
+ rrdset_done(RS);
}
- rrddim_set_by_pointer(RS, UserRD, TRU->ru_utime.tv_sec * 1000000ULL + TRU->ru_utime.tv_usec);
- rrddim_set_by_pointer(RS, SystemRD, TRU->ru_stime.tv_sec * 1000000ULL + TRU->ru_stime.tv_usec);
- rrdset_done(RS);
+ /*
+ * training result stats
+ */
+ {
+ static thread_local RRDSET *RS = nullptr;
+
+ static thread_local RRDDIM *Ok = nullptr;
+ static thread_local RRDDIM *InvalidQueryTimeRange = nullptr;
+ static thread_local RRDDIM *NotEnoughCollectedValues = nullptr;
+ static thread_local RRDDIM *NullAcquiredDimension = nullptr;
+ static thread_local RRDDIM *ChartUnderReplication = nullptr;
+
+ if (!RS) {
+ std::stringstream IdSS, NameSS;
+
+ IdSS << "training_results_on_" << localhost->machine_guid;
+ NameSS << "training_results_on_" << rrdhost_hostname(localhost);
+
+ RS = rrdset_create(
+ RH,
+ "netdata", // type
+ IdSS.str().c_str(), // id
+ NameSS.str().c_str(), // name
+ NETDATA_ML_CHART_FAMILY, // family
+ "netdata.training_results", // ctx
+ "Training results", // title
+ "events", // units
+ NETDATA_ML_PLUGIN, // plugin
+ NETDATA_ML_MODULE_TRAINING, // module
+ NETDATA_ML_CHART_PRIO_TRAINING_RESULTS, // priority
+ RH->rrd_update_every, // update_every
+ RRDSET_TYPE_LINE // chart_type
+ );
+ rrdset_flag_set(RS, RRDSET_FLAG_ANOMALY_DETECTION);
+
+ Ok = rrddim_add(RS, "ok", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ InvalidQueryTimeRange = rrddim_add(RS, "invalid-queries", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ NotEnoughCollectedValues = rrddim_add(RS, "not-enough-values", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ NullAcquiredDimension = rrddim_add(RS, "null-acquired-dimensions", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ ChartUnderReplication = rrddim_add(RS, "chart-under-replication", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(RS, Ok, TS.TrainingResultOk);
+ rrddim_set_by_pointer(RS, InvalidQueryTimeRange, TS.TrainingResultInvalidQueryTimeRange);
+ rrddim_set_by_pointer(RS, NotEnoughCollectedValues, TS.TrainingResultNotEnoughCollectedValues);
+ rrddim_set_by_pointer(RS, NullAcquiredDimension, TS.TrainingResultNullAcquiredDimension);
+ rrddim_set_by_pointer(RS, ChartUnderReplication, TS.TrainingResultChartUnderReplication);
+
+ rrdset_done(RS);
+ }
}
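The two resource-usage charts above depend on a small unit chain: getrusage() times are accumulated in microseconds, each dimension is added with a divisor of 1000, and RRD_ALGORITHM_INCREMENTAL differentiates the growing counter per second, which is why the units read milliseconds/s. A sketch of that arithmetic (RU stands for a struct rusage already filled by getrusage(), as in the code above):

    // microseconds of CPU time accumulated so far
    collected_number cpu_usec = RU.ru_utime.tv_sec * 1000000ULL + RU.ru_utime.tv_usec;
    // rendered value ~= (cpu_usec_now - cpu_usec_prev) / 1000 / update_every
    //                 =  milliseconds of CPU consumed per wall-clock second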
diff --git a/ml/ADCharts.h b/ml/ADCharts.h
index 0be324f7d..ee09669e2 100644
--- a/ml/ADCharts.h
+++ b/ml/ADCharts.h
@@ -3,20 +3,18 @@
#ifndef ML_ADCHARTS_H
#define ML_ADCHARTS_H
+#include "Stats.h"
#include "ml-private.h"
namespace ml {
-void updateDimensionsChart(RRDHOST *RH,
- collected_number NumTrainedDimensions,
- collected_number NumNormalDimensions,
- collected_number NumAnomalousDimensions);
+void updateDimensionsChart(RRDHOST *RH, const MachineLearningStats &MLS);
void updateHostAndDetectionRateCharts(RRDHOST *RH, collected_number AnomalyRate);
-void updateDetectionChart(RRDHOST *RH);
+void updateResourceUsageCharts(RRDHOST *RH, const struct rusage &PredictionRU, const struct rusage &TrainingRU);
-void updateTrainingChart(RRDHOST *RH, struct rusage *TRU);
+void updateTrainingStatisticsChart(RRDHOST *RH, const TrainingStats &TS);
} // namespace ml
diff --git a/ml/Chart.cc b/ml/Chart.cc
new file mode 100644
index 000000000..e69de29bb
diff --git a/ml/Chart.h b/ml/Chart.h
new file mode 100644
index 000000000..dbd6a910f
--- /dev/null
+++ b/ml/Chart.h
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef ML_CHART_H
+#define ML_CHART_H
+
+#include "Config.h"
+#include "Dimension.h"
+
+#include "ml-private.h"
+#include "json/single_include/nlohmann/json.hpp"
+
+namespace ml
+{
+
+class Chart {
+public:
+ Chart(RRDSET *RS) :
+ RS(RS),
+ MLS()
+ { }
+
+ RRDSET *getRS() const {
+ return RS;
+ }
+
+ bool isAvailableForML() {
+ return rrdset_is_available_for_exporting_and_alarms(RS);
+ }
+
+ void addDimension(Dimension *D) {
+ std::lock_guard L(M);
+ Dimensions[D->getRD()] = D;
+ }
+
+ void removeDimension(Dimension *D) {
+ std::lock_guard L(M);
+ Dimensions.erase(D->getRD());
+ }
+
+ void getModelsAsJson(nlohmann::json &Json) {
+ std::lock_guard L(M);
+
+ for (auto &DP : Dimensions) {
+ Dimension *D = DP.second;
+ nlohmann::json JsonArray = nlohmann::json::array();
+ for (const KMeans &KM : D->getModels()) {
+ nlohmann::json J;
+ KM.toJson(J);
+ JsonArray.push_back(J);
+ }
+
+ Json[getMLDimensionID(D->getRD())] = JsonArray;
+ }
+ }
+
+ void updateBegin() {
+ M.lock();
+ MLS = {};
+ }
+
+ void updateDimension(Dimension *D, bool IsAnomalous) {
+ switch (D->getMLS()) {
+ case MachineLearningStatus::DisabledDueToUniqueUpdateEvery:
+ MLS.NumMachineLearningStatusDisabledUE++;
+ return;
+ case MachineLearningStatus::DisabledDueToExcludedChart:
+ MLS.NumMachineLearningStatusDisabledSP++;
+ return;
+ case MachineLearningStatus::Enabled: {
+ MLS.NumMachineLearningStatusEnabled++;
+
+ switch (D->getMT()) {
+ case MetricType::Constant:
+ MLS.NumMetricTypeConstant++;
+ MLS.NumTrainingStatusTrained++;
+ MLS.NumNormalDimensions++;
+ return;
+ case MetricType::Variable:
+ MLS.NumMetricTypeVariable++;
+ break;
+ }
+
+ switch (D->getTS()) {
+ case TrainingStatus::Untrained:
+ MLS.NumTrainingStatusUntrained++;
+ return;
+ case TrainingStatus::PendingWithoutModel:
+ MLS.NumTrainingStatusPendingWithoutModel++;
+ return;
+ case TrainingStatus::Trained:
+ MLS.NumTrainingStatusTrained++;
+
+ MLS.NumAnomalousDimensions += IsAnomalous;
+ MLS.NumNormalDimensions += !IsAnomalous;
+ return;
+ case TrainingStatus::PendingWithModel:
+ MLS.NumTrainingStatusPendingWithModel++;
+
+ MLS.NumAnomalousDimensions += IsAnomalous;
+ MLS.NumNormalDimensions += !IsAnomalous;
+ return;
+ }
+
+ return;
+ }
+ }
+ }
+
+ void updateEnd() {
+ M.unlock();
+ }
+
+ MachineLearningStats getMLS() {
+ std::lock_guard L(M);
+ return MLS;
+ }
+
+private:
+ RRDSET *RS;
+ MachineLearningStats MLS;
+
+ Mutex M;
+ std::unordered_map<RRDDIM *, Dimension *> Dimensions;
+};
+
+} // namespace ml
+
+#endif /* ML_CHART_H */
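A short sketch of how the Chart class above is meant to be driven: updateBegin() takes the lock and resets the per-chart MachineLearningStats, updateDimension() classifies each dimension together with its anomaly verdict, and updateEnd() releases the lock so that getMLS() can later hand out the aggregated snapshot. The function name and the Results container are placeholders, not part of the patch (assumes <vector> and <utility> are included):

    // Hedged sketch of the Chart update protocol defined above.
    void update_chart_stats(ml::Chart &C, const std::vector<std::pair<ml::Dimension *, bool>> &Results) {
        C.updateBegin();                           // lock + reset the per-chart stats
        for (const auto &P : Results)
            C.updateDimension(P.first, P.second);  // dimension pointer, is-anomalous flag
        C.updateEnd();                             // unlock; getMLS() now returns the snapshot
    }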
diff --git a/ml/Config.cc b/ml/Config.cc
index eedd8c29f..ba3a61445 100644
--- a/ml/Config.cc
+++ b/ml/Config.cc
@@ -31,7 +31,7 @@ void Config::readMLConfig(void) {
unsigned MaxTrainSamples = config_get_number(ConfigSectionML, "maximum num samples to train", 4 * 3600);
unsigned MinTrainSamples = config_get_number(ConfigSectionML, "minimum num samples to train", 1 * 900);
unsigned TrainEvery = config_get_number(ConfigSectionML, "train every", 1 * 3600);
- unsigned NumModelsToUse = config_get_number(ConfigSectionML, "number of models per dimension", 1 * 24);
+ unsigned NumModelsToUse = config_get_number(ConfigSectionML, "number of models per dimension", 1);
unsigned DiffN = config_get_number(ConfigSectionML, "num samples to diff", 1);
unsigned SmoothN = config_get_number(ConfigSectionML, "num samples to smooth", 3);
@@ -53,7 +53,7 @@ void Config::readMLConfig(void) {
MaxTrainSamples = clamp(MaxTrainSamples, 1 * 3600, 24 * 3600);
MinTrainSamples = clamp(MinTrainSamples, 1 * 900, 6 * 3600);
TrainEvery = clamp(TrainEvery, 1 * 3600, 6 * 3600);
- NumModelsToUse = clamp(TrainEvery, 1, 7 * 24);
+ NumModelsToUse = clamp(NumModelsToUse, 1, 7 * 24);
DiffN = clamp(DiffN, 0u, 1u);
SmoothN = clamp(SmoothN, 0u, 5u);
@@ -108,7 +108,7 @@ void Config::readMLConfig(void) {
// Always exclude anomaly_detection charts from training.
Cfg.ChartsToSkip = "anomaly_detection.* ";
Cfg.ChartsToSkip += config_get(ConfigSectionML, "charts to skip from training", "netdata.*");
- Cfg.SP_ChartsToSkip = simple_pattern_create(ChartsToSkip.c_str(), NULL, SIMPLE_PATTERN_EXACT);
+ Cfg.SP_ChartsToSkip = simple_pattern_create(Cfg.ChartsToSkip.c_str(), NULL, SIMPLE_PATTERN_EXACT);
Cfg.StreamADCharts = config_get_boolean(ConfigSectionML, "stream anomaly detection charts", true);
}
diff --git a/ml/Config.h b/ml/Config.h
index d876d4aa4..f10e11492 100644
--- a/ml/Config.h
+++ b/ml/Config.h
@@ -14,6 +14,7 @@ public:
unsigned MaxTrainSamples;
unsigned MinTrainSamples;
unsigned TrainEvery;
+
unsigned NumModelsToUse;
unsigned DBEngineAnomalyRateEvery;
diff --git a/ml/Dimension.cc b/ml/Dimension.cc
index bf34abb72..db9256895 100644
--- a/ml/Dimension.cc
+++ b/ml/Dimension.cc
@@ -3,171 +3,344 @@
#include "Config.h"
#include "Dimension.h"
#include "Query.h"
+#include "Host.h"
using namespace ml;
-bool Dimension::isActive() const {
- bool SetObsolete = rrdset_flag_check(RD->rrdset, RRDSET_FLAG_OBSOLETE);
- bool DimObsolete = rrddim_flag_check(RD, RRDDIM_FLAG_OBSOLETE);
- return !SetObsolete && !DimObsolete;
+static const char *mls2str(MachineLearningStatus MLS) {
+ switch (MLS) {
+ case ml::MachineLearningStatus::Enabled:
+ return "enabled";
+ case ml::MachineLearningStatus::DisabledDueToUniqueUpdateEvery:
+ return "disabled-ue";
+ case ml::MachineLearningStatus::DisabledDueToExcludedChart:
+ return "disabled-sp";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *mt2str(MetricType MT) {
+ switch (MT) {
+ case ml::MetricType::Constant:
+ return "constant";
+ case ml::MetricType::Variable:
+ return "variable";
+ default:
+ return "unknown";
+ }
}
-std::pair<CalculatedNumber *, unsigned> Dimension::getCalculatedNumbers() {
+static const char *ts2str(TrainingStatus TS) {
+ switch (TS) {
+ case ml::TrainingStatus::PendingWithModel:
+ return "pending-with-model";
+ case ml::TrainingStatus::PendingWithoutModel:
+ return "pending-without-model";
+ case ml::TrainingStatus::Trained:
+ return "trained";
+ case ml::TrainingStatus::Untrained:
+ return "untrained";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *tr2str(TrainingResult TR) {
+ switch (TR) {
+ case ml::TrainingResult::Ok:
+ return "ok";
+ case ml::TrainingResult::InvalidQueryTimeRange:
+ return "invalid-query";
+ case ml::TrainingResult::NotEnoughCollectedValues:
+ return "missing-values";
+ case ml::TrainingResult::NullAcquiredDimension:
+ return "null-acquired-dim";
+ case ml::TrainingResult::ChartUnderReplication:
+ return "chart-under-replication";
+ default:
+ return "unknown";
+ }
+}
+
+std::pair<CalculatedNumber *, TrainingResponse> Dimension::getCalculatedNumbers(const TrainingRequest &TrainingReq) {
+ TrainingResponse TrainingResp = {};
+
+ TrainingResp.RequestTime = TrainingReq.RequestTime;
+ TrainingResp.FirstEntryOnRequest = TrainingReq.FirstEntryOnRequest;
+ TrainingResp.LastEntryOnRequest = TrainingReq.LastEntryOnRequest;
+
+ TrainingResp.FirstEntryOnResponse = rrddim_first_entry_s_of_tier(RD, 0);
+ TrainingResp.LastEntryOnResponse = rrddim_last_entry_s_of_tier(RD, 0);
+
size_t MinN = Cfg.MinTrainSamples;
size_t MaxN = Cfg.MaxTrainSamples;
// Figure out what our time window should be.
- time_t BeforeT = now_realtime_sec() - 1;
- time_t AfterT = BeforeT - (MaxN * updateEvery());
-
- BeforeT -= (BeforeT % updateEvery());
- AfterT -= (AfterT % updateEvery());
-
- BeforeT = std::min(BeforeT, latestTime());
- AfterT = std::max(AfterT, oldestTime());
+ TrainingResp.QueryBeforeT = TrainingResp.LastEntryOnResponse;
+ TrainingResp.QueryAfterT = std::max(
+ TrainingResp.QueryBeforeT - static_cast<time_t>((MaxN - 1) * updateEvery()),
+ TrainingResp.FirstEntryOnResponse
+ );
+
+ if (TrainingResp.QueryAfterT >= TrainingResp.QueryBeforeT) {
+ TrainingResp.Result = TrainingResult::InvalidQueryTimeRange;
+ return { nullptr, TrainingResp };
+ }
- if (AfterT >= BeforeT)
- return { nullptr, 0 };
+ if (rrdset_is_replicating(RD->rrdset)) {
+ TrainingResp.Result = TrainingResult::ChartUnderReplication;
+ return { nullptr, TrainingResp };
+ }
CalculatedNumber *CNs = new CalculatedNumber[MaxN * (Cfg.LagN + 1)]();
// Start the query.
- unsigned Idx = 0;
- unsigned CollectedValues = 0;
- unsigned TotalValues = 0;
+ size_t Idx = 0;
CalculatedNumber LastValue = std::numeric_limits<CalculatedNumber>::quiet_NaN();
Query Q = Query(getRD());
- Q.init(AfterT, BeforeT);
+ Q.init(TrainingResp.QueryAfterT, TrainingResp.QueryBeforeT);
while (!Q.isFinished()) {
if (Idx == MaxN)
break;
auto P = Q.nextMetric();
+
CalculatedNumber Value = P.second;
if (netdata_double_isnumber(Value)) {
+ if (!TrainingResp.DbAfterT)
+ TrainingResp.DbAfterT = P.first;
+ TrainingResp.DbBeforeT = P.first;
+
CNs[Idx] = Value;
LastValue = CNs[Idx];
- CollectedValues++;
+ TrainingResp.CollectedValues++;
} else
CNs[Idx] = LastValue;
Idx++;
}
- TotalValues = Idx;
+ TrainingResp.TotalValues = Idx;
+
+ if (TrainingResp.CollectedValues < MinN) {
+ TrainingResp.Result = TrainingResult::NotEnoughCollectedValues;
- if (CollectedValues < MinN) {
delete[] CNs;
- return { nullptr, 0 };
+ return { nullptr, TrainingResp };
}
// Find first non-NaN value.
- for (Idx = 0; std::isnan(CNs[Idx]); Idx++, TotalValues--) { }
+ for (Idx = 0; std::isnan(CNs[Idx]); Idx++, TrainingResp.TotalValues--) { }
// Overwrite NaN values.
if (Idx != 0)
- memmove(CNs, &CNs[Idx], sizeof(CalculatedNumber) * TotalValues);
+ memmove(CNs, &CNs[Idx], sizeof(CalculatedNumber) * TrainingResp.TotalValues);
- return { CNs, TotalValues };
+ TrainingResp.Result = TrainingResult::Ok;
+ return { CNs, TrainingResp };
}
-MLResult Dimension::trainModel() {
- auto P = getCalculatedNumbers();
+TrainingResult Dimension::trainModel(const TrainingRequest &TrainingReq) {
+ auto P = getCalculatedNumbers(TrainingReq);
CalculatedNumber *CNs = P.first;
- unsigned N = P.second;
+ TrainingResponse TrainingResp = P.second;
+
+ if (TrainingResp.Result != TrainingResult::Ok) {
+ std::lock_guard L(M);
+
+ MT = MetricType::Constant;
+
+ switch (TS) {
+ case TrainingStatus::PendingWithModel:
+ TS = TrainingStatus::Trained;
+ break;
+ case TrainingStatus::PendingWithoutModel:
+ TS = TrainingStatus::Untrained;
+ break;
+ default:
+ break;
+ }
+
+ TR = TrainingResp;
- if (!CNs)
- return MLResult::MissingData;
+ LastTrainingTime = TrainingResp.LastEntryOnResponse;
+ return TrainingResp.Result;
+ }
+ unsigned N = TrainingResp.TotalValues;
unsigned TargetNumSamples = Cfg.MaxTrainSamples * Cfg.RandomSamplingRatio;
double SamplingRatio = std::min(static_cast<double>(TargetNumSamples) / N, 1.0);
SamplesBuffer SB = SamplesBuffer(CNs, N, 1, Cfg.DiffN, Cfg.SmoothN, Cfg.LagN,
SamplingRatio, Cfg.RandomNums);
- std::vector<DSample> Samples = SB.preprocess();
+ std::vector<DSample> Samples;
+ SB.preprocess(Samples);
KMeans KM;
KM.train(Samples, Cfg.MaxKMeansIters);
{
- std::lock_guard Lock(Mutex);
- Models[0] = KM;
- }
+ std::lock_guard L(M);
- Trained = true;
- ConstantModel = true;
+ if (Models.size() < Cfg.NumModelsToUse) {
+ Models.push_back(std::move(KM));
+ } else {
+ std::rotate(std::begin(Models), std::begin(Models) + 1, std::end(Models));
+ Models[Models.size() - 1] = std::move(KM);
+ }
+
+ MT = MetricType::Constant;
+ TS = TrainingStatus::Trained;
+ TR = TrainingResp;
+ LastTrainingTime = rrddim_last_entry_s(RD);
+ }
delete[] CNs;
- return MLResult::Success;
+ return TrainingResp.Result;
}
-bool Dimension::shouldTrain(const TimePoint &TP) const {
- if (ConstantModel)
- return false;
+void Dimension::scheduleForTraining(time_t CurrT) {
+ switch (MT) {
+ case MetricType::Constant: {
+ return;
+ } default:
+ break;
+ }
- return (LastTrainedAt + Seconds(Cfg.TrainEvery * updateEvery())) < TP;
+ switch (TS) {
+ case TrainingStatus::PendingWithModel:
+ case TrainingStatus::PendingWithoutModel:
+ break;
+ case TrainingStatus::Untrained: {
+ Host *H = reinterpret_cast<Host *>(RD->rrdset->rrdhost->ml_host);
+ TS = TrainingStatus::PendingWithoutModel;
+ H->scheduleForTraining(getTrainingRequest(CurrT));
+ break;
+ }
+ case TrainingStatus::Trained: {
+ bool NeedsTraining = (time_t)(LastTrainingTime + (Cfg.TrainEvery * updateEvery())) < CurrT;
+
+ if (NeedsTraining) {
+ Host *H = reinterpret_cast<Host *>(RD->rrdset->rrdhost->ml_host);
+ TS = TrainingStatus::PendingWithModel;
+ H->scheduleForTraining(getTrainingRequest(CurrT));
+ }
+ break;
+ }
+ }
}
-bool Dimension::predict(CalculatedNumber Value, bool Exists) {
+bool Dimension::predict(time_t CurrT, CalculatedNumber Value, bool Exists) {
+ // Nothing to do if ML is disabled for this dimension
+ if (MLS != MachineLearningStatus::Enabled)
+ return false;
+
+ // Don't treat values that don't exist as anomalous
if (!Exists) {
CNs.clear();
- AnomalyBit = false;
return false;
}
+ // Save the value and return if we don't have enough values for a sample
unsigned N = Cfg.DiffN + Cfg.SmoothN + Cfg.LagN;
if (CNs.size() < N) {
CNs.push_back(Value);
- AnomalyBit = false;
return false;
}
+ // Push the value and check if it's different from the last one
+ bool SameValue = true;
std::rotate(std::begin(CNs), std::begin(CNs) + 1, std::end(CNs));
-
if (CNs[N - 1] != Value)
- ConstantModel = false;
-
+ SameValue = false;
CNs[N - 1] = Value;
- if (!isTrained() || ConstantModel) {
- AnomalyBit = false;
- return false;
- }
-
- CalculatedNumber *TmpCNs = new CalculatedNumber[N * (Cfg.LagN + 1)]();
+ // Create the sample
+ CalculatedNumber TmpCNs[N * (Cfg.LagN + 1)];
+ memset(TmpCNs, 0, N * (Cfg.LagN + 1) * sizeof(CalculatedNumber));
std::memcpy(TmpCNs, CNs.data(), N * sizeof(CalculatedNumber));
SamplesBuffer SB = SamplesBuffer(TmpCNs, N, 1,
Cfg.DiffN, Cfg.SmoothN, Cfg.LagN,
1.0, Cfg.RandomNums);
- const DSample Sample = SB.preprocess().back();
- delete[] TmpCNs;
+ SB.preprocess(Feature);
- std::unique_lock Lock(Mutex, std::defer_lock);
- if (!Lock.try_lock()) {
- AnomalyBit = false;
+ /*
+ * Lock to predict and possibly schedule the dimension for training
+ */
+
+ std::unique_lock L(M, std::defer_lock);
+ if (!L.try_lock()) {
return false;
}
+ // Mark the metric time as variable if we received different values
+ if (!SameValue)
+ MT = MetricType::Variable;
+
+ // Decide if the dimension needs to be scheduled for training
+ scheduleForTraining(CurrT);
+
+ // Nothing to do if we don't have a model
+ switch (TS) {
+ case TrainingStatus::Untrained:
+ case TrainingStatus::PendingWithoutModel:
+ return false;
+ default:
+ break;
+ }
+
+ /*
+ * Use the KMeans models to check if the value is anomalous
+ */
+
+ size_t ModelsConsulted = 0;
+ size_t Sum = 0;
+
for (const auto &KM : Models) {
- double AnomalyScore = KM.anomalyScore(Sample);
- if (AnomalyScore == std::numeric_limits<double>::quiet_NaN()) {
- AnomalyBit = false;
+ ModelsConsulted++;
+
+ double AnomalyScore = KM.anomalyScore(Feature);
+ if (AnomalyScore == std::numeric_limits<double>::quiet_NaN())
continue;
- }
if (AnomalyScore < (100 * Cfg.DimensionAnomalyScoreThreshold)) {
- AnomalyBit = false;
+ global_statistics_ml_models_consulted(ModelsConsulted);
return false;
}
+
+ Sum += 1;
}
- AnomalyBit = true;
- return true;
+ global_statistics_ml_models_consulted(ModelsConsulted);
+ return Sum;
}
-std::array<KMeans, 1> Dimension::getModels() {
- std::unique_lock Lock(Mutex);
+std::vector<KMeans> Dimension::getModels() {
+ std::unique_lock L(M);
return Models;
}
+
+void Dimension::dump() const {
+ const char *ChartId = rrdset_id(RD->rrdset);
+ const char *DimensionId = rrddim_id(RD);
+
+ const char *MLS_Str = mls2str(MLS);
+ const char *MT_Str = mt2str(MT);
+ const char *TS_Str = ts2str(TS);
+ const char *TR_Str = tr2str(TR.Result);
+
+ const char *fmt =
+ "[ML] %s.%s: MLS=%s, MT=%s, TS=%s, Result=%s, "
+ "ReqTime=%ld, FEOReq=%ld, LEOReq=%ld, "
+ "FEOResp=%ld, LEOResp=%ld, QTR=<%ld, %ld>, DBTR=<%ld, %ld>, Collected=%zu, Total=%zu";
+
+ error(fmt,
+ ChartId, DimensionId, MLS_Str, MT_Str, TS_Str, TR_Str,
+ TR.RequestTime, TR.FirstEntryOnRequest, TR.LastEntryOnRequest,
+ TR.FirstEntryOnResponse, TR.LastEntryOnResponse,
+ TR.QueryAfterT, TR.QueryBeforeT, TR.DbAfterT, TR.DbBeforeT, TR.CollectedValues, TR.TotalValues
+ );
+}
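The training window computed in getCalculatedNumbers() anchors on the dimension's last stored entry and reaches back at most MaxN - 1 steps, clipped to the first entry; training is then attempted only if the query yields enough collected values. A worked example of that window arithmetic, with purely illustrative numbers:

    // update_every = 1s, MaxN = 14400 samples, last entry at t = 1000000,
    // first entry at t = 995000 (e.g. a recently created dimension).
    time_t last_entry  = 1000000, first_entry = 995000;
    size_t max_n = 14400, update_every = 1;
    time_t before = last_entry;                                             // QueryBeforeT
    time_t after  = std::max(before - (time_t)((max_n - 1) * update_every),
                             first_entry);                                  // QueryAfterT
    // after == 995000: the window is clipped to the first entry, so the query
    // spans 5000 seconds and training proceeds only if the values collected in
    // that range reach Cfg.MinTrainSamples.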
diff --git a/ml/Dimension.h b/ml/Dimension.h
index 3ec56e098..2b1adfff9 100644
--- a/ml/Dimension.h
+++ b/ml/Dimension.h
@@ -3,6 +3,8 @@
#ifndef ML_DIMENSION_H
#define ML_DIMENSION_H
+#include "Mutex.h"
+#include "Stats.h"
#include "Query.h"
#include "Config.h"
@@ -10,12 +12,6 @@
namespace ml {
-enum class MLResult {
- Success = 0,
- MissingData,
- NaN,
-};
-
static inline std::string getMLDimensionID(RRDDIM *RD) {
RRDSET *RS = RD->rrdset;
@@ -24,16 +20,118 @@ static inline std::string getMLDimensionID(RRDDIM *RD) {
return SS.str();
}
+enum class MachineLearningStatus {
+ // Enable training/prediction
+ Enabled,
+
+ // Disable due to update every being different from the host's
+ DisabledDueToUniqueUpdateEvery,
+
+ // Disable because configuration pattern matches the chart's id
+ DisabledDueToExcludedChart,
+};
+
+enum class TrainingStatus {
+ // We don't have a model for this dimension
+ Untrained,
+
+ // Request for training sent, but we don't have any models yet
+ PendingWithoutModel,
+
+ // Request to update existing models sent
+ PendingWithModel,
+
+ // Have a valid, up-to-date model
+ Trained,
+};
+
+enum class MetricType {
+ // The dimension has constant values, no need to train
+ Constant,
+
+ // The dimension's values fluctuate, we need to generate a model
+ Variable,
+};
+
+struct TrainingRequest {
+ // Chart/dimension we want to train
+ STRING *ChartId;
+ STRING *DimensionId;
+
+ // Creation time of request
+ time_t RequestTime;
+
+ // First/last entry of this dimension in DB
+ // at the point the request was made
+ time_t FirstEntryOnRequest;
+ time_t LastEntryOnRequest;
+};
+
+void dumpTrainingRequest(const TrainingRequest &TrainingReq, const char *Prefix);
+
+enum TrainingResult {
+ // We managed to create a KMeans model
+ Ok,
+ // Could not query DB with a correct time range
+ InvalidQueryTimeRange,
+ // Did not gather enough data from DB to run KMeans
+ NotEnoughCollectedValues,
+ // Acquired a null dimension
+ NullAcquiredDimension,
+ // Chart is under replication
+ ChartUnderReplication,
+};
+
+struct TrainingResponse {
+ // Time when the request for this response was made
+ time_t RequestTime;
+
+ // First/last entry of the dimension in DB when generating the request
+ time_t FirstEntryOnRequest;
+ time_t LastEntryOnRequest;
+
+ // First/last entry of the dimension in DB when generating the response
+ time_t FirstEntryOnResponse;
+ time_t LastEntryOnResponse;
+
+ // After/Before timestamps of our DB query
+ time_t QueryAfterT;
+ time_t QueryBeforeT;
+
+ // Actual after/before returned by the DB query ops
+ time_t DbAfterT;
+ time_t DbBeforeT;
+
+ // Number of doubles returned by the DB query
+ size_t CollectedValues;
+
+ // Number of values we return to the caller
+ size_t TotalValues;
+
+ // Result of training response
+ TrainingResult Result;
+};
+
+void dumpTrainingResponse(const TrainingResponse &TrainingResp, const char *Prefix);
+
class Dimension {
public:
Dimension(RRDDIM *RD) :
RD(RD),
- LastTrainedAt(Seconds(0)),
- Trained(false),
- ConstantModel(false),
- AnomalyScore(0.0),
- AnomalyBit(0)
- { }
+ MT(MetricType::Constant),
+ TS(TrainingStatus::Untrained),
+ TR(),
+ LastTrainingTime(0)
+ {
+ if (simple_pattern_matches(Cfg.SP_ChartsToSkip, rrdset_name(RD->rrdset)))
+ MLS = MachineLearningStatus::DisabledDueToExcludedChart;
+ else if (RD->update_every != RD->rrdset->rrdhost->rrd_update_every)
+ MLS = MachineLearningStatus::DisabledDueToUniqueUpdateEvery;
+ else
+ MLS = MachineLearningStatus::Enabled;
+
+ Models.reserve(Cfg.NumModelsToUse);
+ }
RRDDIM *getRD() const {
return RD;
@@ -43,50 +141,56 @@ public:
return RD->update_every;
}
- time_t latestTime() const {
- return Query(RD).latestTime();
- }
-
- time_t oldestTime() const {
- return Query(RD).oldestTime();
+ MetricType getMT() const {
+ return MT;
}
- bool isTrained() const {
- return Trained;
+ TrainingStatus getTS() const {
+ return TS;
}
- bool isAnomalous() const {
- return AnomalyBit;
+ MachineLearningStatus getMLS() const {
+ return MLS;
}
- bool shouldTrain(const TimePoint &TP) const;
+ TrainingResult trainModel(const TrainingRequest &TR);
- bool isActive() const;
+ void scheduleForTraining(time_t CurrT);
- MLResult trainModel();
+ bool predict(time_t CurrT, CalculatedNumber Value, bool Exists);
- bool predict(CalculatedNumber Value, bool Exists);
+ std::vector<KMeans> getModels();
+
+ void dump() const;
- std::pair detect(size_t WindowLength, bool Reset);
-
- std::array<KMeans, 1> getModels();
+private:
+ TrainingRequest getTrainingRequest(time_t CurrT) const {
+ return TrainingRequest {
+ string_dup(RD->rrdset->id),
+ string_dup(RD->id),
+ CurrT,
+ rrddim_first_entry_s(RD),
+ rrddim_last_entry_s(RD)
+ };
+ }
private:
- std::pair getCalculatedNumbers();
+ std::pair getCalculatedNumbers(const TrainingRequest &TrainingReq);
public:
RRDDIM *RD;
+ MetricType MT;
+ TrainingStatus TS;
+ TrainingResponse TR;
- TimePoint LastTrainedAt;
- std::atomic<bool> Trained;
- std::atomic<bool> ConstantModel;
+ time_t LastTrainingTime;
- CalculatedNumber AnomalyScore;
- std::atomic<bool> AnomalyBit;
+ MachineLearningStatus MLS;
std::vector<CalculatedNumber> CNs;
- std::array<KMeans, 1> Models;
- std::mutex Mutex;
+ DSample Feature;
+ std::vector<KMeans> Models;
+ Mutex M;
};
} // namespace ml
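The MetricType/TrainingStatus enums above form a small state machine: a Variable metric is scheduled Untrained → PendingWithoutModel, becomes Trained once a model is produced, and later cycles Trained → PendingWithModel → Trained as its models are refreshed; Constant metrics are never queued. A compact sketch of the scheduling transition, mirroring scheduleForTraining() (the helper name is a placeholder):

    // Hedged sketch: the status a dimension moves to when it is queued for training.
    ml::TrainingStatus next_on_schedule(ml::TrainingStatus TS) {
        switch (TS) {
            case ml::TrainingStatus::Untrained: return ml::TrainingStatus::PendingWithoutModel;
            case ml::TrainingStatus::Trained:   return ml::TrainingStatus::PendingWithModel;
            default:                            return TS;   // already pending: unchanged
        }
    }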
diff --git a/ml/Host.cc b/ml/Host.cc
index 4a57178c7..a5f276a80 100644
--- a/ml/Host.cc
+++ b/ml/Host.cc
@@ -2,42 +2,24 @@
#include "Config.h"
#include "Host.h"
+#include "Queue.h"
#include "ADCharts.h"
#include "json/single_include/nlohmann/json.hpp"
using namespace ml;
-void RrdHost::addDimension(Dimension *D) {
- std::lock_guard Lock(Mutex);
-
- DimensionsMap[D->getRD()] = D;
-
- // Default construct mutex for dimension
- LocksMap[D];
+void Host::addChart(Chart *C) {
+ std::lock_guard L(M);
+ Charts[C->getRS()] = C;
}
-void RrdHost::removeDimension(Dimension *D) {
- // Remove the dimension from the hosts map.
- {
- std::lock_guard Lock(Mutex);
- DimensionsMap.erase(D->getRD());
- }
-
- // Delete the dimension by locking the mutex that protects it.
- {
- std::lock_guard Lock(LocksMap[D]);
- delete D;
- }
-
- // Remove the lock entry for the deleted dimension.
- {
- std::lock_guard Lock(Mutex);
- LocksMap.erase(D);
- }
+void Host::removeChart(Chart *C) {
+ std::lock_guard L(M);
+ Charts.erase(C->getRS());
}
-void RrdHost::getConfigAsJson(nlohmann::json &Json) const {
+void Host::getConfigAsJson(nlohmann::json &Json) const {
Json["version"] = 1;
Json["enabled"] = Cfg.EnableAnomalyDetection;
@@ -63,193 +45,343 @@ void RrdHost::getConfigAsJson(nlohmann::json &Json) const {
Json["charts-to-skip"] = Cfg.ChartsToSkip;
}
-void TrainableHost::getModelsAsJson(nlohmann::json &Json) {
- std::lock_guard Lock(Mutex);
+void Host::getModelsAsJson(nlohmann::json &Json) {
+ std::lock_guard L(M);
- for (auto &DP : DimensionsMap) {
- Dimension *D = DP.second;
-
- nlohmann::json JsonArray = nlohmann::json::array();
- for (const KMeans &KM : D->getModels()) {
- nlohmann::json J;
- KM.toJson(J);
- JsonArray.push_back(J);
- }
- Json[getMLDimensionID(D->getRD())] = JsonArray;
+ for (auto &CP : Charts) {
+ Chart *C = CP.second;
+ C->getModelsAsJson(Json);
}
-
- return;
}
-std::pair<Dimension *, Duration<double>>
-TrainableHost::findDimensionToTrain(const TimePoint &NowTP) {
- std::lock_guard Lock(Mutex);
+#define WORKER_JOB_DETECTION_PREP 0
+#define WORKER_JOB_DETECTION_DIM_CHART 1
+#define WORKER_JOB_DETECTION_HOST_CHART 2
+#define WORKER_JOB_DETECTION_STATS 3
+#define WORKER_JOB_DETECTION_RESOURCES 4
- Duration<double> AllottedDuration = Duration<double>{Cfg.TrainEvery * updateEvery()} / (DimensionsMap.size() + 1);
+void Host::detectOnce() {
+ worker_is_busy(WORKER_JOB_DETECTION_PREP);
- for (auto &DP : DimensionsMap) {
- Dimension *D = DP.second;
+ MLS = {};
+ MachineLearningStats MLSCopy = {};
+ TrainingStats TSCopy = {};
- if (D->shouldTrain(NowTP)) {
- LocksMap[D].lock();
- return { D, AllottedDuration };
- }
- }
+ {
+ std::lock_guard L(M);
- return { nullptr, AllottedDuration };
-}
+ /*
+ * prediction/detection stats
+ */
+ for (auto &CP : Charts) {
+ Chart *C = CP.second;
-void TrainableHost::trainDimension(Dimension *D, const TimePoint &NowTP) {
- if (D == nullptr)
- return;
+ if (!C->isAvailableForML())
+ continue;
- D->LastTrainedAt = NowTP + Seconds{D->updateEvery()};
- D->trainModel();
+ MachineLearningStats ChartMLS = C->getMLS();
- {
- std::lock_guard Lock(Mutex);
- LocksMap[D].unlock();
- }
-}
+ MLS.NumMachineLearningStatusEnabled += ChartMLS.NumMachineLearningStatusEnabled;
+ MLS.NumMachineLearningStatusDisabledUE += ChartMLS.NumMachineLearningStatusDisabledUE;
+ MLS.NumMachineLearningStatusDisabledSP += ChartMLS.NumMachineLearningStatusDisabledSP;
-void TrainableHost::train() {
- Duration MaxSleepFor = Seconds{10 * updateEvery()};
+ MLS.NumMetricTypeConstant += ChartMLS.NumMetricTypeConstant;
+ MLS.NumMetricTypeVariable += ChartMLS.NumMetricTypeVariable;
- worker_register("MLTRAIN");
- worker_register_job_name(0, "dimensions");
+ MLS.NumTrainingStatusUntrained += ChartMLS.NumTrainingStatusUntrained;
+ MLS.NumTrainingStatusPendingWithoutModel += ChartMLS.NumTrainingStatusPendingWithoutModel;
+ MLS.NumTrainingStatusTrained += ChartMLS.NumTrainingStatusTrained;
+ MLS.NumTrainingStatusPendingWithModel += ChartMLS.NumTrainingStatusPendingWithModel;
- worker_is_busy(0);
- while (!netdata_exit) {
- netdata_thread_testcancel();
- netdata_thread_disable_cancelability();
+ MLS.NumAnomalousDimensions += ChartMLS.NumAnomalousDimensions;
+ MLS.NumNormalDimensions += ChartMLS.NumNormalDimensions;
+ }
- updateResourceUsage();
+ HostAnomalyRate = 0.0;
+ size_t NumActiveDimensions = MLS.NumAnomalousDimensions + MLS.NumNormalDimensions;
+ if (NumActiveDimensions)
+ HostAnomalyRate = static_cast<double>(MLS.NumAnomalousDimensions) / NumActiveDimensions;
- TimePoint NowTP = SteadyClock::now();
+ MLSCopy = MLS;
- auto P = findDimensionToTrain(NowTP);
- trainDimension(P.first, NowTP);
+ /*
+ * training stats
+ */
+ TSCopy = TS;
- netdata_thread_enable_cancelability();
+ TS.QueueSize = 0;
+ TS.NumPoppedItems = 0;
- Duration AllottedDuration = P.second;
- Duration RealDuration = SteadyClock::now() - NowTP;
+ TS.AllottedUT = 0;
+ TS.ConsumedUT = 0;
+ TS.RemainingUT = 0;
- Duration SleepFor;
- if (RealDuration >= AllottedDuration)
- continue;
+ TS.TrainingResultOk = 0;
+ TS.TrainingResultInvalidQueryTimeRange = 0;
+ TS.TrainingResultNotEnoughCollectedValues = 0;
+ TS.TrainingResultNullAcquiredDimension = 0;
+ TS.TrainingResultChartUnderReplication = 0;
+ }
- worker_is_idle();
- SleepFor = std::min(AllottedDuration - RealDuration, MaxSleepFor);
- TimePoint Now = SteadyClock::now();
- auto Until = Now + SleepFor;
- while (Now < Until && !netdata_exit) {
- std::this_thread::sleep_for(std::chrono::milliseconds(1000));
- Now = SteadyClock::now();
- }
- worker_is_busy(0);
+ // Calc the avg values
+ if (TSCopy.NumPoppedItems) {
+ TSCopy.QueueSize /= TSCopy.NumPoppedItems;
+ TSCopy.AllottedUT /= TSCopy.NumPoppedItems;
+ TSCopy.ConsumedUT /= TSCopy.NumPoppedItems;
+ TSCopy.RemainingUT /= TSCopy.NumPoppedItems;
+
+ TSCopy.TrainingResultOk /= TSCopy.NumPoppedItems;
+ TSCopy.TrainingResultInvalidQueryTimeRange /= TSCopy.NumPoppedItems;
+ TSCopy.TrainingResultNotEnoughCollectedValues /= TSCopy.NumPoppedItems;
+ TSCopy.TrainingResultNullAcquiredDimension /= TSCopy.NumPoppedItems;
+ TSCopy.TrainingResultChartUnderReplication /= TSCopy.NumPoppedItems;
+ } else {
+ TSCopy.QueueSize = 0;
+ TSCopy.AllottedUT = 0;
+ TSCopy.ConsumedUT = 0;
+ TSCopy.RemainingUT = 0;
}
-}
-#define WORKER_JOB_DETECT_DIMENSION 0
-#define WORKER_JOB_UPDATE_DETECTION_CHART 1
-#define WORKER_JOB_UPDATE_ANOMALY_RATES 2
-#define WORKER_JOB_UPDATE_CHARTS 3
+ if(!RH)
+ return;
-#if WORKER_UTILIZATION_MAX_JOB_TYPES < 5
-#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 5
+ worker_is_busy(WORKER_JOB_DETECTION_DIM_CHART);
+ updateDimensionsChart(RH, MLSCopy);
+
+ worker_is_busy(WORKER_JOB_DETECTION_HOST_CHART);
+ updateHostAndDetectionRateCharts(RH, HostAnomalyRate * 10000.0);
+
+#ifdef NETDATA_ML_RESOURCE_CHARTS
+ worker_is_busy(WORKER_JOB_DETECTION_RESOURCES);
+ struct rusage PredictionRU;
+ getrusage(RUSAGE_THREAD, &PredictionRU);
+ updateResourceUsageCharts(RH, PredictionRU, TSCopy.TrainingRU);
#endif
-void DetectableHost::detectOnce() {
- size_t NumAnomalousDimensions = 0;
- size_t NumNormalDimensions = 0;
- size_t NumTrainedDimensions = 0;
- size_t NumActiveDimensions = 0;
+ worker_is_busy(WORKER_JOB_DETECTION_STATS);
+ updateTrainingStatisticsChart(RH, TSCopy);
+}
- {
- std::lock_guard Lock(Mutex);
+class AcquiredDimension {
+public:
+ static AcquiredDimension find(RRDHOST *RH, STRING *ChartId, STRING *DimensionId) {
+ RRDDIM_ACQUIRED *AcqRD = nullptr;
+ Dimension *D = nullptr;
+
+ RRDSET *RS = rrdset_find(RH, string2str(ChartId));
+ if (RS) {
+ AcqRD = rrddim_find_and_acquire(RS, string2str(DimensionId));
+ if (AcqRD) {
+ RRDDIM *RD = rrddim_acquired_to_rrddim(AcqRD);
+ if (RD)
+ D = reinterpret_cast<Dimension *>(RD->ml_dimension);
+ }
+ }
- for (auto &DP : DimensionsMap) {
- worker_is_busy(WORKER_JOB_DETECT_DIMENSION);
+ return AcquiredDimension(AcqRD, D);
+ }
- Dimension *D = DP.second;
+private:
+ AcquiredDimension(RRDDIM_ACQUIRED *AcqRD, Dimension *D) : AcqRD(AcqRD), D(D) {}
- if (!D->isActive())
- continue;
+public:
+ TrainingResult train(const TrainingRequest &TR) {
+ if (!D)
+ return TrainingResult::NullAcquiredDimension;
- NumActiveDimensions++;
- NumTrainedDimensions += D->isTrained();
+ return D->trainModel(TR);
+ }
- bool IsAnomalous = D->isAnomalous();
- if (IsAnomalous)
- NumAnomalousDimensions += 1;
+ ~AcquiredDimension() {
+ if (AcqRD)
+ rrddim_acquired_release(AcqRD);
+ }
+
+private:
+ RRDDIM_ACQUIRED *AcqRD;
+ Dimension *D;
+};
+
+void Host::scheduleForTraining(TrainingRequest TR) {
+ TrainingQueue.push(TR);
+}
+
+#define WORKER_JOB_TRAINING_FIND 0
+#define WORKER_JOB_TRAINING_TRAIN 1
+#define WORKER_JOB_TRAINING_STATS 2
+
+void Host::train() {
+ worker_register("MLTRAIN");
+ worker_register_job_name(WORKER_JOB_TRAINING_FIND, "find");
+ worker_register_job_name(WORKER_JOB_TRAINING_TRAIN, "train");
+ worker_register_job_name(WORKER_JOB_TRAINING_STATS, "stats");
+
+ service_register(SERVICE_THREAD_TYPE_NETDATA, NULL, (force_quit_t )ml_cancel_anomaly_detection_threads, RH, true);
+
+ while (service_running(SERVICE_ML_TRAINING)) {
+ auto P = TrainingQueue.pop();
+ TrainingRequest TrainingReq = P.first;
+ size_t Size = P.second;
+
+ if (ThreadsCancelled) {
+ info("Stopping training thread because it was cancelled.");
+ break;
}
- if (NumAnomalousDimensions)
- HostAnomalyRate = static_cast(NumAnomalousDimensions) / NumActiveDimensions;
- else
- HostAnomalyRate = 0.0;
+ usec_t AllottedUT = (Cfg.TrainEvery * RH->rrd_update_every * USEC_PER_SEC) / Size;
+ if (AllottedUT > USEC_PER_SEC)
+ AllottedUT = USEC_PER_SEC;
- NumNormalDimensions = NumActiveDimensions - NumAnomalousDimensions;
- }
+ usec_t StartUT = now_monotonic_usec();
+ TrainingResult TrainingRes;
+ {
+ worker_is_busy(WORKER_JOB_TRAINING_FIND);
+ AcquiredDimension AcqDim = AcquiredDimension::find(RH, TrainingReq.ChartId, TrainingReq.DimensionId);
- this->NumAnomalousDimensions = NumAnomalousDimensions;
- this->NumNormalDimensions = NumNormalDimensions;
- this->NumTrainedDimensions = NumTrainedDimensions;
- this->NumActiveDimensions = NumActiveDimensions;
+ worker_is_busy(WORKER_JOB_TRAINING_TRAIN);
+ TrainingRes = AcqDim.train(TrainingReq);
- worker_is_busy(WORKER_JOB_UPDATE_CHARTS);
- updateDimensionsChart(getRH(), NumTrainedDimensions, NumNormalDimensions, NumAnomalousDimensions);
- updateHostAndDetectionRateCharts(getRH(), HostAnomalyRate * 10000.0);
+ string_freez(TrainingReq.ChartId);
+ string_freez(TrainingReq.DimensionId);
+ }
+ usec_t ConsumedUT = now_monotonic_usec() - StartUT;
+
+ worker_is_busy(WORKER_JOB_TRAINING_STATS);
+
+ usec_t RemainingUT = 0;
+ if (ConsumedUT < AllottedUT)
+ RemainingUT = AllottedUT - ConsumedUT;
+
+ {
+ std::lock_guard<Mutex> L(M);
+
+ if (TS.AllottedUT == 0) {
+ struct rusage TRU;
+ getrusage(RUSAGE_THREAD, &TRU);
+ TS.TrainingRU = TRU;
+ }
+
+ TS.QueueSize += Size;
+ TS.NumPoppedItems += 1;
+
+ TS.AllottedUT += AllottedUT;
+ TS.ConsumedUT += ConsumedUT;
+ TS.RemainingUT += RemainingUT;
+
+ switch (TrainingRes) {
+ case TrainingResult::Ok:
+ TS.TrainingResultOk += 1;
+ break;
+ case TrainingResult::InvalidQueryTimeRange:
+ TS.TrainingResultInvalidQueryTimeRange += 1;
+ break;
+ case TrainingResult::NotEnoughCollectedValues:
+ TS.TrainingResultNotEnoughCollectedValues += 1;
+ break;
+ case TrainingResult::NullAcquiredDimension:
+ TS.TrainingResultNullAcquiredDimension += 1;
+ break;
+ case TrainingResult::ChartUnderReplication:
+ TS.TrainingResultChartUnderReplication += 1;
+ break;
+ }
+ }
- struct rusage TRU;
- getResourceUsage(&TRU);
- updateTrainingChart(getRH(), &TRU);
+ worker_is_idle();
+ std::this_thread::sleep_for(std::chrono::microseconds{RemainingUT});
+ worker_is_busy(0);
+ }
}
-void DetectableHost::detect() {
+void Host::detect() {
worker_register("MLDETECT");
- worker_register_job_name(WORKER_JOB_DETECT_DIMENSION, "dimensions");
- worker_register_job_name(WORKER_JOB_UPDATE_DETECTION_CHART, "detection chart");
- worker_register_job_name(WORKER_JOB_UPDATE_ANOMALY_RATES, "anomaly rates");
- worker_register_job_name(WORKER_JOB_UPDATE_CHARTS, "charts");
+ worker_register_job_name(WORKER_JOB_DETECTION_PREP, "prep");
+ worker_register_job_name(WORKER_JOB_DETECTION_DIM_CHART, "dim chart");
+ worker_register_job_name(WORKER_JOB_DETECTION_HOST_CHART, "host chart");
+ worker_register_job_name(WORKER_JOB_DETECTION_STATS, "stats");
+ worker_register_job_name(WORKER_JOB_DETECTION_RESOURCES, "resources");
- std::this_thread::sleep_for(Seconds{10});
+ service_register(SERVICE_THREAD_TYPE_NETDATA, NULL, (force_quit_t )ml_cancel_anomaly_detection_threads, RH, true);
heartbeat_t HB;
heartbeat_init(&HB);
- while (!netdata_exit) {
- netdata_thread_testcancel();
+ while (service_running((SERVICE_TYPE)(SERVICE_ML_PREDICTION | SERVICE_COLLECTORS))) {
worker_is_idle();
- heartbeat_next(&HB, updateEvery() * USEC_PER_SEC);
-
- netdata_thread_disable_cancelability();
+ heartbeat_next(&HB, (RH ? RH->rrd_update_every : default_rrd_update_every) * USEC_PER_SEC);
detectOnce();
-
- worker_is_busy(WORKER_JOB_UPDATE_DETECTION_CHART);
- updateDetectionChart(getRH());
- netdata_thread_enable_cancelability();
}
}
-void DetectableHost::getDetectionInfoAsJson(nlohmann::json &Json) const {
+void Host::getDetectionInfoAsJson(nlohmann::json &Json) const {
Json["version"] = 1;
- Json["anomalous-dimensions"] = NumAnomalousDimensions;
- Json["normal-dimensions"] = NumNormalDimensions;
- Json["total-dimensions"] = NumAnomalousDimensions + NumNormalDimensions;
- Json["trained-dimensions"] = NumTrainedDimensions;
+ Json["anomalous-dimensions"] = MLS.NumAnomalousDimensions;
+ Json["normal-dimensions"] = MLS.NumNormalDimensions;
+ Json["total-dimensions"] = MLS.NumAnomalousDimensions + MLS.NumNormalDimensions;
+ Json["trained-dimensions"] = MLS.NumTrainingStatusTrained + MLS.NumTrainingStatusPendingWithModel;
+}
+
+void *train_main(void *Arg) {
+ Host *H = reinterpret_cast<Host *>(Arg);
+ H->train();
+ return nullptr;
}
-void DetectableHost::startAnomalyDetectionThreads() {
- TrainingThread = std::thread(&TrainableHost::train, this);
- DetectionThread = std::thread(&DetectableHost::detect, this);
+void *detect_main(void *Arg) {
+ Host *H = reinterpret_cast<Host *>(Arg);
+ H->detect();
+ return nullptr;
}
-void DetectableHost::stopAnomalyDetectionThreads() {
- netdata_thread_cancel(TrainingThread.native_handle());
- netdata_thread_cancel(DetectionThread.native_handle());
+void Host::startAnomalyDetectionThreads() {
+ if (ThreadsRunning) {
+ error("Anomaly detections threads for host %s are already-up and running.", rrdhost_hostname(RH));
+ return;
+ }
+
+ ThreadsRunning = true;
+ ThreadsCancelled = false;
+ ThreadsJoined = false;
+
+ char Tag[NETDATA_THREAD_TAG_MAX + 1];
+
+// #define ML_DISABLE_JOINING
- TrainingThread.join();
- DetectionThread.join();
+ snprintfz(Tag, NETDATA_THREAD_TAG_MAX, "MLTR[%s]", rrdhost_hostname(RH));
+ netdata_thread_create(&TrainingThread, Tag, NETDATA_THREAD_OPTION_JOINABLE, train_main, static_cast<void *>(this));
+
+ snprintfz(Tag, NETDATA_THREAD_TAG_MAX, "MLDT[%s]", rrdhost_hostname(RH));
+ netdata_thread_create(&DetectionThread, Tag, NETDATA_THREAD_OPTION_JOINABLE, detect_main, static_cast<void *>(this));
+}
+
+void Host::stopAnomalyDetectionThreads(bool join) {
+ if (!ThreadsRunning) {
+ error("Anomaly detections threads for host %s have already been stopped.", rrdhost_hostname(RH));
+ return;
+ }
+
+ if(!ThreadsCancelled) {
+ ThreadsCancelled = true;
+
+ // Signal the training queue to stop popping items
+ TrainingQueue.signal();
+ netdata_thread_cancel(TrainingThread);
+ netdata_thread_cancel(DetectionThread);
+ }
+
+ if (join && !ThreadsJoined) {
+ ThreadsJoined = true;
+ ThreadsRunning = false;
+
+ // these fail on alpine linux and our CI hangs forever
+ // failing to compile static builds
+
+ // commenting them, until we find a solution
+
+ // to enable again:
+ // NETDATA_THREAD_OPTION_DEFAULT needs to become NETDATA_THREAD_OPTION_JOINABLE
+
+ netdata_thread_join(TrainingThread, nullptr);
+ netdata_thread_join(DetectionThread, nullptr);
+ }
}
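
A note on the pacing in `Host::train()` above: each popped training request gets an equal share of the `train every` window, capped at one second, and the thread sleeps off whatever part of that share the training did not consume. The sketch below is illustrative only (it is not a netdata helper), and simply mirrors the `AllottedUT`/`RemainingUT` arithmetic shown in the diff:

```
// Sketch only -- mirrors the per-item time budget in Host::train().
#include <algorithm>
#include <cstdint>

static uint64_t allottedMicroseconds(uint64_t TrainEverySec,
                                     uint64_t UpdateEverySec,
                                     uint64_t QueueSize) {
    const uint64_t UsecPerSec = 1000000ULL;
    uint64_t AllottedUT = (TrainEverySec * UpdateEverySec * UsecPerSec) / QueueSize;
    return std::min(AllottedUT, UsecPerSec);   // never more than one second per item
}

// Example: train every = 3600s, update every = 1s, 1800 queued items
// -> 2,000,000 usec per item, capped to 1,000,000 usec (one second).
```
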
diff --git a/ml/Host.h b/ml/Host.h
index 52a0cd095..289cb5ab7 100644
--- a/ml/Host.h
+++ b/ml/Host.h
@@ -3,97 +3,67 @@
#ifndef ML_HOST_H
#define ML_HOST_H
+#include "Mutex.h"
#include "Config.h"
#include "Dimension.h"
+#include "Chart.h"
+#include "Queue.h"
#include "ml-private.h"
#include "json/single_include/nlohmann/json.hpp"
-namespace ml {
+namespace ml
+{
-class RrdHost {
-public:
- RrdHost(RRDHOST *RH) : RH(RH) {};
-
- RRDHOST *getRH() { return RH; }
-
- unsigned updateEvery() { return RH->rrd_update_every; }
-
- std::string getUUID() {
- char S[UUID_STR_LEN];
- uuid_unparse_lower(RH->host_uuid, S);
- return S;
- }
-
- void addDimension(Dimension *D);
- void removeDimension(Dimension *D);
-
- void getConfigAsJson(nlohmann::json &Json) const;
-
- virtual ~RrdHost() {};
+class Host {
-protected:
- RRDHOST *RH;
-
- // Protect dimension and lock maps
- std::mutex Mutex;
-
- std::unordered_map DimensionsMap;
- std::unordered_map LocksMap;
-};
+friend void* train_main(void *);
+friend void *detect_main(void *);
-class TrainableHost : public RrdHost {
public:
- TrainableHost(RRDHOST *RH) : RrdHost(RH) {}
-
- void train();
-
- void updateResourceUsage() {
- std::lock_guard Lock(ResourceUsageMutex);
- getrusage(RUSAGE_THREAD, &ResourceUsage);
- }
-
- void getResourceUsage(struct rusage *RU) {
- std::lock_guard Lock(ResourceUsageMutex);
- memcpy(RU, &ResourceUsage, sizeof(struct rusage));
- }
+ Host(RRDHOST *RH) :
+ RH(RH),
+ MLS(),
+ TS(),
+ HostAnomalyRate(0.0),
+ ThreadsRunning(false),
+ ThreadsCancelled(false),
+ ThreadsJoined(false)
+ {}
+
+ void addChart(Chart *C);
+ void removeChart(Chart *C);
+ void getConfigAsJson(nlohmann::json &Json) const;
void getModelsAsJson(nlohmann::json &Json);
-
-private:
- std::pair> findDimensionToTrain(const TimePoint &NowTP);
- void trainDimension(Dimension *D, const TimePoint &NowTP);
-
- struct rusage ResourceUsage{};
- std::mutex ResourceUsageMutex;
-};
-
-class DetectableHost : public TrainableHost {
-public:
- DetectableHost(RRDHOST *RH) : TrainableHost(RH) {}
+ void getDetectionInfoAsJson(nlohmann::json &Json) const;
void startAnomalyDetectionThreads();
- void stopAnomalyDetectionThreads();
+ void stopAnomalyDetectionThreads(bool join);
- void getDetectionInfoAsJson(nlohmann::json &Json) const;
+ void scheduleForTraining(TrainingRequest TR);
+ void train();
-private:
void detect();
void detectOnce();
private:
- std::thread TrainingThread;
- std::thread DetectionThread;
-
+ RRDHOST *RH;
+ MachineLearningStats MLS;
+ TrainingStats TS;
CalculatedNumber HostAnomalyRate{0.0};
+ std::atomic<bool> ThreadsRunning;
+ std::atomic<bool> ThreadsCancelled;
+ std::atomic<bool> ThreadsJoined;
- size_t NumAnomalousDimensions{0};
- size_t NumNormalDimensions{0};
- size_t NumTrainedDimensions{0};
- size_t NumActiveDimensions{0};
-};
+ Queue<TrainingRequest> TrainingQueue;
-using Host = DetectableHost;
+ Mutex M;
+ std::unordered_map<RRDSET *, Chart *> Charts;
+
+ netdata_thread_t TrainingThread;
+ netdata_thread_t DetectionThread;
+};
} // namespace ml
diff --git a/ml/Mutex.h b/ml/Mutex.h
new file mode 100644
index 000000000..fcdb75313
--- /dev/null
+++ b/ml/Mutex.h
@@ -0,0 +1,36 @@
+#ifndef ML_MUTEX_H
+#define ML_MUTEX_H
+
+#include "ml-private.h"
+
+class Mutex {
+public:
+ Mutex() {
+ netdata_mutex_init(&M);
+ }
+
+ void lock() {
+ netdata_mutex_lock(&M);
+ }
+
+ void unlock() {
+ netdata_mutex_unlock(&M);
+ }
+
+ bool try_lock() {
+ return netdata_mutex_trylock(&M) == 0;
+ }
+
+ netdata_mutex_t *inner() {
+ return &M;
+ }
+
+ ~Mutex() {
+ netdata_mutex_destroy(&M);
+ }
+
+private:
+ netdata_mutex_t M;
+};
+
+#endif /* ML_MUTEX_H */
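
Because `Mutex` exposes `lock()`/`unlock()` it satisfies the standard BasicLockable requirements, so it works with `std::lock_guard`, while `inner()` hands the raw `netdata_mutex_t` to pthread condition variables (exactly as `Queue.h` below does). A minimal usage sketch, with `Ready` and `CV` as illustrative names only:

```
// Sketch only: intended usage of the Mutex wrapper above.
#include <mutex>      // std::lock_guard
#include <pthread.h>

static Mutex M;
static pthread_cond_t CV = PTHREAD_COND_INITIALIZER;
static bool Ready = false;

static void waitUntilReady() {
    std::lock_guard<Mutex> L(M);            // RAII lock via lock()/unlock()
    while (!Ready)
        pthread_cond_wait(&CV, M.inner());  // waits on the same underlying handle
}
```
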
diff --git a/ml/Query.h b/ml/Query.h
index 78d117003..42a96e85b 100644
--- a/ml/Query.h
+++ b/ml/Query.h
@@ -8,19 +8,19 @@ namespace ml {
class Query {
public:
Query(RRDDIM *RD) : RD(RD), Initialized(false) {
- Ops = RD->tiers[0]->query_ops;
+ Ops = RD->tiers[0].query_ops;
}
time_t latestTime() {
- return Ops->latest_time(RD->tiers[0]->db_metric_handle);
+ return Ops->latest_time_s(RD->tiers[0].db_metric_handle);
}
time_t oldestTime() {
- return Ops->oldest_time(RD->tiers[0]->db_metric_handle);
+ return Ops->oldest_time_s(RD->tiers[0].db_metric_handle);
}
void init(time_t AfterT, time_t BeforeT) {
- Ops->init(RD->tiers[0]->db_metric_handle, &Handle, AfterT, BeforeT);
+ Ops->init(RD->tiers[0].db_metric_handle, &Handle, AfterT, BeforeT, STORAGE_PRIORITY_BEST_EFFORT);
Initialized = true;
points_read = 0;
}
@@ -40,7 +40,7 @@ public:
 std::pair<time_t, CalculatedNumber> nextMetric() {
points_read++;
STORAGE_POINT sp = Ops->next_metric(&Handle);
- return { sp.start_time, sp.sum / sp.count };
+ return {sp.end_time_s, sp.sum / sp.count };
}
private:
diff --git a/ml/Queue.h b/ml/Queue.h
new file mode 100644
index 000000000..37a74bd07
--- /dev/null
+++ b/ml/Queue.h
@@ -0,0 +1,66 @@
+#ifndef QUEUE_H
+#define QUEUE_H
+
+#include "ml-private.h"
+#include "Mutex.h"
+#include <queue>
+#include <mutex>
+#include <atomic>
+
+template<typename T>
+class Queue {
+public:
+ Queue(void) : Q(), M() {
+ pthread_cond_init(&CV, nullptr);
+ Exit = false;
+ }
+
+ ~Queue() {
+ pthread_cond_destroy(&CV);
+ }
+
+ void push(T t) {
+ std::lock_guard<Mutex> L(M);
+
+ Q.push(t);
+ pthread_cond_signal(&CV);
+ }
+
+ std::pair<T, size_t> pop(void) {
+ std::lock_guard<Mutex> L(M);
+
+ while (Q.empty()) {
+ pthread_cond_wait(&CV, M.inner());
+
+ if (Exit) {
+ // This should happen only when we are destroying a host.
+ // Callers should use a flag dedicated to checking if we
+ // are about to delete the host or exit the agent. The original
+ // implementation would call pthread_exit which would cause
+ // the queue's mutex to be destroyed twice (and fail on the
+ // 2nd time)
+ return { T(), 0 };
+ }
+ }
+
+ T V = Q.front();
+ size_t Size = Q.size();
+ Q.pop();
+
+ return { V, Size };
+ }
+
+ void signal() {
+ std::lock_guard<Mutex> L(M);
+ Exit = true;
+ pthread_cond_signal(&CV);
+ }
+
+private:
+ std::queue Q;
+ Mutex M;
+ pthread_cond_t CV;
+ std::atomic<bool> Exit;
+};
+
+#endif /* QUEUE_H */
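
A hedged producer/consumer sketch of the blocking queue above (the `WorkItem` type and the function names are illustrative only): `pop()` blocks until an item arrives, returns the queue depth alongside the item, and returns `{T(), 0}` once `signal()` has been called, which is how shutdown is detected.

```
// Sketch only: intended usage of Queue<T>.
struct WorkItem { int Id; };

static Queue<WorkItem> Q;

static void producer() {
    Q.push(WorkItem{42});            // wakes one waiting consumer
}

static void consumer() {
    for (;;) {
        auto P = Q.pop();            // blocks until push() or signal()
        if (P.second == 0)           // signal() was called: {T(), 0}
            break;
        WorkItem W = P.first;
        (void) W;                    // ... process the item ...
    }
}
```
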
diff --git a/ml/README.md b/ml/README.md
index f6fd923ab..7f3ed276b 100644
--- a/ml/README.md
+++ b/ml/README.md
@@ -1,14 +1,18 @@
-
+
# Machine learning (ML) powered anomaly detection
## Overview
-As of [`v1.32.0`](https://github.com/netdata/netdata/releases/tag/v1.32.0), Netdata comes with some ML powered [anomaly detection](https://en.wikipedia.org/wiki/Anomaly_detection) capabilities built into it and available to use out of the box, with zero configuration required (ML was enabled by default in `v1.35.0-29-nightly` in [this PR](https://github.com/netdata/netdata/pull/13158), previously it required a one line config change).
+As of [`v1.32.0`](https://github.com/netdata/netdata/releases/tag/v1.32.0), Netdata comes with ML powered [anomaly detection](https://en.wikipedia.org/wiki/Anomaly_detection) capabilities built into it and available to use out of the box, with zero configuration required (ML was enabled by default in `v1.35.0-29-nightly` in [this PR](https://github.com/netdata/netdata/pull/13158), previously it required a one line config change).
🚧 **Note**: If you would like to get involved and help us with some feedback, email us at analytics-ml-team@netdata.cloud, comment on the [beta launch post](https://community.netdata.cloud/t/anomaly-advisor-beta-launch/2717) in the Netdata community, or come join us in the [🤖-ml-powered-monitoring](https://discord.gg/4eRSEUpJnc) channel of the Netdata discord.
@@ -99,49 +103,7 @@ An ["anomaly detector"](#anomaly-detector) looks at all anomaly bits of a node.
Essentially if the ["Node Anomaly Rate"](#node-anomaly-rate) (NAR) passes a defined threshold and stays above that threshold for a persistent amount of time, a "Node [Anomaly Event](#anomaly-event)" will be triggered.
-These anomaly events are currently exposed via `/api/v1/anomaly_events`
-
-**Note**: Clicking the link below will likely return an empty list of `[]`. This is the response when no anomaly events exist in the specified range. The example response below is illustrative of what the response would be when one or more anomaly events exist within the range of `after` to `before`.
-
-https://london.my-netdata.io/api/v1/anomaly_events?after=1638365182000&before=1638365602000
-
-If an event exists within the window, the result would be a list of start and end times.
-
-```
-[
- [
- 1638367788,
- 1638367851
- ]
-]
-```
-
-Information about each anomaly event can then be found at the `/api/v1/anomaly_event_info` endpoint (making sure to pass the `after` and `before` params):
-
-**Note**: If you click the below url you will get a `null` since no such anomaly event exists as the response is just an illustrative example taken from a node that did have such an anomaly event.
-
-https://london.my-netdata.io/api/v1/anomaly_event_info?after=1638367788&before=1638367851
-
-```
-[
- [
- 0.66,
- "netdata.response_time|max"
- ],
- [
- 0.63,
- "netdata.response_time|average"
- ],
- [
- 0.54,
- "netdata.requests|requests"
- ],
- ...
-```
-
-The query returns a list of dimension anomaly rates for all dimensions that were considered part of the detected anomaly event.
-
-**Note**: We plan to build additional anomaly detection and exploration features into both Netdata Agent and Netdata Cloud. The current endpoints are still under active development to power the upcoming features.
+These anomaly events are currently exposed via the `new_anomaly_event` dimension on the `anomaly_detection.anomaly_detection` chart.
## Configuration
@@ -152,7 +114,7 @@ To enable or disable anomaly detection:
2. In the `[ml]` section, set `enabled = yes` to enable or `enabled = no` to disable.
3. Restart netdata (typically `sudo systemctl restart netdata`).
-**Note**: If you would like to learn more about configuring Netdata please see [the configuration guide](https://learn.netdata.cloud/guides/step-by-step/step-04).
+**Note**: If you would like to learn more about configuring Netdata please see [the configuration guide](https://github.com/netdata/netdata/blob/master/docs/guides/step-by-step/step-04.md).
Below is a list of all the available configuration params and their default values.
@@ -162,6 +124,7 @@ Below is a list of all the available configuration params and their default valu
# maximum num samples to train = 14400
# minimum num samples to train = 3600
# train every = 3600
+ # number of models per dimension = 1
# dbengine anomaly rate every = 30
# num samples to diff = 1
# num samples to smooth = 3
@@ -169,12 +132,9 @@ Below is a list of all the available configuration params and their default valu
# random sampling ratio = 0.2
# maximum number of k-means iterations = 1000
# dimension anomaly score threshold = 0.99
- # host anomaly rate threshold = 0.01000
- # minimum window size = 30.00000
- # maximum window size = 600.00000
- # idle window size = 30.00000
- # window minimum anomaly rate = 0.25000
- # anomaly event min dimension rate threshold = 0.05000
+ # host anomaly rate threshold = 1.0
+ # anomaly detection grouping method = average
+ # anomaly detection grouping duration = 300
# hosts to skip from training = !*
# charts to skip from training = netdata.*
```
@@ -183,7 +143,7 @@ Below is a list of all the available configuration params and their default valu
If you would like to run ML on a parent instead of at the edge, some configuration options are illustrated below.
-This example assumes 3 child nodes [streaming](https://learn.netdata.cloud/docs/agent/streaming) to 1 parent node and illustrates the main ways you might want to configure running ML for the children on the parent, running ML on the children themselves, or even a mix of approaches.
+This example assumes 3 child nodes [streaming](https://github.com/netdata/netdata/blob/master/streaming/README.md) to 1 parent node and illustrates the main ways you might want to configure running ML for the children on the parent, running ML on the children themselves, or even a mix of approaches.
![parent_child_options](https://user-images.githubusercontent.com/2178292/164439761-8fb7dddd-c4d8-4329-9f44-9a794937a086.png)
@@ -221,6 +181,7 @@ This example assumes 3 child nodes [streaming](https://learn.netdata.cloud/docs/
- `maximum num samples to train`: (`3600`/`86400`) This is the maximum amount of time you would like to train each model on. For example, the default of `14400` trains on the preceding 4 hours of data, assuming an `update every` of 1 second.
- `minimum num samples to train`: (`900`/`21600`) This is the minimum amount of data required to be able to train a model. For example, the default of `900` implies that once at least 15 minutes of data is available for training, a model is trained, otherwise it is skipped and checked again at the next training run.
- `train every`: (`1800`/`21600`) This is how often each model will be retrained. For example, the default of `3600` means that each model is retrained every hour. Note: The training of all models is spread out across the `train every` period for efficiency, so in reality, it means that each model will be trained in a staggered manner within each `train every` period.
+- `number of models per dimension`: (`1`/`168`) This is the number of trained models that will be used for scoring. For example, the default `number of models per dimension = 1` means that just the most recently trained model (covering up to the most recent `maximum num samples to train` of training data) for the dimension will be used to determine the corresponding anomaly bit. Alternatively, with `train every = 3600` and `number of models per dimension = 24`, netdata will store and use the last 24 trained models for each dimension when determining the anomaly bit. In that configuration, the latest feature vector must look anomalous across _all_ the models trained for that dimension in the last 24 hours before it is flagged as anomalous. As such, increasing `number of models per dimension` may reduce some false positives since it will result in more models (covering a wider time frame of training) being used during scoring.
- `dbengine anomaly rate every`: (`30`/`900`) This is how often netdata will aggregate all the anomaly bits into a single chart (`anomaly_detection.anomaly_rates`). The aggregation into a single chart allows enabling anomaly rate ranking over _all_ metrics with one API call as opposed to a call per chart.
- `num samples to diff`: (`0`/`1`) This is a `0` or `1` to determine if you want the model to operate on differences of the raw data or just the raw data. For example, the default of `1` means that we take differences of the raw values. Using differences is more general and works on dimensions that might naturally tend to have some trends or cycles in them that is normal behavior to which we don't want to be too sensitive.
- `num samples to smooth`: (`0`/`5`) This is a small integer that controls the amount of smoothing applied as part of the feature processing used by the model. For example, the default of `3` means that the rolling average of the last 3 values is used. Smoothing like this helps the model be a little more robust to spiky types of dimensions that naturally "jump" up or down as part of their normal behavior.
@@ -228,40 +189,37 @@ This example assumes 3 child nodes [streaming](https://learn.netdata.cloud/docs/
- `random sampling ratio`: (`0.2`/`1.0`) This parameter determines how much of the available training data is randomly sampled when training a model. The default of `0.2` means that Netdata will train on a random 20% of training data. This parameter influences cost efficiency. At `0.2` the model is still reasonably trained while minimizing system overhead costs caused by the training.
- `maximum number of k-means iterations`: This is a parameter that can be passed to the model to limit the number of iterations in training the k-means model. The vast majority of cases can ignore this and leave it at its default.
- `dimension anomaly score threshold`: (`0.01`/`5.00`) This is the threshold at which an individual dimension at a specific timestep is considered anomalous or not. For example, the default of `0.99` means that a dimension with an anomaly score of 99% or higher is flagged as anomalous. This is a normalized probability based on the training data, so the default of 99% means that anything that is as strange (based on distance measure) or more strange as the most strange 1% of data observed during training will be flagged as anomalous. If you wanted to make the anomaly detection on individual dimensions more sensitive you could try a value like `0.90` (90%) or to make it less sensitive you could try `1.5` (150%).
-- `host anomaly rate threshold`: (`0.0`/`1.0`) This is the percentage of dimensions (based on all those enabled for anomaly detection) that need to be considered anomalous at specific timestep for the host itself to be considered anomalous. For example, the default value of `0.01` means that if more than 1% of dimensions are anomalous at the same time then the host itself is considered in an anomalous state.
-- `minimum window size`: The Netdata "Anomaly Detector" logic works over a rolling window of data. This parameter defines the minimum length of window to consider. If over this window the host is in an anomalous state then an anomaly detection event will be triggered. For example, the default of `30` means that the detector will initially work over a rolling window of 30 seconds. Note: The length of this window will be dynamic once an anomaly event has been triggered such that it will expand as needed until either the max length of an anomaly event is hit or the host settles back into a normal state with sufficiently decreased host level anomaly states in the rolling window. Note: If you wanted to adjust the higher level anomaly detector behavior then this is one parameter you might adjust to see the impact of on anomaly detection events.
-- `maximum window size`: This parameter defines the maximum length of window to consider. If an anomaly event reaches this size, it will be closed. This is to provide an upper bound on the length of an anomaly event and cost of the anomaly detector logic for that event.
-- `window minimum anomaly rate`: (`0.0`/`1.0`) This parameter corresponds to a threshold on the percentage of time in the rolling window that the host was considered in an anomalous state. For example, the default of `0.25` means that if the host is in an anomalous state for 25% of more of the rolling window then and anomaly event will be triggered or extended if one is already active. Note: If you want to make the anomaly detector itself less sensitive, you can adjust this value to something like `0.75` which would mean the host needs to be much more consistently in an anomalous state to trigger an anomaly detection event. Likewise, a lower value like `0.1` would make the anomaly detector more sensitive.
-- `anomaly event min dimension rate threshold`: (`0.0`/`1.0`) This is a parameter that helps filter out irrelevant dimensions from anomaly events. For example, the default of `0.05` means that only dimensions that were considered anomalous for at least 5% of the anomaly event itself will be included in that anomaly event. The idea here is to just include dimensions that were consistently anomalous as opposed to those that may have just randomly happened to be anomalous at the same time.
+- `host anomaly rate threshold`: (`0.1`/`10.0`) This is the percentage of dimensions (based on all those enabled for anomaly detection) that need to be considered anomalous at a specific timestep for the host itself to be considered anomalous. For example, the default value of `1.0` means that if more than 1% of dimensions are anomalous at the same time then the host itself is considered to be in an anomalous state.
+- `anomaly detection grouping method`: The grouping method used when calculating node level anomaly rate.
+- `anomaly detection grouping duration`: (`60`/`900`) The duration, in seconds, across which to calculate the node level anomaly rate. For example, the default of `300` means that the node level anomaly rate is calculated across a rolling 5 minute window.
- `hosts to skip from training`: This parameter allows you to turn off anomaly detection for any child hosts on a parent host by defining those you would like to skip from training here. For example, a value like `dev-*` skips all hosts on a parent that begin with the "dev-" prefix. The default value of `!*` means "don't skip any".
-- `charts to skip from training`: This parameter allows you to exclude certain charts from anomaly detection. By default, only netdata related charts are excluded. This is to avoid the scenario where accessing the netdata dashboard could itself tigger some anomalies if you don't access them regularly. If you want to include charts that are excluded by default, add them in small groups and then measure any impact on performance before adding additional ones. Example: If you want to include system, apps, and user charts:`!system.* !apps.* !user.* *`.
+- `charts to skip from training`: This parameter allows you to exclude certain charts from anomaly detection. By default, only netdata related charts are excluded. This is to avoid the scenario where accessing the netdata dashboard could itself trigger some anomalies if you don't access them regularly. If you want to include charts that are excluded by default, add them in small groups and then measure any impact on performance before adding additional ones. Example: If you want to include system, apps, and user charts:`!system.* !apps.* !user.* *`.
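
For example, a minimal override of a few of these parameters in the `[ml]` section of `netdata.conf` might look like the following (the values shown are illustrative, not recommendations):

```
[ml]
        enabled = yes
        train every = 3600
        number of models per dimension = 24
        host anomaly rate threshold = 1.0
        charts to skip from training = !system.* !apps.* !user.* *
```
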
## Charts
Once enabled, the "Anomaly Detection" menu and charts will be available on the dashboard.
-![anomaly_detection_menu](https://user-images.githubusercontent.com/2178292/144255721-4568aabf-39c7-4855-bf1c-31b1d60e28e6.png)
+![anomaly_detection_menu](https://user-images.githubusercontent.com/2178292/207584589-2e984786-5e01-404b-a20a-58573884d6df.png)
In terms of anomaly detection, the most interesting charts would be the `anomaly_detection.dimensions` and `anomaly_detection.anomaly_rate` ones, which hold the `anomalous` and `anomaly_rate` dimensions that show the overall number of dimensions considered anomalous at any time and the corresponding anomaly rate.
- `anomaly_detection.dimensions`: Total count of dimensions considered anomalous or normal.
- `anomaly_detection.anomaly_rate`: Percentage of anomalous dimensions.
-- `anomaly_detection.detector_window`: The length of the active window used by the detector.
-- `anomaly_detection.detector_events`: Flags (0 or 1) to show when an anomaly event has been triggered by the detector.
+- `anomaly_detection.anomaly_detection`: Flags (0 or 1) to show when an anomaly event has been triggered by the detector.
Below is an example of how these charts may look in the presence of an anomaly event.
Initially we see a jump in `anomalous` dimensions:
-![anomalous](https://user-images.githubusercontent.com/2178292/144256036-c89fa768-5e5f-4278-9725-c67521c0d95e.png)
+![anomalous](https://user-images.githubusercontent.com/2178292/207589021-c0d2926f-bb55-4c5c-9e32-be1851558fa8.png)
And a corresponding jump in the `anomaly_rate`:
-![anomaly_rate](https://user-images.githubusercontent.com/2178292/144256071-7d157438-31f3-4b23-a795-0fd3b2e2e85c.png)
+![anomaly_rate](https://user-images.githubusercontent.com/2178292/207589172-8853804b-6826-4731-8d06-b9e32d3071af.png)
After a short while the rolling node anomaly rate goes `above_threshold`, and once it stays above threshold for long enough a `new_anomaly_event` is created:
-![anomaly_event](https://user-images.githubusercontent.com/2178292/144256152-910b06ec-26b8-45b4-bcb7-4c2acdf9af15.png)
+![anomaly_event](https://user-images.githubusercontent.com/2178292/207589308-931a3c76-440a-48c1-970e-191743d26607.png)
## Glossary
@@ -307,4 +265,4 @@ The anomaly rate across all dimensions of a node.
- Netdata uses [dlib](https://github.com/davisking/dlib) under the hood for its core ML features.
- You should benchmark Netdata resource usage before and after enabling ML. Typical overhead ranges from 1-2% additional CPU at most.
- The "anomaly bit" has been implemented to be a building block to underpin many more ML based use cases that we plan to deliver soon.
-- At its core Netdata uses an approach and problem formulation very similar to the Netdata python [anomalies collector](https://learn.netdata.cloud/docs/agent/collectors/python.d.plugin/anomalies), just implemented in a much much more efficient and scalable way in the agent in c++. So if you would like to learn more about the approach and are familiar with Python that is a useful resource to explore, as is the corresponding [deep dive tutorial](https://nbviewer.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb) where the default model used is PCA instead of K-Means but the overall approach and formulation is similar.
+- At its core Netdata uses an approach and problem formulation very similar to the Netdata python [anomalies collector](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/anomalies/README.md), just implemented in a much much more efficient and scalable way in the agent in c++. So if you would like to learn more about the approach and are familiar with Python that is a useful resource to explore, as is the corresponding [deep dive tutorial](https://nbviewer.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb) where the default model used is PCA instead of K-Means but the overall approach and formulation is similar.
diff --git a/ml/SamplesBuffer.cc b/ml/SamplesBuffer.cc
index d276c6e09..359b60c23 100644
--- a/ml/SamplesBuffer.cc
+++ b/ml/SamplesBuffer.cc
@@ -54,12 +54,12 @@ void SamplesBuffer::diffSamples() {
void SamplesBuffer::smoothSamples() {
// Holds the mean value of each window
- CalculatedNumber *AccCNs = new CalculatedNumber[NumDimsPerSample]();
- Sample Acc(AccCNs, NumDimsPerSample);
+ CalculatedNumber AccCNs[1] = { 0 };
+ Sample Acc(AccCNs, 1);
// Used to avoid clobbering the accumulator when moving the window
- CalculatedNumber *TmpCNs = new CalculatedNumber[NumDimsPerSample]();
- Sample Tmp(TmpCNs, NumDimsPerSample);
+ CalculatedNumber TmpCNs[1] = { 0 };
+ Sample Tmp(TmpCNs, 1);
CalculatedNumber Factor = (CalculatedNumber) 1 / SmoothN;
@@ -88,9 +88,6 @@ void SamplesBuffer::smoothSamples() {
Acc.copy(Tmp);
Acc.scale(Factor);
}
-
- delete[] AccCNs;
- delete[] TmpCNs;
}
void SamplesBuffer::lagSamples() {
@@ -103,31 +100,30 @@ void SamplesBuffer::lagSamples() {
}
}
-std::vector<DSample> SamplesBuffer::preprocess() {
+void SamplesBuffer::preprocess(std::vector<DSample> &Samples) {
assert(Preprocessed == false);
- std::vector<DSample> DSamples;
size_t OutN = NumSamples;
// Diff
if (DiffN >= OutN)
- return DSamples;
+ return;
OutN -= DiffN;
diffSamples();
// Smooth
if (SmoothN == 0 || SmoothN > OutN)
- return DSamples;
+ return;
OutN -= (SmoothN - 1);
smoothSamples();
// Lag
if (LagN >= OutN)
- return DSamples;
+ return;
OutN -= LagN;
lagSamples();
- DSamples.reserve(OutN);
+ Samples.reserve(OutN);
Preprocessed = true;
 uint32_t MaxMT = std::numeric_limits<uint32_t>::max();
@@ -143,8 +139,45 @@ std::vector SamplesBuffer::preprocess() {
const Sample PS = getPreprocessedSample(Idx);
PS.initDSample(DS);
- DSamples.push_back(DS);
+ Samples.push_back(std::move(DS));
}
+}
+
+void SamplesBuffer::preprocess(DSample &Feature) {
+ assert(Preprocessed == false);
+
+ size_t OutN = NumSamples;
+
+ // Diff
+ if (DiffN >= OutN)
+ return;
+ OutN -= DiffN;
+ diffSamples();
- return DSamples;
+ // Smooth
+ if (SmoothN == 0 || SmoothN > OutN)
+ return;
+ OutN -= (SmoothN - 1);
+ smoothSamples();
+
+ // Lag
+ if (LagN >= OutN)
+ return;
+ OutN -= LagN;
+ lagSamples();
+
+ Preprocessed = true;
+
+ uint32_t MaxMT = std::numeric_limits<uint32_t>::max();
+ uint32_t CutOff = static_cast<double>(MaxMT) * SamplingRatio;
+
+ for (size_t Idx = NumSamples - OutN; Idx != NumSamples; Idx++) {
+ if (RandNums[Idx] > CutOff)
+ continue;
+
+ Feature.set_size(NumDimsPerSample * (LagN + 1));
+
+ const Sample PS = getPreprocessedSample(Idx);
+ PS.initDSample(Feature);
+ }
}
diff --git a/ml/SamplesBuffer.h b/ml/SamplesBuffer.h
index 1c7215cca..ca60f4b91 100644
--- a/ml/SamplesBuffer.h
+++ b/ml/SamplesBuffer.h
@@ -86,9 +86,12 @@ public:
DiffN(DiffN), SmoothN(SmoothN), LagN(LagN),
SamplingRatio(SamplingRatio), RandNums(RandNums),
BytesPerSample(NumDimsPerSample * sizeof(CalculatedNumber)),
- Preprocessed(false) {};
+ Preprocessed(false) {
+ assert(NumDimsPerSample == 1 && "SamplesBuffer supports only one dimension per sample");
+ };
- std::vector<DSample> preprocess();
+ void preprocess(std::vector<DSample> &Samples);
+ void preprocess(DSample &Feature);
 std::vector<Sample> getPreprocessedSamples() const;
size_t capacity() const { return NumSamples; }
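
A hedged sketch of driving the new `preprocess()` training overload, modeled on the removed unit tests below; the values are illustrative, and `CalculatedNumber`, `DSample`, and `SamplesBuffer` come from the ML headers above:

```
// Sketch only: one dimension's collected values pushed through the
// diff -> smooth -> lag pipeline implemented by SamplesBuffer.
#include <limits>
#include <vector>

static void preprocessExample() {
    size_t NumSamples = 8, NumDimsPerSample = 1;
    size_t DiffN = 1, SmoothN = 3, LagN = 3;

    CalculatedNumber CNs[8] = { 0.1, 0.4, 0.2, 0.6, 0.5, 0.3, 0.2, 0.8 };

    // max() random numbers mean no sample is skipped when SamplingRatio is 1.0
    std::vector<uint32_t> RandNums(NumSamples, std::numeric_limits<uint32_t>::max());
    SamplesBuffer SB(CNs, NumSamples, NumDimsPerSample,
                     DiffN, SmoothN, LagN, 1.0, RandNums);

    std::vector<DSample> Samples;
    SB.preprocess(Samples);   // training path: collects every preprocessed sample
}
```
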
diff --git a/ml/SamplesBufferTests.cc b/ml/SamplesBufferTests.cc
deleted file mode 100644
index 5997a2a15..000000000
--- a/ml/SamplesBufferTests.cc
+++ /dev/null
@@ -1,146 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ml/ml-private.h"
-#include "gtest/gtest.h"
-
-/*
- * The SamplesBuffer class implements the functionality of the following python
- * code:
- * >> df = pd.DataFrame(data=samples)
- * >> df = df.diff(diff_n).dropna()
- * >> df = df.rolling(smooth_n).mean().dropna()
- * >> df = pd.concat([df.shift(n) for n in range(lag_n + 1)], axis=1).dropna()
- *
- * Its correctness has been verified by automatically generating random
- * data frames in Python and comparing them with the correspondent preprocessed
- * SampleBuffers.
- *
- * The following tests are meant to catch unintended changes in the SamplesBuffer
- * implementation. For development purposes, one should compare changes against
- * the aforementioned python code.
-*/
-
-TEST(SamplesBufferTest, NS_8_NDPS_1_DN_1_SN_3_LN_1) {
- size_t NumSamples = 8, NumDimsPerSample = 1;
- size_t DiffN = 1, SmoothN = 3, LagN = 3;
-
- size_t N = NumSamples * NumDimsPerSample * (LagN + 1);
- CalculatedNumber *CNs = new CalculatedNumber[N]();
-
- CNs[0] = 0.7568336679490107;
- CNs[1] = 0.4814406581763254;
- CNs[2] = 0.40073555156221874;
- CNs[3] = 0.5973257298194408;
- CNs[4] = 0.5334727814345868;
- CNs[5] = 0.2632477193454843;
- CNs[6] = 0.2684839023122384;
- CNs[7] = 0.851332948637479;
-
- std::vector<uint32_t> RandNums(NumSamples, std::numeric_limits<uint32_t>::max());
- SamplesBuffer SB(CNs, NumSamples, NumDimsPerSample, DiffN, SmoothN, LagN, 1.0, RandNums);
- SB.preprocess();
-
- std::vector<Sample> Samples = SB.getPreprocessedSamples();
- EXPECT_EQ(Samples.size(), 2);
-
- Sample S0 = Samples[0];
- const CalculatedNumber *S0_CNs = S0.getCalculatedNumbers();
- Sample S1 = Samples[1];
- const CalculatedNumber *S1_CNs = S1.getCalculatedNumbers();
-
- EXPECT_NEAR(S0_CNs[0], -0.109614, 0.001);
- EXPECT_NEAR(S0_CNs[1], -0.0458293, 0.001);
- EXPECT_NEAR(S0_CNs[2], 0.017344, 0.001);
- EXPECT_NEAR(S0_CNs[3], -0.0531693, 0.001);
-
- EXPECT_NEAR(S1_CNs[0], 0.105953, 0.001);
- EXPECT_NEAR(S1_CNs[1], -0.109614, 0.001);
- EXPECT_NEAR(S1_CNs[2], -0.0458293, 0.001);
- EXPECT_NEAR(S1_CNs[3], 0.017344, 0.001);
-
- delete[] CNs;
-}
-
-TEST(SamplesBufferTest, NS_8_NDPS_1_DN_2_SN_3_LN_2) {
- size_t NumSamples = 8, NumDimsPerSample = 1;
- size_t DiffN = 2, SmoothN = 3, LagN = 2;
-
- size_t N = NumSamples * NumDimsPerSample * (LagN + 1);
- CalculatedNumber *CNs = new CalculatedNumber[N]();
-
- CNs[0] = 0.20511885291342846;
- CNs[1] = 0.13151717360306558;
- CNs[2] = 0.6017085062423134;
- CNs[3] = 0.46256882933941545;
- CNs[4] = 0.7887758447877941;
- CNs[5] = 0.9237989080034406;
- CNs[6] = 0.15552559051428083;
- CNs[7] = 0.6309750314597955;
-
- std::vector<uint32_t> RandNums(NumSamples, std::numeric_limits<uint32_t>::max());
- SamplesBuffer SB(CNs, NumSamples, NumDimsPerSample, DiffN, SmoothN, LagN, 1.0, RandNums);
- SB.preprocess();
-
- std::vector<Sample> Samples = SB.getPreprocessedSamples();
- EXPECT_EQ(Samples.size(), 2);
-
- Sample S0 = Samples[0];
- const CalculatedNumber *S0_CNs = S0.getCalculatedNumbers();
- Sample S1 = Samples[1];
- const CalculatedNumber *S1_CNs = S1.getCalculatedNumbers();
-
- EXPECT_NEAR(S0_CNs[0], 0.005016, 0.001);
- EXPECT_NEAR(S0_CNs[1], 0.326450, 0.001);
- EXPECT_NEAR(S0_CNs[2], 0.304903, 0.001);
-
- EXPECT_NEAR(S1_CNs[0], -0.154948, 0.001);
- EXPECT_NEAR(S1_CNs[1], 0.005016, 0.001);
- EXPECT_NEAR(S1_CNs[2], 0.326450, 0.001);
-
- delete[] CNs;
-}
-
-TEST(SamplesBufferTest, NS_8_NDPS_3_DN_2_SN_4_LN_1) {
- size_t NumSamples = 8, NumDimsPerSample = 3;
- size_t DiffN = 2, SmoothN = 4, LagN = 1;
-
- size_t N = NumSamples * NumDimsPerSample * (LagN + 1);
- CalculatedNumber *CNs = new CalculatedNumber[N]();
-
- CNs[0] = 0.34310900399667765; CNs[1] = 0.14694315994488194; CNs[2] = 0.8246677800938796;
- CNs[3] = 0.48249504592307835; CNs[4] = 0.23241087965531182; CNs[5] = 0.9595348555892567;
- CNs[6] = 0.44281094035598334; CNs[7] = 0.5143142171362715; CNs[8] = 0.06391303014242555;
- CNs[9] = 0.7460491027783901; CNs[10] = 0.43887217459032923; CNs[11] = 0.2814395025355999;
- CNs[12] = 0.9231114281214198; CNs[13] = 0.326882401786898; CNs[14] = 0.26747939220376216;
- CNs[15] = 0.7787571209969636; CNs[16] =0.5851700001235088; CNs[17] = 0.34410728945321567;
- CNs[18] = 0.9394494507088997; CNs[19] =0.17567223681734334; CNs[20] = 0.42732886195446984;
- CNs[21] = 0.9460522396152958; CNs[22] =0.23462747016780894; CNs[23] = 0.35983249900892145;
-
- std::vector<uint32_t> RandNums(NumSamples, std::numeric_limits<uint32_t>::max());
- SamplesBuffer SB(CNs, NumSamples, NumDimsPerSample, DiffN, SmoothN, LagN, 1.0, RandNums);
- SB.preprocess();
-
- std::vector<Sample> Samples = SB.getPreprocessedSamples();
- EXPECT_EQ(Samples.size(), 2);
-
- Sample S0 = Samples[0];
- const CalculatedNumber *S0_CNs = S0.getCalculatedNumbers();
- Sample S1 = Samples[1];
- const CalculatedNumber *S1_CNs = S1.getCalculatedNumbers();
-
- EXPECT_NEAR(S0_CNs[0], 0.198225, 0.001);
- EXPECT_NEAR(S0_CNs[1], 0.003529, 0.001);
- EXPECT_NEAR(S0_CNs[2], -0.063003, 0.001);
- EXPECT_NEAR(S0_CNs[3], 0.219066, 0.001);
- EXPECT_NEAR(S0_CNs[4], 0.133175, 0.001);
- EXPECT_NEAR(S0_CNs[5], -0.293154, 0.001);
-
- EXPECT_NEAR(S1_CNs[0], 0.174160, 0.001);
- EXPECT_NEAR(S1_CNs[1], -0.135722, 0.001);
- EXPECT_NEAR(S1_CNs[2], 0.110452, 0.001);
- EXPECT_NEAR(S1_CNs[3], 0.198225, 0.001);
- EXPECT_NEAR(S1_CNs[4], 0.003529, 0.001);
- EXPECT_NEAR(S1_CNs[5], -0.063003, 0.001);
-
- delete[] CNs;
-}
diff --git a/ml/Stats.h b/ml/Stats.h
new file mode 100644
index 000000000..b99bc39da
--- /dev/null
+++ b/ml/Stats.h
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef ML_STATS_H
+#define ML_STATS_H
+
+#include "ml-private.h"
+
+namespace ml {
+
+struct MachineLearningStats {
+ size_t NumMachineLearningStatusEnabled;
+ size_t NumMachineLearningStatusDisabledUE;
+ size_t NumMachineLearningStatusDisabledSP;
+
+ size_t NumMetricTypeConstant;
+ size_t NumMetricTypeVariable;
+
+ size_t NumTrainingStatusUntrained;
+ size_t NumTrainingStatusPendingWithoutModel;
+ size_t NumTrainingStatusTrained;
+ size_t NumTrainingStatusPendingWithModel;
+
+ size_t NumAnomalousDimensions;
+ size_t NumNormalDimensions;
+};
+
+struct TrainingStats {
+ struct rusage TrainingRU;
+
+ size_t QueueSize;
+ size_t NumPoppedItems;
+
+ usec_t AllottedUT;
+ usec_t ConsumedUT;
+ usec_t RemainingUT;
+
+ size_t TrainingResultOk;
+ size_t TrainingResultInvalidQueryTimeRange;
+ size_t TrainingResultNotEnoughCollectedValues;
+ size_t TrainingResultNullAcquiredDimension;
+ size_t TrainingResultChartUnderReplication;
+};
+
+} // namespace ml
+
+#endif /* ML_STATS_H */
diff --git a/ml/ml-dummy.c b/ml/ml-dummy.c
index 492dfe2fc..178018898 100644
--- a/ml/ml-dummy.c
+++ b/ml/ml-dummy.c
@@ -15,9 +15,37 @@ bool ml_enabled(RRDHOST *RH) {
void ml_init(void) {}
-void ml_new_host(RRDHOST *RH) { (void) RH; }
+void ml_host_new(RRDHOST *RH) {
+ UNUSED(RH);
+}
+
+void ml_host_delete(RRDHOST *RH) {
+ UNUSED(RH);
+}
+
+void ml_chart_new(RRDSET *RS) {
+ UNUSED(RS);
+}
+
+void ml_chart_delete(RRDSET *RS) {
+ UNUSED(RS);
+}
+
+void ml_dimension_new(RRDDIM *RD) {
+ UNUSED(RD);
+}
+
+void ml_dimension_delete(RRDDIM *RD) {
+ UNUSED(RD);
+}
-void ml_delete_host(RRDHOST *RH) { (void) RH; }
+void ml_start_anomaly_detection_threads(RRDHOST *RH) {
+ UNUSED(RH);
+}
+
+void ml_stop_anomaly_detection_threads(RRDHOST *RH) {
+ UNUSED(RH);
+}
char *ml_get_host_info(RRDHOST *RH) {
(void) RH;
@@ -29,17 +57,24 @@ char *ml_get_host_runtime_info(RRDHOST *RH) {
return NULL;
}
+void ml_chart_update_begin(RRDSET *RS) {
+ (void) RS;
+}
+
+void ml_chart_update_end(RRDSET *RS) {
+ (void) RS;
+}
+
char *ml_get_host_models(RRDHOST *RH) {
(void) RH;
return NULL;
}
-void ml_new_dimension(RRDDIM *RD) { (void) RD; }
-
-void ml_delete_dimension(RRDDIM *RD) { (void) RD; }
-
-bool ml_is_anomalous(RRDDIM *RD, double Value, bool Exists) {
- (void) RD; (void) Value; (void) Exists;
+bool ml_is_anomalous(RRDDIM *RD, time_t CurrT, double Value, bool Exists) {
+ (void) RD;
+ (void) CurrT;
+ (void) Value;
+ (void) Exists;
return false;
}
diff --git a/ml/ml-private.h b/ml/ml-private.h
index 2bd72ac5a..e479f2351 100644
--- a/ml/ml-private.h
+++ b/ml/ml-private.h
@@ -6,21 +6,8 @@
#include "KMeans.h"
#include "ml/ml.h"
-#include <chrono>
#include
#include
#include
-namespace ml {
-
-using SteadyClock = std::chrono::steady_clock;
-using TimePoint = std::chrono::time_point;
-
-template
-using Duration = std::chrono::duration;
-
-using Seconds = std::chrono::seconds;
-
-} // namespace ml
-
#endif /* ML_PRIVATE_H */
diff --git a/ml/ml.cc b/ml/ml.cc
index 1a7d6ae25..461c83baa 100644
--- a/ml/ml.cc
+++ b/ml/ml.cc
@@ -2,6 +2,7 @@
#include "Config.h"
#include "Dimension.h"
+#include "Chart.h"
#include "Host.h"
 #include <random>
@@ -45,56 +46,65 @@ void ml_init(void) {
Cfg.RandomNums.push_back(Gen());
}
-void ml_new_host(RRDHOST *RH) {
+void ml_host_new(RRDHOST *RH) {
if (!ml_enabled(RH))
return;
Host *H = new Host(RH);
- RH->ml_host = static_cast(H);
-
- H->startAnomalyDetectionThreads();
+ RH->ml_host = reinterpret_cast(H);
}
-void ml_delete_host(RRDHOST *RH) {
- Host *H = static_cast<Host *>(RH->ml_host);
+void ml_host_delete(RRDHOST *RH) {
+ Host *H = reinterpret_cast<Host *>(RH->ml_host);
if (!H)
return;
- H->stopAnomalyDetectionThreads();
-
delete H;
RH->ml_host = nullptr;
}
-void ml_new_dimension(RRDDIM *RD) {
- RRDSET *RS = RD->rrdset;
-
- Host *H = static_cast<Host *>(RD->rrdset->rrdhost->ml_host);
+void ml_chart_new(RRDSET *RS) {
+ Host *H = reinterpret_cast<Host *>(RS->rrdhost->ml_host);
if (!H)
return;
- if (static_cast<unsigned>(RD->update_every) != H->updateEvery())
+ Chart *C = new Chart(RS);
+ RS->ml_chart = reinterpret_cast(C);
+
+ H->addChart(C);
+}
+
+void ml_chart_delete(RRDSET *RS) {
+ Host *H = reinterpret_cast