path: root/lib/malloc
Diffstat (limited to 'lib/malloc')
-rw-r--r--  lib/malloc/Makefile.in      138
-rw-r--r--  lib/malloc/alloca.c         482
-rw-r--r--  lib/malloc/getpagesize.h     60
-rw-r--r--  lib/malloc/i386-alloca.s     16
-rw-r--r--  lib/malloc/imalloc.h        173
-rw-r--r--  lib/malloc/malloc.c        1481
-rw-r--r--  lib/malloc/mstats.h         114
-rw-r--r--  lib/malloc/shmalloc.h        70
-rw-r--r--  lib/malloc/stats.c          213
-rw-r--r--  lib/malloc/stub.c            22
-rw-r--r--  lib/malloc/table.c          429
-rw-r--r--  lib/malloc/table.h          116
-rw-r--r--  lib/malloc/trace.c          126
-rw-r--r--  lib/malloc/watch.c          151
-rw-r--r--  lib/malloc/watch.h           41
-rw-r--r--  lib/malloc/x386-alloca.s     63
-rwxr-xr-x  lib/malloc/xleaktrace        47
-rw-r--r--  lib/malloc/xmalloc.c         94
18 files changed, 3836 insertions(+), 0 deletions(-)
diff --git a/lib/malloc/Makefile.in b/lib/malloc/Makefile.in
new file mode 100644
index 0000000..0ef3cfd
--- /dev/null
+++ b/lib/malloc/Makefile.in
@@ -0,0 +1,138 @@
+# Skeleton Makefile for the GNU malloc code
+#
+# Copyright (C) 1996-2009 Free Software Foundation, Inc.
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+srcdir = @srcdir@
+VPATH = @srcdir@
+topdir = @top_srcdir@
+BUILD_DIR = @BUILD_DIR@
+
+INSTALL = @INSTALL@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_DATA = @INSTALL_DATA@
+
+CC = @CC@
+RANLIB = @RANLIB@
+AR = @AR@
+ARFLAGS = @ARFLAGS@
+RM = rm -f
+CP = cp
+MV = mv
+
+SHELL = @MAKE_SHELL@
+
+PROFILE_FLAGS = @PROFILE_FLAGS@
+
+CFLAGS = @CFLAGS@
+LOCAL_CFLAGS = @LOCAL_CFLAGS@
+CPPFLAGS = @CPPFLAGS@
+LDFLAGS = @LDFLAGS@
+
+DEFS = @DEFS@
+LOCAL_DEFS = @LOCAL_DEFS@
+
+LIBBUILD = ${BUILD_DIR}/lib
+
+BASHINCDIR = ${topdir}/include
+
+INTL_LIBSRC = ${topdir}/lib/intl
+INTL_BUILDDIR = ${LIBBUILD}/intl
+INTL_INC = @INTL_INC@
+LIBINTL_H = @LIBINTL_H@
+
+INCLUDES = -I. -I../.. -I$(topdir) -I$(BASHINCDIR) -I$(topdir)/lib $(INTL_INC)
+
+CCFLAGS = ${PROFILE_FLAGS} ${INCLUDES} $(DEFS) $(LOCAL_DEFS) $(LOCAL_CFLAGS) \
+ $(CFLAGS) $(MALLOC_CFLAGS) $(CPPFLAGS)
+
+.c.o:
+ $(CC) $(CCFLAGS) -c $<
+
+.s.o:
+ $(CC) $(CCFLAGS) -c $<
+
+MALLOC_SOURCE = malloc.c
+STUB_SOURCE = stub.c
+
+ALLOCA_SOURCE = alloca.c
+ALLOCA_OBJECT = alloca.o
+
+MALLOC_SRC = @MALLOC_SRC@
+MALLOC = @MALLOC@
+ALLOCA = @ALLOCA@
+
+MALLOC_OBJS = malloc.o $(ALLOCA) trace.o stats.o table.o watch.o
+STUB_OBJS = $(ALLOCA) stub.o
+
+.PHONY: malloc stubmalloc
+
+all: malloc
+
+malloc: ${MALLOC_OBJS}
+ ${RM} libmalloc.a
+ ${AR} ${ARFLAGS} libmalloc.a ${MALLOC_OBJS}
+ -test -n "$(RANLIB)" && $(RANLIB) libmalloc.a
+
+stubmalloc: ${STUB_OBJS}
+ ${RM} libmalloc.a
+ ${AR} ${ARFLAGS} libmalloc.a ${STUB_OBJS}
+ -test -n "$(RANLIB)" && $(RANLIB) libmalloc.a
+
+alloca: ${ALLOCA}
+ ${RM} libmalloc.a
+ ${AR} ${ARFLAGS} libmalloc.a ${ALLOCA}
+ -test -n "$(RANLIB)" && $(RANLIB) libmalloc.a
+
+alloca.o: $(srcdir)/$(ALLOCA_SOURCE)
+ $(CC) $(CCFLAGS) -c $(srcdir)/$(ALLOCA_SOURCE)
+ @- if test "$(ALLOCA_OBJECT)" != alloca.o ; then \
+ mv $(ALLOCA_OBJECT) alloca.o >/dev/null 2>&1 ; \
+ fi
+
+mostlyclean clean:
+ $(RM) *.o libmalloc.a
+
+distclean realclean maintainer-clean: clean
+ $(RM) Makefile
+
+alloca.o: $(BUILD_DIR)/config.h
+malloc.o: $(BUILD_DIR)/config.h $(topdir)/bashtypes.h getpagesize.h
+xmalloc.o: $(BUILD_DIR)/config.h $(BASHINCDIR)/ansi_stdlib.h
+trace.o: ${BUILD_DIR}/config.h
+stats.o: ${BUILD_DIR}/config.h
+table.o: ${BUILD_DIR}/config.h
+watch.o: ${BUILD_DIR}/config.h
+
+malloc.o: ${srcdir}/imalloc.h ${srcdir}/mstats.h
+malloc.o: ${srcdir}/table.h ${srcdir}/watch.h
+stats.o: ${srcdir}/imalloc.h ${srcdir}/mstats.h
+trace.o: ${srcdir}/imalloc.h
+table.o: ${srcdir}/imalloc.h ${srcdir}/table.h
+watch.o: ${srcdir}/imalloc.h ${srcdir}/watch.h
+
+malloc.o: ${topdir}/bashintl.h ${LIBINTL_H} ${BASHINCDIR}/gettext.h
+stats.o: ${topdir}/bashintl.h ${LIBINTL_H} ${BASHINCDIR}/gettext.h
+trace.o: ${topdir}/bashintl.h ${LIBINTL_H} ${BASHINCDIR}/gettext.h
+table.o: ${topdir}/bashintl.h ${LIBINTL_H} ${BASHINCDIR}/gettext.h
+watch.o: ${topdir}/bashintl.h ${LIBINTL_H} ${BASHINCDIR}/gettext.h
+
+# Rules for deficient makes, like SunOS and Solaris
+stub.o: stub.c
+malloc.o: malloc.c
+table.o: table.c
+trace.o: trace.c
+stats.o: stats.c
+watch.o: watch.c
diff --git a/lib/malloc/alloca.c b/lib/malloc/alloca.c
new file mode 100644
index 0000000..26319c2
--- /dev/null
+++ b/lib/malloc/alloca.c
@@ -0,0 +1,482 @@
+/* alloca.c -- allocate automatically reclaimed memory
+ (Mostly) portable public-domain implementation -- D A Gwyn
+
+ This implementation of the PWB library alloca function,
+ which is used to allocate space off the run-time stack so
+ that it is automatically reclaimed upon procedure exit,
+ was inspired by discussions with J. Q. Johnson of Cornell.
+ J.Otto Tennant <jot@cray.com> contributed the Cray support.
+
+ There are some preprocessor constants that can
+ be defined when compiling for your specific system, for
+ improved efficiency; however, the defaults should be okay.
+
+ The general concept of this implementation is to keep
+ track of all alloca-allocated blocks, and reclaim any
+ that are found to be deeper in the stack than the current
+ invocation. This heuristic does not reclaim storage as
+ soon as it becomes invalid, but it will do so eventually.
+
+ As a special case, alloca(0) reclaims storage without
+ allocating any. It is a good idea to use alloca(0) in
+ your main control loop, etc. to force garbage collection. */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+/* If compiling with GCC 2, this file's not needed. */
+#if !defined (__GNUC__) || __GNUC__ < 2
+
+#include <bashtypes.h> /* for size_t */
+
+/* If alloca is defined somewhere, this file is not needed. */
+#ifndef alloca
+
+#ifdef emacs
+#ifdef static
+/* actually, only want this if static is defined as ""
+ -- this is for usg, in which emacs must undefine static
+ in order to make unexec workable
+ */
+#ifndef STACK_DIRECTION
+you
+lose
+-- must know STACK_DIRECTION at compile-time
+#endif /* STACK_DIRECTION undefined */
+#endif /* static */
+#endif /* emacs */
+
+/* If your stack is a linked list of frames, you have to
+ provide an "address metric" ADDRESS_FUNCTION macro. */
+
+#if defined (CRAY) && defined (CRAY_STACKSEG_END)
+long i00afunc ();
+#define ADDRESS_FUNCTION(arg) (char *) i00afunc (&(arg))
+#else
+#define ADDRESS_FUNCTION(arg) &(arg)
+#endif /* CRAY && CRAY_STACKSEG_END */
+
+#if __STDC__
+typedef void *pointer;
+#else
+typedef char *pointer;
+#endif
+
+#define NULL 0
+
+/* Different portions of Emacs need to call different versions of
+ malloc. The Emacs executable needs alloca to call xmalloc, because
+ ordinary malloc isn't protected from input signals. On the other
+ hand, the utilities in lib-src need alloca to call malloc; some of
+ them are very simple, and don't have an xmalloc routine.
+
+   Non-Emacs programs expect this to call xmalloc.
+
+ Callers below should use malloc. */
+
+#ifndef emacs
+#define malloc xmalloc
+extern pointer xmalloc ();
+#endif
+
+/* Define STACK_DIRECTION if you know the direction of stack
+ growth for your system; otherwise it will be automatically
+ deduced at run-time.
+
+ STACK_DIRECTION > 0 => grows toward higher addresses
+ STACK_DIRECTION < 0 => grows toward lower addresses
+ STACK_DIRECTION = 0 => direction of growth unknown */
+
+#ifndef STACK_DIRECTION
+#define STACK_DIRECTION 0 /* Direction unknown. */
+#endif
+
+#if STACK_DIRECTION != 0
+
+#define STACK_DIR STACK_DIRECTION /* Known at compile-time. */
+
+#else /* STACK_DIRECTION == 0; need run-time code. */
+
+static int stack_dir; /* 1 or -1 once known. */
+#define STACK_DIR stack_dir
+
+static void
+find_stack_direction ()
+{
+ static char *addr = NULL; /* Address of first `dummy', once known. */
+ auto char dummy; /* To get stack address. */
+
+ if (addr == NULL)
+ { /* Initial entry. */
+ addr = ADDRESS_FUNCTION (dummy);
+
+ find_stack_direction (); /* Recurse once. */
+ }
+ else
+ {
+ /* Second entry. */
+ if (ADDRESS_FUNCTION (dummy) > addr)
+ stack_dir = 1; /* Stack grew upward. */
+ else
+ stack_dir = -1; /* Stack grew downward. */
+ }
+}
+
+#endif /* STACK_DIRECTION == 0 */
+
+/* An "alloca header" is used to:
+ (a) chain together all alloca'ed blocks;
+ (b) keep track of stack depth.
+
+ It is very important that sizeof(header) agree with malloc
+ alignment chunk size. The following default should work okay. */
+
+#ifndef ALIGN_SIZE
+#define ALIGN_SIZE sizeof(double)
+#endif
+
+typedef union hdr
+{
+ char align[ALIGN_SIZE]; /* To force sizeof(header). */
+ struct
+ {
+ union hdr *next; /* For chaining headers. */
+ char *deep; /* For stack depth measure. */
+ } h;
+} header;
+
+static header *last_alloca_header = NULL; /* -> last alloca header. */
+
+/* Return a pointer to at least SIZE bytes of storage,
+ which will be automatically reclaimed upon exit from
+ the procedure that called alloca. Originally, this space
+ was supposed to be taken from the current stack frame of the
+ caller, but that method cannot be made to work for some
+ implementations of C, for example under Gould's UTX/32. */
+
+pointer
+alloca (size)
+ size_t size;
+{
+ auto char probe; /* Probes stack depth: */
+ register char *depth = ADDRESS_FUNCTION (probe);
+
+#if STACK_DIRECTION == 0
+ if (STACK_DIR == 0) /* Unknown growth direction. */
+ find_stack_direction ();
+#endif
+
+ /* Reclaim garbage, defined as all alloca'd storage that
+ was allocated from deeper in the stack than currently. */
+
+ {
+ register header *hp; /* Traverses linked list. */
+
+ for (hp = last_alloca_header; hp != NULL;)
+ if ((STACK_DIR > 0 && hp->h.deep > depth)
+ || (STACK_DIR < 0 && hp->h.deep < depth))
+ {
+ register header *np = hp->h.next;
+
+ free ((pointer) hp); /* Collect garbage. */
+
+ hp = np; /* -> next header. */
+ }
+ else
+ break; /* Rest are not deeper. */
+
+ last_alloca_header = hp; /* -> last valid storage. */
+ }
+
+ if (size == 0)
+ return NULL; /* No allocation required. */
+
+ /* Allocate combined header + user data storage. */
+
+ {
+ register pointer new = malloc (sizeof (header) + size);
+ /* Address of header. */
+
+ ((header *) new)->h.next = last_alloca_header;
+ ((header *) new)->h.deep = depth;
+
+ last_alloca_header = (header *) new;
+
+ /* User storage begins just after header. */
+
+ return (pointer) ((char *) new + sizeof (header));
+ }
+}
+
+#if defined (CRAY) && defined (CRAY_STACKSEG_END)
+
+#ifdef DEBUG_I00AFUNC
+#include <stdio.h>
+#endif
+
+#ifndef CRAY_STACK
+#define CRAY_STACK
+#ifndef CRAY2
+/* Stack structures for CRAY-1, CRAY X-MP, and CRAY Y-MP */
+struct stack_control_header
+ {
+ long shgrow:32; /* Number of times stack has grown. */
+ long shaseg:32; /* Size of increments to stack. */
+ long shhwm:32; /* High water mark of stack. */
+ long shsize:32; /* Current size of stack (all segments). */
+ };
+
+/* The stack segment linkage control information occurs at
+ the high-address end of a stack segment. (The stack
+ grows from low addresses to high addresses.) The initial
+ part of the stack segment linkage control information is
+ 0200 (octal) words. This provides for register storage
+ for the routine which overflows the stack. */
+
+struct stack_segment_linkage
+ {
+ long ss[0200]; /* 0200 overflow words. */
+ long sssize:32; /* Number of words in this segment. */
+ long ssbase:32; /* Offset to stack base. */
+ long:32;
+ long sspseg:32; /* Offset to linkage control of previous
+ segment of stack. */
+ long:32;
+ long sstcpt:32; /* Pointer to task common address block. */
+ long sscsnm; /* Private control structure number for
+ microtasking. */
+ long ssusr1; /* Reserved for user. */
+ long ssusr2; /* Reserved for user. */
+ long sstpid; /* Process ID for pid based multi-tasking. */
+ long ssgvup; /* Pointer to multitasking thread giveup. */
+ long sscray[7]; /* Reserved for Cray Research. */
+ long ssa0;
+ long ssa1;
+ long ssa2;
+ long ssa3;
+ long ssa4;
+ long ssa5;
+ long ssa6;
+ long ssa7;
+ long sss0;
+ long sss1;
+ long sss2;
+ long sss3;
+ long sss4;
+ long sss5;
+ long sss6;
+ long sss7;
+ };
+
+#else /* CRAY2 */
+/* The following structure defines the vector of words
+ returned by the STKSTAT library routine. */
+struct stk_stat
+ {
+ long now; /* Current total stack size. */
+ long maxc; /* Amount of contiguous space which would
+ be required to satisfy the maximum
+ stack demand to date. */
+ long high_water; /* Stack high-water mark. */
+ long overflows; /* Number of stack overflow ($STKOFEN) calls. */
+ long hits; /* Number of internal buffer hits. */
+ long extends; /* Number of block extensions. */
+ long stko_mallocs; /* Block allocations by $STKOFEN. */
+ long underflows; /* Number of stack underflow calls ($STKRETN). */
+ long stko_free; /* Number of deallocations by $STKRETN. */
+ long stkm_free; /* Number of deallocations by $STKMRET. */
+ long segments; /* Current number of stack segments. */
+ long maxs; /* Maximum number of stack segments so far. */
+ long pad_size; /* Stack pad size. */
+ long current_address; /* Current stack segment address. */
+ long current_size; /* Current stack segment size. This
+ number is actually corrupted by STKSTAT to
+ include the fifteen word trailer area. */
+ long initial_address; /* Address of initial segment. */
+ long initial_size; /* Size of initial segment. */
+ };
+
+/* The following structure describes the data structure which trails
+ any stack segment. I think that the description in 'asdef' is
+ out of date. I only describe the parts that I am sure about. */
+
+struct stk_trailer
+ {
+ long this_address; /* Address of this block. */
+ long this_size; /* Size of this block (does not include
+ this trailer). */
+ long unknown2;
+ long unknown3;
+ long link; /* Address of trailer block of previous
+ segment. */
+ long unknown5;
+ long unknown6;
+ long unknown7;
+ long unknown8;
+ long unknown9;
+ long unknown10;
+ long unknown11;
+ long unknown12;
+ long unknown13;
+ long unknown14;
+ };
+
+#endif /* CRAY2 */
+#endif /* not CRAY_STACK */
+
+#ifdef CRAY2
+/* Determine a "stack measure" for an arbitrary ADDRESS.
+ I doubt that "lint" will like this much. */
+
+static long
+i00afunc (long *address)
+{
+ struct stk_stat status;
+ struct stk_trailer *trailer;
+ long *block, size;
+ long result = 0;
+
+ /* We want to iterate through all of the segments. The first
+ step is to get the stack status structure. We could do this
+ more quickly and more directly, perhaps, by referencing the
+ $LM00 common block, but I know that this works. */
+
+ STKSTAT (&status);
+
+ /* Set up the iteration. */
+
+ trailer = (struct stk_trailer *) (status.current_address
+ + status.current_size
+ - 15);
+
+ /* There must be at least one stack segment. Therefore it is
+ a fatal error if "trailer" is null. */
+
+ if (trailer == 0)
+ abort ();
+
+ /* Discard segments that do not contain our argument address. */
+
+ while (trailer != 0)
+ {
+ block = (long *) trailer->this_address;
+ size = trailer->this_size;
+ if (block == 0 || size == 0)
+ abort ();
+ trailer = (struct stk_trailer *) trailer->link;
+ if ((block <= address) && (address < (block + size)))
+ break;
+ }
+
+ /* Set the result to the offset in this segment and add the sizes
+ of all predecessor segments. */
+
+ result = address - block;
+
+ if (trailer == 0)
+ {
+ return result;
+ }
+
+ do
+ {
+ if (trailer->this_size <= 0)
+ abort ();
+ result += trailer->this_size;
+ trailer = (struct stk_trailer *) trailer->link;
+ }
+ while (trailer != 0);
+
+ /* We are done. Note that if you present a bogus address (one
+ not in any segment), you will get a different number back, formed
+ from subtracting the address of the first block. This is probably
+ not what you want. */
+
+ return (result);
+}
+
+#else /* not CRAY2 */
+/* Stack address function for a CRAY-1, CRAY X-MP, or CRAY Y-MP.
+ Determine the number of the cell within the stack,
+ given the address of the cell. The purpose of this
+ routine is to linearize, in some sense, stack addresses
+ for alloca. */
+
+static long
+i00afunc (long address)
+{
+ long stkl = 0;
+
+ long size, pseg, this_segment, stack;
+ long result = 0;
+
+ struct stack_segment_linkage *ssptr;
+
+ /* Register B67 contains the address of the end of the
+ current stack segment. If you (as a subprogram) store
+ your registers on the stack and find that you are past
+ the contents of B67, you have overflowed the segment.
+
+ B67 also points to the stack segment linkage control
+ area, which is what we are really interested in. */
+
+ /* This might be _getb67() or GETB67 () or getb67 () */
+ stkl = CRAY_STACKSEG_END ();
+ ssptr = (struct stack_segment_linkage *) stkl;
+
+ /* If one subtracts 'size' from the end of the segment,
+ one has the address of the first word of the segment.
+
+ If this is not the first segment, 'pseg' will be
+ nonzero. */
+
+ pseg = ssptr->sspseg;
+ size = ssptr->sssize;
+
+ this_segment = stkl - size;
+
+ /* It is possible that calling this routine itself caused
+ a stack overflow. Discard stack segments which do not
+ contain the target address. */
+
+ while (!(this_segment <= address && address <= stkl))
+ {
+#ifdef DEBUG_I00AFUNC
+ fprintf (stderr, "%011o %011o %011o\n", this_segment, address, stkl);
+#endif
+ if (pseg == 0)
+ break;
+ stkl = stkl - pseg;
+ ssptr = (struct stack_segment_linkage *) stkl;
+ size = ssptr->sssize;
+ pseg = ssptr->sspseg;
+ this_segment = stkl - size;
+ }
+
+ result = address - this_segment;
+
+ /* If you subtract pseg from the current end of the stack,
+ you get the address of the previous stack segment's end.
+ This seems a little convoluted to me, but I'll bet you save
+ a cycle somewhere. */
+
+ while (pseg != 0)
+ {
+#ifdef DEBUG_I00AFUNC
+ fprintf (stderr, "%011o %011o\n", pseg, size);
+#endif
+ stkl = stkl - pseg;
+ ssptr = (struct stack_segment_linkage *) stkl;
+ size = ssptr->sssize;
+ pseg = ssptr->sspseg;
+ result += size;
+ }
+ return (result);
+}
+
+#endif /* not CRAY2 */
+#endif /* CRAY && CRAY_STACKSEG_END */
+
+#endif /* no alloca */
+#endif /* !__GNUC__ || __GNUC__ < 2 */
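
The header comment above describes the reclamation heuristic: a block is only freed once a later alloca call finds it deeper in the stack, so long-running loops should call alloca (0) explicitly. A minimal usage sketch of that pattern (illustration only; it assumes this fallback implementation, or any alloca, is linked in):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    extern void *alloca ();          /* the fallback above, or a system alloca */

    static void
    shout (const char *word)
    {
      char *tmp = alloca (strlen (word) + 1);   /* scratch copy */
      strcpy (tmp, word);
      printf ("%s!\n", tmp);
    }                                /* tmp is reclaimed lazily, not on return */

    int
    main (void)
    {
      const char *words[] = { "one", "two", "three" };
      int i;

      for (i = 0; i < 3; i++)
        {
          shout (words[i]);
          alloca (0);                /* force collection of deeper allocations */
        }
      return 0;
    }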
diff --git a/lib/malloc/getpagesize.h b/lib/malloc/getpagesize.h
new file mode 100644
index 0000000..a59eabe
--- /dev/null
+++ b/lib/malloc/getpagesize.h
@@ -0,0 +1,60 @@
+/* Emulation of getpagesize() for systems that need it.
+ Copyright (C) 1991-2003 Free Software Foundation, Inc.
+
+ This file is part of GNU Bash, the Bourne-Again SHell.
+
+ Bash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Bash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Bash. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#if defined (HAVE_UNISTD_H)
+# ifdef _MINIX
+# include <sys/types.h>
+# endif
+# include <unistd.h>
+# if defined (_SC_PAGESIZE)
+# define getpagesize() sysconf(_SC_PAGESIZE)
+# else
+# if defined (_SC_PAGE_SIZE)
+# define getpagesize() sysconf(_SC_PAGE_SIZE)
+# endif /* _SC_PAGE_SIZE */
+# endif /* _SC_PAGESIZE */
+#endif
+
+#if !defined (getpagesize)
+# if defined (HAVE_SYS_PARAM_H)
+# include <sys/param.h>
+# endif
+# if defined (PAGESIZE)
+# define getpagesize() PAGESIZE
+# else /* !PAGESIZE */
+# if defined (EXEC_PAGESIZE)
+# define getpagesize() EXEC_PAGESIZE
+# else /* !EXEC_PAGESIZE */
+# if defined (NBPG)
+# if !defined (CLSIZE)
+# define CLSIZE 1
+# endif /* !CLSIZE */
+# define getpagesize() (NBPG * CLSIZE)
+# else /* !NBPG */
+# if defined (NBPC)
+# define getpagesize() NBPC
+# endif /* NBPC */
+# endif /* !NBPG */
+# endif /* !EXEC_PAGESIZE */
+# endif /* !PAGESIZE */
+#endif /* !getpagesize */
+
+#if !defined (getpagesize)
+# define getpagesize() 4096 /* Just punt and use reasonable value */
+#endif
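
A short consumer sketch; the header is meant to follow config.h (which supplies the HAVE_* macros it tests, as malloc.c does below), and the call site looks the same whichever fallback branch ends up defining getpagesize():

    #include <stdio.h>

    #include <config.h>          /* defines HAVE_UNISTD_H, HAVE_SYS_PARAM_H, ... */
    #include "getpagesize.h"     /* only needed when the system lacks getpagesize() */

    int
    main (void)
    {
      printf ("page size: %ld bytes\n", (long) getpagesize ());
      return 0;
    }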
diff --git a/lib/malloc/i386-alloca.s b/lib/malloc/i386-alloca.s
new file mode 100644
index 0000000..01b2cfe
--- /dev/null
+++ b/lib/malloc/i386-alloca.s
@@ -0,0 +1,16 @@
+ .file "alloca.s"
+ .text
+ .align 4
+ .def alloca; .val alloca; .scl 2; .type 044; .endef
+ .globl alloca
+alloca:
+ popl %edx
+ popl %eax
+ addl $3,%eax
+ andl $0xfffffffc,%eax
+ subl %eax,%esp
+ movl %esp,%eax
+ pushl %eax
+ pushl %edx
+ ret
+ .def alloca; .val .; .scl -1; .endef
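
The only arithmetic in the routine is the addl/andl pair, which rounds the requested size up to a multiple of 4 before it is subtracted from %esp. The same rounding expressed in C, for illustration only (a C function cannot hand its caller stack space this way):

    /* Mirrors "addl $3,%eax ; andl $0xfffffffc,%eax" from the assembly above. */
    static unsigned int
    round_up_to_word (unsigned int nbytes)
    {
      return (nbytes + 3) & ~3u;
    }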
diff --git a/lib/malloc/imalloc.h b/lib/malloc/imalloc.h
new file mode 100644
index 0000000..d07adac
--- /dev/null
+++ b/lib/malloc/imalloc.h
@@ -0,0 +1,173 @@
+/* imalloc.h -- internal malloc definitions shared by source files. */
+
+/* Copyright (C) 2001-2020 Free Software Foundation, Inc.
+
+ This file is part of GNU Bash, the Bourne Again SHell.
+
+ Bash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Bash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Bash. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* Must be included *after* config.h */
+
+#ifndef _IMALLOC_H
+#define _IMALLOC_H
+
+#ifdef MALLOC_DEBUG
+#define MALLOC_STATS
+#define MALLOC_TRACE
+#define MALLOC_REGISTER
+#define MALLOC_WATCH
+#endif
+
+#define MALLOC_WRAPFUNCS
+
+/* Generic pointer type. */
+#ifndef PTR_T
+# if defined (__STDC__)
+# define PTR_T void *
+# else
+# define PTR_T char *
+# endif
+#endif
+
+#if !defined (NULL)
+# define NULL 0
+#endif
+
+#if !defined (CPP_STRING)
+# if defined (HAVE_STRINGIZE)
+# define CPP_STRING(x) #x
+# else
+# define CPP_STRING(x) "x"
+# endif /* !HAVE_STRINGIZE */
+#endif /* !CPP_STRING */
+
+#if __GNUC__ > 1
+# define FASTCOPY(s, d, n) __builtin_memcpy (d, s, n)
+#else /* !__GNUC__ */
+# if !defined (HAVE_BCOPY)
+# if !defined (HAVE_MEMMOVE)
+# define FASTCOPY(s, d, n) memcpy (d, s, n)
+# else
+# define FASTCOPY(s, d, n) memmove (d, s, n)
+# endif /* !HAVE_MEMMOVE */
+# else /* HAVE_BCOPY */
+# define FASTCOPY(s, d, n) bcopy (s, d, n)
+# endif /* HAVE_BCOPY */
+#endif /* !__GNUC__ */
+
+#if !defined (PARAMS)
+# if defined (__STDC__) || defined (__GNUC__) || defined (__cplusplus) || defined (PROTOTYPES)
+# define PARAMS(protos) protos
+# else
+# define PARAMS(protos) ()
+# endif
+#endif
+
+/* Use Duff's device for good zeroing/copying performance. DO NOT call the
+ Duff's device macros with NBYTES == 0. */
+
+#define MALLOC_BZERO(charp, nbytes) \
+do { \
+ if ((nbytes) <= 32) { \
+ size_t * mzp = (size_t *)(charp); \
+ unsigned long mctmp = (nbytes)/sizeof(size_t); \
+ long mcn; \
+ if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp &= 7; } \
+ switch (mctmp) { \
+ case 0: for(;;) { *mzp++ = 0; \
+ case 7: *mzp++ = 0; \
+ case 6: *mzp++ = 0; \
+ case 5: *mzp++ = 0; \
+ case 4: *mzp++ = 0; \
+ case 3: *mzp++ = 0; \
+ case 2: *mzp++ = 0; \
+ case 1: *mzp++ = 0; if(mcn <= 0) break; mcn--; } \
+      } \
+  } \
+  else \
+ memset ((charp), 0, (nbytes)); \
+} while(0)
+
+#define MALLOC_ZERO(charp, nbytes) \
+do { \
+ size_t mzsz = (nbytes); \
+  if (mzsz <= 9 * sizeof(mzsz)) { \
+ size_t *mz = (size_t *)(charp); \
+ if(mzsz >= 5*sizeof(mzsz)) { *mz++ = 0; \
+ *mz++ = 0; \
+ if(mzsz >= 7*sizeof(mzsz)) { *mz++ = 0; \
+ *mz++ = 0; \
+ if(mzsz >= 9*sizeof(mzsz)) { *mz++ = 0; \
+ *mz++ = 0; }}} \
+ *mz++ = 0; \
+ *mz++ = 0; \
+ *mz = 0; \
+ } else \
+ memset ((charp), 0, mzsz); \
+} while (0)
+
+#define MALLOC_MEMSET(charp, xch, nbytes) \
+do { \
+ if ((nbytes) <= 32) { \
+ register char * mzp = (charp); \
+ unsigned long mctmp = (nbytes); \
+ register long mcn; \
+ if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp &= 7; } \
+ switch (mctmp) { \
+ case 0: for(;;) { *mzp++ = xch; \
+ case 7: *mzp++ = xch; \
+ case 6: *mzp++ = xch; \
+ case 5: *mzp++ = xch; \
+ case 4: *mzp++ = xch; \
+ case 3: *mzp++ = xch; \
+ case 2: *mzp++ = xch; \
+ case 1: *mzp++ = xch; if(mcn <= 0) break; mcn--; } \
+ } \
+ } else \
+ memset ((charp), (xch), (nbytes)); \
+} while(0)
+
+#define MALLOC_MEMCPY(dest,src,nbytes) \
+do { \
+ if ((nbytes) <= 32) { \
+ size_t* mcsrc = (size_t*) src; \
+ size_t* mcdst = (size_t*) dest; \
+ unsigned long mctmp = (nbytes)/sizeof(size_t); \
+ long mcn; \
+ if (mctmp < 8) mcn = 0; else { mcn = (mctmp-1)/8; mctmp &= 7; } \
+ switch (mctmp) { \
+ case 0: for(;;) { *mcdst++ = *mcsrc++; \
+ case 7: *mcdst++ = *mcsrc++; \
+ case 6: *mcdst++ = *mcsrc++; \
+ case 5: *mcdst++ = *mcsrc++; \
+ case 4: *mcdst++ = *mcsrc++; \
+ case 3: *mcdst++ = *mcsrc++; \
+ case 2: *mcdst++ = *mcsrc++; \
+ case 1: *mcdst++ = *mcsrc++; if(mcn <= 0) break; mcn--; } \
+    } \
+  } else \
+    memcpy ((dest), (src), (nbytes)); \
+} while(0)
+
+#if defined (SHELL)
+# include "bashintl.h"
+#else
+# define _(x) x
+#endif
+
+#include <signal.h>
+
+extern void _malloc_block_signals PARAMS((sigset_t *, sigset_t *));
+extern void _malloc_unblock_signals PARAMS((sigset_t *, sigset_t *));
+
+#endif /* _IMALLOC_H */
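
The Duff's-device macros above unroll the loop for requests of 32 bytes or less and fall back to the libc routine for anything larger. A small self-check of MALLOC_MEMSET, assuming config.h is available so the header can be included; the header's own warning applies, so the sizes must be nonzero:

    #include <stdio.h>
    #include <string.h>

    #include <config.h>       /* imalloc.h must follow config.h */
    #include "imalloc.h"

    int
    main (void)
    {
      char small[13], large[100];
      int i, ok = 1;

      MALLOC_MEMSET (small, 0xab, sizeof (small));  /* <= 32 bytes: unrolled */
      MALLOC_MEMSET (large, 0xab, sizeof (large));  /* > 32 bytes: memset()  */

      for (i = 0; i < (int) sizeof (small); i++)
        ok &= ((unsigned char) small[i] == 0xab);
      for (i = 0; i < (int) sizeof (large); i++)
        ok &= ((unsigned char) large[i] == 0xab);
      printf (ok ? "filled\n" : "corrupt\n");
      return 0;
    }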
diff --git a/lib/malloc/malloc.c b/lib/malloc/malloc.c
new file mode 100644
index 0000000..439f8ef
--- /dev/null
+++ b/lib/malloc/malloc.c
@@ -0,0 +1,1481 @@
+/* malloc.c - dynamic memory allocation for bash. */
+
+/* Copyright (C) 1985-2020 Free Software Foundation, Inc.
+
+ This file is part of GNU Bash, the Bourne-Again SHell.
+
+ Bash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Bash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Bash. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/*
+ * @(#)nmalloc.c 1 (Caltech) 2/21/82
+ *
+ * U of M Modified: 20 Jun 1983 ACT: strange hacks for Emacs
+ *
+ * Nov 1983, Mike@BRL, Added support for 4.1C/4.2 BSD.
+ *
+ * [VERY] old explanation:
+ *
+ * This is a very fast storage allocator. It allocates blocks of a small
+ * number of different sizes, and keeps free lists of each size. Blocks
+ * that don't exactly fit are passed up to the next larger size. In this
+ * implementation, the available sizes are (2^n)-4 (or -16) bytes long.
+ * This is designed for use in a program that uses vast quantities of
+ * memory, but bombs when it runs out. To make it a little better, it
+ * warns the user when he starts to get near the end.
+ *
+ * June 84, ACT: modified rcheck code to check the range given to malloc,
+ * rather than the range determined by the 2-power used.
+ *
+ * Jan 85, RMS: calls malloc_warning to issue warning on nearly full.
+ * No longer Emacs-specific; can serve as all-purpose malloc for GNU.
+ * You should call malloc_init to reinitialize after loading dumped Emacs.
+ * Call malloc_stats to get info on memory stats if MALLOC_STATS turned on.
+ * realloc knows how to return same block given, just changing its size,
+ * if the power of 2 is correct.
+ */
+
+/*
+ * nextf[i] is the pointer to the next free block of size 2^(i+3). The
+ * smallest allocatable block is 8 bytes. The overhead information will
+ * go in the first int of the block, and the returned pointer will point
+ * to the second.
+ */
+
+/* Define MEMSCRAMBLE to have free() write 0xcf into memory as it's freed, to
+ uncover callers that refer to freed memory, and to have malloc() write 0xdf
+ into memory as it's allocated to avoid referring to previous contents. */
+
+/* SCO 3.2v4 getcwd and possibly other libc routines fail with MEMSCRAMBLE;
+ handled by configure. */
+
+#if defined (HAVE_CONFIG_H)
+# include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#if defined (SHELL)
+# include "bashtypes.h"
+# include "stdc.h"
+#else
+# include <sys/types.h>
+#endif
+
+#if defined (HAVE_UNISTD_H)
+# include <unistd.h>
+#endif
+
+/* Determine which kind of system this is. */
+#include <signal.h>
+
+#if defined (HAVE_STRING_H)
+# include <string.h>
+#else
+# include <strings.h>
+#endif
+#include <errno.h>
+#include <stdio.h>
+
+#if !defined (botch)
+#include <stdlib.h>
+#endif
+
+#if defined (HAVE_MMAP)
+#include <sys/mman.h>
+#endif
+
+/* Define getpagesize () if the system does not. */
+#ifndef HAVE_GETPAGESIZE
+# include "getpagesize.h"
+#endif
+
+#include "imalloc.h"
+#ifdef MALLOC_STATS
+# include "mstats.h"
+#endif
+#ifdef MALLOC_REGISTER
+# include "table.h"
+#endif
+#ifdef MALLOC_WATCH
+# include "watch.h"
+#endif
+
+#ifdef powerof2
+# undef powerof2
+#endif
+/* Could also use (((x) & -(x)) == (x)) */
+#define powerof2(x) ((((x) - 1) & (x)) == 0)
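
A quick check of the bit trick, restated standalone; note that 0 also satisfies it, which is why posix_memalign() later in this file rejects a zero alignment before consulting powerof2:

    #include <assert.h>

    #define EX_POWEROF2(x) ((((x) - 1) & (x)) == 0)   /* same test as above */

    static void
    check_powerof2 (void)
    {
      assert (EX_POWEROF2 (64));      /* 63 & 64 == 0  */
      assert (!EX_POWEROF2 (48));     /* 47 & 48 == 32 */
      assert (EX_POWEROF2 (0));       /* the zero quirk */
    }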
+
+/* System-specific omissions. */
+#ifdef HPUX
+# define NO_VALLOC
+#endif
+
+/* SIZEOF_LONG * 4 - 2, usable bins from 1..NBUCKETS-1 */
+#define NBUCKETS 30
+
+#define ISALLOC ((char) 0xf7) /* magic byte that implies allocation */
+#define ISFREE ((char) 0x54) /* magic byte that implies free block */
+ /* this is for error checking only */
+#define ISMEMALIGN ((char) 0xd6) /* Stored before the value returned by
+ memalign, with the rest of the word
+ being the distance to the true
+ beginning of the block. */
+
+
+/* We have a flag indicating whether memory is allocated, an index in
+ nextf[], a size field, and a sentinel value to determine whether or
+ not a caller wrote before the start of allocated memory; to realloc()
+ memory we either copy mh_nbytes or just change mh_nbytes if there is
+ enough room in the block for the new size. Range checking is always
+ done. */
+union mhead {
+#if SIZEOF_CHAR_P == 8
+ bits64_t mh_align[2]; /* 16 */
+#else
+ bits64_t mh_align; /* 8 */
+#endif
+ struct {
+ char mi_alloc; /* ISALLOC or ISFREE */ /* 1 */
+ char mi_index; /* index in nextf[] */ /* 1 */
+ /* Remainder are valid only when block is allocated */
+ u_bits16_t mi_magic2; /* should be == MAGIC2 */ /* 2 */
+ u_bits32_t mi_nbytes; /* # of bytes allocated */ /* 4 */
+#if SIZEOF_CHAR_P == 8
+ char mi_magic8[8]; /* MAGIC1 guard bytes */ /* 8 */
+#endif
+ } minfo;
+};
+#define mh_alloc minfo.mi_alloc
+#define mh_index minfo.mi_index
+#define mh_nbytes minfo.mi_nbytes
+#define mh_magic2 minfo.mi_magic2
+#define mh_magic8 minfo.mi_magic8
+
+#define MOVERHEAD sizeof(union mhead)
+
+#if SIZEOF_CHAR_P == 8
+#define MALIGN_MASK 15
+#else
+#define MALIGN_MASK 7 /* one less than desired alignment */
+#endif
+
+typedef union _malloc_guard {
+ char s[4];
+ u_bits32_t i;
+} mguard_t;
+
+/* Access free-list pointer of a block.
+ It is stored at block + sizeof (char *).
+ This is not a field in the minfo structure member of union mhead
+ because we want sizeof (union mhead)
+ to describe the overhead for when the block is in use,
+ and we do not want the free-list pointer to count in that. */
+
+/* If SIZEOF_CHAR_P == 8, this goes into the mh_magic8 buffer at the end of
+ the rest of the struct. This may need adjusting. */
+#define CHAIN(a) \
+ (*(union mhead **) (sizeof (char *) + (char *) (a)))
+
+/* To implement range checking, we write magic values in at the beginning
+ and end of each allocated block, and make sure they are undisturbed
+ whenever a free or a realloc occurs. */
+
+/* Written in the bytes before the block's real space (-SIZEOF_CHAR_P bytes) */
+#define MAGIC1 0x55
+#define MAGIC2 0x5555
+#define MSLOP 4 /* 4 bytes extra for u_bits32_t size */
+
+/* How many bytes are actually allocated for a request of size N --
+ rounded up to nearest multiple of 2*SIZEOF_CHAR_P after accounting for
+ malloc overhead. */
+#define ALLOCATED_BYTES(n) \
+ (((n) + MOVERHEAD + MSLOP + MALIGN_MASK) & ~MALIGN_MASK)
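
A worked example of the rounding, assuming a 64-bit build where sizeof (union mhead) is 16, so MOVERHEAD = 16, MSLOP = 4 and MALIGN_MASK = 15 (the values are restated literally so the check stands alone):

    #include <assert.h>

    #define EX_ALLOCATED_BYTES(n) (((n) + 16 + 4 + 15) & ~15)   /* 64-bit values */

    static void
    check_allocated_bytes (void)
    {
      assert (EX_ALLOCATED_BYTES (1)   == 32);    /* even 1 byte costs 32 bytes  */
      assert (EX_ALLOCATED_BYTES (100) == 128);   /* 100 + 20 = 120, rounds to 128 */
      assert (EX_ALLOCATED_BYTES (108) == 128);   /* largest request fitting 128 */
    }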
+
+#define ASSERT(p) \
+ do \
+ { \
+ if (!(p)) xbotch((PTR_T)0, ERR_ASSERT_FAILED, CPP_STRING(p), file, line); \
+ } \
+ while (0)
+
+/* Minimum and maximum bucket indices for block splitting (and to bound
+ the search for a block to split). */
+#define SPLIT_MIN 2 /* XXX - was 3 */
+#define SPLIT_MID 11
+#define SPLIT_MAX 14
+
+/* Minimum and maximum bucket indices for block coalescing. */
+#define COMBINE_MIN 2
+#define COMBINE_MAX (pagebucket - 1) /* XXX */
+
+#define LESSCORE_MIN 10
+#define LESSCORE_FRC 13
+
+#define STARTBUCK 1
+
+/* Should we use mmap for large allocations? */
+#if defined (HAVE_MMAP)
+# if defined (MAP_ANON) && !defined (MAP_ANONYMOUS)
+# define MAP_ANONYMOUS MAP_ANON
+# endif
+#endif
+
+#if defined (HAVE_MMAP) && defined (MAP_ANONYMOUS)
+# define USE_MMAP
+#endif
+
+#if defined (USE_MMAP)
+# define MMAP_THRESHOLD 14 /* must be >= SPLIT_MAX, COMBINE_MAX */
+#else
+# define MMAP_THRESHOLD (8 * SIZEOF_LONG)
+#endif
+
+/* Flags for the internal functions. */
+#define MALLOC_WRAPPER 0x01 /* wrapper function */
+#define MALLOC_INTERNAL 0x02 /* internal function calling another */
+#define MALLOC_NOTRACE 0x04 /* don't trace this allocation or free */
+#define MALLOC_NOREG 0x08 /* don't register this allocation or free */
+
+/* Future use. */
+#define ERR_DUPFREE 0x01
+#define ERR_UNALLOC 0x02
+#define ERR_UNDERFLOW 0x04
+#define ERR_ASSERT_FAILED 0x08
+
+/* Evaluates to true if NB is appropriate for bucket NU. NB is adjusted
+ appropriately by the caller to account for malloc overhead. This only
+ checks that the recorded size is not too big for the bucket. We
+ can't check whether or not it's in between NU and NU-1 because we
+ might have encountered a busy bucket when allocating and moved up to
+ the next size. */
+#define IN_BUCKET(nb, nu) ((nb) <= binsizes[(nu)])
+
+/* Use this when we want to be sure that NB is in bucket NU. */
+#define RIGHT_BUCKET(nb, nu) \
+ (((nb) > binsizes[(nu)-1]) && ((nb) <= binsizes[(nu)]))
+
+/* nextf[i] is free list of blocks of size 2**(i + 3) */
+
+static union mhead *nextf[NBUCKETS];
+
+/* busy[i] is nonzero while allocation or free of block size i is in progress. */
+
+static char busy[NBUCKETS];
+
+static int pagesz; /* system page size. */
+static int pagebucket; /* bucket for requests a page in size */
+static int maxbuck; /* highest bucket receiving allocation request. */
+
+static char *memtop; /* top of heap */
+
+static const unsigned long binsizes[NBUCKETS] = {
+ 8UL, 16UL, 32UL, 64UL, 128UL, 256UL, 512UL, 1024UL, 2048UL, 4096UL,
+ 8192UL, 16384UL, 32768UL, 65536UL, 131072UL, 262144UL, 524288UL,
+ 1048576UL, 2097152UL, 4194304UL, 8388608UL, 16777216UL, 33554432UL,
+ 67108864UL, 134217728UL, 268435456UL, 536870912UL, 1073741824UL,
+ 2147483648UL, 4294967295UL
+};
+
+/* binsizes[x] == (1 << ((x) + 3)) */
+#define binsize(x) binsizes[(x)]
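
internal_malloc() below turns a padded request into a bucket index by walking this table until the request fits; a condensed sketch of that search, ignoring the busy-bucket skip and the pagebucket shortcut the real code applies:

    /* e.g. a padded request of 128 bytes maps to index 4 (binsizes[4] == 128),
       and 8192 maps to index 10. */
    static int
    example_bucket_for (unsigned long padded, const unsigned long *bins, int nbins)
    {
      int i;

      for (i = 0; i < nbins; i++)
        if (padded <= bins[i])
          return i;
      return -1;      /* larger than the biggest bin: the request is rejected */
    }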
+
+#if !defined (errno)
+extern int errno;
+#endif
+
+/* Declarations for internal functions */
+static PTR_T internal_malloc PARAMS((size_t, const char *, int, int));
+static PTR_T internal_realloc PARAMS((PTR_T, size_t, const char *, int, int));
+static void internal_free PARAMS((PTR_T, const char *, int, int));
+static PTR_T internal_memalign PARAMS((size_t, size_t, const char *, int, int));
+#ifndef NO_CALLOC
+static PTR_T internal_calloc PARAMS((size_t, size_t, const char *, int, int));
+static void internal_cfree PARAMS((PTR_T, const char *, int, int));
+#endif
+#ifndef NO_VALLOC
+static PTR_T internal_valloc PARAMS((size_t, const char *, int, int));
+#endif
+
+#if defined (botch)
+extern void botch ();
+#else
+static void botch PARAMS((const char *, const char *, int));
+#endif
+static void xbotch PARAMS((PTR_T, int, const char *, const char *, int));
+
+#if !HAVE_DECL_SBRK
+extern char *sbrk ();
+#endif /* !HAVE_DECL_SBRK */
+
+#ifdef SHELL
+extern int running_trap;
+extern int signal_is_trapped PARAMS((int));
+#endif
+
+#ifdef MALLOC_STATS
+struct _malstats _mstats;
+#endif /* MALLOC_STATS */
+
+/* Debugging variables available to applications. */
+int malloc_flags = 0; /* future use */
+int malloc_trace = 0; /* trace allocations and frees to stderr */
+int malloc_register = 0; /* future use */
+
+/* Use a variable in case we want to dynamically adapt it in the future */
+int malloc_mmap_threshold = MMAP_THRESHOLD;
+
+#ifdef MALLOC_TRACE
+char _malloc_trace_buckets[NBUCKETS];
+
+/* These should really go into a header file. */
+extern void mtrace_alloc PARAMS((const char *, PTR_T, size_t, const char *, int));
+extern void mtrace_free PARAMS((PTR_T, int, const char *, int));
+#endif
+
+#if !defined (botch)
+static void
+botch (s, file, line)
+ const char *s;
+ const char *file;
+ int line;
+{
+ fprintf (stderr, _("malloc: failed assertion: %s\n"), s);
+ (void)fflush (stderr);
+ abort ();
+}
+#endif
+
+/* print the file and line number that caused the assertion failure and
+ call botch() to do whatever the application wants with the information */
+static void
+xbotch (mem, e, s, file, line)
+ PTR_T mem;
+ int e;
+ const char *s;
+ const char *file;
+ int line;
+{
+ fprintf (stderr, _("\r\nmalloc: %s:%d: assertion botched\r\n"),
+ file ? file : _("unknown"), line);
+#ifdef MALLOC_REGISTER
+ if (mem != NULL && malloc_register)
+ mregister_describe_mem (mem, stderr);
+#endif
+ (void)fflush (stderr);
+ botch(s, file, line);
+}
+
+/* Coalesce two adjacent free blocks off the free list for size NU - 1,
+ as long as we can find two adjacent free blocks. nextf[NU -1] is
+ assumed to not be busy; the caller (morecore()) checks for this.
+ BUSY[NU] must be set to 1. */
+static void
+bcoalesce (nu)
+ register int nu;
+{
+ register union mhead *mp, *mp1, *mp2;
+ register int nbuck;
+ unsigned long siz;
+
+ nbuck = nu - 1;
+ if (nextf[nbuck] == 0 || busy[nbuck])
+ return;
+
+ busy[nbuck] = 1;
+ siz = binsize (nbuck);
+
+ mp2 = mp1 = nextf[nbuck];
+ mp = CHAIN (mp1);
+ while (mp && mp != (union mhead *)((char *)mp1 + siz))
+ {
+ mp2 = mp1;
+ mp1 = mp;
+ mp = CHAIN (mp);
+ }
+
+ if (mp == 0)
+ {
+ busy[nbuck] = 0;
+ return;
+ }
+
+ /* OK, now we have mp1 pointing to the block we want to add to nextf[NU].
+ CHAIN(mp2) must equal mp1. Check that mp1 and mp are adjacent. */
+ if (mp2 != mp1 && CHAIN(mp2) != mp1)
+ {
+ busy[nbuck] = 0;
+ xbotch ((PTR_T)0, 0, "bcoalesce: CHAIN(mp2) != mp1", (char *)NULL, 0);
+ }
+
+#ifdef MALLOC_DEBUG
+ if (CHAIN (mp1) != (union mhead *)((char *)mp1 + siz))
+ {
+ busy[nbuck] = 0;
+ return; /* not adjacent */
+ }
+#endif
+
+ /* Since they are adjacent, remove them from the free list */
+ if (mp1 == nextf[nbuck])
+ nextf[nbuck] = CHAIN (mp);
+ else
+ CHAIN (mp2) = CHAIN (mp);
+ busy[nbuck] = 0;
+
+#ifdef MALLOC_STATS
+ _mstats.tbcoalesce++;
+ _mstats.ncoalesce[nbuck]++;
+#endif
+
+ /* And add the combined two blocks to nextf[NU]. */
+ mp1->mh_alloc = ISFREE;
+ mp1->mh_index = nu;
+ CHAIN (mp1) = nextf[nu];
+ nextf[nu] = mp1;
+}
+
+/* Split a block at index > NU (but less than SPLIT_MAX) into a set of
+ blocks of the correct size, and attach them to nextf[NU]. nextf[NU]
+ is assumed to be empty. Must be called with signals blocked (e.g.,
+ by morecore()). BUSY[NU] must be set to 1. */
+static void
+bsplit (nu)
+ register int nu;
+{
+ register union mhead *mp;
+ int nbuck, nblks, split_max;
+ unsigned long siz;
+
+ split_max = (maxbuck > SPLIT_MAX) ? maxbuck : SPLIT_MAX;
+
+ if (nu >= SPLIT_MID)
+ {
+ for (nbuck = split_max; nbuck > nu; nbuck--)
+ {
+ if (busy[nbuck] || nextf[nbuck] == 0)
+ continue;
+ break;
+ }
+ }
+ else
+ {
+ for (nbuck = nu + 1; nbuck <= split_max; nbuck++)
+ {
+ if (busy[nbuck] || nextf[nbuck] == 0)
+ continue;
+ break;
+ }
+ }
+
+ if (nbuck > split_max || nbuck <= nu)
+ return;
+
+ /* XXX might want to split only if nextf[nbuck] has >= 2 blocks free
+ and nbuck is below some threshold. */
+
+ /* Remove the block from the chain of larger blocks. */
+ busy[nbuck] = 1;
+ mp = nextf[nbuck];
+ nextf[nbuck] = CHAIN (mp);
+ busy[nbuck] = 0;
+
+#ifdef MALLOC_STATS
+ _mstats.tbsplit++;
+ _mstats.nsplit[nbuck]++;
+#endif
+
+ /* Figure out how many blocks we'll get. */
+ siz = binsize (nu);
+ nblks = binsize (nbuck) / siz;
+
+ /* Split the block and put it on the requested chain. */
+ nextf[nu] = mp;
+ while (1)
+ {
+ mp->mh_alloc = ISFREE;
+ mp->mh_index = nu;
+ if (--nblks <= 0) break;
+ CHAIN (mp) = (union mhead *)((char *)mp + siz);
+ mp = (union mhead *)((char *)mp + siz);
+ }
+ CHAIN (mp) = 0;
+}
+
+/* Take the memory block MP and add it to a chain < NU. NU is the right bucket,
+ but is busy. This avoids memory orphaning. */
+static void
+xsplit (mp, nu)
+ union mhead *mp;
+ int nu;
+{
+ union mhead *nh;
+ int nbuck, nblks, split_max;
+ unsigned long siz;
+
+ nbuck = nu - 1;
+ while (nbuck >= SPLIT_MIN && busy[nbuck])
+ nbuck--;
+ if (nbuck < SPLIT_MIN)
+ return;
+
+#ifdef MALLOC_STATS
+ _mstats.tbsplit++;
+ _mstats.nsplit[nu]++;
+#endif
+
+ /* Figure out how many blocks we'll get. */
+ siz = binsize (nu); /* original block size */
+ nblks = siz / binsize (nbuck); /* should be 2 most of the time */
+
+ /* And add it to nextf[nbuck] */
+ siz = binsize (nbuck); /* XXX - resetting here */
+ nh = mp;
+ while (1)
+ {
+ mp->mh_alloc = ISFREE;
+ mp->mh_index = nbuck;
+ if (--nblks <= 0) break;
+ CHAIN (mp) = (union mhead *)((char *)mp + siz);
+ mp = (union mhead *)((char *)mp + siz);
+ }
+ busy[nbuck] = 1;
+ CHAIN (mp) = nextf[nbuck];
+ nextf[nbuck] = nh;
+ busy[nbuck] = 0;
+}
+
+void
+_malloc_block_signals (setp, osetp)
+ sigset_t *setp, *osetp;
+{
+#ifdef HAVE_POSIX_SIGNALS
+ sigfillset (setp);
+ sigemptyset (osetp);
+ sigprocmask (SIG_BLOCK, setp, osetp);
+#else
+# if defined (HAVE_BSD_SIGNALS)
+ *osetp = sigsetmask (-1);
+# endif
+#endif
+}
+
+void
+_malloc_unblock_signals (setp, osetp)
+ sigset_t *setp, *osetp;
+{
+#ifdef HAVE_POSIX_SIGNALS
+ sigprocmask (SIG_SETMASK, osetp, (sigset_t *)NULL);
+#else
+# if defined (HAVE_BSD_SIGNALS)
+ sigsetmask (*osetp);
+# endif
+#endif
+}
+
+/* Return some memory to the system by reducing the break. This is only
+ called with NU > pagebucket, so we're always assured of giving back
+ more than one page of memory. */
+static void
+lesscore (nu) /* give system back some memory */
+ register int nu; /* size index we're discarding */
+{
+ long siz;
+
+ siz = binsize (nu);
+ /* Should check for errors here, I guess. */
+ sbrk (-siz);
+ memtop -= siz;
+
+#ifdef MALLOC_STATS
+ _mstats.nsbrk++;
+ _mstats.tsbrk -= siz;
+ _mstats.nlesscore[nu]++;
+#endif
+}
+
+/* Ask system for more memory; add to NEXTF[NU]. BUSY[NU] must be set to 1. */
+static void
+morecore (nu)
+ register int nu; /* size index to get more of */
+{
+ register union mhead *mp;
+ register int nblks;
+ register long siz;
+ long sbrk_amt; /* amount to get via sbrk() */
+ sigset_t set, oset;
+ int blocked_sigs;
+
+ /* Block all signals in case we are executed from a signal handler. */
+ blocked_sigs = 0;
+#ifdef SHELL
+# if defined (SIGCHLD)
+ if (running_trap || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
+# else
+ if (running_trap || signal_is_trapped (SIGINT))
+# endif
+#endif
+ {
+ _malloc_block_signals (&set, &oset);
+ blocked_sigs = 1;
+ }
+
+ siz = binsize (nu); /* size of desired block for nextf[nu] */
+
+ if (siz < 0)
+ goto morecore_done; /* oops */
+
+#ifdef MALLOC_STATS
+ _mstats.nmorecore[nu]++;
+#endif
+
+ /* Try to split a larger block here, if we're within the range of sizes
+ to split. */
+ if (nu >= SPLIT_MIN && nu <= malloc_mmap_threshold)
+ {
+ bsplit (nu);
+ if (nextf[nu] != 0)
+ goto morecore_done;
+ }
+
+ /* Try to coalesce two adjacent blocks from the free list on nextf[nu - 1],
+ if we can, and we're within the range of the block coalescing limits. */
+ if (nu >= COMBINE_MIN && nu < COMBINE_MAX && nu <= malloc_mmap_threshold && busy[nu - 1] == 0 && nextf[nu - 1])
+ {
+ bcoalesce (nu);
+ if (nextf[nu] != 0)
+ goto morecore_done;
+ }
+
+ /* Take at least a page, and figure out how many blocks of the requested
+ size we're getting. */
+ if (siz <= pagesz)
+ {
+ sbrk_amt = pagesz;
+ nblks = sbrk_amt / siz;
+ }
+ else
+ {
+ /* We always want to request an integral multiple of the page size
+ from the kernel, so let's compute whether or not `siz' is such
+ an amount. If it is, we can just request it. If not, we want
+ the smallest integral multiple of pagesize that is larger than
+ `siz' and will satisfy the request. */
+ sbrk_amt = siz & (pagesz - 1);
+ if (sbrk_amt == 0)
+ sbrk_amt = siz;
+ else
+ sbrk_amt = siz + pagesz - sbrk_amt;
+ nblks = 1;
+ }
+
+#if defined (USE_MMAP)
+ if (nu > malloc_mmap_threshold)
+ {
+ mp = (union mhead *)mmap (0, sbrk_amt, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ if ((void *)mp == MAP_FAILED)
+ goto morecore_done;
+ nextf[nu] = mp;
+ mp->mh_alloc = ISFREE;
+ mp->mh_index = nu;
+ CHAIN (mp) = 0;
+#ifdef MALLOC_STATS
+ _mstats.nmmap++;
+ _mstats.tmmap += sbrk_amt;
+#endif
+ goto morecore_done;
+ }
+#endif
+
+
+#ifdef MALLOC_STATS
+ _mstats.nsbrk++;
+ _mstats.tsbrk += sbrk_amt;
+#endif
+
+ mp = (union mhead *) sbrk (sbrk_amt);
+
+ /* Totally out of memory. */
+ if ((long)mp == -1)
+ goto morecore_done;
+
+ memtop += sbrk_amt;
+
+ /* shouldn't happen, but just in case -- require 8- or 16-byte alignment */
+ if ((long)mp & MALIGN_MASK)
+ {
+ mp = (union mhead *) (((long)mp + MALIGN_MASK) & ~MALIGN_MASK);
+ nblks--;
+ }
+
+ /* save new header and link the nblks blocks together */
+ nextf[nu] = mp;
+ while (1)
+ {
+ mp->mh_alloc = ISFREE;
+ mp->mh_index = nu;
+ if (--nblks <= 0) break;
+ CHAIN (mp) = (union mhead *)((char *)mp + siz);
+ mp = (union mhead *)((char *)mp + siz);
+ }
+ CHAIN (mp) = 0;
+
+morecore_done:
+ if (blocked_sigs)
+ _malloc_unblock_signals (&set, &oset);
+}
+
+static void
+malloc_debug_dummy ()
+{
+ write (1, "malloc_debug_dummy\n", 19);
+}
+
+#if SIZEOF_CHAR_P == 8
+#define PREPOP_BIN 3
+#define PREPOP_SIZE 64
+#else
+#define PREPOP_BIN 2
+#define PREPOP_SIZE 32
+#endif
+
+static int
+pagealign ()
+{
+ register int nunits;
+ register union mhead *mp;
+ long sbrk_needed;
+ char *curbrk;
+
+ pagesz = getpagesize ();
+ if (pagesz < 1024)
+ pagesz = 1024;
+
+ /* OK, how much do we need to allocate to make things page-aligned?
+ Some of this partial page will be wasted space, but we'll use as
+ much as we can. Once we figure out how much to advance the break
+ pointer, go ahead and do it. */
+ memtop = curbrk = sbrk (0);
+ sbrk_needed = pagesz - ((long)curbrk & (pagesz - 1)); /* sbrk(0) % pagesz */
+ if (sbrk_needed < 0)
+ sbrk_needed += pagesz;
+
+ /* Now allocate the wasted space. */
+ if (sbrk_needed)
+ {
+#ifdef MALLOC_STATS
+ _mstats.nsbrk++;
+ _mstats.tsbrk += sbrk_needed;
+#endif
+ curbrk = sbrk (sbrk_needed);
+ if ((long)curbrk == -1)
+ return -1;
+ memtop += sbrk_needed;
+
+ /* Take the memory which would otherwise be wasted and populate the most
+ popular bin (3 == 64 bytes) with it. Add whatever we need to curbrk
+ to make things 64-byte aligned, compute how many 64-byte chunks we're
+ going to get, and set up the bin. */
+ curbrk += sbrk_needed & (PREPOP_SIZE - 1);
+ sbrk_needed -= sbrk_needed & (PREPOP_SIZE - 1);
+ nunits = sbrk_needed / PREPOP_SIZE;
+
+ if (nunits > 0)
+ {
+ mp = (union mhead *)curbrk;
+
+ nextf[PREPOP_BIN] = mp;
+ while (1)
+ {
+ mp->mh_alloc = ISFREE;
+ mp->mh_index = PREPOP_BIN;
+ if (--nunits <= 0) break;
+ CHAIN(mp) = (union mhead *)((char *)mp + PREPOP_SIZE);
+ mp = (union mhead *)((char *)mp + PREPOP_SIZE);
+ }
+ CHAIN(mp) = 0;
+ }
+ }
+
+ /* compute which bin corresponds to the page size. */
+ for (nunits = 7; nunits < NBUCKETS; nunits++)
+ if (pagesz <= binsize(nunits))
+ break;
+ pagebucket = nunits;
+
+ return 0;
+}
+
+static PTR_T
+internal_malloc (n, file, line, flags) /* get a block */
+ size_t n;
+ const char *file;
+ int line, flags;
+{
+ register union mhead *p;
+ register int nunits;
+ register char *m, *z;
+ long nbytes;
+ mguard_t mg;
+
+ /* Get the system page size and align break pointer so future sbrks will
+ be page-aligned. The page size must be at least 1K -- anything
+ smaller is increased. */
+ if (pagesz == 0)
+ if (pagealign () < 0)
+ return ((PTR_T)NULL);
+
+ /* Figure out how many bytes are required, rounding up to the nearest
+ multiple of 8, then figure out which nextf[] area to use. Try to
+ be smart about where to start searching -- if the number of bytes
+ needed is greater than the page size, we can start at pagebucket. */
+ nbytes = ALLOCATED_BYTES(n);
+ nunits = (nbytes <= (pagesz >> 1)) ? STARTBUCK : pagebucket;
+ for ( ; nunits < NBUCKETS; nunits++)
+ if (nbytes <= binsize(nunits))
+ break;
+
+ /* Silently reject too-large requests. XXX - can increase this if HAVE_MMAP */
+ if (nunits >= NBUCKETS)
+ return ((PTR_T) NULL);
+
+ /* In case this is reentrant use of malloc from signal handler,
+ pick a block size that no other malloc level is currently
+ trying to allocate. That's the easiest harmless way not to
+ interfere with the other level of execution. */
+#ifdef MALLOC_STATS
+ if (busy[nunits]) _mstats.nrecurse++;
+#endif
+ while (busy[nunits]) nunits++;
+ busy[nunits] = 1;
+
+ if (nunits > maxbuck)
+ maxbuck = nunits;
+
+ /* If there are no blocks of the appropriate size, go get some */
+ if (nextf[nunits] == 0)
+ morecore (nunits);
+
+ /* Get one block off the list, and set the new list head */
+ if ((p = nextf[nunits]) == NULL)
+ {
+ busy[nunits] = 0;
+ return NULL;
+ }
+ nextf[nunits] = CHAIN (p);
+ busy[nunits] = 0;
+
+ /* Check for free block clobbered */
+ /* If not for this check, we would gobble a clobbered free chain ptr
+ and bomb out on the NEXT allocate of this size block */
+ if (p->mh_alloc != ISFREE || p->mh_index != nunits)
+ xbotch ((PTR_T)(p+1), 0, _("malloc: block on free list clobbered"), file, line);
+
+ /* Fill in the info, and set up the magic numbers for range checking. */
+ p->mh_alloc = ISALLOC;
+ p->mh_magic2 = MAGIC2;
+ p->mh_nbytes = n;
+
+#if SIZEOF_CHAR_P == 8
+ /* Begin guard */
+ MALLOC_MEMSET ((char *)p->mh_magic8, MAGIC1, 8);
+#endif
+
+ /* End guard */
+ mg.i = n;
+ z = mg.s;
+ m = (char *) (p + 1) + n;
+ *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;
+
+#ifdef MEMSCRAMBLE
+ if (n)
+ MALLOC_MEMSET ((char *)(p + 1), 0xdf, n); /* scramble previous contents */
+#endif
+#ifdef MALLOC_STATS
+ _mstats.nmalloc[nunits]++;
+ _mstats.tmalloc[nunits]++;
+ _mstats.nmal++;
+ _mstats.bytesreq += n;
+#endif /* MALLOC_STATS */
+
+#ifdef MALLOC_TRACE
+ if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
+ mtrace_alloc ("malloc", p + 1, n, file, line);
+ else if (_malloc_trace_buckets[nunits])
+ mtrace_alloc ("malloc", p + 1, n, file, line);
+#endif
+
+#ifdef MALLOC_REGISTER
+ if (malloc_register && (flags & MALLOC_NOREG) == 0)
+ mregister_alloc ("malloc", p + 1, n, file, line);
+#endif
+
+#ifdef MALLOC_WATCH
+ if (_malloc_nwatch > 0)
+ _malloc_ckwatch (p + 1, file, line, W_ALLOC, n);
+#endif
+
+#if defined (MALLOC_DEBUG)
+ z = (char *) (p + 1);
+ /* Check alignment of returned pointer */
+ if ((unsigned long)z & MALIGN_MASK)
+ fprintf (stderr, "malloc: %s:%d: warning: request for %d bytes not aligned on %d byte boundary\r\n",
+ file ? file : _("unknown"), line, p->mh_nbytes, MALIGN_MASK+1);
+#endif
+
+ return (PTR_T) (p + 1);
+}
+
+static void
+internal_free (mem, file, line, flags)
+ PTR_T mem;
+ const char *file;
+ int line, flags;
+{
+ register union mhead *p;
+ register char *ap, *z;
+ register int nunits;
+ register unsigned int nbytes;
+ int ubytes; /* caller-requested size */
+ mguard_t mg;
+
+ if ((ap = (char *)mem) == 0)
+ return;
+
+ p = (union mhead *) ap - 1;
+
+ if (p->mh_alloc == ISMEMALIGN)
+ {
+ ap -= p->mh_nbytes;
+ p = (union mhead *) ap - 1;
+ }
+
+#if defined (MALLOC_TRACE) || defined (MALLOC_REGISTER) || defined (MALLOC_WATCH)
+ if (malloc_trace || malloc_register || _malloc_nwatch > 0)
+ ubytes = p->mh_nbytes;
+#endif
+
+ if (p->mh_alloc != ISALLOC)
+ {
+ if (p->mh_alloc == ISFREE)
+ xbotch (mem, ERR_DUPFREE,
+ _("free: called with already freed block argument"), file, line);
+ else
+ xbotch (mem, ERR_UNALLOC,
+ _("free: called with unallocated block argument"), file, line);
+ }
+
+ ASSERT (p->mh_magic2 == MAGIC2);
+
+ nunits = p->mh_index;
+ nbytes = ALLOCATED_BYTES(p->mh_nbytes);
+ /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
+ are now used for the number of bytes allocated, a simple check of
+ mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
+ We sanity-check the value of mh_nbytes against the size of the blocks
+ in the appropriate bucket before we use it. This can still cause problems
+ and obscure errors if mh_nbytes is wrong but still within range; the
+ checks against the size recorded at the end of the chunk will probably
+ fail then. Using MALLOC_REGISTER will help here, since it saves the
+ original number of bytes requested. */
+
+ if (IN_BUCKET(nbytes, nunits) == 0)
+ xbotch (mem, ERR_UNDERFLOW,
+ _("free: underflow detected; mh_nbytes out of range"), file, line);
+#if SIZEOF_CHAR_P == 8
+ {
+ int i;
+ for (i = 0, z = p->mh_magic8; i < 8; i++)
+ if (*z++ != MAGIC1)
+ xbotch (mem, ERR_UNDERFLOW,
+ _("free: underflow detected; magic8 corrupted"), file, line);
+ }
+#endif
+
+ ap += p->mh_nbytes;
+ z = mg.s;
+ *z++ = *ap++, *z++ = *ap++, *z++ = *ap++, *z++ = *ap++;
+ if (mg.i != p->mh_nbytes)
+ xbotch (mem, ERR_ASSERT_FAILED, _("free: start and end chunk sizes differ"), file, line);
+
+#if defined (USE_MMAP)
+ if (nunits > malloc_mmap_threshold)
+ {
+ munmap (p, binsize (nunits));
+#if defined (MALLOC_STATS)
+ _mstats.nlesscore[nunits]++;
+#endif
+ goto free_return;
+ }
+#endif
+
+#if GLIBC21
+ if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == sbrk (0)))
+#else
+ if (nunits >= LESSCORE_MIN && ((char *)p + binsize(nunits) == memtop))
+#endif
+ {
+ /* If above LESSCORE_FRC, give back unconditionally. This should be set
+ high enough to be infrequently encountered. If between LESSCORE_MIN
+ and LESSCORE_FRC, call lesscore if the bucket is marked as busy or if
+ there's already a block on the free list. */
+ if ((nunits >= LESSCORE_FRC) || busy[nunits] || nextf[nunits] != 0)
+ {
+ lesscore (nunits);
+ /* keeps the tracing and registering code in one place */
+ goto free_return;
+ }
+ }
+
+#ifdef MEMSCRAMBLE
+ if (p->mh_nbytes)
+ MALLOC_MEMSET (mem, 0xcf, p->mh_nbytes);
+#endif
+
+ ASSERT (nunits < NBUCKETS);
+
+ if (busy[nunits] == 1)
+ {
+ xsplit (p, nunits); /* split block and add to different chain */
+ goto free_return;
+ }
+
+ p->mh_alloc = ISFREE;
+ /* Protect against signal handlers calling malloc. */
+ busy[nunits] = 1;
+ /* Put this block on the free list. */
+ CHAIN (p) = nextf[nunits];
+ nextf[nunits] = p;
+ busy[nunits] = 0;
+
+free_return:
+ ; /* Empty statement in case this is the end of the function */
+
+#ifdef MALLOC_STATS
+ _mstats.nmalloc[nunits]--;
+ _mstats.nfre++;
+#endif /* MALLOC_STATS */
+
+#ifdef MALLOC_TRACE
+ if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
+ mtrace_free (mem, ubytes, file, line);
+ else if (_malloc_trace_buckets[nunits])
+ mtrace_free (mem, ubytes, file, line);
+#endif
+
+#ifdef MALLOC_REGISTER
+ if (malloc_register && (flags & MALLOC_NOREG) == 0)
+ mregister_free (mem, ubytes, file, line);
+#endif
+
+#ifdef MALLOC_WATCH
+ if (_malloc_nwatch > 0)
+ _malloc_ckwatch (mem, file, line, W_FREE, ubytes);
+#endif
+}
+
+static PTR_T
+internal_realloc (mem, n, file, line, flags)
+ PTR_T mem;
+ register size_t n;
+ const char *file;
+ int line, flags;
+{
+ register union mhead *p;
+ register u_bits32_t tocopy;
+ register unsigned int nbytes;
+ register int nunits;
+ register char *m, *z;
+ mguard_t mg;
+
+#ifdef MALLOC_STATS
+ _mstats.nrealloc++;
+#endif
+
+ if (n == 0)
+ {
+ internal_free (mem, file, line, MALLOC_INTERNAL);
+ return (NULL);
+ }
+ if ((p = (union mhead *) mem) == 0)
+ return internal_malloc (n, file, line, MALLOC_INTERNAL);
+
+ p--;
+ nunits = p->mh_index;
+ ASSERT (nunits < NBUCKETS);
+
+ if (p->mh_alloc != ISALLOC)
+ xbotch (mem, ERR_UNALLOC,
+ _("realloc: called with unallocated block argument"), file, line);
+
+ ASSERT (p->mh_magic2 == MAGIC2);
+ nbytes = ALLOCATED_BYTES(p->mh_nbytes);
+ /* Since the sizeof(u_bits32_t) bytes before the memory handed to the user
+ are now used for the number of bytes allocated, a simple check of
+ mh_magic2 is no longer sufficient to catch things like p[-1] = 'x'.
+ We sanity-check the value of mh_nbytes against the size of the blocks
+ in the appropriate bucket before we use it. This can still cause problems
+ and obscure errors if mh_nbytes is wrong but still within range; the
+ checks against the size recorded at the end of the chunk will probably
+ fail then. Using MALLOC_REGISTER will help here, since it saves the
+ original number of bytes requested. */
+ if (IN_BUCKET(nbytes, nunits) == 0)
+ xbotch (mem, ERR_UNDERFLOW,
+ _("realloc: underflow detected; mh_nbytes out of range"), file, line);
+#if SIZEOF_CHAR_P == 8
+ {
+ int i;
+ for (i = 0, z = p->mh_magic8; i < 8; i++)
+ if (*z++ != MAGIC1)
+ xbotch (mem, ERR_UNDERFLOW,
+ _("realloc: underflow detected; magic8 corrupted"), file, line);
+
+ }
+#endif
+
+ m = (char *)mem + (tocopy = p->mh_nbytes);
+ z = mg.s;
+ *z++ = *m++, *z++ = *m++, *z++ = *m++, *z++ = *m++;
+ if (mg.i != p->mh_nbytes)
+ xbotch (mem, ERR_ASSERT_FAILED, _("realloc: start and end chunk sizes differ"), file, line);
+
+#ifdef MALLOC_WATCH
+ if (_malloc_nwatch > 0)
+ _malloc_ckwatch (p + 1, file, line, W_REALLOC, n);
+#endif
+#ifdef MALLOC_STATS
+ _mstats.bytesreq += (n < tocopy) ? 0 : n - tocopy;
+#endif
+
+ /* If we're reallocating to the same size as previously, return now */
+ if (n == p->mh_nbytes)
+ return mem;
+
+ /* See if desired size rounds to same power of 2 as actual size. */
+ nbytes = ALLOCATED_BYTES(n);
+
+ /* If ok, use the same block, just marking its size as changed. */
+ if (RIGHT_BUCKET(nbytes, nunits) || RIGHT_BUCKET(nbytes, nunits-1))
+ {
+ /* Compensate for increment above. */
+ m -= 4;
+
+ *m++ = 0; *m++ = 0; *m++ = 0; *m++ = 0;
+ m = (char *)mem + (p->mh_nbytes = n);
+
+ mg.i = n;
+ z = mg.s;
+ *m++ = *z++, *m++ = *z++, *m++ = *z++, *m++ = *z++;
+
+ return mem;
+ }
+
+ if (n < tocopy)
+ tocopy = n;
+
+#ifdef MALLOC_STATS
+ _mstats.nrcopy++;
+#endif
+
+ /* If we are using mmap and have mremap, we could use it here. */
+
+ if ((m = internal_malloc (n, file, line, MALLOC_INTERNAL|MALLOC_NOTRACE|MALLOC_NOREG)) == 0)
+ return 0;
+ FASTCOPY (mem, m, tocopy);
+ internal_free (mem, file, line, MALLOC_INTERNAL);
+
+#ifdef MALLOC_TRACE
+ if (malloc_trace && (flags & MALLOC_NOTRACE) == 0)
+ mtrace_alloc ("realloc", m, n, file, line);
+ else if (_malloc_trace_buckets[nunits])
+ mtrace_alloc ("realloc", m, n, file, line);
+#endif
+
+#ifdef MALLOC_REGISTER
+ if (malloc_register && (flags & MALLOC_NOREG) == 0)
+ mregister_alloc ("realloc", m, n, file, line);
+#endif
+
+#ifdef MALLOC_WATCH
+ if (_malloc_nwatch > 0)
+ _malloc_ckwatch (m, file, line, W_RESIZED, n);
+#endif
+
+ return m;
+}
+
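+/* Return SIZE bytes aligned to ALIGNMENT.  The block is over-allocated by
+   ALIGNMENT bytes; if the returned pointer has to be advanced to an aligned
+   address, a fake header is written just before that address with
+   mh_alloc == ISMEMALIGN and mh_nbytes set to the offset back to the real
+   block, which is how malloc_usable_size() below (and free()) recover the
+   true start of the allocation. */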
+static PTR_T
+internal_memalign (alignment, size, file, line, flags)
+ size_t alignment;
+ size_t size;
+ const char *file;
+ int line, flags;
+{
+ register char *ptr;
+ register char *aligned;
+ register union mhead *p;
+
+ ptr = internal_malloc (size + alignment, file, line, MALLOC_INTERNAL);
+
+ if (ptr == 0)
+ return 0;
+ /* If entire block has the desired alignment, just accept it. */
+ if (((long) ptr & (alignment - 1)) == 0)
+ return ptr;
+ /* Otherwise, get address of byte in the block that has that alignment. */
+ aligned = (char *) (((long) ptr + alignment - 1) & (~alignment + 1));
+
+ /* Store a suitable indication of how to free the block,
+ so that free can find the true beginning of it. */
+ p = (union mhead *) aligned - 1;
+ p->mh_nbytes = aligned - ptr;
+ p->mh_alloc = ISMEMALIGN;
+
+ return aligned;
+}
+
+int
+posix_memalign (memptr, alignment, size)
+ void **memptr;
+ size_t alignment, size;
+{
+ void *mem;
+
+ /* Perform posix-mandated error checking here */
+ if ((alignment % sizeof (void *) != 0) || alignment == 0)
+ return EINVAL;
+ else if (powerof2 (alignment) == 0)
+ return EINVAL;
+
+ mem = internal_memalign (alignment, size, (char *)0, 0, 0);
+ if (mem != 0)
+ {
+ *memptr = mem;
+ return 0;
+ }
+ return ENOMEM;
+}
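+
+/* Illustrative sketch (not part of the original source) of calling the
+   posix_memalign() implementation above:
+
+	void *block;
+
+	if (posix_memalign (&block, 64, 1024) == 0)
+	  {
+	    ... use 1024 bytes aligned on a 64-byte boundary ...
+	    free (block);
+	  }
+
+   ALIGNMENT must be a power of two and a multiple of sizeof (void *);
+   otherwise EINVAL is returned.  ENOMEM is returned if the underlying
+   allocation fails. */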
+
+size_t
+malloc_usable_size (mem)
+ void *mem;
+{
+ register union mhead *p;
+ register char *ap;
+ register int maxbytes;
+
+ if ((ap = (char *)mem) == 0)
+ return 0;
+
+ /* Find the true start of the memory block to discover which bin */
+ p = (union mhead *) ap - 1;
+ if (p->mh_alloc == ISMEMALIGN)
+ {
+ ap -= p->mh_nbytes;
+ p = (union mhead *) ap - 1;
+ }
+
+ /* XXX - should we return 0 if ISFREE? */
+ maxbytes = binsize(p->mh_index);
+
+ /* So the usable size is the maximum number of bytes in the bin less the
+ malloc overhead */
+ maxbytes -= MOVERHEAD + MSLOP;
+ return (maxbytes);
+}
+
+#if !defined (NO_VALLOC)
+/* This runs into trouble with getpagesize on HPUX and Multimax machines.
+   Patching it out seems cleaner than the ugly fix that would be needed. */
+static PTR_T
+internal_valloc (size, file, line, flags)
+ size_t size;
+ const char *file;
+ int line, flags;
+{
+ return internal_memalign (getpagesize (), size, file, line, flags|MALLOC_INTERNAL);
+}
+#endif /* !NO_VALLOC */
+
+#ifndef NO_CALLOC
+static PTR_T
+internal_calloc (n, s, file, line, flags)
+ size_t n, s;
+ const char *file;
+ int line, flags;
+{
+ size_t total;
+ PTR_T result;
+
+ total = n * s;
+ result = internal_malloc (total, file, line, flags|MALLOC_INTERNAL);
+ if (result)
+ memset (result, 0, total);
+ return result;
+}
+
+static void
+internal_cfree (p, file, line, flags)
+ PTR_T p;
+ const char *file;
+ int line, flags;
+{
+ internal_free (p, file, line, flags|MALLOC_INTERNAL);
+}
+#endif /* !NO_CALLOC */
+
+#ifdef MALLOC_STATS
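+/* Count the blocks currently on the free list for bucket SIZE.  Called back
+   from malloc_bucket_stats() in stats.c. */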
+int
+malloc_free_blocks (size)
+ int size;
+{
+ int nfree;
+ register union mhead *p;
+
+ nfree = 0;
+ for (p = nextf[size]; p; p = CHAIN (p))
+ nfree++;
+
+ return nfree;
+}
+#endif
+
+#if defined (MALLOC_WRAPFUNCS)
+PTR_T
+sh_malloc (bytes, file, line)
+ size_t bytes;
+ const char *file;
+ int line;
+{
+ return internal_malloc (bytes, file, line, MALLOC_WRAPPER);
+}
+
+PTR_T
+sh_realloc (ptr, size, file, line)
+ PTR_T ptr;
+ size_t size;
+ const char *file;
+ int line;
+{
+ return internal_realloc (ptr, size, file, line, MALLOC_WRAPPER);
+}
+
+void
+sh_free (mem, file, line)
+ PTR_T mem;
+ const char *file;
+ int line;
+{
+ internal_free (mem, file, line, MALLOC_WRAPPER);
+}
+
+PTR_T
+sh_memalign (alignment, size, file, line)
+ size_t alignment;
+ size_t size;
+ const char *file;
+ int line;
+{
+ return internal_memalign (alignment, size, file, line, MALLOC_WRAPPER);
+}
+
+#ifndef NO_CALLOC
+PTR_T
+sh_calloc (n, s, file, line)
+ size_t n, s;
+ const char *file;
+ int line;
+{
+ return internal_calloc (n, s, file, line, MALLOC_WRAPPER);
+}
+
+void
+sh_cfree (mem, file, line)
+ PTR_T mem;
+ const char *file;
+ int line;
+{
+ internal_cfree (mem, file, line, MALLOC_WRAPPER);
+}
+#endif
+
+#ifndef NO_VALLOC
+PTR_T
+sh_valloc (size, file, line)
+ size_t size;
+ const char *file;
+ int line;
+{
+ return internal_valloc (size, file, line, MALLOC_WRAPPER);
+}
+#endif /* !NO_VALLOC */
+
+#endif /* MALLOC_WRAPFUNCS */
+
+/* Externally-available functions that call their internal counterparts. */
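+/* These pass a null file name and line 0, so allocations made through the
+   plain malloc/realloc/free entry points carry no call-site information;
+   only the sh_* wrappers above supply FILE and LINE. */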
+
+PTR_T
+malloc (size)
+ size_t size;
+{
+ return internal_malloc (size, (char *)NULL, 0, 0);
+}
+
+PTR_T
+realloc (mem, nbytes)
+ PTR_T mem;
+ size_t nbytes;
+{
+ return internal_realloc (mem, nbytes, (char *)NULL, 0, 0);
+}
+
+void
+free (mem)
+ PTR_T mem;
+{
+ internal_free (mem, (char *)NULL, 0, 0);
+}
+
+PTR_T
+memalign (alignment, size)
+ size_t alignment;
+ size_t size;
+{
+ return internal_memalign (alignment, size, (char *)NULL, 0, 0);
+}
+
+#ifndef NO_VALLOC
+PTR_T
+valloc (size)
+ size_t size;
+{
+ return internal_valloc (size, (char *)NULL, 0, 0);
+}
+#endif
+
+#ifndef NO_CALLOC
+PTR_T
+calloc (n, s)
+ size_t n, s;
+{
+ return internal_calloc (n, s, (char *)NULL, 0, 0);
+}
+
+void
+cfree (mem)
+ PTR_T mem;
+{
+ internal_cfree (mem, (char *)NULL, 0, 0);
+}
+#endif
diff --git a/lib/malloc/mstats.h b/lib/malloc/mstats.h
new file mode 100644
index 0000000..ce8aaec
--- /dev/null
+++ b/lib/malloc/mstats.h
@@ -0,0 +1,114 @@
+/* mstats.h - definitions for malloc statistics */
+
+/* Copyright (C) 2001-2020 Free Software Foundation, Inc.
+
+ This file is part of GNU Bash, the Bourne-Again SHell.
+
+ Bash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Bash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Bash. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _MSTATS_H
+#define _MSTATS_H
+
+#include "imalloc.h"
+
+#ifdef MALLOC_STATS
+
+/* This needs to change if the definition in malloc.c changes */
+#ifndef NBUCKETS
+# define NBUCKETS 30
+#endif
+
+/*
+ * NMALLOC[i] is the difference between the number of mallocs and frees
+ * for a given block size. TMALLOC[i] is the total number of mallocs for
+ * a given block size. NMORECORE[i] is the total number of calls to
+ * morecore(i). NLESSCORE[i] is the total number of calls to lesscore(i).
+ *
+ * NMAL and NFRE are counts of the number of calls to malloc() and free(),
+ * respectively. NREALLOC is the total number of calls to realloc();
+ * NRCOPY is the number of times realloc() had to allocate new memory and
+ * copy to it. NRECURSE is a count of the number of recursive calls to
+ * malloc() for the same bucket size, which can be caused by calls to
+ * malloc() from a signal handler.
+ *
+ * NSBRK is the number of calls to sbrk() (whether by morecore() or for
+ * alignment); TSBRK is the total number of bytes requested from the kernel
+ * with sbrk().
+ *
+ * BYTESUSED is the total number of bytes consumed by blocks currently in
+ * use; BYTESFREE is the total number of bytes currently on all of the free
+ * lists. BYTESREQ is the total number of bytes requested by the caller
+ * via calls to malloc() and realloc().
+ *
+ * TBSPLIT is the number of times a larger block was split to satisfy a
+ * smaller request. NSPLIT[i] is the number of times a block of size I was
+ * split.
+ *
+ * TBCOALESCE is the number of times two adjacent smaller blocks off the free
+ * list were combined to satisfy a larger request.
+ */
+struct _malstats {
+ int nmalloc[NBUCKETS];
+ int tmalloc[NBUCKETS];
+ int nmorecore[NBUCKETS];
+ int nlesscore[NBUCKETS];
+ int nmal;
+ int nfre;
+ int nrealloc;
+ int nrcopy;
+ int nrecurse;
+ int nsbrk;
+ bits32_t tsbrk;
+ bits32_t bytesused;
+ bits32_t bytesfree;
+ u_bits32_t bytesreq;
+ int tbsplit;
+ int nsplit[NBUCKETS];
+ int tbcoalesce;
+ int ncoalesce[NBUCKETS];
+ int nmmap;
+ bits32_t tmmap;
+};
+
+/* Return statistics describing allocation of blocks of size BLOCKSIZE.
+ NFREE is the number of free blocks for this allocation size. NUSED
+ is the number of blocks in use. NMAL is the number of requests for
+ blocks of size BLOCKSIZE. NMORECORE is the number of times we had
+ to call MORECORE to repopulate the free list for this bucket.
+ NLESSCORE is the number of times we gave memory back to the system
+ from this bucket. NSPLIT is the number of times a block of this size
+ was split to satisfy a smaller request. NCOALESCE is the number of
+ times two blocks of this size were combined to satisfy a larger
+ request. */
+struct bucket_stats {
+ u_bits32_t blocksize;
+ int nfree;
+ int nused;
+ int nmal;
+ int nmorecore;
+ int nlesscore;
+ int nsplit;
+ int ncoalesce;
+ int nmmap; /* currently unused */
+};
+
+extern struct bucket_stats malloc_bucket_stats PARAMS((int));
+extern struct _malstats malloc_stats PARAMS((void));
+extern void print_malloc_stats PARAMS((char *));
+extern void trace_malloc_stats PARAMS((char *, char *));
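+
+/* Illustrative sketch (not part of the original source): with MALLOC_STATS
+   compiled in, a hypothetical helper could inspect a single bucket, or the
+   whole table can be dumped at once:
+
+	#include <stdio.h>
+	#include "mstats.h"
+
+	void
+	report_bucket (int bucket)
+	{
+	  struct bucket_stats v;
+
+	  v = malloc_bucket_stats (bucket);
+	  fprintf (stderr, "bucket %d: %lu-byte blocks, %d free, %d in use\n",
+		   bucket, (unsigned long)v.blocksize, v.nfree, v.nused);
+	}
+
+   print_malloc_stats ("shell exit") writes the full per-bucket table and
+   totals to stderr. */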
+
+#endif /* MALLOC_STATS */
+
+#endif /* _MSTATS_H */
diff --git a/lib/malloc/shmalloc.h b/lib/malloc/shmalloc.h
new file mode 100644
index 0000000..d51193e
--- /dev/null
+++ b/lib/malloc/shmalloc.h
@@ -0,0 +1,70 @@
+/* Functions (currently) for use by the shell to do malloc debugging and
+ tracking. */
+/* Copyright (C) 2001-2020 Free Software Foundation, Inc.
+
+ This file is part of GNU Bash, the Bourne-Again SHell.
+
+ Bash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Bash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Bash. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _SH_MALLOC_H
+#define _SH_MALLOC_H
+
+#ifndef PARAMS
+# if defined (__STDC__) || defined (__GNUC__) || defined (__cplusplus)
+# define PARAMS(protos) protos
+# else
+# define PARAMS(protos) ()
+# endif
+#endif
+
+/* Generic pointer type. */
+#ifndef PTR_T
+
+#if defined (__STDC__)
+# define PTR_T void *
+#else
+# define PTR_T char *
+#endif
+
+#endif /* PTR_T */
+
+
+extern PTR_T sh_malloc PARAMS((size_t, const char *, int));
+extern PTR_T sh_realloc PARAMS((PTR_T, size_t, const char *, int));
+extern void sh_free PARAMS((PTR_T, const char *, int));
+
+extern PTR_T sh_memalign PARAMS((size_t, size_t, const char *, int));
+
+extern PTR_T sh_calloc PARAMS((size_t, size_t, const char *, int));
+extern void sh_cfree PARAMS((PTR_T, const char *, int));
+
+extern PTR_T sh_valloc PARAMS((size_t, const char *, int));
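+
+/* Illustrative sketch (not part of this header): since the sh_* wrappers take
+   a file name and line number, the shell can route its allocations through
+   them with macros along these lines (macro names here are hypothetical):
+
+	#define our_malloc(n)	sh_malloc ((n), __FILE__, __LINE__)
+	#define our_free(p)	sh_free ((p), __FILE__, __LINE__)
+
+   so every allocation and free recorded by the tracing/registering code is
+   tagged with its call site. */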
+
+/* trace.c */
+extern int malloc_set_trace PARAMS((int));
+extern void malloc_set_tracefp (); /* full prototype requires stdio.h */
+extern void malloc_set_tracefn PARAMS((char *, char *));
+
+/* table.c */
+extern void mregister_dump_table PARAMS((void));
+extern void mregister_table_init PARAMS((void));
+extern int malloc_set_register PARAMS((int));
+
+/* stats.c */
+extern void print_malloc_stats PARAMS((char *));
+extern void fprint_malloc_stats (); /* full prototype requires stdio.h */
+extern void trace_malloc_stats PARAMS((char *, char *));
+
+#endif
diff --git a/lib/malloc/stats.c b/lib/malloc/stats.c
new file mode 100644
index 0000000..b38df9f
--- /dev/null
+++ b/lib/malloc/stats.c
@@ -0,0 +1,213 @@
+/* stats.c - malloc statistics */
+
+/* Copyright (C) 2001-2020 Free Software Foundation, Inc.
+
+ This file is part of GNU Bash, the Bourne-Again SHell.
+
+ Bash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Bash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Bash. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include "imalloc.h"
+
+#ifdef MALLOC_STATS
+
+#include <stdio.h>
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif
+#include <string.h>
+
+#include "mstats.h"
+
+extern int malloc_free_blocks PARAMS((int));
+
+extern int malloc_mmap_threshold;
+
+extern struct _malstats _mstats;
+
+extern FILE *_imalloc_fopen PARAMS((char *, char *, char *, char *, size_t));
+
+struct bucket_stats
+malloc_bucket_stats (size)
+ int size;
+{
+ struct bucket_stats v;
+
+ v.nfree = 0;
+
+ if (size < 0 || size >= NBUCKETS)
+ {
+ v.blocksize = 0;
+ v.nused = v.nmal = v.nmorecore = v.nlesscore = v.nsplit = 0;
+ return v;
+ }
+
+ v.blocksize = 1 << (size + 3);
+ v.nused = _mstats.nmalloc[size];
+ v.nmal = _mstats.tmalloc[size];
+ v.nmorecore = _mstats.nmorecore[size];
+ v.nlesscore = _mstats.nlesscore[size];
+ v.nsplit = _mstats.nsplit[size];
+ v.ncoalesce = _mstats.ncoalesce[size];
+
+ v.nfree = malloc_free_blocks (size); /* call back to malloc.c */
+
+ return v;
+}
+
+/* Return a copy of _MSTATS, with two additional fields filled in:
+ BYTESFREE is the total number of bytes on free lists. BYTESUSED
+ is the total number of bytes in use. These two fields are fairly
+ expensive to compute, so we do it only when asked to. */
+struct _malstats
+malloc_stats ()
+{
+ struct _malstats result;
+ struct bucket_stats v;
+ register int i;
+
+ result = _mstats;
+ result.bytesused = result.bytesfree = 0;
+ for (i = 0; i < NBUCKETS; i++)
+ {
+ v = malloc_bucket_stats (i);
+ result.bytesfree += v.nfree * v.blocksize;
+ result.bytesused += v.nused * v.blocksize;
+ }
+ return (result);
+}
+
+static void
+_print_malloc_stats (s, fp)
+ char *s;
+ FILE *fp;
+{
+ register int i;
+ unsigned long totused, totfree;
+ struct bucket_stats v;
+
+ fprintf (fp, "Memory allocation statistics: %s\n size\tfree\tin use\ttotal\tmorecore lesscore split\tcoalesce\n", s ? s : "");
+ for (i = totused = totfree = 0; i < NBUCKETS; i++)
+ {
+ v = malloc_bucket_stats (i);
+ /* Show where the mmap threshold is; sizes greater than this use mmap to
+ allocate and munmap to free (munmap shows up as lesscore). */
+ if (i == malloc_mmap_threshold+1)
+ fprintf (fp, "--------\n");
+ if (v.nmal > 0)
+ fprintf (fp, "%8lu\t%4d\t%6d\t%5d%8d\t%8d %5d %8d\n", (unsigned long)v.blocksize, v.nfree, v.nused, v.nmal, v.nmorecore, v.nlesscore, v.nsplit, v.ncoalesce);
+ totfree += v.nfree * v.blocksize;
+ totused += v.nused * v.blocksize;
+ }
+ fprintf (fp, "\nTotal bytes in use: %lu, total bytes free: %lu\n",
+ totused, totfree);
+ fprintf (fp, "\nTotal bytes requested by application: %lu\n", (unsigned long)_mstats.bytesreq);
+ fprintf (fp, "Total mallocs: %d, total frees: %d, total reallocs: %d (%d copies)\n",
+ _mstats.nmal, _mstats.nfre, _mstats.nrealloc, _mstats.nrcopy);
+ fprintf (fp, "Total sbrks: %d, total bytes via sbrk: %d\n",
+ _mstats.nsbrk, _mstats.tsbrk);
+ fprintf (fp, "Total mmaps: %d, total bytes via mmap: %d\n",
+ _mstats.nmmap, _mstats.tmmap);
+ fprintf (fp, "Total blocks split: %d, total block coalesces: %d\n",
+ _mstats.tbsplit, _mstats.tbcoalesce);
+}
+
+void
+print_malloc_stats (s)
+ char *s;
+{
+ _print_malloc_stats (s, stderr);
+}
+
+void
+fprint_malloc_stats (s, fp)
+ char *s;
+ FILE *fp;
+{
+ _print_malloc_stats (s, fp);
+}
+
+#define TRACEROOT "/var/tmp/maltrace/stats."
+
+void
+trace_malloc_stats (s, fn)
+ char *s, *fn;
+{
+ FILE *fp;
+ char defname[sizeof (TRACEROOT) + 64];
+ static char mallbuf[1024];
+
+ fp = _imalloc_fopen (s, fn, TRACEROOT, defname, sizeof (defname));
+ if (fp)
+ {
+ setvbuf (fp, mallbuf, _IOFBF, sizeof (mallbuf));
+ _print_malloc_stats (s, fp);
+ fflush(fp);
+ fclose(fp);
+ }
+}
+
+#endif /* MALLOC_STATS */
+
+#if defined (MALLOC_STATS) || defined (MALLOC_TRACE)
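+/* Open the file that receives stats or trace output.  If FN is NULL, the
+   name is DEF followed by the process id, built in DEFBUF (of size DEFSIZ).
+   Otherwise FN is used, with the first `%p' replaced by the process id; for
+   example (illustrative), "trace.%p" would open "trace.1234" for pid 1234. */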
+FILE *
+_imalloc_fopen (s, fn, def, defbuf, defsiz)
+ char *s;
+ char *fn;
+ char *def;
+ char *defbuf;
+ size_t defsiz;
+{
+ char fname[1024];
+ long l;
+ FILE *fp;
+
+ l = (long)getpid ();
+ if (fn == 0)
+ {
+ sprintf (defbuf, "%s%ld", def, l);
+ fp = fopen(defbuf, "w");
+ }
+ else
+ {
+ char *p, *q, *r;
+ char pidbuf[32];
+ int sp;
+
+ sprintf (pidbuf, "%ld", l);
+ if ((strlen (pidbuf) + strlen (fn) + 2) >= sizeof (fname))
+ return ((FILE *)0);
+ for (sp = 0, p = fname, q = fn; *q; )
+ {
+ if (sp == 0 && *q == '%' && q[1] == 'p')
+ {
+ sp = 1;
+ for (r = pidbuf; *r; )
+ *p++ = *r++;
+ q += 2;
+ }
+ else
+ *p++ = *q++;
+ }
+ *p = '\0';
+ fp = fopen (fname, "w");
+ }
+
+ return fp;
+}
+#endif /* MALLOC_STATS || MALLOC_TRACE */
diff --git a/lib/malloc/stub.c b/lib/malloc/stub.c
new file mode 100644
index 0000000..a60a624
--- /dev/null
+++ b/lib/malloc/stub.c
@@ -0,0 +1,22 @@
+/* Copyright (C) 1993-2003 Free Software Foundation, Inc.
+
+ This file is part of GNU Bash, the Bourne Again SHell.
+
+ Bash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Bash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Bash. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+void
+bash_malloc_stub()
+{
+}
diff --git a/lib/malloc/table.c b/lib/malloc/table.c
new file mode 100644
index 0000000..e6acbf4
--- /dev/null
+++ b/lib/malloc/table.c
@@ -0,0 +1,429 @@
+/* table.c - bookkeeping functions for allocated memory */
+
+/* Copyright (C) 2001-2020 Free Software Foundation, Inc.
+
+ This file is part of GNU Bash, the Bourne Again SHell.
+
+ Bash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Bash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Bash. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include <stdio.h>
+#include <string.h>
+
+#include "imalloc.h"
+#include "table.h"
+
+#ifdef SHELL
+extern int running_trap;
+extern int signal_is_trapped PARAMS((int));
+#endif
+
+extern int malloc_register;
+
+#ifdef MALLOC_REGISTER
+
+extern FILE *_imalloc_fopen PARAMS((char *, char *, char *, char *, size_t));
+
+#define FIND_ALLOC 0x01 /* find slot for new allocation */
+#define FIND_EXIST 0x02 /* find slot for existing entry for free() or search */
+
+static int table_count = 0;
+static int table_allocated = 0;
+static int table_bucket_index = REG_TABLE_SIZE-1;
+static mr_table_t mem_table[REG_TABLE_SIZE];
+static mr_table_t mem_overflow;
+
+#ifndef STREQ
+#define STREQ(a, b) ((a)[0] == (b)[0] && strcmp(a, b) == 0)
+#endif
+
+static int location_table_index = 0;
+static int location_table_count = 0;
+static ma_table_t mlocation_table[REG_TABLE_SIZE];
+
+/*
+ * NOTE: taken from dmalloc (http://dmalloc.com) and modified.
+ */
+static unsigned int
+mt_hash (key)
+ const PTR_T key;
+{
+ unsigned int a, b, c;
+ unsigned long x;
+
+ /* set up the internal state */
+ a = 0x9e3779b9; /* the golden ratio; an arbitrary value */
+ x = (unsigned long)key; /* truncation is OK */
+ b = x >> 8;
+ c = x >> 3; /* XXX - was >> 4 */
+
+ HASH_MIX(a, b, c);
+ return c;
+}
+
+#if 0
+static unsigned int
+which_bucket (mem)
+ PTR_T mem;
+{
+ return (mt_hash ((unsigned char *)mem) & (REG_TABLE_SIZE-1));
+}
+
+#else
+#define which_bucket(mem) (mt_hash ((unsigned char *)(mem)) & (REG_TABLE_SIZE-1));
+
+#define next_bucket() ((table_bucket_index + 1) & (REG_TABLE_SIZE-1))
+#define next_entry(mem) ((mem == mem_table + REG_TABLE_SIZE - 1) ? mem_table : ++mem)
+
+#define prev_bucket() (table_bucket_index == 0 ? REG_TABLE_SIZE-1 : table_bucket_index-1)
+#define prev_entry(mem) ((mem == mem_table) ? mem_table + REG_TABLE_SIZE - 1 : mem - 1)
+#endif
+
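+/* The registration table is treated as a circular buffer.  FIND_ALLOC simply
+   claims the next slot (clearing whatever entry was there), so very old
+   entries are eventually overwritten; FIND_EXIST searches backwards from the
+   most recently used slot for the last entry recorded for MEM. */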
+static mr_table_t *
+find_entry (mem, flags)
+ PTR_T mem;
+ int flags;
+{
+ unsigned int bucket;
+ register mr_table_t *tp;
+ mr_table_t *endp;
+
+ if (mem_overflow.mem == mem)
+ return (&mem_overflow);
+
+ /* If we want to insert an allocation entry just use the next slot */
+ if (flags & FIND_ALLOC)
+ {
+ table_bucket_index = next_bucket();
+ table_count++;
+ tp = mem_table + table_bucket_index;
+ memset(tp, 0, sizeof (mr_table_t)); /* overwrite next existing entry */
+ return tp;
+ }
+
+ tp = endp = mem_table + table_bucket_index;
+
+ /* search for last allocation corresponding to MEM, return entry pointer */
+ while (1)
+ {
+ if (tp->mem == mem)
+ return (tp);
+
+ tp = prev_entry (tp);
+
+ /* if we went all the way around and didn't find it, return NULL */
+ if (tp == endp)
+ return ((mr_table_t *)NULL);
+ }
+
+ return (mr_table_t *)NULL;
+}
+
+mr_table_t *
+mr_table_entry (mem)
+ PTR_T mem;
+{
+ return (find_entry (mem, FIND_EXIST));
+}
+
+void
+mregister_describe_mem (mem, fp)
+ PTR_T mem;
+ FILE *fp;
+{
+ mr_table_t *entry;
+
+ entry = find_entry (mem, FIND_EXIST);
+ if (entry == 0)
+ return;
+ fprintf (fp, "malloc: %p: %s: last %s from %s:%d\n",
+ mem,
+ (entry->flags & MT_ALLOC) ? "allocated" : "free",
+ (entry->flags & MT_ALLOC) ? "allocated" : "freed",
+ entry->file ? entry->file : "unknown",
+ entry->line);
+}
+
+void
+mregister_alloc (tag, mem, size, file, line)
+ const char *tag;
+ PTR_T mem;
+ size_t size;
+ const char *file;
+ int line;
+{
+ mr_table_t *tentry;
+ sigset_t set, oset;
+ int blocked_sigs;
+
+ /* Block all signals in case we are executed from a signal handler. */
+ blocked_sigs = 0;
+#ifdef SHELL
+ if (running_trap || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
+#endif
+ {
+ _malloc_block_signals (&set, &oset);
+ blocked_sigs = 1;
+ }
+
+ mlocation_register_alloc (file, line);
+
+ tentry = find_entry (mem, FIND_ALLOC);
+
+ if (tentry == 0)
+ {
+ /* oops. table is full. punt. */
+ fprintf (stderr, _("register_alloc: alloc table is full with FIND_ALLOC?\n"));
+ if (blocked_sigs)
+ _malloc_unblock_signals (&set, &oset);
+ return;
+ }
+
+ if (tentry->flags & MT_ALLOC)
+ {
+ /* oops. bad bookkeeping. ignore for now */
+ fprintf (stderr, _("register_alloc: %p already in table as allocated?\n"), mem);
+ }
+
+ tentry->mem = mem;
+ tentry->size = size;
+ tentry->func = tag;
+ tentry->flags = MT_ALLOC;
+ tentry->file = file;
+ tentry->line = line;
+ tentry->nalloc++;
+
+ if (tentry != &mem_overflow)
+ table_allocated++;
+
+ if (blocked_sigs)
+ _malloc_unblock_signals (&set, &oset);
+}
+
+void
+mregister_free (mem, size, file, line)
+ PTR_T mem;
+ int size;
+ const char *file;
+ int line;
+{
+ mr_table_t *tentry;
+ sigset_t set, oset;
+ int blocked_sigs;
+
+ /* Block all signals in case we are executed from a signal handler. */
+ blocked_sigs = 0;
+#ifdef SHELL
+ if (running_trap || signal_is_trapped (SIGINT) || signal_is_trapped (SIGCHLD))
+#endif
+ {
+ _malloc_block_signals (&set, &oset);
+ blocked_sigs = 1;
+ }
+
+ tentry = find_entry (mem, FIND_EXIST);
+ if (tentry == 0)
+ {
+ /* oops. not found. */
+#if 0
+ fprintf (stderr, "register_free: %p not in allocation table?\n", mem);
+#endif
+ if (blocked_sigs)
+ _malloc_unblock_signals (&set, &oset);
+ return;
+ }
+ if (tentry->flags & MT_FREE)
+ {
+ /* oops. bad bookkeeping. ignore for now */
+ fprintf (stderr, _("register_free: %p already in table as free?\n"), mem);
+ }
+
+ tentry->flags = MT_FREE;
+ tentry->func = "free";
+ tentry->file = file;
+ tentry->line = line;
+ tentry->nfree++;
+
+ if (tentry != &mem_overflow)
+ table_allocated--;
+
+ if (blocked_sigs)
+ _malloc_unblock_signals (&set, &oset);
+}
+
+/* If we ever add more flags, this will require changes. */
+static char *
+_entry_flags(x)
+ int x;
+{
+ if (x & MT_FREE)
+ return "free";
+ else if (x & MT_ALLOC)
+ return "allocated";
+ else
+ return "undetermined?";
+}
+
+static void
+_register_dump_table(fp)
+ FILE *fp;
+{
+ register int i;
+ mr_table_t entry;
+
+ for (i = 0; i < REG_TABLE_SIZE; i++)
+ {
+ entry = mem_table[i];
+ if (entry.mem)
+ fprintf (fp, "%s[%d] %p:%zu:%s:%s:%s:%d:%d:%d\n",
+ (i == table_bucket_index) ? "*" : "",
+ i,
+ entry.mem, entry.size,
+ _entry_flags(entry.flags),
+ entry.func ? entry.func : "unknown",
+ entry.file ? entry.file : "unknown",
+ entry.line,
+ entry.nalloc, entry.nfree);
+ }
+}
+
+void
+mregister_dump_table()
+{
+ _register_dump_table (stderr);
+}
+
+void
+mregister_table_init ()
+{
+ memset (mem_table, 0, sizeof(mr_table_t) * REG_TABLE_SIZE);
+ memset (&mem_overflow, 0, sizeof (mr_table_t));
+ table_count = 0;
+}
+
+/* Simple for now */
+
+static ma_table_t *
+find_location_entry (file, line)
+ const char *file;
+ int line;
+{
+ register ma_table_t *tp, *endp;
+
+ endp = mlocation_table + location_table_count;
+  for (tp = mlocation_table; tp < endp; tp++)
+ {
+ if (tp->line == line && STREQ (file, tp->file))
+ return tp;
+ }
+ return (ma_table_t *)NULL;
+}
+
+void
+mlocation_register_alloc (file, line)
+ const char *file;
+ int line;
+{
+ ma_table_t *lentry;
+ const char *nfile;
+
+ if (file == 0)
+ {
+ mlocation_table[0].nalloc++;
+ return;
+ }
+
+ nfile = strrchr (file, '/');
+ if (nfile)
+ nfile++;
+ else
+ nfile = file;
+
+ lentry = find_location_entry (nfile, line);
+ if (lentry == 0)
+ {
+ location_table_index++;
+ if (location_table_index == REG_TABLE_SIZE)
+ location_table_index = 1; /* slot 0 reserved */
+ lentry = mlocation_table + location_table_index;
+ lentry->file = nfile;
+ lentry->line = line;
+ lentry->nalloc = 1;
+ if (location_table_count < REG_TABLE_SIZE)
+ location_table_count++; /* clamp at REG_TABLE_SIZE for now */
+ }
+ else
+ lentry->nalloc++;
+}
+
+static void
+_location_dump_table (fp)
+ FILE *fp;
+{
+ register ma_table_t *tp, *endp;
+
+ endp = mlocation_table + location_table_count;
+ for (tp = mlocation_table; tp < endp; tp++)
+ fprintf (fp, "%s:%d\t%d\n", tp->file ? tp->file : "unknown",
+ tp->line ? tp->line : 0,
+ tp->nalloc);
+}
+
+void
+mlocation_dump_table ()
+{
+ _location_dump_table (stderr);
+}
+
+#define LOCROOT "/var/tmp/maltrace/locations."
+
+void
+mlocation_write_table ()
+{
+ FILE *fp;
+ char defname[sizeof (LOCROOT) + 64];
+
+ fp = _imalloc_fopen ((char *)NULL, (char *)NULL, LOCROOT, defname, sizeof (defname));
+ if (fp == 0)
+ return; /* XXX - no error message yet */
+ _location_dump_table (fp);
+ fclose (fp);
+}
+
+void
+mlocation_table_init ()
+{
+ memset (mlocation_table, 0, sizeof (ma_table_t) * REG_TABLE_SIZE);
+ mlocation_table[0].file = ""; /* reserve slot 0 for unknown locations */
+ mlocation_table[0].line = 0;
+ mlocation_table[0].nalloc = 0;
+ location_table_count = 1;
+}
+
+#endif /* MALLOC_REGISTER */
+
+int
+malloc_set_register(n)
+ int n;
+{
+ int old;
+
+ old = malloc_register;
+ malloc_register = n;
+ return old;
+}
diff --git a/lib/malloc/table.h b/lib/malloc/table.h
new file mode 100644
index 0000000..92866cf
--- /dev/null
+++ b/lib/malloc/table.h
@@ -0,0 +1,116 @@
+/* table.h - definitions for tables for keeping track of allocated memory */
+
+/* Copyright (C) 2001-2020 Free Software Foundation, Inc.
+
+ This file is part of GNU Bash, the Bourne-Again SHell.
+
+ Bash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Bash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Bash. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _MTABLE_H
+#define _MTABLE_H
+
+#include "imalloc.h"
+
+#ifdef MALLOC_REGISTER
+
+/* values for flags byte. */
+#define MT_ALLOC 0x01
+#define MT_FREE 0x02
+
+/*
+ * Memory table entry.
+ *
+ * MEM is the address of the allocated pointer.
+ * SIZE is the requested allocation size.
+ * FLAGS includes either MT_ALLOC (MEM is allocated) or MT_FREE (MEM is
+ * not allocated). Other flags later.
+ * FUNC is set to the name of the function doing the allocation (from the
+ *	`tag' argument to register_alloc()).
+ * FILE and LINE are the filename and line number of the last allocation
+ * and free (depending on STATUS) of MEM.
+ * NALLOC and NFREE are incremented on each allocation that returns MEM or
+ * each free of MEM, respectively (way to keep track of memory reuse
+ * and how well the free lists are working).
+ *
+ */
+typedef struct mr_table {
+ PTR_T mem;
+ size_t size;
+ char flags;
+ const char *func;
+ const char *file;
+ int line;
+ int nalloc, nfree;
+} mr_table_t;
+
+#define REG_TABLE_SIZE 8192
+
+extern mr_table_t *mr_table_entry PARAMS((PTR_T));
+extern void mregister_alloc PARAMS((const char *, PTR_T, size_t, const char *, int));
+extern void mregister_free PARAMS((PTR_T, int, const char *, int));
+extern void mregister_describe_mem ();
+extern void mregister_dump_table PARAMS((void));
+extern void mregister_table_init PARAMS((void));
+
+typedef struct ma_table {
+ const char *file;
+ int line;
+ int nalloc;
+} ma_table_t;
+
+extern void mlocation_register_alloc PARAMS((const char *, int));
+extern void mlocation_table_init PARAMS((void));
+extern void mlocation_dump_table PARAMS((void));
+extern void mlocation_write_table PARAMS((void));
+
+/* NOTE: HASH_MIX taken from dmalloc (http://dmalloc.com) */
+
+/*
+ * void HASH_MIX
+ *
+ * DESCRIPTION:
+ *
+ * Mix 3 32-bit values reversibly.  For every delta with one or two
+ * bits set, and the deltas of all three high bits or all three low
+ * bits, whether the original value of a,b,c is almost all zero or is
+ * uniformly distributed:
+ *
+ * If HASH_MIX() is run forward or backward, at least 32 bits in a,b,c
+ * have at least 1/4 probability of changing.  If HASH_MIX() is run
+ * forward, every bit of c will change between 1/3 and 2/3 of the
+ * time.  (Well, 22/100 and 78/100 for some 2-bit deltas.)
+ *
+ * HASH_MIX() takes 36 machine instructions, but only 18 cycles on a
+ * superscalar machine (like a Pentium or a Sparc).  No faster mixer
+ * seems to work; that's the result of my brute-force search.  There
+ * were about 2^68 hashes to choose from.  I only tested about a
+ * billion of those.
+ */
+#define HASH_MIX(a, b, c) \
+ do { \
+ a -= b; a -= c; a ^= (c >> 13); \
+ b -= c; b -= a; b ^= (a << 8); \
+ c -= a; c -= b; c ^= (b >> 13); \
+ a -= b; a -= c; a ^= (c >> 12); \
+ b -= c; b -= a; b ^= (a << 16); \
+ c -= a; c -= b; c ^= (b >> 5); \
+ a -= b; a -= c; a ^= (c >> 3); \
+ b -= c; b -= a; b ^= (a << 10); \
+ c -= a; c -= b; c ^= (b >> 15); \
+ } while(0)
+
+#endif /* MALLOC_REGISTER */
+
+#endif /* _MTABLE_H */
diff --git a/lib/malloc/trace.c b/lib/malloc/trace.c
new file mode 100644
index 0000000..391ca9d
--- /dev/null
+++ b/lib/malloc/trace.c
@@ -0,0 +1,126 @@
+/* trace.c - tracing functions for malloc */
+
+/* Copyright (C) 2001-2020 Free Software Foundation, Inc.
+
+ This file is part of GNU Bash, the Bourne Again SHell.
+
+ Bash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Bash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Bash. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include <stdio.h>
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif
+
+#include "imalloc.h"
+
+extern int malloc_trace;
+
+static int _mtrace_verbose = 0;
+
+#ifdef MALLOC_TRACE
+
+extern FILE *_imalloc_fopen PARAMS((char *, char *, char *, char *, size_t));
+
+FILE *_mtrace_fp = NULL;
+extern char _malloc_trace_buckets[];
+
+void
+mtrace_alloc (tag, mem, size, file, line)
+ const char *tag;
+ PTR_T mem;
+ size_t size;
+ const char *file;
+ int line;
+{
+ if (_mtrace_fp == NULL)
+ _mtrace_fp = stderr;
+
+ if (_mtrace_verbose)
+ fprintf (_mtrace_fp, "alloc: %s: %p (%zu bytes) from '%s:%d'\n",
+ tag, mem, size, file ? file : "unknown", line);
+ else
+ fprintf (_mtrace_fp, "alloc:%p:%zu:%s:%d\n",
+ mem, size, file ? file : "unknown", line);
+}
+
+void
+mtrace_free (mem, size, file, line)
+ PTR_T mem;
+ int size;
+ const char *file;
+ int line;
+{
+ if (_mtrace_fp == NULL)
+ _mtrace_fp = stderr;
+
+ if (_mtrace_verbose)
+ fprintf (_mtrace_fp, "free: %p (%d bytes) from '%s:%d'\n",
+ mem, size, file ? file : "unknown", line);
+ else
+ fprintf (_mtrace_fp, "free:%p:%d:%s:%d\n",
+ mem, size, file ? file : "unknown", line);
+}
+#endif /* MALLOC_TRACE */
+
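+/* Enable (N != 0) or disable malloc tracing; returns the previous setting.
+   N > 1 selects the verbose trace format, N == 1 the compact colon-separated
+   format consumed by the xleaktrace script. */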
+int
+malloc_set_trace (n)
+ int n;
+{
+ int old;
+
+ old = malloc_trace;
+ malloc_trace = n;
+ _mtrace_verbose = (n > 1);
+ return old;
+}
+
+void
+malloc_set_tracefp (fp)
+ FILE *fp;
+{
+#ifdef MALLOC_TRACE
+ _mtrace_fp = fp ? fp : stderr;
+#endif
+}
+
+void
+malloc_trace_bin (n)
+ int n;
+{
+#ifdef MALLOC_TRACE
+ _malloc_trace_buckets[n] = 1;
+#endif
+}
+
+#define TRACEROOT "/var/tmp/maltrace/trace."
+
+void
+malloc_set_tracefn (s, fn)
+ char *s;
+ char *fn;
+{
+#ifdef MALLOC_TRACE
+ FILE *fp;
+ char defname[sizeof (TRACEROOT) + 64];
+
+ fp = _imalloc_fopen (s, fn, TRACEROOT, defname, sizeof (defname));
+ if (fp)
+ malloc_set_tracefp (fp);
+#endif
+}
diff --git a/lib/malloc/watch.c b/lib/malloc/watch.c
new file mode 100644
index 0000000..00c8a82
--- /dev/null
+++ b/lib/malloc/watch.c
@@ -0,0 +1,151 @@
+/* watch.c - watchpoint functions for malloc */
+
+/* Copyright (C) 2001-2003 Free Software Foundation, Inc.
+
+ This file is part of GNU Bash, the Bourne Again SHell.
+
+ Bash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Bash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Bash. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifdef HAVE_CONFIG_H
+# include <config.h>
+#endif
+
+#include <stdio.h>
+
+#include "imalloc.h"
+
+#ifdef MALLOC_WATCH
+#include "watch.h"
+
+#define WATCH_MAX 32
+
+int _malloc_nwatch;
+static PTR_T _malloc_watch_list[WATCH_MAX];
+
+static void
+watch_warn (addr, file, line, type, data)
+ PTR_T addr;
+ const char *file;
+ int line, type;
+ unsigned long data;
+{
+ char *tag;
+
+ if (type == W_ALLOC)
+ tag = "allocated";
+ else if (type == W_FREE)
+ tag = "freed";
+ else if (type == W_REALLOC)
+ tag = "requesting resize";
+ else if (type == W_RESIZED)
+ tag = "just resized";
+ else
+ tag = "bug: unknown operation";
+
+ fprintf (stderr, "malloc: watch alert: %p %s ", addr, tag);
+ if (data != (unsigned long)-1)
+ fprintf (stderr, "(size %lu) ", data);
+ fprintf (stderr, "from '%s:%d'\n", file ? file : "unknown", line);
+}
+
+void
+_malloc_ckwatch (addr, file, line, type, data)
+ PTR_T addr;
+ const char *file;
+ int line, type;
+ unsigned long data;
+{
+ register int i;
+
+ for (i = _malloc_nwatch - 1; i >= 0; i--)
+ {
+ if (_malloc_watch_list[i] == addr)
+ {
+ watch_warn (addr, file, line, type, data);
+ return;
+ }
+ }
+}
+#endif /* MALLOC_WATCH */
+
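+/* Begin watching ADDR.  If the watch list is already full, the oldest
+   watchpoint is discarded to make room and its address is returned;
+   otherwise NULL is returned.  Subsequent allocations, frees, and reallocs
+   involving a watched address are reported via _malloc_ckwatch(). */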
+PTR_T
+malloc_watch (addr)
+ PTR_T addr;
+{
+ register int i;
+ PTR_T ret;
+
+ if (addr == 0)
+ return addr;
+ ret = (PTR_T)0;
+
+#ifdef MALLOC_WATCH
+ for (i = _malloc_nwatch - 1; i >= 0; i--)
+ {
+ if (_malloc_watch_list[i] == addr)
+ break;
+ }
+ if (i < 0)
+ {
+ if (_malloc_nwatch == WATCH_MAX) /* full, take out first */
+ {
+ ret = _malloc_watch_list[0];
+ _malloc_nwatch--;
+ for (i = 0; i < _malloc_nwatch; i++)
+ _malloc_watch_list[i] = _malloc_watch_list[i+1];
+ }
+ _malloc_watch_list[_malloc_nwatch++] = addr;
+ }
+#endif
+
+ return ret;
+}
+
+/* Remove a watchpoint set on ADDR. If ADDR is NULL, remove all
+ watchpoints. Returns ADDR if everything went OK, NULL if ADDR was
+ not being watched. */
+PTR_T
+malloc_unwatch (addr)
+ PTR_T addr;
+{
+#ifdef MALLOC_WATCH
+ register int i;
+
+ if (addr == 0)
+ {
+ for (i = 0; i < _malloc_nwatch; i++)
+ _malloc_watch_list[i] = (PTR_T)0;
+ _malloc_nwatch = 0;
+ return ((PTR_T)0);
+ }
+ else
+ {
+ for (i = 0; i < _malloc_nwatch; i++)
+ {
+ if (_malloc_watch_list[i] == addr)
+ break;
+ }
+ if (i == _malloc_nwatch)
+ return ((PTR_T)0); /* not found */
+ /* shuffle everything from i+1 to end down 1 */
+ _malloc_nwatch--;
+ for ( ; i < _malloc_nwatch; i++)
+ _malloc_watch_list[i] = _malloc_watch_list[i+1];
+ return addr;
+ }
+#else
+ return ((PTR_T)0);
+#endif
+}
diff --git a/lib/malloc/watch.h b/lib/malloc/watch.h
new file mode 100644
index 0000000..2a0f497
--- /dev/null
+++ b/lib/malloc/watch.h
@@ -0,0 +1,41 @@
+/* watch.h - definitions for tables for keeping track of allocated memory */
+
+/* Copyright (C) 2001-2020 Free Software Foundation, Inc.
+
+ This file is part of GNU Bash, the Bourne-Again SHell.
+
+ Bash is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Bash is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Bash. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef _MWATCH_H
+#define _MWATCH_H
+
+#include "imalloc.h"
+
+#ifdef MALLOC_WATCH
+
+/* `Events' for watchpoints */
+
+#define W_ALLOC 0x01
+#define W_FREE 0x02
+#define W_REALLOC 0x04
+#define W_RESIZED 0x08
+
+extern int _malloc_nwatch;
+
+extern void _malloc_ckwatch PARAMS((PTR_T, const char *, int, int, unsigned long));
+
+#endif /* MALLOC_WATCH */
+
+#endif /* _MWATCH_H */
diff --git a/lib/malloc/x386-alloca.s b/lib/malloc/x386-alloca.s
new file mode 100644
index 0000000..112d33c
--- /dev/null
+++ b/lib/malloc/x386-alloca.s
@@ -0,0 +1,63 @@
+;; alloca386.s 1.2
+;; GNU-compatible stack allocation function for Xenix/386.
+;; Written by Chip Salzenberg at ComDev.
+;; Last modified 90/01/11
+;;> Is your alloca clearly better than the one in i386-alloca.s? I haven't
+;;> looked at either.
+;;
+;;They're different because Xenix/386 has a different assembler. SCO
+;;Xenix has the Microsoft C compiler and the Microsoft macro assembler,
+;;called "masm". MASM's assembler syntax is quite different from AT&T's
+;;in all sorts of ways. Xenix people can't use the AT&T version.
+;;--
+;;Chip Salzenberg at ComDev/TCT <chip@tct.uucp>, <uunet!ateng!tct!chip>
+
+ TITLE $alloca386
+
+ .386
+DGROUP GROUP CONST, _BSS, _DATA
+_DATA SEGMENT DWORD USE32 PUBLIC 'DATA'
+_DATA ENDS
+_BSS SEGMENT DWORD USE32 PUBLIC 'BSS'
+_BSS ENDS
+CONST SEGMENT DWORD USE32 PUBLIC 'CONST'
+CONST ENDS
+_TEXT SEGMENT DWORD USE32 PUBLIC 'CODE'
+ ASSUME CS: _TEXT, DS: DGROUP, SS: DGROUP, ES: DGROUP
+
+ PUBLIC _alloca
+_alloca PROC NEAR
+
+; Get argument.
+ pop edx ; edx -> return address
+ pop eax ; eax = amount to allocate
+
+; Validate allocation amount.
+ add eax,3
+ and eax,not 3
+ cmp eax,0
+ jg aa_size_ok
+ mov eax,4
+aa_size_ok:
+
+; Allocate stack space.
+ mov ecx,esp ; ecx -> old stack pointer
+ sub esp,eax ; perform allocation
+ mov eax,esp ; eax -> new stack pointer
+
+; Copy the three saved register variables from old stack top to new stack top.
+; They may not be there. So we waste twelve bytes. Big fat hairy deal.
+ push DWORD PTR 8[ecx]
+ push DWORD PTR 4[ecx]
+ push DWORD PTR 0[ecx]
+
+; Push something so the caller can pop it off.
+ push eax
+
+; Return to caller.
+ jmp edx
+
+_alloca ENDP
+
+_TEXT ENDS
+ END
diff --git a/lib/malloc/xleaktrace b/lib/malloc/xleaktrace
new file mode 100755
index 0000000..d7e3cd5
--- /dev/null
+++ b/lib/malloc/xleaktrace
@@ -0,0 +1,47 @@
+#! /usr/bin/awk -f
+#
+# xleaktrace - print unfreed memory using input generated by compact malloc
+# tracing (malloc_set_trace(1))
+#
+# NOTE: we ignore `realloc' tags because they're just extra information
+#
+# Copyright (c) 2001 Chester Ramey
+# Permission is hereby granted to deal in this Software without restriction.
+# THIS SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND.
+#
+# Chet Ramey
+# chet@po.cwru.edu
+#
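+# The compact trace records written by mtrace_alloc/mtrace_free (trace.c)
+# are colon-separated: tag, address, size, file, line.  For example
+# (illustrative values):
+#
+#	alloc:0x804b008:24:subst.c:512
+#	free:0x804b008:24:subst.c:530
+#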
+BEGIN {
+ FS=":";
+}
+
+$1 == "alloc" {
+ alloc[$2] = 1;
+
+ size[$2] = $3;
+ file[$2] = $4;
+ line[$2] = $5;
+
+# printf "allocated: %s %d %d %s %d\n", $2, alloc[$2], size[$2], file[$2], line[$2];
+ }
+
+$1 == "free" {
+ if ($2 in alloc) {
+ alloc[$2] = 0;
+# printf "freed: %s %d\n", $2, alloc[$2];
+ } else
+ printf "freeing unallocated pointer: %s\n", $2;
+
+ }
+
+END {
+ printf "unfreed memory\n";
+ for (ptr in alloc) {
+ if (alloc[ptr] == 1) {
+ printf "%s (%d) from %s:%d\n", ptr, size[ptr], file[ptr], line[ptr];
+ }
+ }
+}
+
+
diff --git a/lib/malloc/xmalloc.c b/lib/malloc/xmalloc.c
new file mode 100644
index 0000000..f6dec67
--- /dev/null
+++ b/lib/malloc/xmalloc.c
@@ -0,0 +1,94 @@
+/* xmalloc.c -- safe versions of malloc and realloc */
+
+/* Copyright (C) 1991-2003 Free Software Foundation, Inc.
+
+ This file is part of GNU Readline, a library for reading lines
+ of text with interactive input and history editing.
+
+ Readline is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ Readline is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with Readline. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#if defined (HAVE_CONFIG_H)
+#include <config.h>
+#endif
+
+#include <stdio.h>
+
+#if defined (HAVE_STDLIB_H)
+# include <stdlib.h>
+#else
+# include "ansi_stdlib.h"
+#endif /* HAVE_STDLIB_H */
+
+/* Generic pointer type. */
+#ifndef PTR_T
+
+#if defined (__STDC__)
+# define PTR_T void *
+#else
+# define PTR_T char *
+#endif
+
+#endif /* PTR_T */
+
+/* **************************************************************** */
+/* */
+/* Memory Allocation and Deallocation. */
+/* */
+/* **************************************************************** */
+
+static void
+memory_error_and_abort (fname)
+ char *fname;
+{
+ fprintf (stderr, "%s: out of virtual memory\n", fname);
+ exit (2);
+}
+
+/* Return a pointer to free()able block of memory large enough
+ to hold BYTES number of bytes. If the memory cannot be allocated,
+ print an error message and abort. */
+PTR_T
+xmalloc (bytes)
+ size_t bytes;
+{
+ PTR_T temp;
+
+ temp = malloc (bytes);
+ if (temp == 0)
+ memory_error_and_abort ("xmalloc");
+ return (temp);
+}
+
+PTR_T
+xrealloc (pointer, bytes)
+ PTR_T pointer;
+ size_t bytes;
+{
+ PTR_T temp;
+
+ temp = pointer ? realloc (pointer, bytes) : malloc (bytes);
+
+ if (temp == 0)
+ memory_error_and_abort ("xrealloc");
+ return (temp);
+}
+
+void
+xfree (string)
+ PTR_T string;
+{
+ if (string)
+ free (string);
+}